python_code (stringlengths 0-1.02M) | repo_name (stringlengths 9-48) | file_path (stringlengths 5-114) |
---|---|---|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration file for the Sphinx documentation builder."""
import doctest
import inspect
import os
import sys
# -- Path setup --------------------------------------------------------------
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
sys.path.append(os.path.abspath('ext'))
import sphinxcontrib.katex as katex # pylint: disable=g-import-not-at-top
# -- Project information -----------------------------------------------------
project = 'MuJoCo'
copyright = 'DeepMind Technologies Limited' # pylint: disable=redefined-builtin
author = 'DeepMind'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.katex',
'sphinx_reredirects',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'includes/*']
redirects = {
# index.rst just contains the table of contents definition.
'index': 'overview.html',
}
# -- Options for autodoc -----------------------------------------------------
autodoc_default_options = {
'member-order': 'bysource',
'special-members': True,
'exclude-members': '__repr__, __str__, __weakref__',
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [
'_static',
'css',
'favicons',
]
html_css_files = [
'theme_overrides.css',
]
html_favicon = 'favicons/favicon-32x32.png'
# -- Options for katex ------------------------------------------------------
# See: https://sphinxcontrib-katex.readthedocs.io/en/0.4.1/macros.html
latex_macros = r"""
\def \d #1{\operatorname{#1}}
"""
# Translate LaTeX macros to KaTeX and add to options for HTML builder
katex_macros = katex.latex_defs_to_katex_macros(latex_macros)
katex_options = 'macros: {' + katex_macros + '}'
# Add LaTeX macros for LATEX builder
latex_elements = {'preamble': latex_macros}
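# Note (added for clarity, not part of the upstream config): the single `\d` macro defined in
# `latex_macros` is shared by both output paths -- `katex_options` hands the translated macro to
# KaTeX for the HTML builder, while `latex_elements['preamble']` injects the same definition into
# the LaTeX builder, so `\d{...}` renders as `\operatorname{...}` in both.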
| mujoco-main | doc/conf.py |
from setuptools import setup, find_packages
setup(
name = 'muse-maskgit-pytorch',
packages = find_packages(exclude=[]),
version = '0.2.4',
license='MIT',
description = 'MUSE - Text-to-Image Generation via Masked Generative Transformers, in Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/muse-maskgit-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'text-to-image'
],
install_requires=[
'accelerate',
'beartype',
'einops>=0.6',
'ema-pytorch>=0.2.2',
'memory-efficient-attention-pytorch>=0.1.4',
'pillow',
'sentencepiece',
'torch>=1.6',
'transformers',
'torchvision',
'tqdm',
'vector-quantize-pytorch>=0.10.14'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
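# Usage note (illustrative, not part of setup.py): the package installs under the name declared
# above and exposes its public classes from the top-level module, e.g.
#   pip install muse-maskgit-pytorch
#   from muse_maskgit_pytorch import VQGanVAE, MaskGitTransformer, MaskGit, Muse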
| muse-maskgit-pytorch-main | setup.py |
import logging
import torch
import transformers
from transformers import T5Tokenizer, T5EncoderModel, T5Config
from beartype import beartype
from typing import List, Union
transformers.logging.set_verbosity_error()
def exists(val):
return val is not None
# config
MAX_LENGTH = 256
DEFAULT_T5_NAME = 'google/t5-v1_1-base'
T5_CONFIGS = {}
# singleton globals
def get_tokenizer(name):
tokenizer = T5Tokenizer.from_pretrained(name)
return tokenizer
def get_model(name):
model = T5EncoderModel.from_pretrained(name)
return model
def get_model_and_tokenizer(name):
global T5_CONFIGS
if name not in T5_CONFIGS:
T5_CONFIGS[name] = dict()
if "model" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["model"] = get_model(name)
if "tokenizer" not in T5_CONFIGS[name]:
T5_CONFIGS[name]["tokenizer"] = get_tokenizer(name)
return T5_CONFIGS[name]['model'], T5_CONFIGS[name]['tokenizer']
def get_encoded_dim(name):
if name not in T5_CONFIGS:
# avoids loading the model if we only want to get the dim
config = T5Config.from_pretrained(name)
T5_CONFIGS[name] = dict(config=config)
elif "config" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["config"]
elif "model" in T5_CONFIGS[name]:
config = T5_CONFIGS[name]["model"].config
else:
assert False
return config.d_model
# encoding text
@beartype
def t5_encode_text(
texts: Union[str, List[str]],
name = DEFAULT_T5_NAME,
output_device = None
):
if isinstance(texts, str):
texts = [texts]
t5, tokenizer = get_model_and_tokenizer(name)
if torch.cuda.is_available():
t5 = t5.cuda()
device = next(t5.parameters()).device
encoded = tokenizer.batch_encode_plus(
texts,
return_tensors = "pt",
padding = 'longest',
max_length = MAX_LENGTH,
truncation = True
)
input_ids = encoded.input_ids.to(device)
attn_mask = encoded.attention_mask.to(device)
t5.eval()
with torch.no_grad():
output = t5(input_ids = input_ids, attention_mask = attn_mask)
encoded_text = output.last_hidden_state.detach()
attn_mask = attn_mask.bool()
    encoded_text = encoded_text.masked_fill(~attn_mask[..., None], 0.)
if not exists(output_device):
return encoded_text
    encoded_text = encoded_text.to(output_device)
return encoded_text
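# Minimal usage sketch (illustrative, not part of the module): encode a batch of prompts with
# the default T5 encoder; the embedding width can be queried without loading the weights.
#
#   texts = ['a photo of a dog', 'an oil painting of a castle']
#   embeds = t5_encode_text(texts)            # (2, seq_len, get_encoded_dim(DEFAULT_T5_NAME))
#   dim = get_encoded_dim(DEFAULT_T5_NAME)    # reads only the T5 config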
| muse-maskgit-pytorch-main | muse_maskgit_pytorch/t5.py |
import math
from random import random
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
import pathlib
from pathlib import Path
import torchvision.transforms as T
from typing import Callable, Optional, List
from einops import rearrange, repeat
from beartype import beartype
from muse_maskgit_pytorch.vqgan_vae import VQGanVAE
from muse_maskgit_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME
from muse_maskgit_pytorch.attend import Attend
from tqdm.auto import tqdm
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def l2norm(t):
return F.normalize(t, dim = -1)
# tensor helpers
def get_mask_subset_prob(mask, prob, min_mask = 0):
batch, seq, device = *mask.shape, mask.device
num_to_mask = (mask.sum(dim = -1, keepdim = True) * prob).clamp(min = min_mask)
logits = torch.rand((batch, seq), device = device)
logits = logits.masked_fill(~mask, -1)
randperm = logits.argsort(dim = -1).float()
num_padding = (~mask).sum(dim = -1, keepdim = True)
randperm -= num_padding
subset_mask = randperm < num_to_mask
subset_mask.masked_fill_(~mask, False)
return subset_mask
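# Example (illustrative, not part of the module): for a row of `mask` with 8 valid (True)
# positions and prob = 0.25, exactly 2 of those positions come back True; padding (False)
# positions are never selected.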
# classes
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer('beta', torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
class GEGLU(nn.Module):
""" https://arxiv.org/abs/2002.05202 """
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return gate * F.gelu(x)
def FeedForward(dim, mult = 4):
""" https://arxiv.org/abs/2110.09456 """
inner_dim = int(dim * mult * 2 / 3)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, inner_dim * 2, bias = False),
GEGLU(),
LayerNorm(inner_dim),
nn.Linear(inner_dim, dim, bias = False)
)
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
cross_attend = False,
scale = 8,
flash = True,
dropout = 0.
):
super().__init__()
self.scale = scale
self.heads = heads
inner_dim = dim_head * heads
self.cross_attend = cross_attend
self.norm = LayerNorm(dim)
self.attend = Attend(
flash = flash,
dropout = dropout,
scale = scale
)
self.null_kv = nn.Parameter(torch.randn(2, heads, 1, dim_head))
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(
self,
x,
context = None,
context_mask = None
):
assert not (exists(context) ^ self.cross_attend)
n = x.shape[-2]
h, is_cross_attn = self.heads, exists(context)
x = self.norm(x)
kv_input = context if self.cross_attend else x
q, k, v = (self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
nk, nv = self.null_kv
nk, nv = map(lambda t: repeat(t, 'h 1 d -> b h 1 d', b = x.shape[0]), (nk, nv))
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
if exists(context_mask):
context_mask = repeat(context_mask, 'b j -> b h i j', h = h, i = n)
context_mask = F.pad(context_mask, (1, 0), value = True)
out = self.attend(q, k, v, mask = context_mask)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class TransformerBlocks(nn.Module):
def __init__(
self,
*,
dim,
depth,
dim_head = 64,
heads = 8,
ff_mult = 4,
flash = True
):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads, flash = flash),
Attention(dim = dim, dim_head = dim_head, heads = heads, cross_attend = True, flash = flash),
FeedForward(dim = dim, mult = ff_mult)
]))
self.norm = LayerNorm(dim)
def forward(self, x, context = None, context_mask = None):
for attn, cross_attn, ff in self.layers:
x = attn(x) + x
x = cross_attn(x, context = context, context_mask = context_mask) + x
x = ff(x) + x
return self.norm(x)
# transformer - it's all we need
class Transformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
seq_len,
dim_out = None,
t5_name = DEFAULT_T5_NAME,
self_cond = False,
add_mask_id = False,
**kwargs
):
super().__init__()
self.dim = dim
self.mask_id = num_tokens if add_mask_id else None
self.num_tokens = num_tokens
self.token_emb = nn.Embedding(num_tokens + int(add_mask_id), dim)
self.pos_emb = nn.Embedding(seq_len, dim)
self.seq_len = seq_len
self.transformer_blocks = TransformerBlocks(dim = dim, **kwargs)
self.norm = LayerNorm(dim)
self.dim_out = default(dim_out, num_tokens)
self.to_logits = nn.Linear(dim, self.dim_out, bias = False)
# text conditioning
self.encode_text = partial(t5_encode_text, name = t5_name)
text_embed_dim = get_encoded_dim(t5_name)
self.text_embed_proj = nn.Linear(text_embed_dim, dim, bias = False) if text_embed_dim != dim else nn.Identity()
# optional self conditioning
self.self_cond = self_cond
self.self_cond_to_init_embed = FeedForward(dim)
def forward_with_cond_scale(
self,
*args,
cond_scale = 3.,
return_embed = False,
**kwargs
):
if cond_scale == 1:
return self.forward(*args, return_embed = return_embed, cond_drop_prob = 0., **kwargs)
logits, embed = self.forward(*args, return_embed = True, cond_drop_prob = 0., **kwargs)
null_logits = self.forward(*args, cond_drop_prob = 1., **kwargs)
scaled_logits = null_logits + (logits - null_logits) * cond_scale
if return_embed:
return scaled_logits, embed
return scaled_logits
    def forward_with_neg_prompt(
        self,
        *args,
        text_embeds: torch.Tensor,
        neg_text_embeds: torch.Tensor,
        cond_scale = 3.,
        return_embed = False,
        **kwargs
    ):
        neg_logits = self.forward(*args, text_embeds = neg_text_embeds, cond_drop_prob = 0., **kwargs)
        pos_logits, embed = self.forward(*args, return_embed = True, text_embeds = text_embeds, cond_drop_prob = 0., **kwargs)
        logits = neg_logits + (pos_logits - neg_logits) * cond_scale
        if return_embed:
            return logits, embed
        return logits
def forward(
self,
x,
return_embed = False,
return_logits = False,
labels = None,
ignore_index = 0,
self_cond_embed = None,
cond_drop_prob = 0.,
conditioning_token_ids: Optional[torch.Tensor] = None,
texts: Optional[List[str]] = None,
text_embeds: Optional[torch.Tensor] = None
):
device, b, n = x.device, *x.shape
assert n <= self.seq_len
# prepare texts
assert exists(texts) ^ exists(text_embeds)
if exists(texts):
text_embeds = self.encode_text(texts)
context = self.text_embed_proj(text_embeds)
context_mask = (text_embeds != 0).any(dim = -1)
# classifier free guidance
if self.training and cond_drop_prob > 0.:
mask = prob_mask_like((b, 1), 1. - cond_drop_prob, device)
context_mask = context_mask & mask
# concat conditioning image token ids if needed
if exists(conditioning_token_ids):
conditioning_token_ids = rearrange(conditioning_token_ids, 'b ... -> b (...)')
cond_token_emb = self.token_emb(conditioning_token_ids)
context = torch.cat((context, cond_token_emb), dim = -2)
context_mask = F.pad(context_mask, (0, conditioning_token_ids.shape[-1]), value = True)
# embed tokens
x = self.token_emb(x)
x = x + self.pos_emb(torch.arange(n, device = device))
if self.self_cond:
if not exists(self_cond_embed):
self_cond_embed = torch.zeros_like(x)
x = x + self.self_cond_to_init_embed(self_cond_embed)
embed = self.transformer_blocks(x, context = context, context_mask = context_mask)
logits = self.to_logits(embed)
if return_embed:
return logits, embed
if not exists(labels):
return logits
if self.dim_out == 1:
loss = F.binary_cross_entropy_with_logits(rearrange(logits, '... 1 -> ...'), labels)
else:
loss = F.cross_entropy(rearrange(logits, 'b n c -> b c n'), labels, ignore_index = ignore_index)
if not return_logits:
return loss
return loss, logits
# self critic wrapper
class SelfCritic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.to_pred = nn.Linear(net.dim, 1)
def forward_with_cond_scale(self, x, *args, **kwargs):
_, embeds = self.net.forward_with_cond_scale(x, *args, return_embed = True, **kwargs)
return self.to_pred(embeds)
def forward_with_neg_prompt(self, x, *args, **kwargs):
_, embeds = self.net.forward_with_neg_prompt(x, *args, return_embed = True, **kwargs)
return self.to_pred(embeds)
def forward(self, x, *args, labels = None, **kwargs):
_, embeds = self.net(x, *args, return_embed = True, **kwargs)
logits = self.to_pred(embeds)
if not exists(labels):
return logits
logits = rearrange(logits, '... 1 -> ...')
return F.binary_cross_entropy_with_logits(logits, labels)
# specialized transformers
class MaskGitTransformer(Transformer):
def __init__(self, *args, **kwargs):
assert 'add_mask_id' not in kwargs
super().__init__(*args, add_mask_id = True, **kwargs)
class TokenCritic(Transformer):
def __init__(self, *args, **kwargs):
assert 'dim_out' not in kwargs
super().__init__(*args, dim_out = 1, **kwargs)
# classifier free guidance functions
def uniform(shape, min = 0, max = 1, device = None):
return torch.zeros(shape, device = device).float().uniform_(0, 1)
def prob_mask_like(shape, prob, device = None):
if prob == 1:
return torch.ones(shape, device = device, dtype = torch.bool)
elif prob == 0:
return torch.zeros(shape, device = device, dtype = torch.bool)
else:
return uniform(shape, device = device) < prob
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = logits.topk(k, dim = -1)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(2, ind, val)
return probs
# noise schedules
def cosine_schedule(t):
return torch.cos(t * math.pi * 0.5)
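# Illustrative values (not part of the original file): MaskGit.generate sweeps t from 0 to 1,
# so the masked fraction follows cos(t * pi / 2) -- cosine_schedule(torch.tensor(0.)) -> 1.0
# (everything masked on the first step) and cosine_schedule(torch.tensor(1.)) -> ~0.0
# (essentially nothing left masked by the last step).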
# main maskgit classes
@beartype
class MaskGit(nn.Module):
def __init__(
self,
image_size,
transformer: MaskGitTransformer,
noise_schedule: Callable = cosine_schedule,
token_critic: Optional[TokenCritic] = None,
self_token_critic = False,
vae: Optional[VQGanVAE] = None,
cond_vae: Optional[VQGanVAE] = None,
cond_image_size = None,
cond_drop_prob = 0.5,
self_cond_prob = 0.9,
no_mask_token_prob = 0.,
critic_loss_weight = 1.
):
super().__init__()
self.vae = vae.copy_for_eval() if exists(vae) else None
if exists(cond_vae):
self.cond_vae = cond_vae.eval()
else:
self.cond_vae = self.vae
assert not (exists(cond_vae) and not exists(cond_image_size)), 'cond_image_size must be specified if conditioning'
self.image_size = image_size
self.cond_image_size = cond_image_size
self.resize_image_for_cond_image = exists(cond_image_size)
self.cond_drop_prob = cond_drop_prob
self.transformer = transformer
self.self_cond = transformer.self_cond
assert self.vae.codebook_size == self.cond_vae.codebook_size == transformer.num_tokens, 'transformer num_tokens must be set to be equal to the vae codebook size'
self.mask_id = transformer.mask_id
self.noise_schedule = noise_schedule
assert not (self_token_critic and exists(token_critic))
self.token_critic = token_critic
if self_token_critic:
self.token_critic = SelfCritic(transformer)
self.critic_loss_weight = critic_loss_weight
# self conditioning
self.self_cond_prob = self_cond_prob
# percentage of tokens to be [mask]ed to remain the same token, so that transformer produces better embeddings across all tokens as done in original BERT paper
# may be needed for self conditioning
self.no_mask_token_prob = no_mask_token_prob
def save(self, path):
torch.save(self.state_dict(), path)
def load(self, path):
path = Path(path)
assert path.exists()
state_dict = torch.load(str(path))
self.load_state_dict(state_dict)
@torch.no_grad()
@eval_decorator
def generate(
self,
texts: List[str],
negative_texts: Optional[List[str]] = None,
cond_images: Optional[torch.Tensor] = None,
fmap_size = None,
temperature = 1.,
topk_filter_thres = 0.9,
can_remask_prev_masked = False,
force_not_use_token_critic = False,
timesteps = 18, # ideal number of steps is 18 in maskgit paper
cond_scale = 3,
critic_noise_scale = 1
):
fmap_size = default(fmap_size, self.vae.get_encoded_fmap_size(self.image_size))
# begin with all image token ids masked
device = next(self.parameters()).device
seq_len = fmap_size ** 2
batch_size = len(texts)
shape = (batch_size, seq_len)
ids = torch.full(shape, self.mask_id, dtype = torch.long, device = device)
scores = torch.zeros(shape, dtype = torch.float32, device = device)
starting_temperature = temperature
cond_ids = None
text_embeds = self.transformer.encode_text(texts)
demask_fn = self.transformer.forward_with_cond_scale
# whether to use token critic for scores
use_token_critic = exists(self.token_critic) and not force_not_use_token_critic
if use_token_critic:
token_critic_fn = self.token_critic.forward_with_cond_scale
# negative prompting, as in paper
neg_text_embeds = None
if exists(negative_texts):
assert len(texts) == len(negative_texts)
neg_text_embeds = self.transformer.encode_text(negative_texts)
demask_fn = partial(self.transformer.forward_with_neg_prompt, neg_text_embeds = neg_text_embeds)
if use_token_critic:
token_critic_fn = partial(self.token_critic.forward_with_neg_prompt, neg_text_embeds = neg_text_embeds)
if self.resize_image_for_cond_image:
assert exists(cond_images), 'conditioning image must be passed in to generate for super res maskgit'
with torch.no_grad():
_, cond_ids, _ = self.cond_vae.encode(cond_images)
self_cond_embed = None
for timestep, steps_until_x0 in tqdm(zip(torch.linspace(0, 1, timesteps, device = device), reversed(range(timesteps))), total = timesteps):
rand_mask_prob = self.noise_schedule(timestep)
num_token_masked = max(int((rand_mask_prob * seq_len).item()), 1)
masked_indices = scores.topk(num_token_masked, dim = -1).indices
ids = ids.scatter(1, masked_indices, self.mask_id)
logits, embed = demask_fn(
ids,
text_embeds = text_embeds,
self_cond_embed = self_cond_embed,
conditioning_token_ids = cond_ids,
cond_scale = cond_scale,
return_embed = True
)
self_cond_embed = embed if self.self_cond else None
filtered_logits = top_k(logits, topk_filter_thres)
temperature = starting_temperature * (steps_until_x0 / timesteps) # temperature is annealed
pred_ids = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
is_mask = ids == self.mask_id
ids = torch.where(
is_mask,
pred_ids,
ids
)
if use_token_critic:
scores = token_critic_fn(
ids,
text_embeds = text_embeds,
conditioning_token_ids = cond_ids,
cond_scale = cond_scale
)
scores = rearrange(scores, '... 1 -> ...')
scores = scores + (uniform(scores.shape, device = device) - 0.5) * critic_noise_scale * (steps_until_x0 / timesteps)
else:
probs_without_temperature = logits.softmax(dim = -1)
scores = 1 - probs_without_temperature.gather(2, pred_ids[..., None])
scores = rearrange(scores, '... 1 -> ...')
if not can_remask_prev_masked:
scores = scores.masked_fill(~is_mask, -1e5)
else:
                    assert self.no_mask_token_prob > 0., 'without training with some of the non-masked tokens forced to predict, not sure if the logits will be meaningful for these tokens'
# get ids
ids = rearrange(ids, 'b (i j) -> b i j', i = fmap_size, j = fmap_size)
if not exists(self.vae):
return ids
images = self.vae.decode_from_ids(ids)
return images
def forward(
self,
images_or_ids: torch.Tensor,
ignore_index = -1,
cond_images: Optional[torch.Tensor] = None,
cond_token_ids: Optional[torch.Tensor] = None,
texts: Optional[List[str]] = None,
text_embeds: Optional[torch.Tensor] = None,
cond_drop_prob = None,
train_only_generator = False,
sample_temperature = None
):
# tokenize if needed
if images_or_ids.dtype == torch.float:
assert exists(self.vae), 'vqgan vae must be passed in if training from raw images'
assert all([height_or_width == self.image_size for height_or_width in images_or_ids.shape[-2:]]), 'the image you passed in is not of the correct dimensions'
with torch.no_grad():
_, ids, _ = self.vae.encode(images_or_ids)
else:
assert not self.resize_image_for_cond_image, 'you cannot pass in raw image token ids if you want the framework to autoresize image for conditioning super res transformer'
ids = images_or_ids
# take care of conditioning image if specified
if self.resize_image_for_cond_image:
            cond_images = F.interpolate(images_or_ids, self.cond_image_size, mode = 'nearest')
# get some basic variables
ids = rearrange(ids, 'b ... -> b (...)')
batch, seq_len, device, cond_drop_prob = *ids.shape, ids.device, default(cond_drop_prob, self.cond_drop_prob)
# tokenize conditional images if needed
assert not (exists(cond_images) and exists(cond_token_ids)), 'if conditioning on low resolution, cannot pass in both images and token ids'
if exists(cond_images):
assert exists(self.cond_vae), 'cond vqgan vae must be passed in'
assert all([height_or_width == self.cond_image_size for height_or_width in cond_images.shape[-2:]])
with torch.no_grad():
_, cond_token_ids, _ = self.cond_vae.encode(cond_images)
# prepare mask
rand_time = uniform((batch,), device = device)
rand_mask_probs = self.noise_schedule(rand_time)
num_token_masked = (seq_len * rand_mask_probs).round().clamp(min = 1)
mask_id = self.mask_id
batch_randperm = torch.rand((batch, seq_len), device = device).argsort(dim = -1)
mask = batch_randperm < rearrange(num_token_masked, 'b -> b 1')
mask_id = self.transformer.mask_id
labels = torch.where(mask, ids, ignore_index)
if self.no_mask_token_prob > 0.:
no_mask_mask = get_mask_subset_prob(mask, self.no_mask_token_prob)
mask &= ~no_mask_mask
x = torch.where(mask, mask_id, ids)
# get text embeddings
if exists(texts):
text_embeds = self.transformer.encode_text(texts)
texts = None
# self conditioning
self_cond_embed = None
if self.transformer.self_cond and random() < self.self_cond_prob:
with torch.no_grad():
_, self_cond_embed = self.transformer(
x,
text_embeds = text_embeds,
conditioning_token_ids = cond_token_ids,
cond_drop_prob = 0.,
return_embed = True
)
self_cond_embed.detach_()
# get loss
ce_loss, logits = self.transformer(
x,
text_embeds = text_embeds,
self_cond_embed = self_cond_embed,
conditioning_token_ids = cond_token_ids,
labels = labels,
cond_drop_prob = cond_drop_prob,
ignore_index = ignore_index,
return_logits = True
)
if not exists(self.token_critic) or train_only_generator:
return ce_loss
# token critic loss
sampled_ids = gumbel_sample(logits, temperature = default(sample_temperature, random()))
critic_input = torch.where(mask, sampled_ids, x)
critic_labels = (ids != critic_input).float()
bce_loss = self.token_critic(
critic_input,
text_embeds = text_embeds,
conditioning_token_ids = cond_token_ids,
labels = critic_labels,
cond_drop_prob = cond_drop_prob
)
return ce_loss + self.critic_loss_weight * bce_loss
# final Muse class
@beartype
class Muse(nn.Module):
def __init__(
self,
base: MaskGit,
superres: MaskGit
):
super().__init__()
self.base_maskgit = base.eval()
assert superres.resize_image_for_cond_image
self.superres_maskgit = superres.eval()
@torch.no_grad()
def forward(
self,
texts: List[str],
cond_scale = 3.,
temperature = 1.,
timesteps = 18,
superres_timesteps = None,
return_lowres = False,
return_pil_images = True
):
lowres_image = self.base_maskgit.generate(
texts = texts,
cond_scale = cond_scale,
temperature = temperature,
timesteps = timesteps
)
superres_image = self.superres_maskgit.generate(
texts = texts,
cond_scale = cond_scale,
cond_images = lowres_image,
temperature = temperature,
timesteps = default(superres_timesteps, timesteps)
)
if return_pil_images:
lowres_image = list(map(T.ToPILImage(), lowres_image))
superres_image = list(map(T.ToPILImage(), superres_image))
if not return_lowres:
return superres_image
return superres_image, lowres_image
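# Minimal end-to-end sketch (illustrative; hyperparameters are placeholders). The transformer's
# num_tokens must match the vae codebook size, and seq_len must equal the encoded feature map
# size squared (256x256 images with the default 4-layer vae give a 16x16 map, hence seq_len 256).
#
#   vae = VQGanVAE(dim = 256, vq_codebook_size = 512)
#   transformer = MaskGitTransformer(
#       num_tokens = 512,
#       seq_len = 256,
#       dim = 512,
#       depth = 8
#   )
#   base_maskgit = MaskGit(
#       vae = vae,
#       transformer = transformer,
#       image_size = 256,
#       cond_drop_prob = 0.25
#   )
#   images = torch.randn(4, 3, 256, 256)
#   loss = base_maskgit(images, texts = ['a photo of a dog'] * 4)
#   loss.backward()
#   generated = base_maskgit.generate(texts = ['a photo of a dog'], cond_scale = 3.)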
| muse-maskgit-pytorch-main | muse_maskgit_pytorch/muse_maskgit_pytorch.py |
from muse_maskgit_pytorch.vqgan_vae import VQGanVAE
from muse_maskgit_pytorch.muse_maskgit_pytorch import Transformer, MaskGit, Muse, MaskGitTransformer, TokenCritic
from muse_maskgit_pytorch.trainers import VQGanVAETrainer
| muse-maskgit-pytorch-main | muse_maskgit_pytorch/__init__.py |
from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from memory_efficient_attention_pytorch.flash_attention import FlashAttentionFunction
# constants
AttentionConfig = namedtuple('AttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
scale = 8,
dropout = 0.,
flash = False
):
super().__init__()
self.scale = scale
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cuda_config = None
self.no_hardware_detected = False
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = AttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = AttentionConfig(False, True, False)
def flash_attn(self, q, k, v, mask = None):
default_scale = q.shape[-1] ** -0.5
is_cuda = q.is_cuda
q, k, v = map(lambda t: t.contiguous(), (q, k, v))
# scaled_dot_product_attention does not allow for custom scale
# so hack it in, to support rmsnorm-ed queries and keys
rescale = self.scale / default_scale
q = q * (rescale ** 0.5)
k = k * (rescale ** 0.5)
# use naive implementation if not correct hardware
# the below logic can also incorporate whether masking is needed or not
        use_naive = not is_cuda or not exists(self.cuda_config)
        if use_naive or self.no_hardware_detected:
            return FlashAttentionFunction.apply(q, k, v, mask, False, 512, 512)
        # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
        try:
with torch.backends.cuda.sdp_kernel(**self.cuda_config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.
)
except:
print_once('no hardware detected, falling back to naive implementation from memory-efficient-attention-pytorch library')
self.no_hardware_detected = True
out = FlashAttentionFunction.apply(q, k, v, mask, False, 512, 512)
return out
def forward(self, q, k, v, mask = None, force_non_flash = False):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
if self.flash and not force_non_flash:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b h j d -> b h i j", q, k) * self.scale
# masking
if exists(mask):
mask_value = -torch.finfo(sim.dtype).max
sim = sim.masked_fill(~mask, mask_value)
# attention
attn = sim.softmax(dim = -1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b h j d -> b h i d", attn, v)
return out
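# Shape sketch (illustrative, not part of the module): Attend consumes per-head tensors laid out
# as described in the forward docstring, (batch, heads, seq, dim_head), with an optional boolean
# mask broadcastable to (batch, heads, i, j).
#
#   attend = Attend(scale = 8, flash = False)
#   q = k = v = torch.randn(2, 8, 64, 32)     # (b, h, n, d)
#   out = attend(q, k, v)                     # (2, 8, 64, 32)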
| muse-maskgit-pytorch-main | muse_maskgit_pytorch/attend.py |
from math import sqrt
from random import choice
from pathlib import Path
from shutil import rmtree
from functools import partial
from beartype import beartype
import torch
from torch import nn
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader, random_split
import torchvision.transforms as T
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid, save_image
from muse_maskgit_pytorch.vqgan_vae import VQGanVAE
from einops import rearrange
from accelerate import Accelerator, DistributedType, DistributedDataParallelKwargs
from ema_pytorch import EMA
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# helper functions
def exists(val):
return val is not None
def identity(t, *args, **kwargs):
return t
def noop(*args, **kwargs):
pass
def find_index(arr, cond):
for ind, el in enumerate(arr):
if cond(el):
return ind
return None
def find_and_pop(arr, cond, default = None):
ind = find_index(arr, cond)
if exists(ind):
return arr.pop(ind)
if callable(default):
return default()
return default
def cycle(dl):
while True:
for data in dl:
yield data
def cast_tuple(t):
return t if isinstance(t, (tuple, list)) else (t,)
def yes_or_no(question):
answer = input(f'{question} (y/n) ')
return answer.lower() in ('yes', 'y')
def accum_log(log, new_logs):
for key, new_value in new_logs.items():
old_value = log.get(key, 0.)
log[key] = old_value + new_value
return log
def pair(val):
return val if isinstance(val, tuple) else (val, val)
def convert_image_to_fn(img_type, image):
if image.mode != img_type:
return image.convert(img_type)
return image
# image related helper functions and dataset
class ImageDataset(Dataset):
def __init__(
self,
folder,
image_size,
exts = ['jpg', 'jpeg', 'png']
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
print(f'{len(self.paths)} training samples found at {folder}')
self.transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.Resize(image_size),
T.RandomHorizontalFlip(),
T.CenterCrop(image_size),
T.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# main trainer class
@beartype
class VQGanVAETrainer(nn.Module):
def __init__(
self,
vae: VQGanVAE,
*,
folder,
num_train_steps,
batch_size,
image_size,
lr = 3e-4,
grad_accum_every = 1,
max_grad_norm = None,
discr_max_grad_norm = None,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
valid_frac = 0.05,
random_split_seed = 42,
use_ema = True,
ema_beta = 0.995,
ema_update_after_step = 0,
ema_update_every = 1,
apply_grad_penalty_every = 4,
accelerate_kwargs: dict = dict()
):
super().__init__()
# instantiate accelerator
kwargs_handlers = accelerate_kwargs.get('kwargs_handlers', [])
ddp_kwargs = find_and_pop(
kwargs_handlers,
lambda x: isinstance(x, DistributedDataParallelKwargs),
partial(DistributedDataParallelKwargs, find_unused_parameters = True)
)
ddp_kwargs.find_unused_parameters = True
kwargs_handlers.append(ddp_kwargs)
accelerate_kwargs.update(kwargs_handlers = kwargs_handlers)
self.accelerator = Accelerator(**accelerate_kwargs)
# vae
self.vae = vae
# training params
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
all_parameters = set(vae.parameters())
discr_parameters = set(vae.discr.parameters())
vae_parameters = all_parameters - discr_parameters
self.vae_parameters = vae_parameters
# optimizers
self.optim = Adam(vae_parameters, lr = lr)
self.discr_optim = Adam(discr_parameters, lr = lr)
self.max_grad_norm = max_grad_norm
self.discr_max_grad_norm = discr_max_grad_norm
# create dataset
self.ds = ImageDataset(folder, image_size)
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly splitted {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
# dataloader
self.dl = DataLoader(
self.ds,
batch_size = batch_size,
shuffle = True
)
self.valid_dl = DataLoader(
self.valid_ds,
batch_size = batch_size,
shuffle = True
)
# prepare with accelerator
(
self.vae,
self.optim,
self.discr_optim,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.vae,
self.optim,
self.discr_optim,
self.dl,
self.valid_dl
)
self.use_ema = use_ema
if use_ema:
self.ema_vae = EMA(vae, update_after_step = ema_update_after_step, update_every = ema_update_every)
self.ema_vae = self.accelerator.prepare(self.ema_vae)
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.apply_grad_penalty_every = apply_grad_penalty_every
self.results_folder = Path(results_folder)
if len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
def save(self, path):
if not self.accelerator.is_local_main_process:
return
pkg = dict(
model = self.accelerator.get_state_dict(self.vae),
optim = self.optim.state_dict(),
discr_optim = self.discr_optim.state_dict()
)
torch.save(pkg, path)
def load(self, path):
path = Path(path)
assert path.exists()
pkg = torch.load(path)
vae = self.accelerator.unwrap_model(self.vae)
vae.load_state_dict(pkg['model'])
self.optim.load_state_dict(pkg['optim'])
self.discr_optim.load_state_dict(pkg['discr_optim'])
def print(self, msg):
self.accelerator.print(msg)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def train_step(self):
device = self.device
steps = int(self.steps.item())
apply_grad_penalty = not (steps % self.apply_grad_penalty_every)
self.vae.train()
discr = self.vae.module.discr if self.is_distributed else self.vae.discr
if self.use_ema:
ema_vae = self.ema_vae.module if self.is_distributed else self.ema_vae
# logs
logs = {}
# update vae (generator)
for _ in range(self.grad_accum_every):
img = next(self.dl_iter)
img = img.to(device)
with self.accelerator.autocast():
loss = self.vae(
img,
add_gradient_penalty = apply_grad_penalty,
return_loss = True
)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.vae.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# update discriminator
if exists(discr):
self.discr_optim.zero_grad()
for _ in range(self.grad_accum_every):
img = next(self.dl_iter)
img = img.to(device)
loss = self.vae(img, return_discr_loss = True)
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'discr_loss': loss.item() / self.grad_accum_every})
if exists(self.discr_max_grad_norm):
self.accelerator.clip_grad_norm_(discr.parameters(), self.discr_max_grad_norm)
self.discr_optim.step()
# log
self.print(f"{steps}: vae loss: {logs['loss']} - discr loss: {logs['discr_loss']}")
# update exponential moving averaged generator
if self.use_ema:
ema_vae.update()
# sample results every so often
if not (steps % self.save_results_every):
vaes_to_evaluate = ((self.vae, str(steps)),)
if self.use_ema:
vaes_to_evaluate = ((ema_vae.ema_model, f'{steps}.ema'),) + vaes_to_evaluate
for model, filename in vaes_to_evaluate:
model.eval()
valid_data = next(self.valid_dl_iter)
valid_data = valid_data.to(device)
recons = model(valid_data, return_recons = True)
# else save a grid of images
imgs_and_recons = torch.stack((valid_data, recons), dim = 0)
imgs_and_recons = rearrange(imgs_and_recons, 'r b ... -> (b r) ...')
imgs_and_recons = imgs_and_recons.detach().cpu().float().clamp(0., 1.)
grid = make_grid(imgs_and_recons, nrow = 2, normalize = True, value_range = (0, 1))
logs['reconstructions'] = grid
save_image(grid, str(self.results_folder / f'{filename}.png'))
self.print(f'{steps}: saving to {str(self.results_folder)}')
# save model every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_model_every):
state_dict = self.accelerator.unwrap_model(self.vae).state_dict()
model_path = str(self.results_folder / f'vae.{steps}.pt')
self.accelerator.save(state_dict, model_path)
if self.use_ema:
ema_state_dict = self.accelerator.unwrap_model(self.ema_vae).state_dict()
model_path = str(self.results_folder / f'vae.{steps}.ema.pt')
self.accelerator.save(ema_state_dict, model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
device = next(self.vae.parameters()).device
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
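# Minimal usage sketch (illustrative; the folder path and hyperparameters are placeholders):
#
#   vae = VQGanVAE(dim = 256, vq_codebook_size = 512)
#   trainer = VQGanVAETrainer(
#       vae,
#       folder = './path/to/images',
#       num_train_steps = 50000,
#       batch_size = 4,
#       image_size = 256,
#       lr = 3e-4
#   )
#   trainer.train()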
| muse-maskgit-pytorch-main | muse_maskgit_pytorch/trainers.py |
from pathlib import Path
import copy
import math
from math import sqrt
from functools import partial, wraps
from vector_quantize_pytorch import VectorQuantize as VQ
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
import torchvision
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
# constants
MList = nn.ModuleList
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# decorators
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
def remove_vgg(fn):
@wraps(fn)
def inner(self, *args, **kwargs):
has_vgg = hasattr(self, '_vgg')
if has_vgg:
vgg = self._vgg
delattr(self, '_vgg')
out = fn(self, *args, **kwargs)
if has_vgg:
self._vgg = vgg
return out
return inner
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, string_input):
return string_input.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# tensor helper functions
def log(t, eps = 1e-10):
return torch.log(t + eps)
def gradient_penalty(images, output, weight = 10):
batch_size = images.shape[0]
gradients = torch_grad(
outputs = output,
inputs = images,
grad_outputs = torch.ones(output.size(), device = images.device),
create_graph = True,
retain_graph = True,
only_inputs = True
)[0]
gradients = rearrange(gradients, 'b ... -> b (...)')
return weight * ((gradients.norm(2, dim = 1) - 1) ** 2).mean()
def leaky_relu(p = 0.1):
    return nn.LeakyReLU(p)
def safe_div(numer, denom, eps = 1e-8):
return numer / denom.clamp(min = eps)
# gan losses
def hinge_discr_loss(fake, real):
return (F.relu(1 + fake) + F.relu(1 - real)).mean()
def hinge_gen_loss(fake):
return -fake.mean()
def bce_discr_loss(fake, real):
return (-log(1 - torch.sigmoid(fake)) - log(torch.sigmoid(real))).mean()
def bce_gen_loss(fake):
return -log(torch.sigmoid(fake)).mean()
def grad_layer_wrt_loss(loss, layer):
return torch_grad(
outputs = loss,
inputs = layer,
grad_outputs = torch.ones_like(loss),
retain_graph = True
)[0].detach()
# vqgan vae
class LayerNormChan(nn.Module):
def __init__(
self,
dim,
eps = 1e-5
):
super().__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(1, dim, 1, 1))
def forward(self, x):
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) * var.clamp(min = self.eps).rsqrt() * self.gamma
# discriminator
class Discriminator(nn.Module):
def __init__(
self,
dims,
channels = 3,
groups = 16,
init_kernel_size = 5
):
super().__init__()
dim_pairs = zip(dims[:-1], dims[1:])
self.layers = MList([nn.Sequential(nn.Conv2d(channels, dims[0], init_kernel_size, padding = init_kernel_size // 2), leaky_relu())])
for dim_in, dim_out in dim_pairs:
self.layers.append(nn.Sequential(
nn.Conv2d(dim_in, dim_out, 4, stride = 2, padding = 1),
nn.GroupNorm(groups, dim_out),
leaky_relu()
))
dim = dims[-1]
self.to_logits = nn.Sequential( # return 5 x 5, for PatchGAN-esque training
nn.Conv2d(dim, dim, 1),
leaky_relu(),
nn.Conv2d(dim, 1, 4)
)
def forward(self, x):
for net in self.layers:
x = net(x)
return self.to_logits(x)
# resnet encoder / decoder
class ResnetEncDec(nn.Module):
def __init__(
self,
dim,
*,
channels = 3,
layers = 4,
layer_mults = None,
num_resnet_blocks = 1,
resnet_groups = 16,
first_conv_kernel_size = 5
):
super().__init__()
assert dim % resnet_groups == 0, f'dimension {dim} must be divisible by {resnet_groups} (groups for the groupnorm)'
self.layers = layers
self.encoders = MList([])
self.decoders = MList([])
layer_mults = default(layer_mults, list(map(lambda t: 2 ** t, range(layers))))
assert len(layer_mults) == layers, 'layer multipliers must be equal to designated number of layers'
layer_dims = [dim * mult for mult in layer_mults]
dims = (dim, *layer_dims)
self.encoded_dim = dims[-1]
dim_pairs = zip(dims[:-1], dims[1:])
append = lambda arr, t: arr.append(t)
prepend = lambda arr, t: arr.insert(0, t)
if not isinstance(num_resnet_blocks, tuple):
num_resnet_blocks = (*((0,) * (layers - 1)), num_resnet_blocks)
assert len(num_resnet_blocks) == layers, 'number of resnet blocks config must be equal to number of layers'
for layer_index, (dim_in, dim_out), layer_num_resnet_blocks in zip(range(layers), dim_pairs, num_resnet_blocks):
append(self.encoders, nn.Sequential(nn.Conv2d(dim_in, dim_out, 4, stride = 2, padding = 1), leaky_relu()))
prepend(self.decoders, nn.Sequential(nn.ConvTranspose2d(dim_out, dim_in, 4, 2, 1), leaky_relu()))
for _ in range(layer_num_resnet_blocks):
append(self.encoders, ResBlock(dim_out, groups = resnet_groups))
prepend(self.decoders, GLUResBlock(dim_out, groups = resnet_groups))
prepend(self.encoders, nn.Conv2d(channels, dim, first_conv_kernel_size, padding = first_conv_kernel_size // 2))
append(self.decoders, nn.Conv2d(dim, channels, 1))
def get_encoded_fmap_size(self, image_size):
return image_size // (2 ** self.layers)
@property
def last_dec_layer(self):
return self.decoders[-1].weight
def encode(self, x):
for enc in self.encoders:
x = enc(x)
return x
def decode(self, x):
for dec in self.decoders:
x = dec(x)
return x
class GLUResBlock(nn.Module):
def __init__(self, chan, groups = 16):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan, chan * 2, 3, padding = 1),
nn.GLU(dim = 1),
nn.GroupNorm(groups, chan),
nn.Conv2d(chan, chan * 2, 3, padding = 1),
nn.GLU(dim = 1),
nn.GroupNorm(groups, chan),
nn.Conv2d(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
class ResBlock(nn.Module):
def __init__(self, chan, groups = 16):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan, chan, 3, padding = 1),
nn.GroupNorm(groups, chan),
leaky_relu(),
nn.Conv2d(chan, chan, 3, padding = 1),
nn.GroupNorm(groups, chan),
leaky_relu(),
nn.Conv2d(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
# main vqgan-vae classes
class VQGanVAE(nn.Module):
def __init__(
self,
*,
dim,
channels = 3,
layers = 4,
l2_recon_loss = False,
use_hinge_loss = True,
vgg = None,
vq_codebook_dim = 256,
vq_codebook_size = 512,
vq_decay = 0.8,
vq_commitment_weight = 1.,
vq_kmeans_init = True,
vq_use_cosine_sim = True,
use_vgg_and_gan = True,
discr_layers = 4,
**kwargs
):
super().__init__()
vq_kwargs, kwargs = groupby_prefix_and_trim('vq_', kwargs)
encdec_kwargs, kwargs = groupby_prefix_and_trim('encdec_', kwargs)
self.channels = channels
self.codebook_size = vq_codebook_size
self.dim_divisor = 2 ** layers
enc_dec_klass = ResnetEncDec
self.enc_dec = enc_dec_klass(
dim = dim,
channels = channels,
layers = layers,
**encdec_kwargs
)
self.vq = VQ(
dim = self.enc_dec.encoded_dim,
codebook_dim = vq_codebook_dim,
codebook_size = vq_codebook_size,
decay = vq_decay,
commitment_weight = vq_commitment_weight,
accept_image_fmap = True,
kmeans_init = vq_kmeans_init,
use_cosine_sim = vq_use_cosine_sim,
**vq_kwargs
)
# reconstruction loss
self.recon_loss_fn = F.mse_loss if l2_recon_loss else F.l1_loss
# turn off GAN and perceptual loss if grayscale
self._vgg = None
self.discr = None
self.use_vgg_and_gan = use_vgg_and_gan
if not use_vgg_and_gan:
return
        # perceptual loss
if exists(vgg):
self._vgg = vgg
# gan related losses
layer_mults = list(map(lambda t: 2 ** t, range(discr_layers)))
layer_dims = [dim * mult for mult in layer_mults]
dims = (dim, *layer_dims)
self.discr = Discriminator(dims = dims, channels = channels)
self.discr_loss = hinge_discr_loss if use_hinge_loss else bce_discr_loss
self.gen_loss = hinge_gen_loss if use_hinge_loss else bce_gen_loss
@property
def device(self):
return next(self.parameters()).device
@property
def vgg(self):
if exists(self._vgg):
return self._vgg
vgg = torchvision.models.vgg16(pretrained = True)
vgg.classifier = nn.Sequential(*vgg.classifier[:-2])
self._vgg = vgg.to(self.device)
return self._vgg
@property
def encoded_dim(self):
return self.enc_dec.encoded_dim
def get_encoded_fmap_size(self, image_size):
return self.enc_dec.get_encoded_fmap_size(image_size)
def copy_for_eval(self):
device = next(self.parameters()).device
vae_copy = copy.deepcopy(self.cpu())
if vae_copy.use_vgg_and_gan:
del vae_copy.discr
del vae_copy._vgg
vae_copy.eval()
return vae_copy.to(device)
@remove_vgg
def state_dict(self, *args, **kwargs):
return super().state_dict(*args, **kwargs)
@remove_vgg
def load_state_dict(self, *args, **kwargs):
return super().load_state_dict(*args, **kwargs)
def save(self, path):
torch.save(self.state_dict(), path)
def load(self, path):
path = Path(path)
assert path.exists()
state_dict = torch.load(str(path))
self.load_state_dict(state_dict)
@property
def codebook(self):
return self.vq.codebook
def encode(self, fmap):
fmap = self.enc_dec.encode(fmap)
fmap, indices, commit_loss = self.vq(fmap)
return fmap, indices, commit_loss
def decode_from_ids(self, ids):
codes = self.codebook[ids]
fmap = self.vq.project_out(codes)
fmap = rearrange(fmap, 'b h w c -> b c h w')
return self.decode(fmap)
def decode(self, fmap):
return self.enc_dec.decode(fmap)
def forward(
self,
img,
return_loss = False,
return_discr_loss = False,
return_recons = False,
add_gradient_penalty = True
):
batch, channels, height, width, device = *img.shape, img.device
for dim_name, size in (('height', height), ('width', width)):
assert (size % self.dim_divisor) == 0, f'{dim_name} must be divisible by {self.dim_divisor}'
assert channels == self.channels, 'number of channels on image or sketch is not equal to the channels set on this VQGanVAE'
fmap, indices, commit_loss = self.encode(img)
fmap = self.decode(fmap)
if not return_loss and not return_discr_loss:
return fmap
assert return_loss ^ return_discr_loss, 'you should either return autoencoder loss or discriminator loss, but not both'
# whether to return discriminator loss
if return_discr_loss:
assert exists(self.discr), 'discriminator must exist to train it'
fmap.detach_()
img.requires_grad_()
fmap_discr_logits, img_discr_logits = map(self.discr, (fmap, img))
discr_loss = self.discr_loss(fmap_discr_logits, img_discr_logits)
if add_gradient_penalty:
gp = gradient_penalty(img, img_discr_logits)
loss = discr_loss + gp
if return_recons:
return loss, fmap
return loss
# reconstruction loss
recon_loss = self.recon_loss_fn(fmap, img)
# early return if training on grayscale
if not self.use_vgg_and_gan:
if return_recons:
return recon_loss, fmap
return recon_loss
# perceptual loss
img_vgg_input = img
fmap_vgg_input = fmap
if img.shape[1] == 1:
# handle grayscale for vgg
img_vgg_input, fmap_vgg_input = map(lambda t: repeat(t, 'b 1 ... -> b c ...', c = 3), (img_vgg_input, fmap_vgg_input))
img_vgg_feats = self.vgg(img_vgg_input)
recon_vgg_feats = self.vgg(fmap_vgg_input)
perceptual_loss = F.mse_loss(img_vgg_feats, recon_vgg_feats)
# generator loss
gen_loss = self.gen_loss(self.discr(fmap))
# calculate adaptive weight
last_dec_layer = self.enc_dec.last_dec_layer
norm_grad_wrt_gen_loss = grad_layer_wrt_loss(gen_loss, last_dec_layer).norm(p = 2)
norm_grad_wrt_perceptual_loss = grad_layer_wrt_loss(perceptual_loss, last_dec_layer).norm(p = 2)
adaptive_weight = safe_div(norm_grad_wrt_perceptual_loss, norm_grad_wrt_gen_loss)
adaptive_weight.clamp_(max = 1e4)
# combine losses
loss = recon_loss + perceptual_loss + commit_loss + adaptive_weight * gen_loss
if return_recons:
return loss, fmap
return loss
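# Minimal usage sketch (illustrative, not part of the module): train with the combined loss, or
# round-trip images through the discrete codebook.
#
#   vae = VQGanVAE(dim = 256, vq_codebook_size = 512)
#   images = torch.randn(2, 3, 256, 256)            # height/width must be divisible by 2 ** layers
#   loss = vae(images, return_loss = True)          # reconstruction + perceptual + commit + generator terms
#   recon = vae(images)                             # plain reconstruction when no loss is requested
#   fmap, indices, commit_loss = vae.encode(images)
#   decoded = vae.decode_from_ids(indices)          # decode straight from codebook indices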
| muse-maskgit-pytorch-main | muse_maskgit_pytorch/vqgan_vae.py |
import sys
from setuptools import setup, find_packages
sys.path[0:0] = ['transganformer']
from version import __version__
setup(
name = 'transganformer',
packages = find_packages(),
entry_points={
'console_scripts': [
'transganformer = transganformer.cli:main',
],
},
version = __version__,
license='MIT',
description = 'TransGanFormer',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/transganformer',
keywords = [
'artificial intelligence',
'deep learning',
'generative adversarial networks',
'transformers',
'attention-mechanism'
],
install_requires=[
'einops>=0.3',
'fire',
'kornia',
'numpy',
'pillow',
'retry',
'torch>=1.6',
'torchvision',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| transganformer-main | setup.py |
import random
import torch
import torch.nn.functional as F
def DiffAugment(x, types=[]):
for p in types:
for f in AUGMENT_FNS[p]:
x = f(x)
return x.contiguous()
# """
# Augmentation functions got images as `x`
# where `x` is tensor with this dimensions:
# 0 - count of images
# 1 - channels
# 2 - width
# 3 - height of image
# """
def rand_brightness(x):
x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5)
return x
def rand_saturation(x):
x_mean = x.mean(dim=1, keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean
return x
def rand_contrast(x):
x_mean = x.mean(dim=[1, 2, 3], keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean
return x
def rand_translation(x, ratio=0.125):
shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(x.size(2), dtype=torch.long, device=x.device),
torch.arange(x.size(3), dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
return x
def rand_offset(x, ratio=1, ratio_h=1, ratio_v=1):
w, h = x.size(2), x.size(3)
imgs = []
for img in x.unbind(dim = 0):
max_h = int(w * ratio * ratio_h)
max_v = int(h * ratio * ratio_v)
value_h = random.randint(0, max_h) * 2 - max_h
value_v = random.randint(0, max_v) * 2 - max_v
if abs(value_h) > 0:
img = torch.roll(img, value_h, 2)
if abs(value_v) > 0:
img = torch.roll(img, value_v, 1)
imgs.append(img)
return torch.stack(imgs)
def rand_offset_h(x, ratio=1):
return rand_offset(x, ratio=1, ratio_h=ratio, ratio_v=0)
def rand_offset_v(x, ratio=1):
return rand_offset(x, ratio=1, ratio_h=0, ratio_v=ratio)
def rand_cutout(x, ratio=0.5):
cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
mask[grid_batch, grid_x, grid_y] = 0
x = x * mask.unsqueeze(1)
return x
AUGMENT_FNS = {
'color': [rand_brightness, rand_saturation, rand_contrast],
'offset': [rand_offset],
'offset_h': [rand_offset_h],
'offset_v': [rand_offset_v],
'translation': [rand_translation],
'cutout': [rand_cutout],
}
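# Usage sketch (illustrative, not part of the module): apply differentiable augmentations to a
# batch of images before they reach the discriminator.
#
#   images = torch.rand(8, 3, 64, 64)
#   augmented = DiffAugment(images, types = ['color', 'translation', 'cutout'])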
| transganformer-main | transganformer/diff_augment.py |
__version__ = '0.0.17'
| transganformer-main | transganformer/version.py |
from transganformer.transganformer import Transganformer, Generator, Discriminator, Trainer, NanException
| transganformer-main | transganformer/__init__.py |
import os
import fire
import random
from retry.api import retry_call
from tqdm import tqdm
from datetime import datetime
from functools import wraps
from transganformer import Trainer, NanException
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
import numpy as np
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_list(el):
return el if isinstance(el, list) else [el]
def timestamped_filename(prefix = 'generated-'):
now = datetime.now()
timestamp = now.strftime("%m-%d-%Y_%H-%M-%S")
return f'{prefix}{timestamp}'
def set_seed(seed):
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
def run_training(rank, world_size, model_args, data, load_from, new, num_train_steps, name, seed):
is_main = rank == 0
is_ddp = world_size > 1
if is_ddp:
set_seed(seed)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
dist.init_process_group('nccl', rank=rank, world_size=world_size)
print(f"{rank + 1}/{world_size} process initialized.")
model_args.update(
is_ddp = is_ddp,
rank = rank,
world_size = world_size
)
model = Trainer(**model_args)
if not new:
model.load(load_from)
else:
model.clear()
model.set_data_src(data)
for _ in tqdm(range(num_train_steps - model.steps), initial = model.steps, total = num_train_steps, mininterval=10., desc=f'{name}<{data}>'):
retry_call(model.train, tries=3, exceptions=NanException)
if is_main and _ % 50 == 0:
model.print_log()
model.save(model.checkpoint_num)
if is_ddp:
dist.destroy_process_group()
def train_from_folder(
data = './data',
results_dir = './results',
models_dir = './models',
name = 'default',
new = False,
load_from = -1,
image_size = 32,
fmap_max = 512,
transparent = False,
greyscale = False,
batch_size = 10,
gradient_accumulate_every = 4,
num_train_steps = 150000,
learning_rate = 2e-4,
save_every = 1000,
evaluate_every = 1000,
generate = False,
generate_types = ['default', 'ema'],
generate_interpolation = False,
aug_test = False,
aug_prob=None,
aug_types=['cutout', 'translation'],
dataset_aug_prob=0.,
interpolation_num_steps = 100,
save_frames = False,
num_image_tiles = None,
num_workers = None,
multi_gpus = False,
calculate_fid_every = None,
calculate_fid_num_images = 12800,
clear_fid_cache = False,
seed = 42,
amp = False,
show_progress = False,
):
num_image_tiles = default(num_image_tiles, 4 if image_size > 512 else 8)
model_args = dict(
name = name,
results_dir = results_dir,
models_dir = models_dir,
batch_size = batch_size,
gradient_accumulate_every = gradient_accumulate_every,
image_size = image_size,
num_image_tiles = num_image_tiles,
num_workers = num_workers,
fmap_max = fmap_max,
transparent = transparent,
greyscale = greyscale,
lr = learning_rate,
save_every = save_every,
evaluate_every = evaluate_every,
aug_prob = aug_prob,
aug_types = cast_list(aug_types),
dataset_aug_prob = dataset_aug_prob,
calculate_fid_every = calculate_fid_every,
calculate_fid_num_images = calculate_fid_num_images,
clear_fid_cache = clear_fid_cache,
amp = amp
)
if generate:
model = Trainer(**model_args)
model.load(load_from)
samples_name = timestamped_filename()
checkpoint = model.checkpoint_num
dir_result = model.generate(samples_name, num_image_tiles, checkpoint, generate_types)
print(f'sample images generated at {dir_result}')
return
if generate_interpolation:
model = Trainer(**model_args)
model.load(load_from)
samples_name = timestamped_filename()
model.generate_interpolation(samples_name, num_image_tiles, num_steps = interpolation_num_steps, save_frames = save_frames)
print(f'interpolation generated at {results_dir}/{name}/{samples_name}')
return
if show_progress:
model = Trainer(**model_args)
model.show_progress(num_images=num_image_tiles, types=generate_types)
return
world_size = torch.cuda.device_count()
if world_size == 1 or not multi_gpus:
run_training(0, 1, model_args, data, load_from, new, num_train_steps, name, seed)
return
mp.spawn(run_training,
args=(world_size, model_args, data, load_from, new, num_train_steps, name, seed),
nprocs=world_size,
join=True)
def main():
fire.Fire(train_from_folder)
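# example invocation (assumed console entry point named `transganformer`; flag names
# follow python-fire's mapping of the keyword arguments of train_from_folder above):
#   $ transganformer --data ./images --image_size 64 --batch_size 16 --num_train_steps 150000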
| transganformer-main | transganformer/cli.py |
import os
import json
import multiprocessing
from random import random
import math
from math import log2, floor, sqrt, log, pi
from functools import partial
from contextlib import contextmanager, ExitStack
from pathlib import Path
from shutil import rmtree
import torch
from torch.cuda.amp import autocast, GradScaler
from torch.optim import Adam
from torch import nn, einsum
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.autograd import grad as torch_grad
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from PIL import Image
import torchvision
from torchvision import transforms
from kornia import filter2D
from transganformer.diff_augment import DiffAugment
from transganformer.version import __version__
from tqdm import tqdm
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
# asserts
assert torch.cuda.is_available(), 'You need to have an Nvidia GPU with CUDA installed.'
# constants
NUM_CORES = multiprocessing.cpu_count()
EXTS = ['jpg', 'jpeg', 'png']
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
@contextmanager
def null_context():
yield
def combine_contexts(contexts):
@contextmanager
def multi_contexts():
with ExitStack() as stack:
yield [stack.enter_context(ctx()) for ctx in contexts]
return multi_contexts
def is_power_of_two(val):
return log2(val).is_integer()
def set_requires_grad(model, bool):
for p in model.parameters():
p.requires_grad = bool
def cycle(iterable):
while True:
for i in iterable:
yield i
def raise_if_nan(t):
if torch.isnan(t):
raise NanException
def gradient_accumulate_contexts(gradient_accumulate_every, is_ddp, ddps):
if is_ddp:
num_no_syncs = gradient_accumulate_every - 1
head = [combine_contexts(map(lambda ddp: ddp.no_sync, ddps))] * num_no_syncs
tail = [null_context]
contexts = head + tail
else:
contexts = [null_context] * gradient_accumulate_every
for context in contexts:
with context():
yield
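# note: under DDP the generator above yields the first (gradient_accumulate_every - 1)
# iterations inside each module's no_sync() context, so gradients are only all-reduced
# on the final accumulation step; without DDP every iteration runs in a no-op context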
def evaluate_in_chunks(max_batch_size, model, *args):
split_args = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), args))))
chunked_outputs = [model(*i) for i in split_args]
if len(chunked_outputs) == 1:
return chunked_outputs[0]
return torch.cat(chunked_outputs, dim=0)
def slerp(val, low, high):
low_norm = low / torch.norm(low, dim=1, keepdim=True)
high_norm = high / torch.norm(high, dim=1, keepdim=True)
omega = torch.acos((low_norm * high_norm).sum(1))
so = torch.sin(omega)
res = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low + (torch.sin(val * omega) / so).unsqueeze(1) * high
return res
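# example: spherical interpolation between two latent batches at the midpoint
#   mid = slerp(0.5, torch.randn(4, 256), torch.randn(4, 256))  # -> (4, 256)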
def safe_div(n, d):
try:
res = n / d
except ZeroDivisionError:
prefix = '' if int(n >= 0) else '-'
res = float(f'{prefix}inf')
return res
# helper classes
class NanException(Exception):
pass
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if not exists(old):
return new
return old * self.beta + (1 - self.beta) * new
class RandomApply(nn.Module):
def __init__(self, prob, fn, fn_else = lambda x: x):
super().__init__()
self.fn = fn
self.fn_else = fn_else
self.prob = prob
def forward(self, x):
fn = self.fn if random() < self.prob else self.fn_else
return fn(x)
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
if isinstance(out, tuple):
out, latent = out
ret = (out + x, latent)
return ret
return x + out
class SumBranches(nn.Module):
def __init__(self, branches):
super().__init__()
self.branches = nn.ModuleList(branches)
def forward(self, x):
return sum(map(lambda fn: fn(x), self.branches))
# attention and transformer modules
class ChanNorm(nn.Module):
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))
def forward(self, x):
std = torch.var(x, dim = 1, unbiased = False, keepdim = True).sqrt()
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) / (std + self.eps) * self.g + self.b
class PreNorm(nn.Module):
def __init__(self, dim, fn, dim_context = None):
super().__init__()
self.norm = ChanNorm(dim)
self.norm_context = ChanNorm(dim_context) if exists(dim_context) else None
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
if exists(self.norm_context):
context = kwargs.pop('context')
context = self.norm_context(context)
kwargs.update(context = context)
return self.fn(x, **kwargs)
class DepthWiseConv2d(nn.Module):
def __init__(self, dim_in, dim_out, kernel_size, padding = 0, stride = 1, bias = True):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(dim_in, dim_in, kernel_size = kernel_size, padding = padding, groups = dim_in, stride = stride, bias = bias),
nn.Conv2d(dim_in, dim_out, kernel_size = 1, bias = bias)
)
def forward(self, x):
return self.net(x)
def FeedForward(dim, mult = 4, kernel_size = 3, bn = False):
padding = kernel_size // 2
return nn.Sequential(
nn.Conv2d(dim, dim * mult * 2, 1),
nn.GLU(dim = 1),
nn.BatchNorm2d(dim * mult) if bn else nn.Identity(),
DepthWiseConv2d(dim * mult, dim * mult * 2, kernel_size, padding = padding),
nn.GLU(dim = 1),
nn.Conv2d(dim * mult, dim, 1)
)
# sinusoidal embedding
class FixedPositionalEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
dim //= 2
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, x):
h = torch.linspace(-1., 1., x.shape[-2], device = x.device).type_as(self.inv_freq)
w = torch.linspace(-1., 1., x.shape[-1], device = x.device).type_as(self.inv_freq)
sinu_inp_h = torch.einsum('i , j -> i j', h, self.inv_freq)
sinu_inp_w = torch.einsum('i , j -> i j', w, self.inv_freq)
sinu_inp_h = repeat(sinu_inp_h, 'h c -> () c h w', w = x.shape[-1])
sinu_inp_w = repeat(sinu_inp_w, 'w c -> () c h w', h = x.shape[-2])
sinu_inp = torch.cat((sinu_inp_w, sinu_inp_h), dim = 1)
emb = torch.cat((sinu_inp.sin(), sinu_inp.cos()), dim = 1)
return emb
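# note: for a feature map of shape (b, dim, h, w) the embedding above evaluates to a
# (1, dim, h, w) tensor (sin / cos of per-axis frequencies), which Attention.forward
# adds to its query and key inputs when no cross-attention context is given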
# classes
class Attention(nn.Module):
def __init__(
self,
dim,
fmap_size = None,
dim_out = None,
kv_dim = None,
heads = 8,
dim_head = 64,
q_kernel_size = 1,
kv_kernel_size = 3,
out_kernel_size = 1,
q_stride = 1,
include_self = False,
downsample = False,
downsample_kv = 1,
bn = False,
latent_dim = None
):
super().__init__()
self.sinu_emb = FixedPositionalEmbedding(dim)
inner_dim = dim_head * heads
kv_dim = default(kv_dim, dim)
dim_out = default(dim_out, dim)
self.heads = heads
self.scale = dim_head ** -0.5
q_padding = q_kernel_size // 2
kv_padding = kv_kernel_size // 2
out_padding = out_kernel_size // 2
q_conv_params = (1, 1, 0)
self.to_q = nn.Conv2d(dim, inner_dim, *q_conv_params, bias = False)
if downsample_kv == 1:
kv_conv_params = (3, 1, 1)
elif downsample_kv == 2:
kv_conv_params = (3, 2, 1)
elif downsample_kv == 4:
kv_conv_params = (7, 4, 3)
else:
raise ValueError(f'invalid downsample factor for key / values {downsample_kv}')
self.to_k = nn.Conv2d(kv_dim, inner_dim, *kv_conv_params, bias = False)
self.to_v = nn.Conv2d(kv_dim, inner_dim, *kv_conv_params, bias = False)
self.bn = bn
if self.bn:
self.q_bn = nn.BatchNorm2d(inner_dim) if bn else nn.Identity()
self.k_bn = nn.BatchNorm2d(inner_dim) if bn else nn.Identity()
self.v_bn = nn.BatchNorm2d(inner_dim) if bn else nn.Identity()
self.has_latents = exists(latent_dim)
if self.has_latents:
self.latent_norm = ChanNorm(latent_dim)
self.latents_to_qkv = nn.Conv2d(latent_dim, inner_dim * 3, 1, bias = False)
self.latents_to_out = nn.Sequential(
nn.Conv2d(inner_dim, latent_dim * 2, 1),
nn.GLU(dim = 1),
nn.BatchNorm2d(latent_dim) if bn else nn.Identity()
)
self.include_self = include_self
if include_self:
self.to_self_k = nn.Conv2d(dim, inner_dim, *kv_conv_params, bias = False)
self.to_self_v = nn.Conv2d(dim, inner_dim, *kv_conv_params, bias = False)
self.mix_heads_post = nn.Parameter(torch.randn(heads, heads))
out_conv_params = (3, 2, 1) if downsample else q_conv_params
self.to_out = nn.Sequential(
nn.Conv2d(inner_dim, dim_out * 2, *out_conv_params),
nn.GLU(dim = 1),
nn.BatchNorm2d(dim_out) if bn else nn.Identity()
)
self.fmap_size = fmap_size
self.pos_emb = RotaryEmbedding(dim_head, downsample_keys = downsample_kv)
def forward(self, x, latents = None, context = None, include_self = False):
assert not exists(self.fmap_size) or x.shape[-1] == self.fmap_size, 'fmap size must equal the given shape'
b, n, _, y, h, device = *x.shape, self.heads, x.device
has_context = exists(context)
context = default(context, x)
q_inp = x
k_inp = context
v_inp = context
if not has_context:
sinu_emb = self.sinu_emb(context)
q_inp += sinu_emb
k_inp += sinu_emb
q, k, v = (self.to_q(q_inp), self.to_k(k_inp), self.to_v(v_inp))
if self.bn:
q = self.q_bn(q)
k = self.k_bn(k)
v = self.v_bn(v)
out_h, out_w = q.shape[-2:]
split_head = lambda t: rearrange(t, 'b (h d) x y -> b h (x y) d', h = h)
q, k, v = map(split_head, (q, k, v))
if not has_context:
q, k = self.pos_emb(q, k)
if self.include_self:
kx = self.to_self_k(x)
vx = self.to_self_v(x)
kx, vx = map(split_head, (kx, vx))
k = torch.cat((kx, k), dim = -2)
v = torch.cat((vx, v), dim = -2)
if self.has_latents:
assert exists(latents), 'latents must be passed in'
latents = self.latent_norm(latents)
lq, lk, lv = self.latents_to_qkv(latents).chunk(3, dim = 1)
lq, lk, lv = map(split_head, (lq, lk, lv))
latent_shape = lq.shape
num_latents = lq.shape[-2]
q = torch.cat((lq, q), dim = -2)
k = torch.cat((lk, k), dim = -2)
v = torch.cat((lv, v), dim = -2)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = dots.softmax(dim = -1)
attn = einsum('b h i j, h g -> b g i j', attn, self.mix_heads_post)
out = torch.einsum('b h i j, b h j d -> b h i d', attn, v)
if self.has_latents:
lout, out = out[..., :num_latents, :], out[..., num_latents:, :]
lout = rearrange(lout, 'b h (x y) d -> b (h d) x y', h = h, x = latents.shape[-2], y = latents.shape[-1])
lout = self.latents_to_out(lout)
out = rearrange(out, 'b h (x y) d -> b (h d) x y', h = h, x = out_h, y = out_w)
out = self.to_out(out)
if self.has_latents:
return out, lout
return out
# dataset
def convert_image_to(img_type, image):
if image.mode != img_type:
return image.convert(img_type)
return image
class identity(object):
def __call__(self, tensor):
return tensor
class expand_greyscale(object):
def __init__(self, transparent):
self.transparent = transparent
def __call__(self, tensor):
channels = tensor.shape[0]
num_target_channels = 4 if self.transparent else 3
if channels == num_target_channels:
return tensor
alpha = None
if channels == 1:
color = tensor.expand(3, -1, -1)
elif channels == 2:
color = tensor[:1].expand(3, -1, -1)
alpha = tensor[1:]
else:
raise Exception(f'image with invalid number of channels given {channels}')
if not exists(alpha) and self.transparent:
alpha = torch.ones(1, *tensor.shape[1:], device=tensor.device)
return color if not self.transparent else torch.cat((color, alpha))
def resize_to_minimum_size(min_size, image):
if max(*image.size) < min_size:
return torchvision.transforms.functional.resize(image, min_size)
return image
class ImageDataset(Dataset):
def __init__(
self,
folder,
image_size,
transparent = False,
greyscale = False,
aug_prob = 0.
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in EXTS for p in Path(f'{folder}').glob(f'**/*.{ext}')]
assert len(self.paths) > 0, f'No images were found in {folder} for training'
if transparent:
num_channels = 4
pillow_mode = 'RGBA'
expand_fn = expand_greyscale(transparent)
elif greyscale:
num_channels = 1
pillow_mode = 'L'
expand_fn = identity()
else:
num_channels = 3
pillow_mode = 'RGB'
expand_fn = expand_greyscale(transparent)
convert_image_fn = partial(convert_image_to, pillow_mode)
self.transform = transforms.Compose([
transforms.Lambda(convert_image_fn),
transforms.Lambda(partial(resize_to_minimum_size, image_size)),
transforms.Resize(image_size),
RandomApply(aug_prob, transforms.RandomResizedCrop(image_size, scale=(0.5, 1.0), ratio=(0.98, 1.02)), transforms.CenterCrop(image_size)),
transforms.ToTensor(),
transforms.Lambda(expand_fn)
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# augmentations
def random_hflip(tensor, prob):
if prob > random():
return tensor
return torch.flip(tensor, dims=(3,))
class AugWrapper(nn.Module):
def __init__(self, D, image_size):
super().__init__()
self.D = D
def forward(self, images, prob = 0., types = [], detach = False, **kwargs):
context = torch.no_grad if detach else null_context
with context():
if random() < prob:
images = random_hflip(images, prob=0.5)
images = DiffAugment(images, types=types)
return self.D(images, **kwargs)
# modifiable global variables
def upsample(scale_factor = 2):
return nn.Upsample(scale_factor = scale_factor)
# activation
def leaky_relu(p = 0.1):
return nn.LeakyReLU(p)
# rotary positional embedding helpers
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d j -> ... (d j)')
def get_sin_cos(seq):
n = seq.shape[0]
x_sinu = repeat(seq, 'i d -> i j d', j = n)
y_sinu = repeat(seq, 'j d -> i j d', i = n)
sin = torch.cat((x_sinu.sin(), y_sinu.sin()), dim = -1)
cos = torch.cat((x_sinu.cos(), y_sinu.cos()), dim = -1)
sin, cos = map(lambda t: rearrange(t, 'i j d -> (i j) d'), (sin, cos))
sin, cos = map(lambda t: repeat(t, 'n d -> () () n (d j)', j = 2), (sin, cos))
return sin, cos
# positional encoding
class RotaryEmbedding(nn.Module):
def __init__(self, dim, downsample_keys = 1):
super().__init__()
self.dim = dim
self.downsample_keys = downsample_keys
def forward(self, q, k):
device, dtype, n = q.device, q.dtype, int(sqrt(q.shape[-2]))
seq = torch.linspace(-1., 1., steps = n, device = device)
seq = seq.unsqueeze(-1)
scales = torch.logspace(0., log(10 / 2) / log(2), self.dim // 4, base = 2, device = device, dtype = dtype)
scales = scales[(*((None,) * (len(seq.shape) - 1)), Ellipsis)]
seq = seq * scales * pi
x = seq
y = seq
y = reduce(y, '(j n) c -> j c', 'mean', n = self.downsample_keys)
q_sin, q_cos = get_sin_cos(x)
k_sin, k_cos = get_sin_cos(y)
q = (q * q_cos) + (rotate_every_two(q) * q_sin)
k = (k * k_cos) + (rotate_every_two(k) * k_sin)
return q, k
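# note: the rotary embedding above rotates each consecutive pair of head dimensions by an
# angle derived from the query / key grid position, so the q-k dot product depends on
# relative rather than absolute position; key positions can be mean-pooled via downsample_keys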
# mapping network
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, lr_mul = 1, bias = True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim))
self.lr_mul = lr_mul
def forward(self, input):
        bias = self.bias * self.lr_mul if hasattr(self, 'bias') else None  # guard against bias = False, which would otherwise raise AttributeError
        return F.linear(input, self.weight * self.lr_mul, bias = bias)
class MappingNetwork(nn.Module):
def __init__(self, dim, depth, lr_mul = 0.1):
super().__init__()
layers = []
for i in range(depth):
layers.extend([EqualLinear(dim, dim, lr_mul), leaky_relu()])
self.net = nn.Sequential(
*layers,
nn.Linear(dim, dim * 4)
)
def forward(self, x):
x = F.normalize(x, dim=1)
x = self.net(x)
return rearrange(x, 'b (c h w) -> b c h w', h = 2, w = 2)
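# note: the mapping network normalizes the latent, passes it through equalized-lr linear
# layers, then reshapes the final dim * 4 projection into a (b, dim, 2, 2) "latent image"
# that is fed to every attention block of the generator below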
# generative adversarial network
class Generator(nn.Module):
def __init__(
self,
*,
image_size,
latent_dim = 256,
fmap_max = 512,
init_channel = 3,
mapping_network_depth = 4
):
super().__init__()
assert is_power_of_two(image_size), 'image size must be a power of 2'
num_layers = int(log2(image_size)) - 1
self.mapping = MappingNetwork(latent_dim, mapping_network_depth)
self.initial_block = nn.Parameter(torch.randn((latent_dim, 4, 4)))
self.layers = nn.ModuleList([])
fmap_size = 4
chan = latent_dim
min_chan = 8
for ind in range(num_layers):
is_last = ind == (num_layers - 1)
downsample_factor = int(2 ** max(log2(fmap_size) - log2(32), 0))
attn_class = partial(Attention, bn = True, fmap_size = fmap_size, downsample_kv = downsample_factor)
if not is_last:
chan_out = max(min_chan, chan // 4)
upsample = nn.Sequential(
attn_class(dim = chan, dim_head = chan, heads = 1, dim_out = chan_out * 4),
nn.PixelShuffle(2)
)
else:
upsample = nn.Identity()
self.layers.append(nn.ModuleList([
Residual(PreNorm(chan, attn_class(dim = chan, latent_dim = latent_dim))),
Residual(FeedForward(chan, bn = True, kernel_size = (3 if image_size > 4 else 1))),
upsample,
]))
chan = chan_out
fmap_size *= 2
self.final_attn = Residual(PreNorm(chan, attn_class(chan, latent_dim = latent_dim)))
self.to_img = nn.Sequential(
Residual(FeedForward(chan_out, bn = True)),
nn.Conv2d(chan, init_channel, 1)
)
def forward(self, x):
b = x.shape[0]
latents = self.mapping(x)
fmap = repeat(self.initial_block, 'c h w -> b c h w', b = b)
for attn, ff, upsample in self.layers:
fmap, latents_out = attn(fmap, latents = latents)
latents = latents + latents_out
fmap = ff(fmap)
fmap = upsample(fmap)
fmap, _ = self.final_attn(fmap, latents = latents)
return self.to_img(fmap)
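# usage sketch (illustrative values, not taken from the repo's docs):
#   g = Generator(image_size = 64, latent_dim = 256)
#   images = g(torch.randn(4, 256))   # -> (4, 3, 64, 64); the trainer clamps outputs to [0, 1]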
class SimpleDecoder(nn.Module):
def __init__(
self,
*,
chan_in,
chan_out = 3,
num_upsamples = 4,
):
super().__init__()
layers = nn.ModuleList([])
final_chan = chan_out
chans = chan_in
for ind in range(num_upsamples):
last_layer = ind == (num_upsamples - 1)
chan_out = chans if not last_layer else final_chan * 2
layer = nn.Sequential(
upsample(),
nn.Conv2d(chans, chan_out, 3, padding = 1),
nn.GLU(dim = 1)
)
layers.append(layer)
chans //= 2
self.net = nn.Sequential(*layers)
def forward(self, x):
return self.net(x)
class Discriminator(nn.Module):
def __init__(
self,
*,
image_size,
fmap_max = 256,
init_channel = 3,
):
super().__init__()
assert is_power_of_two(image_size), 'image size must be a power of 2'
num_layers = int(log2(image_size)) - 2
fmap_dim = 64
self.conv_embed = nn.Sequential(
nn.Conv2d(init_channel, 32, kernel_size = 4, stride = 2, padding = 1),
nn.Conv2d(32, fmap_dim, kernel_size = 3, padding = 1)
)
image_size //= 2
self.ax_pos_emb_h = nn.Parameter(torch.randn(image_size, fmap_dim))
self.ax_pos_emb_w = nn.Parameter(torch.randn(image_size, fmap_dim))
self.image_sizes = []
self.layers = nn.ModuleList([])
fmap_dims = []
for ind in range(num_layers):
image_size //= 2
self.image_sizes.append(image_size)
fmap_dim_out = min(fmap_dim * 2, fmap_max)
downsample = SumBranches([
nn.Conv2d(fmap_dim, fmap_dim_out, 3, 2, 1),
nn.Sequential(
nn.AvgPool2d(2),
nn.Conv2d(fmap_dim, fmap_dim_out, 3, padding = 1),
leaky_relu()
)
])
downsample_factor = 2 ** max(log2(image_size) - log2(32), 0)
attn_class = partial(Attention, fmap_size = image_size, downsample_kv = downsample_factor)
self.layers.append(nn.ModuleList([
downsample,
Residual(PreNorm(fmap_dim_out, attn_class(dim = fmap_dim_out))),
Residual(PreNorm(fmap_dim_out, FeedForward(dim = fmap_dim_out, kernel_size = (3 if image_size > 4 else 1))))
]))
fmap_dim = fmap_dim_out
fmap_dims.append(fmap_dim)
self.aux_decoder = SimpleDecoder(chan_in = fmap_dims[-2], chan_out = init_channel, num_upsamples = num_layers)
self.to_logits = nn.Sequential(
Residual(PreNorm(fmap_dim, Attention(dim = fmap_dim, fmap_size = 2))),
Residual(PreNorm(fmap_dim, FeedForward(dim = fmap_dim, kernel_size = (3 if image_size > 64 else 1)))),
nn.Conv2d(fmap_dim, 1, 2),
Rearrange('b () () () -> b')
)
def forward(self, x, calc_aux_loss = False):
x_ = x
x = self.conv_embed(x)
ax_pos_emb = rearrange(self.ax_pos_emb_h, 'h c -> () c h ()') + rearrange(self.ax_pos_emb_w, 'w c -> () c () w')
x += ax_pos_emb
fmaps = []
for (downsample, attn, ff), image_size in zip(self.layers, self.image_sizes):
x = downsample(x)
x = attn(x)
x = ff(x)
fmaps.append(x)
x = self.to_logits(x)
if not calc_aux_loss:
return x, None
recon = self.aux_decoder(fmaps[-2])
recon_loss = F.mse_loss(x_, recon)
return x, recon_loss
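# usage sketch (illustrative): the discriminator returns a flat (b,) logit tensor and,
# when calc_aux_loss is True, a self-supervised reconstruction loss from SimpleDecoder
#   d = Discriminator(image_size = 64)
#   logits, recon_loss = d(torch.randn(4, 3, 64, 64), calc_aux_loss = True)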
class Transganformer(nn.Module):
def __init__(
self,
*,
latent_dim,
image_size,
fmap_max = 512,
transparent = False,
greyscale = False,
ttur_mult = 1.,
lr = 2e-4,
rank = 0,
ddp = False
):
super().__init__()
self.latent_dim = latent_dim
self.image_size = image_size
if transparent:
init_channel = 4
elif greyscale:
init_channel = 1
else:
init_channel = 3
G_kwargs = dict(
image_size = image_size,
latent_dim = latent_dim,
fmap_max = fmap_max,
init_channel = init_channel
)
self.G = Generator(**G_kwargs)
self.D = Discriminator(
image_size = image_size,
fmap_max = fmap_max,
init_channel = init_channel
)
self.ema_updater = EMA(0.995)
self.GE = Generator(**G_kwargs)
set_requires_grad(self.GE, False)
self.G_opt = Adam(self.G.parameters(), lr = lr, betas=(0.5, 0.9))
self.D_opt = Adam(self.D.parameters(), lr = lr * ttur_mult, betas=(0.5, 0.9))
self.apply(self._init_weights)
self.reset_parameter_averaging()
self.cuda(rank)
self.D_aug = AugWrapper(self.D, image_size)
def _init_weights(self, m):
if type(m) in {nn.Conv2d, nn.Linear}:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')
def EMA(self):
def update_moving_average(ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = self.ema_updater.update_average(old_weight, up_weight)
for current_buffer, ma_buffer in zip(current_model.buffers(), ma_model.buffers()):
new_buffer_value = self.ema_updater.update_average(ma_buffer, current_buffer)
ma_buffer.copy_(new_buffer_value)
update_moving_average(self.GE, self.G)
def reset_parameter_averaging(self):
self.GE.load_state_dict(self.G.state_dict())
def forward(self, x):
        raise NotImplementedError
# trainer
class Trainer():
def __init__(
self,
name = 'default',
results_dir = 'results',
models_dir = 'models',
base_dir = './',
num_workers = None,
latent_dim = 256,
image_size = 128,
num_image_tiles = 8,
fmap_max = 512,
transparent = False,
greyscale = False,
batch_size = 4,
gp_weight = 10,
gradient_accumulate_every = 1,
lr = 2e-4,
lr_mlp = 1.,
ttur_mult = 1.,
save_every = 1000,
evaluate_every = 1000,
aug_prob = None,
aug_types = ['translation', 'cutout'],
dataset_aug_prob = 0.,
calculate_fid_every = None,
calculate_fid_num_images = 12800,
clear_fid_cache = False,
is_ddp = False,
rank = 0,
world_size = 1,
log = False,
amp = False,
*args,
**kwargs
):
self.GAN_params = [args, kwargs]
self.GAN = None
self.name = name
base_dir = Path(base_dir)
self.base_dir = base_dir
self.results_dir = base_dir / results_dir
self.models_dir = base_dir / models_dir
self.fid_dir = base_dir / 'fid' / name
self.config_path = self.models_dir / name / '.config.json'
assert is_power_of_two(image_size), 'image size must be a power of 2 (32, 64, 128, 256, 512, 1024)'
self.image_size = image_size
self.num_image_tiles = num_image_tiles
self.latent_dim = latent_dim
self.fmap_max = fmap_max
self.transparent = transparent
self.greyscale = greyscale
assert (int(self.transparent) + int(self.greyscale)) < 2, 'you can only set either transparency or greyscale'
self.aug_prob = aug_prob
self.aug_types = aug_types
self.lr = lr
self.num_workers = num_workers
self.ttur_mult = ttur_mult
self.batch_size = batch_size
self.gradient_accumulate_every = gradient_accumulate_every
self.gp_weight = gp_weight
self.evaluate_every = evaluate_every
self.save_every = save_every
self.steps = 0
self.d_loss = 0
self.g_loss = 0
self.last_gp_loss = None
self.last_recon_loss = None
self.last_fid = None
self.init_folders()
self.loader = None
self.dataset_aug_prob = dataset_aug_prob
self.calculate_fid_every = calculate_fid_every
self.calculate_fid_num_images = calculate_fid_num_images
self.clear_fid_cache = clear_fid_cache
self.is_ddp = is_ddp
self.is_main = rank == 0
self.rank = rank
self.world_size = world_size
self.amp = amp
self.G_scaler = GradScaler(enabled = self.amp)
self.D_scaler = GradScaler(enabled = self.amp)
@property
def image_extension(self):
return 'jpg' if not self.transparent else 'png'
@property
def checkpoint_num(self):
return floor(self.steps // self.save_every)
def init_GAN(self):
args, kwargs = self.GAN_params
# instantiate GAN
self.GAN = Transganformer(
lr = self.lr,
latent_dim = self.latent_dim,
image_size = self.image_size,
ttur_mult = self.ttur_mult,
fmap_max = self.fmap_max,
transparent = self.transparent,
greyscale = self.greyscale,
rank = self.rank,
*args,
**kwargs
)
if self.is_ddp:
ddp_kwargs = {'device_ids': [self.rank], 'output_device': self.rank, 'find_unused_parameters': True}
self.G_ddp = DDP(self.GAN.G, **ddp_kwargs)
self.D_ddp = DDP(self.GAN.D, **ddp_kwargs)
self.D_aug_ddp = DDP(self.GAN.D_aug, **ddp_kwargs)
def write_config(self):
self.config_path.write_text(json.dumps(self.config()))
def load_config(self):
config = self.config() if not self.config_path.exists() else json.loads(self.config_path.read_text())
self.image_size = config['image_size']
self.transparent = config['transparent']
self.greyscale = config.pop('greyscale', False)
self.fmap_max = config.pop('fmap_max', 512)
del self.GAN
self.init_GAN()
def config(self):
return {
'image_size': self.image_size,
'transparent': self.transparent,
'greyscale': self.greyscale
}
def set_data_src(self, folder):
num_workers = default(self.num_workers, math.ceil(NUM_CORES / self.world_size))
self.dataset = ImageDataset(folder, self.image_size, transparent = self.transparent, greyscale = self.greyscale, aug_prob = self.dataset_aug_prob)
sampler = DistributedSampler(self.dataset, rank=self.rank, num_replicas=self.world_size, shuffle=True) if self.is_ddp else None
dataloader = DataLoader(self.dataset, num_workers = num_workers, batch_size = math.ceil(self.batch_size / self.world_size), sampler = sampler, shuffle = not self.is_ddp, drop_last = True, pin_memory = True)
self.loader = cycle(dataloader)
# auto set augmentation prob for user if dataset is detected to be low
num_samples = len(self.dataset)
if not exists(self.aug_prob) and num_samples < 1e5:
self.aug_prob = min(0.5, (1e5 - num_samples) * 3e-6)
print(f'autosetting augmentation probability to {round(self.aug_prob * 100)}%')
def train(self):
assert exists(self.loader), 'You must first initialize the data source with `.set_data_src(<folder of images>)`'
device = torch.device(f'cuda:{self.rank}')
if not exists(self.GAN):
self.init_GAN()
self.GAN.train()
total_disc_loss = torch.zeros([], device=device)
total_gen_loss = torch.zeros([], device=device)
batch_size = math.ceil(self.batch_size / self.world_size)
image_size = self.GAN.image_size
latent_dim = self.GAN.latent_dim
aug_prob = default(self.aug_prob, 0)
aug_types = self.aug_types
aug_kwargs = {'prob': aug_prob, 'types': aug_types}
G = self.GAN.G if not self.is_ddp else self.G_ddp
D = self.GAN.D if not self.is_ddp else self.D_ddp
D_aug = self.GAN.D_aug if not self.is_ddp else self.D_aug_ddp
apply_gradient_penalty = self.steps % 4 == 0
# amp related contexts and functions
amp_context = autocast if self.amp else null_context
# train discriminator
self.GAN.D_opt.zero_grad()
for i in gradient_accumulate_contexts(self.gradient_accumulate_every, self.is_ddp, ddps=[D_aug, G]):
latents = torch.randn(batch_size, latent_dim).cuda(self.rank)
image_batch = next(self.loader).cuda(self.rank)
image_batch.requires_grad_()
with amp_context():
with torch.no_grad():
generated_images = G(latents)
fake_output, _ = D_aug(generated_images, detach = True, **aug_kwargs)
real_output, real_aux_loss = D_aug(image_batch, calc_aux_loss = True, **aug_kwargs)
real_output_loss = real_output
fake_output_loss = fake_output
divergence = (F.relu(1 + real_output_loss) + F.relu(1 - fake_output_loss)).mean()
disc_loss = divergence
aux_loss = real_aux_loss
disc_loss = disc_loss + aux_loss
if apply_gradient_penalty:
outputs = [real_output]
outputs = list(map(self.D_scaler.scale, outputs)) if self.amp else outputs
scaled_gradients = torch_grad(outputs=outputs, inputs=image_batch,
grad_outputs=list(map(lambda t: torch.ones(t.size(), device = image_batch.device), outputs)),
create_graph=True, retain_graph=True, only_inputs=True)[0]
inv_scale = safe_div(1., self.D_scaler.get_scale()) if self.amp else 1.
if inv_scale != float('inf'):
gradients = scaled_gradients * inv_scale
with amp_context():
gradients = gradients.reshape(batch_size, -1)
gp = self.gp_weight * ((gradients.norm(2, dim=1) - 1) ** 2).mean()
if not torch.isnan(gp):
disc_loss = disc_loss + gp
self.last_gp_loss = gp.clone().detach().item()
with amp_context():
disc_loss = disc_loss / self.gradient_accumulate_every
disc_loss.register_hook(raise_if_nan)
self.D_scaler.scale(disc_loss).backward()
total_disc_loss += divergence
self.last_recon_loss = aux_loss.item()
self.d_loss = float(total_disc_loss.item() / self.gradient_accumulate_every)
self.D_scaler.step(self.GAN.D_opt)
self.D_scaler.update()
# train generator
self.GAN.G_opt.zero_grad()
for i in gradient_accumulate_contexts(self.gradient_accumulate_every, self.is_ddp, ddps=[G, D_aug]):
latents = torch.randn(batch_size, latent_dim).cuda(self.rank)
with amp_context():
generated_images = G(latents)
fake_output, _ = D_aug(generated_images, **aug_kwargs)
loss = fake_output.mean()
gen_loss = loss
gen_loss = gen_loss / self.gradient_accumulate_every
gen_loss.register_hook(raise_if_nan)
self.G_scaler.scale(gen_loss).backward()
total_gen_loss += loss
self.g_loss = float(total_gen_loss.item() / self.gradient_accumulate_every)
self.G_scaler.step(self.GAN.G_opt)
self.G_scaler.update()
# calculate moving averages
if self.is_main and self.steps % 10 == 0 and self.steps > 20000:
self.GAN.EMA()
if self.is_main and self.steps <= 25000 and self.steps % 1000 == 2:
self.GAN.reset_parameter_averaging()
# save from NaN errors
if any(torch.isnan(l) for l in (total_gen_loss, total_disc_loss)):
print(f'NaN detected for generator or discriminator. Loading from checkpoint #{self.checkpoint_num}')
self.load(self.checkpoint_num)
raise NanException
del total_disc_loss
del total_gen_loss
# periodically save results
if self.is_main:
if self.steps % self.save_every == 0:
self.save(self.checkpoint_num)
if self.steps % self.evaluate_every == 0 or (self.steps % 100 == 0 and self.steps < 20000):
self.evaluate(floor(self.steps / self.evaluate_every), num_image_tiles = self.num_image_tiles)
if exists(self.calculate_fid_every) and self.steps % self.calculate_fid_every == 0 and self.steps != 0:
num_batches = math.ceil(self.calculate_fid_num_images / self.batch_size)
fid = self.calculate_fid(num_batches)
self.last_fid = fid
with open(str(self.results_dir / self.name / f'fid_scores.txt'), 'a') as f:
f.write(f'{self.steps},{fid}\n')
self.steps += 1
@torch.no_grad()
def evaluate(self, num = 0, num_image_tiles = 4):
self.GAN.eval()
ext = self.image_extension
num_rows = num_image_tiles
latent_dim = self.GAN.latent_dim
image_size = self.GAN.image_size
# latents and noise
latents = torch.randn((num_rows ** 2, latent_dim)).cuda(self.rank)
# regular
generated_images = self.generate_(self.GAN.G, latents)
torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}.{ext}'), nrow=num_rows)
# moving averages
generated_images = self.generate_(self.GAN.GE, latents)
torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}-ema.{ext}'), nrow=num_rows)
@torch.no_grad()
def generate(self, num=0, num_image_tiles=4, checkpoint=None, types=['default', 'ema']):
self.GAN.eval()
latent_dim = self.GAN.latent_dim
dir_name = self.name + str('-generated-') + str(checkpoint)
dir_full = Path().absolute() / self.results_dir / dir_name
ext = self.image_extension
if not dir_full.exists():
os.mkdir(dir_full)
# regular
if 'default' in types:
for i in tqdm(range(num_image_tiles), desc='Saving generated default images'):
latents = torch.randn((1, latent_dim)).cuda(self.rank)
generated_image = self.generate_(self.GAN.G, latents)
path = str(self.results_dir / dir_name / f'{str(num)}-{str(i)}.{ext}')
torchvision.utils.save_image(generated_image[0], path, nrow=1)
# moving averages
if 'ema' in types:
for i in tqdm(range(num_image_tiles), desc='Saving generated EMA images'):
latents = torch.randn((1, latent_dim)).cuda(self.rank)
generated_image = self.generate_(self.GAN.GE, latents)
path = str(self.results_dir / dir_name / f'{str(num)}-{str(i)}-ema.{ext}')
torchvision.utils.save_image(generated_image[0], path, nrow=1)
return dir_full
@torch.no_grad()
def show_progress(self, num_images=4, types=['default', 'ema']):
checkpoints = self.get_checkpoints()
assert exists(checkpoints), 'cannot find any checkpoints to create a training progress video for'
dir_name = self.name + str('-progress')
dir_full = Path().absolute() / self.results_dir / dir_name
ext = self.image_extension
latents = None
zfill_length = math.ceil(math.log10(len(checkpoints)))
if not dir_full.exists():
os.mkdir(dir_full)
for checkpoint in tqdm(checkpoints, desc='Generating progress images'):
self.load(checkpoint, print_version=False)
self.GAN.eval()
if checkpoint == 0:
latents = torch.randn((num_images, self.GAN.latent_dim)).cuda(self.rank)
# regular
if 'default' in types:
generated_image = self.generate_(self.GAN.G, latents)
path = str(self.results_dir / dir_name / f'{str(checkpoint).zfill(zfill_length)}.{ext}')
torchvision.utils.save_image(generated_image, path, nrow=num_images)
# moving averages
if 'ema' in types:
generated_image = self.generate_(self.GAN.GE, latents)
path = str(self.results_dir / dir_name / f'{str(checkpoint).zfill(zfill_length)}-ema.{ext}')
torchvision.utils.save_image(generated_image, path, nrow=num_images)
@torch.no_grad()
def calculate_fid(self, num_batches):
from pytorch_fid import fid_score
torch.cuda.empty_cache()
real_path = self.fid_dir / 'real'
fake_path = self.fid_dir / 'fake'
# remove any existing files used for fid calculation and recreate directories
if not real_path.exists() or self.clear_fid_cache:
rmtree(real_path, ignore_errors=True)
os.makedirs(real_path)
for batch_num in tqdm(range(num_batches), desc='calculating FID - saving reals'):
real_batch = next(self.loader)
for k, image in enumerate(real_batch.unbind(0)):
ind = k + batch_num * self.batch_size
torchvision.utils.save_image(image, real_path / f'{ind}.png')
# generate a bunch of fake images in results / name / fid_fake
rmtree(fake_path, ignore_errors=True)
os.makedirs(fake_path)
self.GAN.eval()
ext = self.image_extension
latent_dim = self.GAN.latent_dim
image_size = self.GAN.image_size
for batch_num in tqdm(range(num_batches), desc='calculating FID - saving generated'):
# latents and noise
latents = torch.randn(self.batch_size, latent_dim).cuda(self.rank)
# moving averages
generated_images = self.generate_(self.GAN.GE, latents)
for j, image in enumerate(generated_images.unbind(0)):
ind = j + batch_num * self.batch_size
torchvision.utils.save_image(image, str(fake_path / f'{str(ind)}-ema.{ext}'))
return fid_score.calculate_fid_given_paths([str(real_path), str(fake_path)], 256, latents.device, 2048)
@torch.no_grad()
def generate_(self, G, style, num_image_tiles = 8):
generated_images = evaluate_in_chunks(self.batch_size, G, style)
return generated_images.clamp_(0., 1.)
@torch.no_grad()
def generate_interpolation(self, num = 0, num_image_tiles = 8, num_steps = 100, save_frames = False):
self.GAN.eval()
ext = self.image_extension
num_rows = num_image_tiles
latent_dim = self.GAN.latent_dim
image_size = self.GAN.image_size
# latents and noise
latents_low = torch.randn(num_rows ** 2, latent_dim).cuda(self.rank)
latents_high = torch.randn(num_rows ** 2, latent_dim).cuda(self.rank)
ratios = torch.linspace(0., 8., num_steps)
frames = []
for ratio in tqdm(ratios):
interp_latents = slerp(ratio, latents_low, latents_high)
generated_images = self.generate_(self.GAN.GE, interp_latents)
images_grid = torchvision.utils.make_grid(generated_images, nrow = num_rows)
pil_image = transforms.ToPILImage()(images_grid.cpu())
if self.transparent:
background = Image.new('RGBA', pil_image.size, (255, 255, 255))
pil_image = Image.alpha_composite(background, pil_image)
frames.append(pil_image)
frames[0].save(str(self.results_dir / self.name / f'{str(num)}.gif'), save_all=True, append_images=frames[1:], duration=80, loop=0, optimize=True)
if save_frames:
folder_path = (self.results_dir / self.name / f'{str(num)}')
folder_path.mkdir(parents=True, exist_ok=True)
for ind, frame in enumerate(frames):
frame.save(str(folder_path / f'{str(ind)}.{ext}'))
def print_log(self):
data = [
('G', self.g_loss),
('D', self.d_loss),
('GP', self.last_gp_loss),
('SS', self.last_recon_loss),
('FID', self.last_fid)
]
data = [d for d in data if exists(d[1])]
log = ' | '.join(map(lambda n: f'{n[0]}: {n[1]:.2f}', data))
print(log)
def model_name(self, num):
return str(self.models_dir / self.name / f'model_{num}.pt')
def init_folders(self):
(self.results_dir / self.name).mkdir(parents=True, exist_ok=True)
(self.models_dir / self.name).mkdir(parents=True, exist_ok=True)
def clear(self):
rmtree(str(self.models_dir / self.name), True)
rmtree(str(self.results_dir / self.name), True)
rmtree(str(self.fid_dir), True)
rmtree(str(self.config_path), True)
self.init_folders()
def save(self, num):
save_data = {
'GAN': self.GAN.state_dict(),
'version': __version__,
'G_scaler': self.G_scaler.state_dict(),
'D_scaler': self.D_scaler.state_dict()
}
torch.save(save_data, self.model_name(num))
self.write_config()
def load(self, num=-1, print_version=True):
self.load_config()
name = num
if num == -1:
checkpoints = self.get_checkpoints()
if not exists(checkpoints):
return
name = checkpoints[-1]
print(f'continuing from previous epoch - {name}')
self.steps = name * self.save_every
load_data = torch.load(self.model_name(name))
if print_version and 'version' in load_data and self.is_main:
print(f"loading from version {load_data['version']}")
try:
self.GAN.load_state_dict(load_data['GAN'])
except Exception as e:
            print('unable to load the saved model. please try downgrading the package to the version specified by the saved model')
raise e
if 'G_scaler' in load_data:
self.G_scaler.load_state_dict(load_data['G_scaler'])
if 'D_scaler' in load_data:
self.D_scaler.load_state_dict(load_data['D_scaler'])
def get_checkpoints(self):
file_paths = [p for p in Path(self.models_dir / self.name).glob('model_*.pt')]
saved_nums = sorted(map(lambda x: int(x.stem.split('_')[1]), file_paths))
if len(saved_nums) == 0:
return None
return saved_nums
| transganformer-main | transganformer/transganformer.py |
from setuptools import setup, find_packages
setup(
name = 'compressive-transformer-pytorch',
packages = find_packages(exclude=['examples']),
version = '0.4.0',
license='MIT',
description = 'Implementation of Compressive Transformer in Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/compressive-transformer-pytorch',
keywords = [
'attention',
'artificial intelligence',
'transformer',
'deep learning'
],
install_requires=[
'torch',
'mogrifier'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | compressive-transformer-pytorch-master | setup.py |
from compressive_transformer_pytorch import CompressiveTransformer
from compressive_transformer_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 16
MAX_BATCH_SIZE = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
PRIME_LENGTH = 512
GENERATE_LENGTH = 1024
SEQ_LEN = 512
NUM_SEGMENTS = 4
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = CompressiveTransformer(
num_tokens = 256,
dim = 512,
depth = 8,
seq_len = SEQ_LEN,
mem_len = SEQ_LEN,
cmem_len = SEQ_LEN // 4,
heads = 8,
memory_layers = [6,7,8]
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()  # frombuffer replaces the deprecated fromstring; copy() keeps the array writable for torch
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len, segments):
super().__init__()
self.data = data
self.seq_len = seq_len
self.segments = segments
self.total_len = seq_len * segments
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.total_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.total_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.total_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN, NUM_SEGMENTS)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN, NUM_SEGMENTS)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
grad_accum_every = BATCH_SIZE / MAX_BATCH_SIZE
for mlm_loss, aux_loss, is_last in model(next(train_loader), max_batch_size = MAX_BATCH_SIZE, return_loss = True):
loss = mlm_loss + aux_loss
(loss / grad_accum_every).backward()
print(f'training loss: {mlm_loss.item():.4f} | aux_loss: {aux_loss.item():.4f}')
if is_last:
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
for loss, aux_loss, _ in model(next(val_loader), return_loss = True):
print(f'validation loss: {loss.item():.4f}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
inp = inp[:PRIME_LENGTH]
prime = decode_tokens(inp)
        print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| compressive-transformer-pytorch-master | examples/enwik8_simple/train.py |
import torch
from torch import nn
import torch.nn.functional as F
from mogrifier import Mogrifier
import math
from collections import namedtuple
from functools import partial
from inspect import isfunction
# structs
Memory = namedtuple('Memory', ['mem', 'compressed_mem'])
# helper functions
def to(t):
return {'dtype': t.dtype, 'device': t.device}
def cast_tuple(el):
return el if isinstance(el, tuple) else (el,)
def default(x, val):
if x is not None:
return x
return val if not isfunction(val) else val()
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def reshape_dim(t, dim, split_dims):
shape = list(t.shape)
num_dims = len(shape)
dim = (dim + num_dims) % num_dims
shape[dim:dim+1] = split_dims
return t.reshape(shape)
def split_at_index(dim, index, t):
pre_slices = (slice(None),) * dim
l = (*pre_slices, slice(None, index))
r = (*pre_slices, slice(index, None))
return t[l], t[r]
def queue_fifo(*args, length, dim=-2):
queue = torch.cat(args, dim=dim)
if length > 0:
return split_at_index(dim, -length, queue)
device = queue.device
shape = list(queue.shape)
shape[dim] = 0
return queue, torch.empty(shape, device = device)
def shift(x):
*_, i, j = x.shape
zero_pad = torch.zeros((*_, i, i), **to(x))
x = torch.cat([x, zero_pad], -1)
l = i + j - 1
x = x.view(*_, -1)
zero_pad = torch.zeros(*_, -x.size(-1) % l, **to(x))
shifted = torch.cat([x, zero_pad], -1).view(*_, -1, l)
return shifted[..., :i, i - 1:]
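# note: `shift` implements the Transformer-XL style relative-position shift - padding,
# flattening and re-slicing the (i, j) score matrix so each row ends up aligned by relative offset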
def iterate_tensor(t):
length = t.shape[0]
for ind in range(length):
yield t[ind]
# full attention for calculating auxiliary reconstruction loss
def full_attn(q, k, v, dropout_fn = None):
*_, dim = q.shape
dots = torch.einsum('bhid,bhjd->bhij', q, k) * (dim ** -0.5)
attn = dots.softmax(dim=-1)
if dropout_fn is not None:
attn = dropout_fn(attn)
return torch.einsum('bhij,bhjd->bhid', attn, v)
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
out = self.fn(x, **kwargs)
out = cast_tuple(out)
ret = (out[0] + x), *out[1:]
return ret
class GRUGating(nn.Module):
def __init__(self, dim, fn, mogrify = False):
super().__init__()
self.dim = dim
self.fn = fn
self.gru = nn.GRUCell(dim, dim)
self.mogrify = Mogrifier(dim, factorize_k = dim // 4) if mogrify else None
def forward(self, x, **kwargs):
batch, dim = x.shape[0], self.dim
out = self.fn(x, **kwargs)
(y, *rest) = cast_tuple(out)
if self.mogrify is not None:
y, x = self.mogrify(y, x)
gated_output = self.gru(
y.reshape(-1, dim),
x.reshape(-1, dim)
)
gated_output = gated_output.reshape(batch, -1, dim)
ret = gated_output, *rest
return ret
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class ConvCompress(nn.Module):
def __init__(self, dim, ratio = 4):
super().__init__()
self.conv = nn.Conv1d(dim, dim, ratio, stride = ratio)
def forward(self, mem):
mem = mem.transpose(1, 2)
compressed_mem = self.conv(mem)
return compressed_mem.transpose(1, 2)
# feedforward
class GELU_(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
GELU = nn.GELU if hasattr(nn, 'GELU') else GELU_
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):
super().__init__()
activation = default(activation, GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, x, **kwargs):
if not self.glu:
x = self.w1(x)
x = self.act(x)
else:
x, v = self.w1(x).chunk(2, dim=-1)
x = self.act(x) * v
x = self.dropout(x)
x = self.w2(x)
return x
# attention.
class SelfAttention(nn.Module):
def __init__(self, dim, seq_len, mem_len, cmem_len, cmem_ratio = 4, heads = 8, attn_dropout = 0., dropout = 0., reconstruction_attn_dropout = 0.):
super().__init__()
assert (dim % heads) == 0, 'dimension must be divisible by the number of heads'
self.heads = heads
self.dim_head = dim // heads
self.seq_len = seq_len
self.mem_len = mem_len
self.cmem_len = cmem_len
self.cmem_ratio = cmem_ratio
self.scale = self.dim_head ** (-0.5)
self.compress_mem_fn = ConvCompress(dim, cmem_ratio)
self.to_q = nn.Linear(dim, dim, bias = False)
self.to_kv = nn.Linear(dim, dim * 2, bias = False)
self.to_out = nn.Linear(dim, dim)
self.attn_dropout = nn.Dropout(attn_dropout)
self.dropout = nn.Dropout(dropout)
self.reconstruction_attn_dropout = nn.Dropout(reconstruction_attn_dropout)
def forward(self, x, memories = None, pos_emb = None, input_mask = None, calc_memory = True, **kwargs):
b, t, e, h, dim_h = *x.shape, self.heads, self.dim_head
memories = default(memories, (None, None))
mem, cmem = memories
init_empty_mem = lambda: torch.empty(b, 0, e, **to(x))
mem = default(mem, init_empty_mem)
cmem = default(cmem, init_empty_mem)
mem_len = mem.shape[1]
cmem_len = cmem.shape[1]
q = self.to_q(x)
kv_input = torch.cat((cmem, mem, x), dim=1)
kv_len = kv_input.shape[1]
k, v = self.to_kv(kv_input).chunk(2, dim=-1)
merge_heads = lambda x: reshape_dim(x, -1, (-1, dim_h)).transpose(1, 2)
q, k, v = map(merge_heads, (q, k, v))
k, v = map(lambda x: x.expand(-1, h, -1, -1), (k, v))
dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale
mask_value = max_neg_value(dots)
if pos_emb is not None:
pos_emb = pos_emb[:, -kv_len:].type(q.dtype)
pos_dots = torch.einsum('bhid,hjd->bhij', q, pos_emb) * self.scale
pos_dots = shift(pos_dots)
dots = dots + pos_dots
if input_mask is not None:
mask = input_mask[:, None, :, None] * input_mask[:, None, None, :]
mask = F.pad(mask, (mem_len + cmem_len, 0), value = True)
dots.masked_fill_(~mask, mask_value)
total_mem_len = mem_len + cmem_len
mask = torch.ones(t, t + total_mem_len, **to(x)).triu_(diagonal = 1 + total_mem_len).bool()
dots.masked_fill_(mask[None, None, ...], mask_value)
attn = dots.softmax(dim=-1)
attn = self.attn_dropout(attn)
out = torch.einsum('bhij,bhjd->bhid', attn, v)
out = out.transpose(1, 2).reshape(b, t, -1)
logits = self.to_out(out)
logits = self.dropout(logits)
new_mem = mem
new_cmem = cmem
aux_loss = torch.zeros(1, requires_grad = True, **to(q))
if self.seq_len > t or not calc_memory:
return logits, Memory(new_mem, new_cmem), aux_loss
# calculate memory and compressed memory
old_mem, new_mem = queue_fifo(mem, x, length = self.mem_len, dim = 1)
old_mem_padding = old_mem.shape[1] % self.cmem_ratio
if old_mem_padding != 0:
old_mem = F.pad(old_mem, (0, 0, old_mem_padding, 0), value = 0.)
if old_mem.shape[1] == 0 or self.cmem_len <= 0:
return logits, Memory(new_mem, new_cmem), aux_loss
compressed_mem = self.compress_mem_fn(old_mem.detach())
old_cmem, new_cmem = split_at_index(1, -self.cmem_len, torch.cat((cmem, compressed_mem), dim=1))
if not self.training:
return logits, Memory(new_mem, new_cmem), aux_loss
# calculate compressed memory auxiliary loss if training
self.to_kv.weight.detach_()
cmem_k, cmem_v = self.to_kv(compressed_mem).chunk(2, dim=-1)
cmem_k, cmem_v = map(merge_heads, (cmem_k, cmem_v))
cmem_k, cmem_v = map(lambda x: x.expand(-1, h, -1, -1), (cmem_k, cmem_v))
old_mem_range = slice(- min(mem_len, self.mem_len) - self.seq_len, -self.seq_len)
old_mem_k, old_mem_v = map(lambda x: x[:, :, old_mem_range].clone(), (k, v))
q, old_mem_k, old_mem_v = map(torch.detach, (q, old_mem_k, old_mem_v))
attn_fn = partial(full_attn, dropout_fn = self.reconstruction_attn_dropout)
aux_loss = F.mse_loss(
attn_fn(q, old_mem_k, old_mem_v),
attn_fn(q, cmem_k, cmem_v)
)
return logits, Memory(new_mem, new_cmem), aux_loss
# transformer
class CompressiveTransformer(nn.Module):
def __init__(
self,
num_tokens,
dim,
seq_len,
depth,
emb_dim = None,
memory_layers = None,
enhanced_recurrence = True,
mem_len = None,
cmem_len = None,
cmem_ratio = 4,
heads = 8,
gru_gated_residual = True,
mogrify_gru = False,
attn_dropout = 0.,
ff_glu = False,
ff_dropout = 0.,
attn_layer_dropout = 0.,
reconstruction_attn_dropout = 0.,
reconstruction_loss_weight = 1.
):
super().__init__()
emb_dim = default(emb_dim, dim)
mem_len = default(mem_len, seq_len)
cmem_len = default(cmem_len, mem_len // cmem_ratio)
memory_layers = default(memory_layers, list(range(1, depth + 1)))
assert mem_len >= seq_len, 'length of memory should be at least the sequence length'
assert cmem_len >= (mem_len // cmem_ratio), f'length of compressed memory should be at least the memory length divided by the compression ratio {int(mem_len // cmem_ratio)}'
assert all([layer > 0 and layer <= depth for layer in memory_layers]), 'one of the indicated memory layers is invalid'
self.seq_len = seq_len
self.depth = depth
self.memory_layers = list(memory_layers)
self.enhanced_recurrence = enhanced_recurrence
self.token_emb = nn.Embedding(num_tokens, emb_dim)
self.to_model_dim = nn.Identity() if emb_dim == dim else nn.Linear(emb_dim, dim)
seq_and_mem_len = seq_len + mem_len + cmem_len
self.pos_emb = nn.Parameter(torch.zeros(heads, seq_and_mem_len, dim // heads))
self.to_logits = nn.Sequential(
nn.Identity() if emb_dim == dim else nn.Linear(dim, emb_dim),
nn.Linear(emb_dim, num_tokens)
)
wrapper = partial(GRUGating, dim, mogrify = mogrify_gru) if gru_gated_residual else Residual
self.attn_layers = nn.ModuleList([wrapper(PreNorm(dim, SelfAttention(dim, seq_len, mem_len, cmem_len, cmem_ratio, heads, dropout = attn_layer_dropout, attn_dropout = attn_dropout, reconstruction_attn_dropout = reconstruction_attn_dropout))) for _ in range(depth)])
self.ff_layers = nn.ModuleList([wrapper(PreNorm(dim, FeedForward(dim, dropout = ff_dropout, glu = ff_glu))) for _ in range(depth)])
self.reconstruction_loss_weight = reconstruction_loss_weight
def forward(self, x, memories = None, mask = None):
x = self.token_emb(x)
x = self.to_model_dim(x)
b, t, d = x.shape
assert t <= self.seq_len, f'input contains a sequence length {t} that is greater than the designated maximum sequence length {self.seq_len}'
memories = default(memories, (None, None))
mem, cmem = memories
num_memory_layers = len(self.memory_layers)
init_empty_mem = lambda: torch.empty(num_memory_layers, b, 0, d, **to(x))
mem = default(mem, init_empty_mem)
cmem = default(cmem, init_empty_mem)
total_len = mem.shape[2] + cmem.shape[2] + self.seq_len
pos_emb = self.pos_emb[:, (self.seq_len - t):total_len]
next_mem = []
next_cmem = []
aux_loss = torch.tensor(0., requires_grad = True, **to(x))
if self.enhanced_recurrence:
mem = torch.roll(mem, -1, 0)
cmem = torch.roll(cmem, -1, 0)
mem_iter, cmem_iter = map(iterate_tensor, (mem, cmem))
for ind, (attn, ff) in enumerate(zip(self.attn_layers, self.ff_layers)):
layer_num = ind + 1
use_memory = layer_num in self.memory_layers
memories = (next(mem_iter), next(cmem_iter)) if use_memory else None
x, (mem_out, cmem_out), layer_aux_loss = attn(x, memories = memories, calc_memory = use_memory, input_mask = mask, pos_emb = pos_emb)
x, = ff(x)
aux_loss = aux_loss + layer_aux_loss
if not use_memory:
continue
next_mem.append(mem_out)
next_cmem.append(cmem_out)
out = self.to_logits(x)
next_mem, next_cmem = map(torch.stack, (next_mem, next_cmem))
next_mem, next_cmem = map(torch.detach, (next_mem, next_cmem))
aux_loss = aux_loss * self.reconstruction_loss_weight / num_memory_layers
return out, Memory(mem = next_mem, compressed_mem = next_cmem), aux_loss
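
# ---------------------------------------------------------------------------
# usage sketch (illustrative, not part of the library): run two consecutive
# segments through the model, carrying the returned Memory between calls and
# adding the auxiliary reconstruction loss to the main objective. all
# hyperparameters and shapes below are assumptions for the demo only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    model = CompressiveTransformer(
        num_tokens = 256,
        dim = 512,
        seq_len = 512,
        depth = 6,
        mem_len = 512,
        cmem_ratio = 4
    )
    seg1 = torch.randint(0, 256, (1, 512))
    seg2 = torch.randint(0, 256, (1, 512))
    logits1, memories, aux_loss1 = model(seg1)                        # first segment, no memories yet
    logits2, memories, aux_loss2 = model(seg2, memories = memories)   # reuse Memory(mem, compressed_mem)
    print(logits2.shape, memories.mem.shape, memories.compressed_mem.shape)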
| compressive-transformer-pytorch-master | compressive_transformer_pytorch/compressive_transformer_pytorch.py |
import math
from functools import partial
from collections import namedtuple
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
# structs
Return = namedtuple('Return', ['loss', 'aux_loss', 'is_last_batch'])
# helper functions
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# main class
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.seq_len = net.seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
# take care of default masking
full_mask_like = lambda x: torch.full_like(x, True, dtype=torch.bool, device=x.device)
mask = kwargs.pop('mask', None)
if mask is None:
mask = full_mask_like(out)
# take care of a primed sequence of any length
mem = None
*primes, out = out.split(self.seq_len, dim=1)
*prime_masks, mask = mask.split(self.seq_len, dim=1)
for prime, prime_mask in zip(primes, prime_masks):
_, mem, _ = self.net(prime, memories = mem, mask = prime_mask, **kwargs)
# generate until hit sequence length
input_len = out.shape[1]
for _ in range(seq_len):
logits, mem, aux_loss = self.net(out[:, -input_len:], memories = mem, mask = mask[:, -input_len:], **kwargs)
logits = logits[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
            # append sample and extend the mask
            out = torch.cat((out, sample), dim=-1)
            mask = F.pad(mask, (0, 1), value=True)
            # unlike most models, inputs restart from a sequence length of 1 once the full sequence length is filled
            input_len = input_len % self.seq_len
            input_len += 1
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, max_batch_size = None, return_loss = False, **kwargs):
pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)
if not return_loss:
if not isinstance(x, torch.Tensor):
x = pad(x)
return self.net(x, **kwargs)
if isinstance(x, torch.Tensor):
xi = x[:, :-1]
xo = x[:, 1:]
else:
xi = pad(list(map(lambda t: t[:-1], x)))
xo = pad(list(map(lambda t: t[1:], x)))
# help auto-solve an area of confusion around input masks in auto-regressive
# if user supplies a mask that is only off by one from the source sequence, resolve it for them
mask = kwargs.pop('mask', None)
if mask is not None and mask.shape[1] == x.shape[1]:
mask = mask[:, :-1]
segment_fn = lambda x: x.split(self.seq_len, dim=1)
(xi, xo) = map(segment_fn, (xi, xo))
num_segments = len(xi)
mask = segment_fn(mask) if mask is not None else ((None,) * num_segments)
max_batch_size = x.shape[0] if max_batch_size is None else max_batch_size
split_batch_fn = lambda x: x.split(max_batch_size, dim=0)
grad_accumulate_every = math.ceil(x.shape[0] / max_batch_size)
mems = [None] * grad_accumulate_every
for xi_seg, xo_seg, mask_seg in zip(xi, xo, mask):
xi_seg, xo_seg = map(split_batch_fn, (xi_seg, xo_seg))
mask_seg = split_batch_fn(mask_seg) if mask_seg is not None else ((None,) * grad_accumulate_every)
new_mems = []
for ind, (xi_seg_b, xo_seg_b, mask_seg_b, mem) in enumerate(zip(xi_seg, xo_seg, mask_seg, mems)):
is_last = ind == (grad_accumulate_every - 1)
logits, new_mem, aux_loss = self.net(xi_seg_b, mask = mask_seg_b, memories = mem, **kwargs)
new_mems.append(new_mem)
loss = F.cross_entropy(logits.transpose(1, 2), xo_seg_b, ignore_index = self.ignore_index)
yield Return(loss, aux_loss, is_last)
mems = new_mems
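
# ---------------------------------------------------------------------------
# usage sketch (illustrative only): with return_loss = True the forward pass is
# a generator that yields one Return(loss, aux_loss, is_last_batch) per segment
# and gradient-accumulation step, so a training loop consumes it as below. the
# wrapped model's hyperparameters are assumptions for the demo.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from compressive_transformer_pytorch import CompressiveTransformer
    model = AutoregressiveWrapper(CompressiveTransformer(num_tokens = 256, dim = 512, seq_len = 512, depth = 6))
    optimizer = torch.optim.Adam(model.parameters(), lr = 1e-4)
    data = torch.randint(0, 256, (1, 1025))   # two 512-token segments plus one extra token for the targets
    for loss, aux_loss, is_last in model(data, return_loss = True):
        (loss + aux_loss).backward()          # backprop per segment; memories are detached between segments
    optimizer.step()
    optimizer.zero_grad()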
| compressive-transformer-pytorch-master | compressive_transformer_pytorch/autoregressive_wrapper.py |
from compressive_transformer_pytorch.compressive_transformer_pytorch import CompressiveTransformer
from compressive_transformer_pytorch.autoregressive_wrapper import AutoregressiveWrapper
| compressive-transformer-pytorch-master | compressive_transformer_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'zorro-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.0',
license='MIT',
description = 'Zorro - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/zorro-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'multimodal fusion'
],
install_requires=[
'beartype',
'einops>=0.4',
'torch>=1.6',
'torchaudio'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| zorro-pytorch-main | setup.py |
from enum import Enum
import functools
from functools import wraps
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat, pack, unpack
from einops.layers.torch import Rearrange
from beartype import beartype
from beartype.typing import Tuple, Optional, Union
from torchaudio.transforms import Spectrogram
# constants
class TokenTypes(Enum):
AUDIO = 0
VIDEO = 1
FUSION = 2
GLOBAL = 3
# functions
def exists(val):
return val is not None
def default(*args):
for arg in args:
if exists(arg):
return arg
return None
def round_down_nearest_multiple(n, divisor):
return n // divisor * divisor
def pair(t):
return (t, t) if not isinstance(t, tuple) else t
def cum_mul(it):
return functools.reduce(lambda x, y: x * y, it, 1)
def divisible_by(numer, denom):
return (numer % denom) == 0
# decorators
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# bias-less layernorm
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# geglu feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
def FeedForward(dim, mult = 4):
inner_dim = int(dim * mult * 2 / 3)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, inner_dim * 2, bias = False),
GEGLU(),
nn.Linear(inner_dim, dim, bias = False)
)
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm = LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(
self,
x,
context = None,
attn_mask = None
):
x = self.norm(x)
kv_x = default(context, x)
q, k, v = (self.to_q(x), *self.to_kv(kv_x).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
if exists(attn_mask):
sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# main class
class Zorro(nn.Module):
def __init__(
self,
dim,
depth,
dim_head = 64,
heads = 8,
ff_mult = 4,
num_fusion_tokens = 16,
audio_patch_size: Union[int, Tuple[int, int]] = 16,
video_patch_size: Union[int, Tuple[int, int]] = 16,
video_temporal_patch_size = 2,
video_channels = 3,
spec_n_fft = 128,
spec_power = 2,
spec_win_length = 24,
spec_hop_length = None,
spec_pad = 0,
spec_center = True,
spec_pad_mode = 'reflect',
spec_aug_stretch_factor = 0.8,
spec_aug_freq_mask = 80,
spec_aug_time_mask = 80,
return_token_types: Tuple[TokenTypes] = (TokenTypes.AUDIO, TokenTypes.VIDEO, TokenTypes.FUSION)
):
super().__init__()
self.max_return_tokens = len(return_token_types)
self.return_token_types = return_token_types
return_token_types_tensor = torch.tensor(list(map(lambda t: t.value, return_token_types)))
self.register_buffer('return_token_types_tensor', return_token_types_tensor, persistent = False)
self.return_tokens = nn.Parameter(torch.randn(self.max_return_tokens, dim))
self.attn_pool = Attention(dim = dim, dim_head = dim_head, heads = heads)
# audio input
self.audio_patch_size = audio_patch_height, audio_patch_width = pair(audio_patch_size)
self.spec = Spectrogram(
n_fft = spec_n_fft,
power = spec_power,
win_length = spec_win_length,
hop_length = spec_hop_length,
pad = spec_pad,
center = spec_center,
pad_mode = spec_pad_mode
)
audio_input_dim = cum_mul(self.audio_patch_size)
self.audio_to_tokens = nn.Sequential(
Rearrange('b (h p1) (w p2) -> b h w (p1 p2)', p1 = audio_patch_height, p2 = audio_patch_width),
nn.LayerNorm(audio_input_dim),
nn.Linear(audio_input_dim, dim),
nn.LayerNorm(dim)
)
# video input
self.video_patch_size = (video_temporal_patch_size, *pair(video_patch_size))
video_input_dim = cum_mul(self.video_patch_size) * video_channels
video_patch_time, video_patch_height, video_patch_width = self.video_patch_size
self.video_to_tokens = nn.Sequential(
Rearrange('b c (t p1) (h p2) (w p3) -> b t h w (c p1 p2 p3)', p1 = video_patch_time, p2 = video_patch_height, p3 = video_patch_width),
nn.LayerNorm(video_input_dim),
nn.Linear(video_input_dim, dim),
nn.LayerNorm(dim)
)
# fusion tokens
self.fusion_tokens = nn.Parameter(torch.randn(num_fusion_tokens, dim))
# transformer
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads),
FeedForward(dim = dim, mult = ff_mult)
]))
self.norm = LayerNorm(dim)
def forward(
self,
*,
audio,
video,
return_token_indices: Optional[Tuple[int]] = None
):
batch, device = audio.shape[0], audio.device
# validate video can be patched
assert all([divisible_by(numer, denom) for denom, numer in zip(self.video_patch_size, tuple(video.shape[-3:]))]), f'video shape {video.shape[-3:]} needs to be divisible by {self.video_patch_size}'
# automatically crop if audio does not yield a 2d spectrogram that is divisible by patch sizes
audio = self.spec(audio)
height, width = audio.shape[-2:]
patch_height, patch_width = self.audio_patch_size
rounded_height, rounded_width = map(lambda args: round_down_nearest_multiple(*args), ((height, patch_height), (width, patch_width)))
        if (height, width) != (rounded_height, rounded_width): # warn once if the spectrogram had to be cropped
            print_once(f'spectrogram yielded shape of {(height, width)}, but had to be cropped to {(rounded_height, rounded_width)} to be patchified for transformer')
audio = audio[..., :rounded_height, :rounded_width]
# to tokens
audio_tokens = self.audio_to_tokens(audio)
video_tokens = self.video_to_tokens(video)
fusion_tokens = repeat(self.fusion_tokens, 'n d -> b n d', b = batch)
# construct all tokens
audio_tokens, fusion_tokens, video_tokens = map(lambda t: rearrange(t, 'b ... d -> b (...) d'), (audio_tokens, fusion_tokens, video_tokens))
tokens, ps = pack((
audio_tokens,
fusion_tokens,
video_tokens
), 'b * d')
# construct mask (thus zorro)
token_types = torch.tensor(list((
*((TokenTypes.AUDIO.value,) * audio_tokens.shape[-2]),
*((TokenTypes.FUSION.value,) * fusion_tokens.shape[-2]),
*((TokenTypes.VIDEO.value,) * video_tokens.shape[-2]),
)), device = device, dtype = torch.long)
token_types_attend_from = rearrange(token_types, 'i -> i 1')
token_types_attend_to = rearrange(token_types, 'j -> 1 j')
# the logic goes
# every modality, including fusion can attend to self
zorro_mask = token_types_attend_from == token_types_attend_to
# fusion can attend to everything
        zorro_mask = zorro_mask | (token_types_attend_from == TokenTypes.FUSION.value)
# attend and feedforward
for attn, ff in self.layers:
tokens = attn(tokens, attn_mask = zorro_mask) + tokens
tokens = ff(tokens) + tokens
tokens = self.norm(tokens)
# final attention pooling - each modality pool token can only attend to its own tokens
return_tokens = self.return_tokens
return_token_types_tensor = self.return_token_types_tensor
if exists(return_token_indices):
assert len(set(return_token_indices)) == len(return_token_indices), 'all indices must be unique'
assert all([indice < self.max_return_tokens for indice in return_token_indices]), 'indices must range from 0 to max_num_return_tokens - 1'
return_token_indices = torch.tensor(return_token_indices, dtype = torch.long, device = device)
return_token_types_tensor = return_token_types_tensor[return_token_indices]
return_tokens = return_tokens[return_token_indices]
return_tokens = repeat(return_tokens, 'n d -> b n d', b = batch)
pool_mask = rearrange(return_token_types_tensor, 'i -> i 1') == token_types_attend_to
# global queries can attend to all tokens
        pool_mask = pool_mask | (rearrange(return_token_types_tensor, 'i -> i 1') == torch.ones_like(token_types_attend_to, dtype = torch.long) * TokenTypes.GLOBAL.value)
pooled_tokens = self.attn_pool(return_tokens, context = tokens, attn_mask = pool_mask) + return_tokens
return pooled_tokens
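
# ---------------------------------------------------------------------------
# usage sketch (illustrative, not part of the module): audio is a raw waveform
# that gets turned into a spectrogram internally, and the video dims must be
# divisible by the temporal / spatial patch sizes. shapes and hyperparameters
# below are assumptions for the demo.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    model = Zorro(dim = 512, depth = 6)
    audio = torch.randn(2, 1024 * 10)        # (batch, samples)
    video = torch.randn(2, 3, 8, 32, 32)     # (batch, channels, frames, height, width)
    pooled = model(audio = audio, video = video)
    print(pooled.shape)                      # (2, 3, 512) - one token each for audio, video and fusion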
| zorro-pytorch-main | zorro_pytorch/zorro_pytorch.py |
from zorro_pytorch.zorro_pytorch import Zorro, TokenTypes
| zorro-pytorch-main | zorro_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'g-mlp-gpt',
packages = find_packages(),
version = '0.0.15',
license='MIT',
description = 'gMLP - GPT',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/g-mlp-gpt',
keywords = [
'artificial intelligence',
'deep learning',
    'multi-layered-perceptrons'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| g-mlp-gpt-main | setup.py |
from g_mlp_gpt import gMLPGPT
from g_mlp_gpt.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 768
SEQ_LEN = 768
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = gMLPGPT(
num_tokens = 256,
dim = 512,
seq_len = SEQ_LEN,
depth = 8,
window = (16, 32, 64, 128, 256, 512, 768, SEQ_LEN),
attn_dim = 16
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy() # frombuffer replaces the deprecated fromstring; copy to get a writable array
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| g-mlp-gpt-main | train.py |
import torch
from torch import nn
import torch.nn.functional as F
# helper function
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.seq_len
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
device = start_tokens.device
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
return out
def forward(self, x, **kwargs):
xi, xo = x[:, :-1], x[:, 1:]
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
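
# ---------------------------------------------------------------------------
# usage sketch (illustrative): the wrapper shifts the sequence internally for
# the next-token loss, and generate() samples a continuation with top-k
# filtering. model sizes and lengths below are assumptions for the demo.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from g_mlp_gpt import gMLPGPT
    model = AutoregressiveWrapper(gMLPGPT(num_tokens = 256, dim = 512, depth = 4, seq_len = 768))
    seq = torch.randint(0, 256, (1, 769))    # seq_len + 1 tokens: inputs and shifted targets
    loss = model(seq)
    loss.backward()
    prime = torch.randint(0, 256, (1, 32))
    generated = model.generate(prime, 100)   # (1, 100) sampled continuation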
| g-mlp-gpt-main | g_mlp_gpt/autoregressive_wrapper.py |
import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
routed_args = [(dict(), dict()) for _ in range(depth)]
matched_keys = [key for key in args.keys() if key in router]
for key in matched_keys:
val = args[key]
for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
return routed_args
def layer_drop(layers, prob):
to_drop = torch.empty(len(layers)).uniform_(0, 1) < prob
blocks = [block for block, drop in zip(layers, to_drop) if not drop]
blocks = layers[:1] if len(blocks) == 0 else blocks
return blocks
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, args):
ctx.args = args
for block, kwarg in zip(blocks, args):
x = block(x, **kwarg)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
args = ctx.args
for block, kwargs in zip(ctx.blocks[::-1], args[::-1]):
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class SequentialSequence(nn.Module):
def __init__(self, layers, args_route = {}, layer_dropout = 0.):
super().__init__()
assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
self.layers = layers
self.args_route = args_route
self.layer_dropout = layer_dropout
def forward(self, x, **kwargs):
args = route_args(self.args_route, kwargs, len(self.layers))
layers_and_args = list(zip(self.layers, args))
if self.training and self.layer_dropout > 0:
layers_and_args = layer_drop(layers_and_args, self.layer_dropout)
for (f,), (f_args, _) in layers_and_args:
x = x + f(x, **f_args)
return x
class ReversibleSequence(nn.Module):
def __init__(self, blocks, args_route = {}, layer_dropout = 0.):
super().__init__()
self.args_route = args_route
self.layer_dropout = layer_dropout
self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, layer_dropout = 0., **kwargs):
x = torch.cat([x, x], dim=-1)
blocks = self.blocks
args = route_args(self.args_route, kwargs, len(blocks))
args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args))
layers_and_args = list(zip(blocks, args))
if self.training and layer_dropout > 0:
layers_and_args = layer_drop(layers_and_args, layer_dropout)
blocks, args = map(lambda ind: list(map(itemgetter(ind), layers_and_args)), (0, 1))
out = _ReversibleFunction.apply(x, blocks, args)
return torch.stack(out.chunk(2, dim=-1)).sum(dim=0)
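
# ---------------------------------------------------------------------------
# usage sketch (illustrative): ReversibleSequence takes pairs of (f, g) blocks
# and recomputes activations during the backward pass instead of storing them,
# trading extra compute for memory. the toy linear blocks are assumptions for
# the demo; in this repo the pairs are gMLP blocks and feedforwards.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    dim = 64
    blocks = nn.ModuleList([
        nn.ModuleList([nn.Linear(dim, dim), nn.Linear(dim, dim)])
        for _ in range(4)
    ])
    net = ReversibleSequence(blocks)
    x = torch.randn(2, 16, dim, requires_grad = True)
    out = net(x)            # same shape as the input
    out.sum().backward()    # gradients are reconstructed block by block
    print(x.grad.shape)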
| g-mlp-gpt-main | g_mlp_gpt/reversible.py |
from g_mlp_gpt.g_mlp_gpt import gMLPGPT
| g-mlp-gpt-main | g_mlp_gpt/__init__.py |
from math import ceil
from functools import partial
from random import randrange
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from g_mlp_gpt.reversible import ReversibleSequence, SequentialSequence
# functions
def exists(val):
return val is not None
def cast_tuple(val, num):
return ((val,) * num) if not isinstance(val, tuple) else val
def pad_to_multiple(tensor, multiple, dim = -1, value = 0):
seqlen = tensor.shape[dim]
m = seqlen / multiple
if m.is_integer():
return tensor
remainder = ceil(m) * multiple - seqlen
pad_offset = (0,) * (-1 - dim) * 2
return F.pad(tensor, (*pad_offset, 0, remainder), value = value)
def dropout_layers(layers, prob_survival):
if prob_survival == 1:
return layers
num_layers = len(layers)
to_drop = torch.zeros(num_layers).uniform_(0., 1.) > prob_survival
# make sure at least one layer makes it
if all(to_drop):
rand_index = randrange(num_layers)
to_drop[rand_index] = False
layers = [layer for (layer, drop) in zip(layers, to_drop) if not drop]
return layers
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4):
super().__init__()
inner_dim = int(dim * mult * 2 / 3)
self.net = nn.Sequential(
nn.Linear(dim, inner_dim * 2),
GEGLU(),
nn.Linear(inner_dim, dim)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim_in, dim_out, dim_inner):
super().__init__()
self.scale = dim_inner ** -0.5
self.to_qkv = nn.Linear(dim_in, dim_inner * 3, bias = False)
self.to_out = nn.Linear(dim_inner, dim_out)
def forward(self, x):
device = x.device
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
mask = torch.ones(sim.shape[-2:], device = device).triu(1).bool()
sim.masked_fill_(mask[None, ...], -torch.finfo(q.dtype).max)
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
return self.to_out(out)
class LocalAttention(nn.Module):
def __init__(self, dim_in, dim_inner, dim_out, window = 128):
super().__init__()
self.scale = dim_inner ** -0.5
self.window = window
self.to_qkv = nn.Linear(dim_in, dim_inner * 3, bias = False)
self.to_out = nn.Linear(dim_inner, dim_out)
def forward(self, x):
b, n, *_, device, w = *x.shape, x.device, self.window
x = pad_to_multiple(x, w, dim = -2, value = 0.)
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
window_fn = lambda t: rearrange(t, 'b (w n) d -> b w n d', n = w)
q, k, v = map(window_fn, (q, k, v))
k, v = map(lambda t: F.pad(t, (0, 0, 0, 0, 1, 0)), (k, v))
        k, v = map(lambda t: torch.cat((t[:, :-1], t[:, 1:]), dim = 2), (k, v))
sim = einsum('b w i d, b w j d -> b w i j', q, k) * self.scale
buckets, i, j = sim.shape[-3:]
mask_value = -torch.finfo(sim.dtype).max
mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
mask = repeat(mask, 'i j -> () u i j', u = buckets)
sim.masked_fill_(mask, mask_value)
attn = sim.softmax(dim = -1)
out = einsum('b w i j, b w j d -> b w i d', attn, v)
out = rearrange(out, 'b w n d -> b (w n) d')
out = self.to_out(out[:, :n])
return out
class CausalSGU(nn.Module):
def __init__(
self,
dim,
dim_seq,
init_eps = 1e-3,
heads = 4,
act = nn.Identity()
):
super().__init__()
dim_out = dim // 2
self.norm = nn.LayerNorm(dim_out)
self.heads = heads
self.weight = nn.Parameter(torch.zeros(heads, dim_seq, dim_seq))
self.bias = nn.Parameter(torch.zeros(heads, dim_seq))
init_eps /= dim_seq
nn.init.uniform_(self.weight, -init_eps, init_eps)
nn.init.constant_(self.bias, 1.)
self.act = act
self.register_buffer('mask', ~torch.ones(dim_seq, dim_seq).triu_(1).bool())
def forward(self, x, gate_res = None):
device, n, h = x.device, x.shape[1], self.heads
res, gate = x.chunk(2, dim = -1)
gate = self.norm(gate)
weight, bias = self.weight, self.bias
weight, bias = weight[:, :n, :n], bias[:, :n]
weight = weight * self.mask[None, :n, :n].int().float()
gate = rearrange(gate, 'b n (h d) -> b h n d', h = h)
gate = einsum('b h n d, h m n -> b h m d', gate, weight)
gate = gate + rearrange(bias, 'h n -> () h n ()')
gate = rearrange(gate, 'b h n d -> b n (h d)')
if exists(gate_res):
gate = gate + gate_res
return self.act(gate) * res
class CausalLocalSGU(nn.Module):
def __init__(
self,
dim,
dim_seq,
init_eps = 1e-3,
heads = 4,
window = 128,
act = nn.Identity()
):
super().__init__()
dim_out = dim // 2
self.norm = nn.LayerNorm(dim_out)
self.heads = heads
self.window = window
self.weight = nn.Parameter(torch.zeros(heads, window, window * 2))
self.bias = nn.Parameter(torch.zeros(heads, window))
init_eps /= window
nn.init.uniform_(self.weight, -init_eps, init_eps)
nn.init.constant_(self.bias, 1.)
self.act = act
self.register_buffer('mask', ~torch.ones(window, window * 2).triu_(window + 1).bool())
def forward(self, x, gate_res = None):
device, n, h, w = x.device, x.shape[1], self.heads, self.window
res, gate = x.chunk(2, dim = -1)
gate = pad_to_multiple(gate, w, dim = -2)
gate = rearrange(gate, 'b (w n) d -> b w n d', n = w)
gate = self.norm(gate)
gate = F.pad(gate, (0, 0, 0, 0, 1, 0), value = 0.)
gate = torch.cat((gate[:, :-1], gate[:, 1:]), dim = 2)
weight, bias = self.weight, self.bias
weight = weight * self.mask[None, ...].int().float()
gate = rearrange(gate, 'b w n (h d) -> b w h n d', h = h)
gate = einsum('b w h n d, h m n -> b w h m d', gate, weight)
gate = gate + rearrange(bias, 'h n -> () () h n ()')
gate = rearrange(gate, 'b w h n d -> b w n (h d)')
gate = rearrange(gate, 'b w n d -> b (w n) d')
gate = gate[:, :n]
if exists(gate_res):
gate = gate + gate_res
return self.act(gate) * res
class AxiallyFold(nn.Module):
def __init__(self, dim, every, fn):
super().__init__()
self.fn = fn
self.every = every
self.conv = nn.Conv1d(dim, dim, kernel_size = every, groups = dim) if every > 1 else None
def forward(self, x):
every = self.every
if every <= 1:
return self.fn(x)
n = x.shape[1]
x = pad_to_multiple(x, self.every, dim = -2)
x = rearrange(x, 'b (n e) d -> (b e) n d', e = every)
x = self.fn(x)
x = rearrange(x, '(b e) n d -> b d (n e)', e = every)
x = F.pad(x, (every - 1, 0), value = 0)
out = self.conv(x)
out = rearrange(out, 'b d n -> b n d')
return out[:, :n]
class gMLPBlock(nn.Module):
def __init__(
self,
*,
dim,
seq_len,
dim_ff,
heads = 4,
causal = False,
window = None,
attn_dim = None,
act = nn.Identity()
):
super().__init__()
is_windowed = exists(window) and window < seq_len
SGU_klass = partial(CausalLocalSGU, window = window) if is_windowed else CausalSGU
Attention_klass = partial(LocalAttention, window = window) if is_windowed else Attention
self.attn = Attention_klass(dim_in = dim, dim_inner = attn_dim, dim_out = dim_ff // 2) if exists(attn_dim) else None
self.proj_in = nn.Sequential(
nn.Linear(dim, dim_ff),
nn.GELU()
)
        self.sgu = SGU_klass(dim_ff, seq_len, heads = heads, act = act) # the SGU variants are causal by construction; do not pass the flag into the init_eps slot
self.proj_out = nn.Linear(dim_ff // 2, dim)
def forward(self, x):
gate_res = self.attn(x) if exists(self.attn) else None
x = self.proj_in(x)
x = self.sgu(x, gate_res = gate_res)
x = self.proj_out(x)
return x
# main classes
class gMLPGPT(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
seq_len,
heads = 1,
ff_mult = 4,
prob_survival = 1.,
reversible = False,
window = None,
attn_dim = None,
act = nn.Identity()
):
super().__init__()
dim_ff = dim * ff_mult
self.seq_len = seq_len
self.prob_survival = prob_survival
self.to_embed = nn.Embedding(num_tokens, dim)
window = cast_tuple(window, depth)
window = tuple(map(lambda t: t if isinstance(t, tuple) else (t, 1), window))
attn_dims = cast_tuple(attn_dim, depth)
assert len(window) == depth, f'num window sizes {len(window)} must be equal to depth {depth}'
layers = nn.ModuleList([])
for ind, (w, ax), attn_dim in zip(range(depth), window, attn_dims):
            attn_dim = attn_dim if exists(w) else None
get_gmlp = lambda: PreNorm(dim, AxiallyFold(dim, ax, gMLPBlock(dim = dim, dim_ff = dim_ff, seq_len = seq_len, heads = heads, window = w, act = act, attn_dim = attn_dim)))
layer_blocks = nn.ModuleList([
get_gmlp()
])
if reversible:
layer_blocks.append(FeedForward(dim, mult = ff_mult))
layers.append(layer_blocks)
execute_klass = SequentialSequence if not reversible else ReversibleSequence
self.net = execute_klass(layers)
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(self, x):
layer_dropout = 1. - self.prob_survival
x = self.to_embed(x)
out = self.net(x, layer_dropout = layer_dropout)
return self.to_logits(out)
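
# ---------------------------------------------------------------------------
# usage sketch (illustrative, not part of the module): token ids in, next-token
# logits out. the sizes below are assumptions for the demo; see train.py for a
# fuller configuration with per-layer windows and a tiny attention side channel.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    model = gMLPGPT(
        num_tokens = 20000,
        dim = 512,
        depth = 4,
        seq_len = 1024
    )
    x = torch.randint(0, 20000, (1, 1024))
    logits = model(x)
    print(logits.shape)     # (1, 1024, 20000)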
| g-mlp-gpt-main | g_mlp_gpt/g_mlp_gpt.py |
from setuptools import setup, find_packages
setup(
name = 'linear_attention_transformer',
packages = find_packages(exclude=['examples']),
version = '0.19.1',
license='MIT',
description = 'Linear Attention Transformer',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/linear-attention-transformer',
keywords = ['transformers', 'attention', 'artificial intelligence'],
install_requires=[
'axial-positional-embedding',
'einops',
'linformer>=0.1.0',
'local-attention',
'product-key-memory>=0.1.5',
'torch',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| linear-attention-transformer-master | setup.py |
import math
import torch
from torch import nn
import torch.nn.functional as F
from linear_attention_transformer.linear_attention_transformer import LinearAttentionTransformer, LinearAttentionTransformerLM
def find_module(nn_module, type):
for module in nn_module.modules():
if isinstance(module, type):
return module
return None
def pad_to_multiple(tensor, multiple, dim=-1, pad_left = False):
seqlen = tensor.shape[dim]
m = seqlen / multiple
if m.is_integer():
return tensor, 0
pre_pad_offset = (0,) * (-1 - dim) * 2
padding = math.ceil(m) * multiple - seqlen
offset = (padding, 0) if pad_left else (0, padding)
padded_tensor = F.pad(tensor, (*pre_pad_offset, *offset), value=0)
return padded_tensor, padding
class Autopadder(nn.Module):
def __init__(self, net, pad_left=False):
super().__init__()
        assert isinstance(net, (LinearAttentionTransformer, LinearAttentionTransformerLM)), 'only LinearAttentionTransformer and LinearAttentionTransformerLM modules are accepted'
self.net = net
is_lm = isinstance(net, LinearAttentionTransformerLM)
transformer = find_module(net, LinearAttentionTransformer)
self.pad_to = transformer.pad_to_multiple
self.pad_dim = -1 if is_lm else -2
self.pad_left = pad_left
def forward(self, x, **kwargs):
if self.pad_to <= 1:
return self.net(x, **kwargs)
b, t, device = *x.shape[:2], x.device
input_mask = kwargs.get('input_mask')
if input_mask is None:
input_mask = torch.full((b, t), True, device=x.device, dtype=torch.bool)
x, padding = pad_to_multiple(x, self.pad_to, dim=self.pad_dim, pad_left=self.pad_left)
if padding != 0:
offset = (0, padding) if not self.pad_left else (padding, 0)
new_mask = F.pad(input_mask, offset, value=False)
kwargs.update(input_mask=new_mask)
out = self.net(x, **kwargs)
output_slice = slice(0, t) if not self.pad_left else slice(padding, None)
return out[:, output_slice]
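
# ---------------------------------------------------------------------------
# usage sketch (illustrative): Autopadder pads the input (and its mask) up to
# the multiple required by the wrapped transformer's local attention / blindspot
# settings, then slices the output back to the original length. the model
# configuration below is an assumption for the demo.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    model = LinearAttentionTransformerLM(
        num_tokens = 20000,
        dim = 512,
        heads = 8,
        depth = 2,
        max_seq_len = 8192,
        causal = True,
        n_local_attn_heads = 4
    )
    model = Autopadder(model)
    x = torch.randint(0, 20000, (1, 3000))   # not a multiple of the 128-token local attention window
    logits = model(x)
    print(logits.shape)                      # (1, 3000, 20000) - padding removed before returning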
| linear-attention-transformer-master | linear_attention_transformer/autopadder.py |
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from linear_attention_transformer.autopadder import Autopadder
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = Autopadder(net)
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
input_mask = kwargs.pop('input_mask', None)
if input_mask is None:
input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
input_mask = input_mask[:, -self.max_seq_len:]
logits = self.net(x, input_mask=input_mask, **kwargs)[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
input_mask = F.pad(input_mask, (0, 1), value=True)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, return_loss = False, **kwargs):
pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)
if not return_loss:
if not isinstance(x, torch.Tensor):
x = pad(x)
return self.net(x, **kwargs)
if isinstance(x, torch.Tensor):
xi = x[:, :-1]
xo = x[:, 1:]
# help auto-solve an area of confusion around input masks in auto-regressive
# if user supplies a mask that is only off by one from the source sequence, resolve it for them
mask = kwargs.pop('input_mask', None)
if mask is not None and mask.shape[1] == x.shape[1]:
mask = mask[:, :-1]
kwargs.update(input_mask = mask)
else:
xi = pad(list(map(lambda t: t[:-1], x)))
xo = pad(list(map(lambda t: t[1:], x)))
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
| linear-attention-transformer-master | linear_attention_transformer/autoregressive_wrapper.py |
import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
routed_args = [(dict(), dict()) for _ in range(depth)]
matched_keys = [key for key in args.keys() if key in router]
for key in matched_keys:
val = args[key]
for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
return routed_args
def layer_drop(layers, prob):
to_drop = torch.empty(len(layers)).uniform_(0, 1) < prob
blocks = [block for block, drop in zip(layers, to_drop) if not drop]
blocks = layers[:1] if len(blocks) == 0 else blocks
return blocks
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, args):
ctx.args = args
for block, kwarg in zip(blocks, args):
x = block(x, **kwarg)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
args = ctx.args
for block, kwargs in zip(ctx.blocks[::-1], args[::-1]):
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class SequentialSequence(nn.Module):
def __init__(self, layers, args_route = {}, layer_dropout = 0.):
super().__init__()
assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
self.layers = layers
self.args_route = args_route
self.layer_dropout = layer_dropout
def forward(self, x, **kwargs):
args = route_args(self.args_route, kwargs, len(self.layers))
layers_and_args = list(zip(self.layers, args))
if self.training and self.layer_dropout > 0:
layers_and_args = layer_drop(layers_and_args, self.layer_dropout)
for (f, g), (f_args, g_args) in layers_and_args:
x = x + f(x, **f_args)
x = x + g(x, **g_args)
return x
class ReversibleSequence(nn.Module):
def __init__(self, blocks, args_route = {}, layer_dropout = 0.):
super().__init__()
self.args_route = args_route
self.layer_dropout = layer_dropout
self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim=-1)
blocks = self.blocks
args = route_args(self.args_route, kwargs, len(blocks))
args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args))
layers_and_args = list(zip(blocks, args))
if self.training and self.layer_dropout > 0:
layers_and_args = layer_drop(layers_and_args, self.layer_dropout)
blocks, args = map(lambda ind: list(map(itemgetter(ind), layers_and_args)), (0, 1))
out = _ReversibleFunction.apply(x, blocks, args)
return torch.stack(out.chunk(2, dim=-1)).mean(dim=0)
| linear-attention-transformer-master | linear_attention_transformer/reversible.py |
import torch
import torch.nn.functional as F
from torch import nn, einsum
import math
from operator import mul
from math import gcd
from collections import namedtuple
from functools import partial, reduce
from local_attention import LocalAttention
from linformer import LinformerSelfAttention
from product_key_memory import PKM
from axial_positional_embedding import AxialPositionalEmbedding
from linear_attention_transformer.reversible import ReversibleSequence, SequentialSequence
from einops import rearrange, repeat
# namedtuple settings
LinformerSettings = namedtuple('LinformerSettings', ['k'])
LinformerContextSettings = namedtuple('LinformerContextSettings', ['seq_len', 'k'])
# helper functions
def exists(val):
return val is not None
def default(value, d):
return d if not exists(value) else value
def always(value):
return lambda *args, **kwargs: value
def cast_tuple(val):
return (val,) if not isinstance(val, tuple) else val
def safe_div(n, d, eps = 1e-6):
return n.div_(d + eps)
def lcm(*numbers):
return int(reduce(lambda x, y: int((x * y) / gcd(x, y)), numbers, 1))
def merge_dims(ind_from, ind_to, tensor):
shape = list(tensor.shape)
arr_slice = slice(ind_from, ind_to + 1)
shape[arr_slice] = [reduce(mul, shape[arr_slice])]
return tensor.reshape(*shape)
def expand_dim(t, dim, k, unsqueeze=True):
if unsqueeze:
t = t.unsqueeze(dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def split_at_index(dim, index, t):
pre_slices = (slice(None),) * dim
l = (*pre_slices, slice(None, index))
r = (*pre_slices, slice(index, None))
return t[l], t[r]
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
# helper classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class Chunk(nn.Module):
def __init__(self, chunks, fn, along_dim = -1):
super().__init__()
self.dim = along_dim
self.chunks = chunks
self.fn = fn
def forward(self, x, **kwargs):
if self.chunks == 1:
return self.fn(x, **kwargs)
chunks = x.chunk(self.chunks, dim = self.dim)
return torch.cat([self.fn(c, **kwargs) for c in chunks], dim = self.dim)
class ProjectInOut(nn.Module):
def __init__(self, fn, dim_in, dim_out, project_out = True):
super().__init__()
self.fn = fn
self.project_in = nn.Linear(dim_in, dim_out)
self.project_out = nn.Linear(dim_out, dim_in) if project_out else nn.Identity()
def forward(self, x, **kwargs):
x = self.project_in(x)
x = self.fn(x, **kwargs)
x = self.project_out(x)
return x
# token shifting helper classes
def shift(t, amount, mask = None):
if amount == 0:
return t
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return F.pad(t, (0, 0, amount, -amount), value = 0.)
class PreShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x):
t = torch.arange(x.shape[1], device=x.device)
return self.emb(t)[None, :, :]
# sinusoidal positional embeddings
class FixedPositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
position = torch.arange(0, max_seq_len, dtype=torch.float)
sinusoid_inp = torch.einsum("i,j->ij", position, inv_freq)
emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
self.register_buffer('emb', emb)
def forward(self, x):
return self.emb[None, :x.shape[1], :].to(x)
# rotary positional embedding helpers
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d j -> ... (d j)')
def apply_rotory_pos_emb(q, k, sinu_pos):
sinu_pos = rearrange(sinu_pos, '() n (j d) -> n j d', j = 2)
sin, cos = sinu_pos.unbind(dim = -2)
sin, cos = map(lambda t: repeat(t, 'b n -> b (n j)', j = 2), (sin, cos))
q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k))
return q, k
# feedforward
class GELU_(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
GELU = nn.GELU if hasattr(nn, 'GELU') else GELU_
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):
super().__init__()
activation = default(activation, GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, x, **kwargs):
if not self.glu:
x = self.w1(x)
x = self.act(x)
else:
x, v = self.w1(x).chunk(2, dim=-1)
x = self.act(x) * v
x = self.dropout(x)
x = self.w2(x)
return x
# self attention layer
def linear_attn(q, k, v, kv_mask = None):
dim = q.shape[-1]
if exists(kv_mask):
mask_value = max_neg_value(q)
mask = kv_mask[:, None, :, None]
k = k.masked_fill_(~mask, mask_value)
v = v.masked_fill_(~mask, 0.)
del mask
q = q.softmax(dim=-1)
k = k.softmax(dim=-2)
q = q * dim ** -0.5
context = einsum('bhnd,bhne->bhde', k, v)
attn = einsum('bhnd,bhde->bhne', q, context)
return attn.reshape(*q.shape)
def causal_linear_attn(q, k, v, kv_mask = None, bucket_size = None, eps = 1e-3):
b, h, n, e, dtype = *q.shape, q.dtype
bucket_size = default(bucket_size, 64)
bucket_size = max(bucket_size, 1)
assert bucket_size == 0 or (n % bucket_size) == 0, f'sequence length {n} must be divisible by the bucket size {bucket_size} for causal linear attention'
q = q.softmax(dim=-1)
k = torch.exp(k).type(dtype).clone()
q = q * e ** -0.5
if exists(kv_mask):
mask = kv_mask[:, None, :, None]
k = k.masked_fill_(~mask, 0.)
v = v.masked_fill_(~mask, 0.)
del mask
bucket_fn = lambda x: x.reshape(*x.shape[:-2], -1, bucket_size, e)
b_q, b_k, b_v = map(bucket_fn, (q, k, v))
b_k_sum = b_k.sum(dim=-2)
b_k_cumsum = b_k_sum.cumsum(dim = -2).type(dtype)
context = einsum('bhund,bhune->bhude', b_k, b_v)
context = context.cumsum(dim = -3).type(dtype)
if bucket_size > 1:
context = F.pad(context, (0, 0, 0, 0, 1, 0), value = 0.)
context, _ = split_at_index(2, -1, context)
b_k_cumsum = F.pad(b_k_cumsum, (0, 0, 1, 0), value = 0.)
b_k_cumsum, _ = split_at_index(2, -1, b_k_cumsum)
D_inv = 1. / einsum('bhud,bhund->bhun', b_k_cumsum, b_q).clamp(min = eps)
attn = einsum('bhund,bhude,bhun->bhune', b_q, context, D_inv)
return attn.reshape(*q.shape)
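
# ---------------------------------------------------------------------------
# note (explanatory, added): for q, k, v of shape (b, h, n, d), linear_attn
# first aggregates keys and values into a d x d context,
#     context = einsum('bhnd,bhne->bhde', k, v)
#     out     = einsum('bhnd,bhde->bhne', q, context)
# so the cost grows linearly with the sequence length n rather than
# quadratically as in softmax attention. causal_linear_attn applies the same
# idea with cumulative sums over buckets, so information only flows from
# earlier positions; bucket_size (the blindspot) controls the granularity.
# ---------------------------------------------------------------------------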
class SelfAttention(nn.Module):
def __init__(self, dim, heads, causal = False, dim_head = None, blindspot_size = 1, n_local_attn_heads = 0, local_attn_window_size = 128, receives_context = False, dropout = 0., attn_dropout = 0.):
super().__init__()
assert dim_head or (dim % heads) == 0, 'embedding dimension must be divisible by number of heads'
d_heads = default(dim_head, dim // heads)
self.heads = heads
self.d_heads = d_heads
self.receives_context = receives_context
self.global_attn_heads = heads - n_local_attn_heads
self.global_attn_fn = linear_attn if not causal else partial(causal_linear_attn, bucket_size = blindspot_size)
self.local_attn_heads = n_local_attn_heads
self.local_attn = LocalAttention(local_attn_window_size, causal = causal, dropout = attn_dropout)
self.to_q = nn.Linear(dim, d_heads * heads, bias = False)
kv_heads = heads
self.kv_heads = kv_heads
self.to_k = nn.Linear(dim, d_heads * kv_heads, bias = False)
self.to_v = nn.Linear(dim, d_heads * kv_heads, bias = False)
self.to_out = nn.Linear(d_heads * heads, dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x, input_mask = None, context = None, context_mask = None, pos_emb = None, **kwargs):
assert not (self.receives_context and not exists(context)), 'context must be supplied if self attention is in receives context mode'
if not self.receives_context:
q, k, v = (self.to_q(x), self.to_k(x), self.to_v(x))
else:
q, k, v = (self.to_q(x), self.to_k(context), self.to_v(context))
b, t, e, h, dh = *q.shape, self.heads, self.d_heads
merge_heads = lambda x: x.reshape(*x.shape[:2], -1, dh).transpose(1, 2)
q, k, v = map(merge_heads, (q, k, v))
if exists(pos_emb) and not self.receives_context:
q, k = apply_rotory_pos_emb(q, k, pos_emb)
out = []
split_index_fn = partial(split_at_index, 1, self.local_attn_heads)
(lq, q), (lk, k), (lv, v) = map(split_index_fn, (q, k, v))
has_local, has_global = map(lambda x: x.shape[1] > 0, (lq, q))
if has_local:
local_out = self.local_attn(lq, lk, lv, input_mask = input_mask)
out.append(local_out)
if has_global:
kv_mask = input_mask if not self.receives_context else context_mask
global_out = self.global_attn_fn(q, k, v, kv_mask = kv_mask)
out.append(global_out)
attn = torch.cat(out, dim=1)
attn = attn.transpose(1, 2).reshape(b, t, -1)
return self.dropout(self.to_out(attn))
# transformer and language model classes
class FoldAxially(nn.Module):
def __init__(self, axial_dim, fn):
super().__init__()
self.fn = fn
self.axial_dim = axial_dim
def forward(self, x, input_mask = None, **kwargs):
b, t, d, ax = *x.shape, self.axial_dim
x = x.reshape(b, -1, ax, d).transpose(1, 2).reshape(b * ax, -1, d)
mask = None
if exists(input_mask):
mask = input_mask.reshape(b, -1, ax).transpose(1, 2).reshape(b * ax, -1)
x = self.fn(x, input_mask = mask, **kwargs)
x = x.reshape(b, ax, -1, d).transpose(1, 2).reshape(b, t, d)
return x
class LinearAttentionTransformer(nn.Module):
def __init__(
self,
dim,
depth,
max_seq_len,
heads = 8,
dim_head = None,
bucket_size = 64,
causal = False,
ff_chunks = 1,
ff_glu = False,
ff_dropout = 0.,
attn_layer_dropout = 0.,
attn_dropout = 0.,
reversible = False,
blindspot_size = 1,
n_local_attn_heads = 0,
local_attn_window_size = 128,
receives_context = False,
attend_axially = False,
pkm_layers = tuple(),
pkm_num_keys = 128,
linformer_settings = None,
context_linformer_settings = None,
shift_tokens = False
):
super().__init__()
assert not (causal and exists(linformer_settings)), 'Linformer self attention layer can only be used for non-causal networks'
assert not exists(linformer_settings) or isinstance(linformer_settings, LinformerSettings), 'Linformer self-attention settings must be a LinformerSettings namedtuple'
assert not exists(context_linformer_settings) or isinstance(context_linformer_settings, LinformerContextSettings), 'Linformer contextual self-attention settings must be a LinformerSettings namedtuple'
if type(n_local_attn_heads) is not tuple:
n_local_attn_heads = tuple([n_local_attn_heads] * depth)
assert len(n_local_attn_heads) == depth, 'local attention heads tuple must have the same length as the depth'
assert all([(local_heads <= heads) for local_heads in n_local_attn_heads]), 'number of local attn heads must be less than the maximum number of heads'
layers = nn.ModuleList([])
for ind, local_heads in zip(range(depth), n_local_attn_heads):
layer_num = ind + 1
use_pkm = layer_num in cast_tuple(pkm_layers)
parallel_net = Chunk(ff_chunks, FeedForward(dim), along_dim = 1) if not use_pkm else PKM(dim)
if not exists(linformer_settings):
attn = SelfAttention(dim, heads, causal, dim_head = dim_head, blindspot_size = blindspot_size, n_local_attn_heads = local_heads, local_attn_window_size = local_attn_window_size, dropout = attn_layer_dropout, attn_dropout= attn_dropout)
else:
attn = LinformerSelfAttention(dim, max_seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout, **linformer_settings._asdict())
if shift_tokens:
shifts = (1, 0, -1) if not causal else (1, 0)
attn, parallel_net = map(partial(PreShiftTokens, shifts), (attn, parallel_net))
layers.append(nn.ModuleList([
PreNorm(dim, attn),
PreNorm(dim, parallel_net)
]))
if attend_axially:
layers.append(nn.ModuleList([
PreNorm(dim, FoldAxially(local_attn_window_size, SelfAttention(dim, heads, causal, dropout = attn_layer_dropout, attn_dropout= attn_dropout))),
PreNorm(dim, Chunk(ff_chunks, FeedForward(dim, glu = ff_glu, dropout= ff_dropout), along_dim = 1))
]))
if receives_context:
if not exists(context_linformer_settings):
attn = SelfAttention(dim, heads, dim_head = dim_head, dropout = attn_layer_dropout, attn_dropout= attn_dropout, receives_context = True)
else:
attn = LinformerSelfAttention(dim, heads = heads, dim_head = dim_head, dropout = attn_dropout, **context_linformer_settings._asdict())
layers.append(nn.ModuleList([
PreNorm(dim, attn),
PreNorm(dim, Chunk(ff_chunks, FeedForward(dim, glu = ff_glu, dropout= ff_dropout), along_dim = 1))
]))
execute_type = ReversibleSequence if reversible else SequentialSequence
axial_layer = ((True, False),) if attend_axially else tuple()
attn_context_layer = ((True, False),) if receives_context else tuple()
route_attn = ((True, False), *axial_layer, *attn_context_layer) * depth
route_context = ((False, False), *axial_layer, *attn_context_layer) * depth
context_route_map = {'context': route_context, 'context_mask': route_context} if receives_context else {}
attn_route_map = {'input_mask': route_attn, 'pos_emb': route_attn}
self.layers = execute_type(layers, args_route = {**attn_route_map, **context_route_map})
self.pad_to_multiple = lcm(
1 if not causal else blindspot_size,
1 if all([(h == 0) for h in n_local_attn_heads]) else local_attn_window_size
)
def forward(self, x, **kwargs):
return self.layers(x, **kwargs)
class LinearAttentionTransformerLM(nn.Module):
def __init__(
self,
num_tokens,
dim,
depth,
max_seq_len,
heads = 8,
dim_head = 64,
causal = False,
emb_dim = None,
reversible = False,
ff_chunks = 1,
ff_glu = False,
ff_dropout = 0.,
attn_layer_dropout = 0.,
attn_dropout = 0.,
blindspot_size = 1,
n_local_attn_heads = 0,
local_attn_window_size = 128,
return_embeddings = False,
receives_context = False,
pkm_layers = tuple(),
pkm_num_keys = 128,
attend_axially = False,
linformer_settings = None,
context_linformer_settings = None,
use_axial_pos_emb = True,
use_rotary_emb = False,
shift_tokens = False
):
assert n_local_attn_heads == 0 or (max_seq_len % local_attn_window_size) == 0, 'max sequence length must be divisible by the local attention window size'
super().__init__()
emb_dim = default(emb_dim, dim)
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, emb_dim)
if use_rotary_emb:
self.pos_emb = FixedPositionalEmbedding(emb_dim, max_seq_len)
self.layer_pos_emb = FixedPositionalEmbedding(dim_head, max_seq_len)
elif use_axial_pos_emb:
self.pos_emb = AxialPositionalEmbedding(emb_dim, axial_shape=(math.ceil(max_seq_len / local_attn_window_size), local_attn_window_size))
self.layer_pos_emb = always(None)
else:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len)
self.layer_pos_emb = always(None)
        self.transformer = LinearAttentionTransformer(
            dim, depth, max_seq_len,
            heads = heads, dim_head = dim_head, causal = causal,
            ff_chunks = ff_chunks, ff_glu = ff_glu, ff_dropout = ff_dropout,
            attn_layer_dropout = attn_layer_dropout, attn_dropout = attn_dropout,
            reversible = reversible, blindspot_size = blindspot_size,
            n_local_attn_heads = n_local_attn_heads,
            local_attn_window_size = local_attn_window_size,
            receives_context = receives_context, pkm_layers = pkm_layers,
            pkm_num_keys = pkm_num_keys, attend_axially = attend_axially,
            linformer_settings = linformer_settings,
            context_linformer_settings = context_linformer_settings,
            shift_tokens = shift_tokens
        )
if emb_dim != dim:
self.transformer = ProjectInOut(self.transformer, emb_dim, dim, project_out = not return_embeddings)
self.norm = nn.LayerNorm(emb_dim)
self.out = nn.Linear(emb_dim, num_tokens) if not return_embeddings else nn.Identity()
def forward(self, x, **kwargs):
x = self.token_emb(x)
x = x + self.pos_emb(x).type(x.type())
layer_pos_emb = self.layer_pos_emb(x)
x = self.transformer(x, pos_emb = layer_pos_emb, **kwargs)
x = self.norm(x)
return self.out(x)
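# Minimal usage sketch (illustrative; the hyperparameters and shapes below are
# arbitrary assumptions, and the smoke test only runs when this module is
# executed directly, never on import).
if __name__ == '__main__':
    _model = LinearAttentionTransformerLM(
        num_tokens = 256,
        dim = 64,
        heads = 4,
        depth = 1,
        max_seq_len = 256,
        causal = True
    )
    _tokens = torch.randint(0, 256, (2, 256))   # (batch, seq_len) of token ids
    _logits = _model(_tokens)                   # (batch, seq_len, num_tokens)
    print(_logits.shape)                        # expected: torch.Size([2, 256, 256])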
| linear-attention-transformer-master | linear_attention_transformer/linear_attention_transformer.py |
from linear_attention_transformer.linear_attention_transformer import LinearAttentionTransformer, LinearAttentionTransformerLM, LinformerSettings, LinformerContextSettings
from linear_attention_transformer.autoregressive_wrapper import AutoregressiveWrapper
from linear_attention_transformer.images import ImageLinearAttention
| linear-attention-transformer-master | linear_attention_transformer/__init__.py |
import torch
from torch import nn
class ImageLinearAttention(nn.Module):
def __init__(self, chan, chan_out = None, kernel_size = 1, padding = 0, stride = 1, key_dim = 64, value_dim = 64, heads = 8, norm_queries = True):
super().__init__()
self.chan = chan
chan_out = chan if chan_out is None else chan_out
self.key_dim = key_dim
self.value_dim = value_dim
self.heads = heads
self.norm_queries = norm_queries
conv_kwargs = {'padding': padding, 'stride': stride}
self.to_q = nn.Conv2d(chan, key_dim * heads, kernel_size, **conv_kwargs)
self.to_k = nn.Conv2d(chan, key_dim * heads, kernel_size, **conv_kwargs)
self.to_v = nn.Conv2d(chan, value_dim * heads, kernel_size, **conv_kwargs)
out_conv_kwargs = {'padding': padding}
self.to_out = nn.Conv2d(value_dim * heads, chan_out, kernel_size, **out_conv_kwargs)
def forward(self, x, context = None):
b, c, h, w, k_dim, heads = *x.shape, self.key_dim, self.heads
q, k, v = (self.to_q(x), self.to_k(x), self.to_v(x))
q, k, v = map(lambda t: t.reshape(b, heads, -1, h * w), (q, k, v))
q, k = map(lambda x: x * (self.key_dim ** -0.25), (q, k))
if context is not None:
context = context.reshape(b, c, 1, -1)
ck, cv = self.to_k(context), self.to_v(context)
ck, cv = map(lambda t: t.reshape(b, heads, k_dim, -1), (ck, cv))
k = torch.cat((k, ck), dim=3)
v = torch.cat((v, cv), dim=3)
k = k.softmax(dim=-1)
if self.norm_queries:
q = q.softmax(dim=-2)
context = torch.einsum('bhdn,bhen->bhde', k, v)
out = torch.einsum('bhdn,bhde->bhen', q, context)
out = out.reshape(b, -1, h, w)
out = self.to_out(out)
return out
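# Minimal usage sketch (illustrative; channel count and spatial size are
# arbitrary assumptions, and the block only runs when executed directly).
if __name__ == '__main__':
    _attn = ImageLinearAttention(chan = 32, heads = 8, key_dim = 64, value_dim = 64)
    _feats = torch.randn(1, 32, 16, 16)   # (batch, channels, height, width)
    _out = _attn(_feats)                   # attention output, same spatial shape
    print(_out.shape)                      # expected: torch.Size([1, 32, 16, 16])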
| linear-attention-transformer-master | linear_attention_transformer/images.py |
import deepspeed
from linear_attention_transformer import LinearAttentionTransformerLM
from linear_attention_transformer.autoregressive_wrapper import AutoregressiveWrapper
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
def add_argument():
parser=argparse.ArgumentParser(description='enwik8')
parser.add_argument('--with_cuda', default=False, action='store_true',
help='use CPU in case there\'s no GPU support')
parser.add_argument('--use_ema', default=False, action='store_true',
help='whether use exponential moving average')
parser.add_argument('-b', '--batch_size', default=32, type=int,
help='mini-batch size (default: 32)')
parser.add_argument('-e', '--epochs', default=30, type=int,
help='number of total epochs (default: 30)')
parser.add_argument('--local_rank', type=int, default=-1,
help='local rank passed from distributed launcher')
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()
return args
# constants
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 1024
SEQ_LEN = 4096
# helpers
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = LinearAttentionTransformerLM(
num_tokens = 256,
dim = 512,
depth = 8,
max_seq_len = SEQ_LEN,
heads = 8,
causal = True,
reversible = True,
blindspot_size = 2,
shift_tokens = True,
n_local_attn_heads = (8, 8, 8, 8, 4, 4, 2, 2)
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq, torch.ones_like(full_seq).bool()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
# setup deepspeed
cmd_args = add_argument()
model_engine, optimizer, trainloader, _ = deepspeed.initialize(args=cmd_args, model=model, model_parameters=model.parameters(), training_data=train_dataset)
# training
for i, (data, mask) in enumerate(trainloader):
model_engine.train()
data = data.to(model_engine.local_rank)
loss = model_engine(data, return_loss = True, randomly_truncate_sequence = True)
model_engine.backward(loss)
model_engine.step()
print(loss.item())
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
inp, _ = random.choice(val_dataset)
loss = model(inp[None, :].cuda(), return_loss = True)
print(f'validation loss: {loss.item()}')
if i != 0 and model_engine.local_rank == 0 and i % GENERATE_EVERY == 0:
model.eval()
inp, _ = random.choice(val_dataset)
print(inp.shape, inp)
prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp.cuda(), GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| linear-attention-transformer-master | examples/enwik8_deepspeed/train.py |
import tqdm
import torch
import torch.optim as optim
from linear_attention_transformer import LinearAttentionTransformerLM
from linear_attention_transformer.autoregressive_wrapper import AutoregressiveWrapper
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 16
LEARNING_RATE = 1e-4
GENERATE_EVERY = 100
NUM_TOKENS = 16 + 2
ENC_SEQ_LEN = 32
DEC_SEQ_LEN = 64
# helpers
def cycle():
while True:
prefix = torch.ones((BATCH_SIZE, 1)).long().cuda()
src = torch.randint(2, NUM_TOKENS, (BATCH_SIZE, ENC_SEQ_LEN)).long().cuda()
tgt = torch.cat((prefix, src, src), 1)
src_mask = torch.ones(BATCH_SIZE, ENC_SEQ_LEN).bool().cuda()
tgt_mask = torch.ones(BATCH_SIZE, tgt.shape[1] - 1).bool().cuda()
yield (src, tgt, src_mask, tgt_mask)
# instantiate model
enc = LinearAttentionTransformerLM(
num_tokens = NUM_TOKENS,
dim = 512,
heads = 8,
depth = 1,
max_seq_len = ENC_SEQ_LEN,
shift_tokens = True,
return_embeddings = True
).cuda()
dec = LinearAttentionTransformerLM(
num_tokens = NUM_TOKENS,
dim = 512,
heads = 8,
depth = 3,
causal = True,
shift_tokens = True,
blindspot_size = 2, # a small blindspot greatly saves on memory
max_seq_len = DEC_SEQ_LEN,
receives_context = True
).cuda()
dec = AutoregressiveWrapper(dec)
# optimizer
optim = torch.optim.Adam([*enc.parameters(), *dec.parameters()], lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
enc.train(), dec.train()
src, tgt, src_mask, tgt_mask = next(cycle())
context = enc(src, input_mask = src_mask)
loss = dec(tgt, context = context, input_mask = tgt_mask, context_mask = src_mask, return_loss = True)
loss.backward()
print(loss.item())
optim.step()
optim.zero_grad()
if i % GENERATE_EVERY == 0:
enc.eval(), dec.eval()
src, _, src_mask, _ = next(cycle())
src, src_mask = src[0:1], src_mask[0:1]
start_tokens = (torch.ones((1, 1)) * 1).long().cuda()
context = enc(src)
sample = dec.generate(start_tokens, ENC_SEQ_LEN, context = context)
        incorrects = (src != sample).sum()
print(f"input: ", src)
print(f"predicted output: ", sample)
print(f"incorrects: {incorrects}")
| linear-attention-transformer-master | examples/toy_tasks/copy_task.py |
from linear_attention_transformer import LinearAttentionTransformerLM
from linear_attention_transformer.autoregressive_wrapper import AutoregressiveWrapper
from product_key_memory import fetch_optimizer_parameters
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 4096
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = LinearAttentionTransformerLM(
num_tokens = 256,
dim = 512,
depth = 6,
max_seq_len = SEQ_LEN,
heads = 8,
causal = True,
shift_tokens = True,
pkm_layers = (4,)
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
parameters = fetch_optimizer_parameters(model)
optim = torch.optim.Adam(parameters, lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print('%s \n\n %s' % (prime, '*' * 100))
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| linear-attention-transformer-master | examples/enwik8_simple/train.py |
"""Install Mesh TensorFlow."""
from setuptools import find_packages
from setuptools import setup
setup(
name='mesh-tensorflow',
version='0.1.18',
description='Mesh TensorFlow',
author='Google Inc.',
author_email='[email protected]',
url='http://github.com/tensorflow/mesh',
license='Apache 2.0',
packages=find_packages(),
package_data={
# Include gin files.
'': ['*.gin'],
},
scripts=[],
install_requires=[
'absl-py',
'future',
'gin-config',
'six',
],
extras_require={
'auto_mtf': ['ortools'],
'tensorflow': ['tensorflow>=1.15.0'],
'transformer': ['tensorflow-datasets', 'scipy'],
},
tests_require=[
'ortools',
'pytest',
'tensorflow',
'tensorflow-datasets',
],
setup_requires=['pytest-runner'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='tensorflow machine learning',
)
| mesh-master | setup.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mesh Tensorflow Optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
import gin
from mesh_tensorflow import layers
from mesh_tensorflow import ops_with_redefined_builtins as mtf
import tensorflow.compat.v1 as tf
def make_optimizer(hparams, lr):
if hparams.optimizer == "SGD":
return SgdOptimizer(lr)
elif hparams.optimizer == "Adafactor":
return adafactor_optimizer_from_hparams(hparams, lr)
else:
raise ValueError("Unknown Optimizer")
class Optimizer(object):
"""Base optimizer class.
Constructor of subclasses must take `learning_rate` as an argument.
"""
def apply_grads(self, grads, variables):
"""Apply gradients to variables.
Call this function externally instead of apply_grad(). This causes the
    operations to be combined, which is necessary for stacking variables;
see mtf.rewrite_stack_variables().
Args:
grads: a list of Tensor
variables: a list of Variables
Returns:
a list of Operations
"""
ops = []
for grad, var in zip(grads, variables):
ops.extend(self.apply_grad(grad, var))
if not ops:
return ops
return variables[0].graph.combine_assignments(ops)
def apply_grad(self, grad, var):
"""Update variable and accumulators.
Args:
grad: a Tensor
      var: a Variable
Returns:
a list of Operations
"""
raise ValueError("apply_grad not implemented %s %s" % (grad, var))
@gin.configurable
class SgdOptimizer(Optimizer):
"""Optimizer implementing SGD."""
def __init__(self, learning_rate):
self._lr = learning_rate
@property
def lr(self):
return self._lr
def apply_grad(self, grad, var):
if grad is None:
tf.logging.warning("Gradient is None for variable %s" % var.name)
return []
# It is critical to use assign_sub instead of mtf.assign(var - ...)
# for the case of bfloat16 activations, so as to avoid repeatedly rounding
# the slice value, which results in poor quality.
return [mtf.assign_sub(var, grad * self.lr)]
@gin.configurable
class MomentumOptimizer(Optimizer):
"""SGD with momentum."""
def __init__(self, learning_rate, momentum):
self._lr = learning_rate
self._momentum = momentum
@property
def lr(self):
return self._lr
@property
def momentum(self):
return self._momentum
def apply_grad(self, grad, var):
if grad is None:
tf.logging.warning("Gradient is None for variable %s" % var.name)
return []
updates = []
v = mtf.get_variable(
var.mesh, var.name + "_momentum_v", var.shape,
dtype=var.dtype, initializer=tf.zeros_initializer(), trainable=False)
with tf.variable_scope(var.name + "/sgd_momentum"):
updates.append(mtf.assign(v, grad * self.lr + v * self.momentum))
updates.append(mtf.assign_sub(var, v))
return updates
@gin.configurable
class AdamWeightDecayOptimizer(Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=None):
"""Constructs a AdamWeightDecayOptimizer."""
self.learning_rate = learning_rate
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
def apply_grad(self, grad, var):
"""See base class."""
if grad is None:
tf.logging.warning("Gradient is None for variable %s" % var.name)
return []
grad = mtf.to_float(grad)
assignments = []
m = mtf.get_variable(
var.mesh, var.name + "/adam_m", var.shape,
initializer=tf.zeros_initializer(), trainable=False)
v = mtf.get_variable(
var.mesh, var.name + "/adam_v", var.shape,
initializer=tf.zeros_initializer(), trainable=False)
# Standard Adam update.
next_m = self.beta_1 * m + (1.0 - self.beta_1) * grad
next_v = self.beta_2 * v + (1.0 - self.beta_2) * mtf.square(grad)
update = next_m / (mtf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
    # Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(var.name):
update += self.weight_decay_rate * var.value
update_with_lr = self.learning_rate * update
var_update = mtf.assign_sub(var, update_with_lr)
assignments.extend(
[var_update,
mtf.assign(m, next_m),
mtf.assign(v, next_v)])
return assignments
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
@gin.configurable
class AdafactorOptimizer(Optimizer):
"""Adafactor."""
def __init__(self,
multiply_by_parameter_scale=True,
learning_rate=None,
decay_rate=None,
beta1=0.0,
clipping_threshold=1.0,
factored=True,
epsilon1=1e-30,
epsilon2=1e-3,
min_dim_size_to_factor=128):
"""Construct a new Adafactor optimizer.
See class comment.
Args:
multiply_by_parameter_scale: a boolean
learning_rate: an optional Scalar.
decay_rate: an optional Scalar.
beta1: a float value between 0 and 1
clipping_threshold: an optional float >= 1
factored: a boolean - whether to use factored second-moment estimator
for 2d variables
epsilon1: Regularization constant for squared gradient.
epsilon2: Regularization constant for parameter scale.
min_dim_size_to_factor: only factor accumulator if two tensor dimensions
are at least this size.
Raises:
ValueError: if absolute_update_scale and relative_update_scale_fn are both
present or both absent.
"""
self._multiply_by_parameter_scale = multiply_by_parameter_scale
if learning_rate is None:
learning_rate = self._learning_rate_default(multiply_by_parameter_scale)
self._learning_rate = learning_rate
if decay_rate is None:
decay_rate = self._decay_rate_default()
self._decay_rate = decay_rate
self._beta1 = beta1
self._clipping_threshold = clipping_threshold
self._factored = factored
self._epsilon1 = epsilon1
self._epsilon2 = epsilon2
self._min_dim_size_to_factor = min_dim_size_to_factor
def _factored_dims(self, shape):
"""Should we use a factored second moment estimator.
Based on the shape of the variable.
If we factor the accumulator, then this function returns a list of two
mtf.Dimensions to reduce over. We always pick the two largest dimensions.
If there are not two dimensions of size >= min_dim_size_to_factor, then we
do not factor.
Args:
shape: a Shape
Returns:
either a list of 2 Dimensions or None
"""
if not self._factored or shape.ndims < 2:
return None
sorted_dims = sorted(shape.dims, key=lambda d: -d.size)
if sorted_dims[1].size < self._min_dim_size_to_factor:
return None
return sorted_dims[:2]
def _parameter_scale(self, var):
"""Estimate the scale of the parameters from the current values.
We include a minimum value of 0.001 to give it a chance to escape 0
if it was zero-initialized.
Instead of using the value, we could impute the scale from the shape,
as initializers do.
Args:
var: a variable or Tensor.
Returns:
a Scalar
"""
return mtf.maximum(reduce_rms(var), self._epsilon2)
def apply_grad(self, grad, var):
if grad is None:
tf.logging.warning("Gradient is None for variable %s" % var.name)
return []
# create slots
grad = mtf.to_float(grad)
factored_dims = self._factored_dims(var.shape)
if factored_dims:
d0, d1 = factored_dims
vr_shape = var.shape - d0
vc_shape = var.shape - d1
vr = mtf.get_variable(
var.mesh, var.name + "_slot_vr", vr_shape,
initializer=tf.zeros_initializer(), trainable=False)
vc = mtf.get_variable(
var.mesh, var.name + "_slot_vc", vc_shape,
initializer=tf.zeros_initializer(), trainable=False)
else:
v = mtf.get_variable(
var.mesh, var.name + "_slot_v", var.shape,
initializer=tf.zeros_initializer(), trainable=False)
if self._beta1:
m = mtf.get_variable(
var.mesh, var.name + "_slot_m", var.shape,
initializer=tf.zeros_initializer(), trainable=False)
with tf.variable_scope(var.name + "/adafactor"):
grad_squared = mtf.square(grad) + self._epsilon1
decay_rate = self._decay_rate
old_val = mtf.to_float(var.value)
if self._multiply_by_parameter_scale:
update_scale = self._parameter_scale(old_val) * self._learning_rate
else:
update_scale = self._learning_rate
mixing_rate = 1.0 - decay_rate
updates = []
if factored_dims:
grad_squared_row_mean = mtf.reduce_mean(
grad_squared, output_shape=vr_shape)
grad_squared_col_mean = mtf.reduce_mean(
grad_squared, output_shape=vc_shape)
new_vr = vr * decay_rate + grad_squared_row_mean * mixing_rate
new_vc = vc * decay_rate + grad_squared_col_mean * mixing_rate
vr_update = mtf.assign(vr, new_vr)
vc_update = mtf.assign(vc, new_vc)
updates.extend([vr_update, vc_update])
long_term_mean = mtf.reduce_mean(new_vr, reduced_dim=d1)
r_factor = mtf.rsqrt(new_vr / long_term_mean)
c_factor = mtf.rsqrt(new_vc)
x = grad * r_factor * c_factor
else:
new_v = v * decay_rate + grad_squared * mixing_rate
v_update = mtf.assign(v, new_v)
updates.append(v_update)
x = grad * mtf.rsqrt(new_v)
if self._clipping_threshold is not None:
clipping_denom = mtf.maximum(
1.0, reduce_rms(x) / self._clipping_threshold)
x /= clipping_denom
subtrahend = x * update_scale
if self._beta1:
new_m = (m * tf.constant(self._beta1)
+ subtrahend * tf.constant(1.0 - self._beta1))
subtrahend = new_m
updates.append(mtf.assign(m, new_m))
# It is critical to use assign_sub instead of mtf.assign(var - subtrahend)
# for the case of bfloat16 activations, so as to avoid repeatedly
# rounding the slice value, which results in poor quality.
var_update = mtf.assign_sub(var, subtrahend)
updates.append(var_update)
return updates
def _decay_rate_default(self):
return adafactor_decay_rate_pow(0.8)
def _learning_rate_default(self, multiply_by_parameter_scale):
step_num = tf.cast(tf.train.get_or_create_global_step(), tf.float32)
learning_rate = tf.minimum(tf.math.rsqrt(step_num + 1.0), 0.01)
if (not multiply_by_parameter_scale
and not layers.unit_scaling_convention()):
learning_rate *= 0.05
return learning_rate
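# Usage sketch (illustrative, comment only; `graph` and `loss` are assumed to be
# an mtf.Graph and its scalar loss tensor built elsewhere):
#   optimizer = AdafactorOptimizer()
#   var_grads = mtf.gradients(
#       [loss], [v.outputs[0] for v in graph.trainable_variables])
#   update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables)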
def adafactor_decay_rate_adam(beta2):
"""Second-moment decay rate like Adam, subsuming the correction factor.
Args:
beta2: a float between 0 and 1
Returns:
a scalar
"""
t = tf.cast(tf.train.get_or_create_global_step(), tf.float32) + 1.0
decay = beta2 * (1.0 - tf.pow(beta2, t - 1.0)) / (1.0 - tf.pow(beta2, t))
return decay
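# Illustrative note: with this decay rate, the running average
#   v_t = decay_t * v_{t-1} + (1 - decay_t) * g_t^2
# already equals the bias-corrected Adam second moment, because
#   decay_t = beta2 * (1 - beta2^(t-1)) / (1 - beta2^t)  and
#   1 - decay_t = (1 - beta2) / (1 - beta2^t),
# so no separate bias-correction factor is needed downstream.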
@gin.configurable
def adafactor_decay_rate_pow(exponent, offset=0):
"""Second moment decay rate where memory-length grows as step_num^exponent.
For fine-tuning, you may want to gin-configure offset to equal the starting
step-number for the fine-tuning phase.
Args:
exponent: a float between 0 and 1
offset: an integer (the starting step number)
Returns:
a scalar
"""
step_num = tf.cast(tf.train.get_or_create_global_step() - offset, tf.float32)
return 1.0 - tf.pow((step_num + 1.0), -exponent)
def adafactor_optimizer_from_hparams(hparams, lr):
"""Create an Adafactor optimizer based on model hparams.
Args:
hparams: model hyperparameters
lr: learning rate scalar.
Returns:
an AdafactorOptimizer
Raises:
ValueError: on illegal values
"""
if hparams.optimizer_adafactor_decay_type == "Adam":
decay_rate = adafactor_decay_rate_adam(
hparams.optimizer_adafactor_beta2)
elif hparams.optimizer_adafactor_decay_type == "pow":
decay_rate = adafactor_decay_rate_pow(
hparams.optimizer_adafactor_memory_exponent)
else:
raise ValueError("unknown optimizer_adafactor_decay_type")
return AdafactorOptimizer(
multiply_by_parameter_scale=(
hparams.optimizer_adafactor_multiply_by_parameter_scale),
learning_rate=lr,
decay_rate=decay_rate,
beta1=hparams.optimizer_adafactor_beta1,
clipping_threshold=hparams.optimizer_adafactor_clipping_threshold,
factored=hparams.optimizer_adafactor_factored)
def reduce_rms(x):
return mtf.sqrt(mtf.reduce_mean(mtf.square(x)))
# Workaround by copying this over
# Note: Importing this from transformers gives some circular import problems.
@gin.configurable
def product_learning_rate(step,
total_train_steps,
factors=gin.REQUIRED,
offset=0):
"""Learning rate is the product of one or more factors.
Takes a list of factors which are either numbers or learning-rate functions
each taking step and total_train_step arguments.
If `offset` is nonzero, then subtract offset from the step and from
total_train_steps before computing the learning rate.
Args:
step: a tf.Scalar
total_train_steps: a number
factors: a list of numbers and/or functions
offset: an optional float
Returns:
a tf.Scalar, the learning rate for the step.
"""
ret = 1.0
for f in factors:
ret *= f(step - offset, total_train_steps - offset) if callable(f) else f
return ret
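# Usage sketch (illustrative; in practice `factors` is supplied via gin config):
#   warmup = lambda step, total_steps: tf.minimum(1.0, step / 10000.0)
#   lr = product_learning_rate(step, total_train_steps, factors=[0.5, warmup])
# which multiplies a constant 0.5 by a linear warmup reaching 1.0 at step 10000.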
@gin.configurable
def compute_lr_for_step(schedules, learning_rate,
train_steps=524288):
"""Get actual LR for step."""
actual_lr_rates = []
for lr_schedule in schedules:
if lr_schedule is None:
actual_lr_rates.append(learning_rate)
else:
converted_schedule = functools.partial(
product_learning_rate, factors=lr_schedule)
converted_schedule = functools.partial(
converted_schedule, total_train_steps=train_steps)
if callable(converted_schedule):
# the following happens on CPU since TPU can't handle summaries.
with mtf.utils.outside_all_rewrites():
converted_schedule = converted_schedule(
step=tf.train.get_global_step())
tf.summary.scalar("alt_learning_rate", converted_schedule)
actual_lr_rates.append(converted_schedule)
return actual_lr_rates
@gin.configurable
class AdafactorWithMultiLRSchedule(AdafactorOptimizer):
"""Adafactor with Multiple LR schedule."""
def __init__(self,
variable_search=None,
alt_lr_schedules=None,
**kwargs
):
"""Construct a new Adafactor optimizer.
See class comment.
Args:
variable_search: list of regex strings to use alt learning rate.
alt_lr_schedules: list of learning_rate_schedules
**kwargs: Adafactor keyword args
Raises:
ValueError: if absolute_update_scale and relative_update_scale_fn are both
present or both absent.
"""
super(AdafactorWithMultiLRSchedule, self).__init__(
**kwargs
)
self.variable_search = variable_search
self.alt_lr_schedules = alt_lr_schedules
def apply_grad(self, grad, var):
if self.alt_lr_schedules is None or self.variable_search is None:
return super(AdafactorWithMultiLRSchedule, self).apply_grad(grad, var)
actual_lr_rates = compute_lr_for_step(self.alt_lr_schedules,
self._learning_rate,
)
# Modify learning rate for exception variables
for idx, variable_search in enumerate(self.variable_search):
if re.search(variable_search, var.name) is not None:
# finds variable in LR schedule
old_lr = self._learning_rate
# get n-th learning rate schedule
self._learning_rate = actual_lr_rates[idx]
assignments = super(AdafactorWithMultiLRSchedule,
self).apply_grad(grad, var)
self._learning_rate = old_lr
else:
assignments = super(AdafactorWithMultiLRSchedule,
self).apply_grad(grad, var)
return assignments
@gin.configurable
class AdamWithMultiLRSchedule(AdamWeightDecayOptimizer):
"""An Adam optimizer that includes "correct" L2 weight decay.
Adam optimizer that is able to processes multiple learning rate schedules
for different variables within the optimizer class itself. This function
takes in a list of variables to search and a list of corresponding
alt lr schedules.
The original variables are processed with the original learning rate
controlled from outside the loop.
Learning rate schedule should use the product learning rate.
"""
def __init__(self,
variable_search=None,
alt_lr_schedules=None,
**kwargs
):
"""Adam LR with multi LR schedule.
Args:
variable_search: list of regex strings to use alt learning rate.
alt_lr_schedules: list of learning_rate_schedules
**kwargs: Adam keyword args
"""
super(AdamWithMultiLRSchedule, self).__init__(
**kwargs
)
self.variable_search = variable_search
self.alt_lr_schedules = alt_lr_schedules
def apply_grad(self, grad, var):
if self.alt_lr_schedules is None or self.variable_search is None:
return super(AdamWithMultiLRSchedule, self).apply_grad(grad, var)
actual_lr_rates = compute_lr_for_step(self.alt_lr_schedules,
self.learning_rate
)
# Modify learning rate for exception variables
for idx, variable_search in enumerate(self.variable_search):
if re.search(variable_search, var.name) is not None:
# finds variable in LR schedule
old_lr = self.learning_rate
# get n-th learning rate schedule
self.learning_rate = actual_lr_rates[idx]
assignments = super(AdamWithMultiLRSchedule,
self).apply_grad(grad, var)
self.learning_rate = old_lr
else:
assignments = super(AdamWithMultiLRSchedule,
self).apply_grad(grad, var)
return assignments
| mesh-master | mesh_tensorflow/optimize.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Mesh TensorFlow layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import mesh_tensorflow as mtf
from mesh_tensorflow import test_utils
import mock
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import test_util # pylint:disable=g-direct-tensorflow-import
def initialize_by_shape(shape_to_value):
"""Create an initializer with values specified by tensor shape."""
def initialize(shape, dtype, **unused_kwargs):
shape = tuple(shape)
if shape not in shape_to_value:
raise ValueError(
"Shape {} not found in shape to value map.".format(shape))
return tf.reshape(
tf.constant(shape_to_value[tuple(shape)], dtype=dtype), shape)
return initialize
class LayersTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(4, True, "not_channels"),
(8, False, "channels"),
)
def testDense(self, units, use_bias, new_dim_name):
batch = 2
channels = 3
inputs = tf.random_normal([batch, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
channels_dim = mtf.Dimension("channels", channels)
new_dim = mtf.Dimension(new_dim_name, units)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf.layers.dense(
mtf_inputs,
new_dims=new_dim,
reduced_dims=[channels_dim],
activation=mtf.relu,
use_bias=use_bias)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
expected_outputs = tf.keras.layers.Dense(units=units,
activation=tf.nn.relu,
use_bias=use_bias)(inputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual, expected = self.evaluate([actual_outputs, expected_outputs])
self.assertEqual(actual.shape, expected.shape)
@test_util.run_in_graph_and_eager_modes()
def testLayerNorm(self):
batch = 2
channels = 3
inputs = tf.random_normal([batch, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
channels_dim = mtf.Dimension("channels", channels)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf.layers.layer_norm(mtf_inputs,
dim=channels_dim)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
expected_outputs = tf.keras.layers.LayerNormalization()(inputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual, expected = self.evaluate([actual_outputs, expected_outputs])
self.assertEqual(actual.shape, expected.shape)
@test_util.run_in_graph_and_eager_modes()
def testBatchNorm(self):
batch = 2
channels = 3
inputs = tf.constant([[0, 1, 2], [4, 5, 6]], dtype=np.float32)
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
channels_dim = mtf.Dimension("channels", channels)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs_0, _ = mtf.layers.batch_norm(
mtf_inputs,
is_training=True, momentum=0.95, epsilon=1e-6,
dims_idx_start=0, dims_idx_end=1, name="bn0")
mtf_outputs_1, _ = mtf.layers.batch_norm(
mtf_outputs_0 * 2 + 1,
is_training=True, momentum=0.95, epsilon=1e-6,
dims_idx_start=0, dims_idx_end=1, name="bn1")
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs_0 = lowering.export_to_tf_tensor(mtf_outputs_0)
actual_outputs_1 = lowering.export_to_tf_tensor(mtf_outputs_1)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
[actual_0, actual_1] = self.evaluate([actual_outputs_0, actual_outputs_1])
expected = np.array([[-1, -1, -1], [1, 1, 1]])
self.assertAllClose(actual_0, expected)
self.assertAllClose(actual_1, expected)
@test_util.run_in_graph_and_eager_modes()
def testWeightsNonzero(self):
inputs = tf.constant([[3, 1, 0], [1, 0, 0]])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", inputs.shape.as_list()[0])
channels_dim = mtf.Dimension("channels", inputs.shape.as_list()[1])
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf.layers.weights_nonzero(mtf_inputs)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
expected_outputs = tf.cast(tf.not_equal(inputs, 0), tf.float32)
tf_group = lowering.copy_masters_to_slices()
self.evaluate(tf_group)
actual, expected = self.evaluate([actual_outputs, expected_outputs])
self.assertAllEqual(actual, expected)
@test_util.run_in_graph_and_eager_modes()
def testDenseReluDense(self):
batch = 2
channels = 3
hidden = 5
inputs = tf.random_normal([batch, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
channels_dim = mtf.Dimension("channels", channels)
hidden_dim = mtf.Dimension("hidden", hidden)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([batch_dim, channels_dim]))
mtf_outputs = mtf.layers.dense_relu_dense(mtf_inputs,
hidden_channels=hidden_dim)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual = self.evaluate(actual_outputs)
self.assertEqual(actual.shape, inputs.shape)
@parameterized.parameters(
(2, 16, 3, 4, 2, 2),
(1, 8, 5, 3, 1, 4),
)
def testMaskedLocalAttention1D(self, batch, length, io_channels, kv_channels,
heads, window_size):
length_q = length
query = tf.random_normal([batch, length_q, io_channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
length_q_dim = mtf.Dimension("length_q", length_q)
io_channels_dim = mtf.Dimension("io_channels", io_channels)
kv_channels_dim = mtf.Dimension("kv_channels", kv_channels)
heads_dim = mtf.Dimension("heads", heads)
mtf_query = mtf.import_tf_tensor(
mesh, query,
shape=mtf.Shape([batch_dim, length_q_dim, io_channels_dim]))
mtf_outputs = mtf.layers.masked_local_attention_1d(
mtf_query,
kv_channels=kv_channels_dim,
heads=heads_dim,
window_size=window_size)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual = self.evaluate(actual_outputs)
self.assertEqual(actual.shape, (batch, length_q, io_channels))
@parameterized.parameters(
(2, 4, 5, 7, 3, 1),
)
def testDotProductAttention(
self, batch, heads, length_q, length_kv, depth_k, depth_v):
query = tf.random_normal([batch, heads, length_q, depth_k])
key = tf.random_normal([batch, heads, length_kv, depth_k])
value = tf.random_normal([batch, heads, length_kv, depth_v])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
heads_dim = mtf.Dimension("heads", heads)
length_q_dim = mtf.Dimension("length_q", length_q)
length_kv_dim = mtf.Dimension("length_kv", length_kv)
depth_k_dim = mtf.Dimension("depth_k", depth_k)
depth_v_dim = mtf.Dimension("depth_v", depth_v)
mtf_query = mtf.import_tf_tensor(
mesh, query,
shape=mtf.Shape(
[batch_dim, heads_dim, length_q_dim, depth_k_dim]))
mtf_key = mtf.import_tf_tensor(
mesh, key,
shape=mtf.Shape(
[batch_dim, heads_dim, length_kv_dim, depth_k_dim]))
mtf_value = mtf.import_tf_tensor(
mesh, value,
shape=mtf.Shape(
[batch_dim, heads_dim, length_kv_dim, depth_v_dim]))
mtf_outputs = mtf.layers.dot_product_attention(
mtf_query,
mtf_key,
mtf_value,
mask=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual = self.evaluate(actual_outputs)
self.assertEqual(actual.shape, (batch, heads, length_q, depth_v))
@parameterized.parameters(
(16, 4),
(32, 8),
)
def testMultiheadAttention(self, kv_channels, heads):
batch = 2
length = 8
channels = 3
query = tf.random_normal([batch, length, channels])
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
length_dim = mtf.Dimension("length", length)
channels_dim = mtf.Dimension("channels", channels)
kv_channels_dim = mtf.Dimension("kv_channels", kv_channels)
heads_dim = mtf.Dimension("heads", heads)
mtf_query = mtf.import_tf_tensor(
mesh, query,
shape=mtf.Shape([batch_dim, length_dim, channels_dim]))
mtf_outputs = mtf.layers.multihead_attention(
mtf_query,
memory_antecedent=None,
mask=None,
kv_channels=kv_channels_dim,
heads=heads_dim)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual = self.evaluate(actual_outputs)
self.assertEqual(actual.shape, query.shape)
@parameterized.parameters(
("MAX_2D",), ("AVG_2D",), ("MAX_3D",), ("AVG_3D",),
)
def testPool(self, pooling_method):
batch = 2
depth = 3
height = 4
width = 6
channels = 3
tf.random.set_random_seed(1234)
inputs = tf.random_normal([batch, depth, height, width, channels])
stride_d = 3
stride_h = 2
stride_w = 3
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
depth_dim = mtf.Dimension("depth", depth)
height_dim = mtf.Dimension("height", height)
width_dim = mtf.Dimension("width", width)
channels_dim = mtf.Dimension("channels", channels)
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape(
[batch_dim, depth_dim, height_dim, width_dim, channels_dim]))
if pooling_method == "MAX_2D":
mtf_outputs = mtf.layers.max_pool2d(
mtf_inputs, ksize=(stride_h, stride_w))
inputs = tf.reshape(inputs, [batch * depth, height, width, channels])
expected_outputs = tf.keras.layers.MaxPooling2D(
(stride_h, stride_w))(inputs)
expected_outputs = tf.reshape(
expected_outputs,
[batch, depth, int(height / stride_h),
int(width / stride_w), channels])
elif pooling_method == "AVG_2D":
mtf_outputs = mtf.layers.avg_pool2d(
mtf_inputs, ksize=(stride_h, stride_w))
inputs = tf.reshape(inputs, [batch * depth, height, width, channels])
expected_outputs = tf.keras.layers.AveragePooling2D(
(stride_h, stride_w))(inputs)
expected_outputs = tf.reshape(
expected_outputs,
[batch, depth, int(height / stride_h),
int(width / stride_w), channels])
elif pooling_method == "MAX_3D":
mtf_outputs = mtf.layers.max_pool3d(
mtf_inputs, ksize=[stride_d, stride_h, stride_w])
expected_outputs = tf.keras.layers.MaxPooling3D(
[stride_d, stride_h, stride_w])(inputs)
elif pooling_method == "AVG_3D":
mtf_outputs = mtf.layers.avg_pool3d(
mtf_inputs, ksize=[stride_d, stride_h, stride_w])
expected_outputs = tf.keras.layers.AveragePooling3D(
[stride_d, stride_h, stride_w])(inputs)
mtf_gradient = mtf.gradients([mtf_outputs], [mtf_inputs])[0]
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
actual_gradient = lowering.export_to_tf_tensor(mtf_gradient)
tf_group = lowering.copy_masters_to_slices()
init = tf.global_variables_initializer()
self.evaluate(init)
self.evaluate(tf_group)
actual, expected = self.evaluate([actual_outputs, expected_outputs])
self.assertAllClose(actual, expected)
actual = self.evaluate(actual_gradient)
if pooling_method == "MAX_2D":
expected_non_zeros = batch * depth * height * width * channels / (
stride_h * stride_w)
self.assertEqual(np.count_nonzero(actual), expected_non_zeros)
elif pooling_method == "AVG_2D":
expected = np.ones((batch, depth, height, width, channels),
dtype=np.float32) / stride_h / stride_w
self.assertAllClose(actual, expected)
elif pooling_method == "MAX_3D":
expected_non_zeros = batch * depth * height * width * channels / (
stride_d * stride_h * stride_w)
self.assertEqual(np.count_nonzero(actual), expected_non_zeros)
elif pooling_method == "AVG_3D":
expected = np.ones((batch, depth, height, width, channels),
dtype=np.float32) / stride_d / stride_h / stride_w
self.assertAllClose(actual, expected)
@test_util.run_in_graph_and_eager_modes()
def testConv1d(self):
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
filter_size = 3
depth_dim = mtf.Dimension("depth", 2)
length_dim = mtf.Dimension("length", 4)
output_dim = mtf.Dimension("output", 2)
x = tf.constant([[1, 0], [0, 1], [1, 1], [2, 1]], dtype=tf.float32)
mtf_x = mtf.import_tf_tensor(
mesh, x, shape=mtf.Shape([length_dim, depth_dim]))
initializer_mock = mock.MagicMock()
initializer_mock.side_effect = initialize_by_shape({
(1, 3, 2, 2): [[[[1, -1], [0, 0]], [[2, -2], [-1, 1]], [[3, -3],
[-2, 2]]]],
})
mtf_output = mtf.layers.conv1d(
mtf_x,
output_dim=output_dim,
filter_size=filter_size,
filter_initializer=initializer_mock)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_output = lowering.export_to_tf_tensor(mtf_output)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate(actual_output)
self.assertAllClose(actual, [[0, 0], [1, -1], [5, -5], [4, -4]])
def testConv1dValidPadding(self):
converter = test_utils.NumpyConverter()
batch = 2
d_model = 6
d_out = 1
length = 4
filter_size = 3
x = np.random.randn(batch, length, d_model)
x_mtf = converter.convert_np_array_to_mtf_tensor(
x, dtype=tf.float32, dim_names=["batch", "length", "d_model"])
conv_filter = np.random.randn(1, filter_size, d_model, d_out)
initializer = lambda shape, dtype, **kwargs: conv_filter
output_mtf = mtf.layers.conv1d(
x_mtf,
output_dim=mtf.Dimension("output_dim", d_out),
filter_size=filter_size,
padding="VALID",
filter_initializer=initializer)
actual = converter.convert_mtf_tensor_to_np_array(output_mtf)
# Expected length is 2.
expected = np.empty(shape=(batch, 2, d_out), dtype=np.float32)
# [filter_size, d_model]
current_filter = conv_filter[0, :, :, 0]
# b: batch, k: filter_size, d: d_model.
expected[:, 0] = np.einsum("bkd,kd->b", x[:, :filter_size, :],
current_filter).reshape(batch, 1)
expected[:, 1] = np.einsum("bkd,kd->b", x[:, 1:, :],
current_filter).reshape(batch, 1)
self.assertAllClose(actual, expected)
def testConv1dValidPaddingMultipleBatchDims(self):
converter = test_utils.NumpyConverter()
batch = 2
outer_batch = 3
d_model = 6
d_out = 1
length = 4
filter_size = 3
x = np.random.randn(outer_batch, batch, length, d_model)
x_mtf = converter.convert_np_array_to_mtf_tensor(
x,
dtype=tf.float32,
dim_names=["outer_batch", "batch", "length", "d_model"])
conv_filter = np.random.randn(1, filter_size, d_model, d_out)
initializer = lambda shape, dtype, **kwargs: conv_filter
output_mtf = mtf.layers.conv1d(
x_mtf,
output_dim=mtf.Dimension("output_dim", d_out),
filter_size=filter_size,
padding="VALID",
filter_initializer=initializer)
actual = converter.convert_mtf_tensor_to_np_array(output_mtf)
# Expected length is 2.
expected = np.empty(shape=(outer_batch, batch, 2, d_out), dtype=np.float32)
# Effective filter: [filter_size, d_model]
f = conv_filter[0, :, :, 0]
# o: outer_batch, b: batch, k: filter_size, d: d_model.
expected[:, :, 0] = np.einsum("obkd,kd->ob", x[:, :, :filter_size, :],
f).reshape(outer_batch, batch, 1)
expected[:, :, 1] = np.einsum("obkd,kd->ob", x[:, :, 1:, :],
f).reshape(outer_batch, batch, 1)
self.assertAllClose(actual, expected)
@mock.patch.object(tf, "truncated_normal_initializer", autospec=True)
@test_util.run_in_graph_and_eager_modes()
def testSeparableConv1d(self, random_normal_initializer_mock):
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
depth_dim = mtf.Dimension("depth", 2)
length_dim = mtf.Dimension("length", 4)
output_dim = mtf.Dimension("output", 2)
x = tf.constant([[1, 0], [0, 1], [1, 1], [2, 1]], dtype=tf.float32)
mtf_x = mtf.import_tf_tensor(
mesh, x, shape=mtf.Shape([length_dim, depth_dim]))
initializer_mock = mock.MagicMock()
random_normal_initializer_mock.return_value = initializer_mock
initializer_mock.side_effect = initialize_by_shape({
(2,): [1, 2],
(2, 2): [[1, 0], [1, -1]],
})
mtf_output = mtf.layers.separable_conv1d(
mtf_x,
output_dim,
min_relative_pos=-1,
max_relative_pos=1,
use_bias=True)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_output = lowering.export_to_tf_tensor(mtf_output)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate(actual_output)
self.assertAllClose(actual, [[3, -2], [6, -4], [9, -6], [7, -4]])
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.enable_eager_execution()
tf.test.main()
| mesh-master | mesh_tensorflow/layers_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mesh TensorFlow test utilities."""
import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf
class NumpyConverter(object):
"""Converter class to convert between mtf.Tensor, tf.Tensor and np.array."""
def __init__(self):
self._graph = mtf.Graph()
self._mesh = mtf.Mesh(self._graph, "mtf_mesh")
self._session = tf.Session()
def convert_np_array_to_mtf_tensor(self, x, dim_names=None, dtype=tf.int32):
"""Convert a numpy array to an equivalent mtf.Tensor."""
dim_sizes = x.shape
if not dim_names:
dim_names = [f"dim{i}" for i in range(len(dim_sizes))]
dims = []
for dim_size, dim_name in zip(dim_sizes, dim_names):
dims.append(mtf.Dimension(dim_name, dim_size))
shape = mtf.Shape(dims)
x_mtf = mtf.constant(self.mesh, x, shape=shape, dtype=dtype)
return x_mtf
def convert_mtf_tensor_to_np_array(self, x_mtf):
"""Convert an mtf.Tensor to a numpy array."""
_, x_tf = self.convert_mtf_tensor_to_tf_tensor(x_mtf)
if tf.executing_eagerly():
return x_tf.numpy()
else:
self.session.run(tf.global_variables_initializer())
return x_tf.eval(session=self.session)
def convert_mtf_tensor_to_tf_tensor(self, mtf_tensor):
"""Convert an mtf.Tensor to a tf.Tensor."""
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
return lowering, lowering.export_to_tf_tensor(mtf_tensor)
@property
def graph(self):
return self._graph
@property
def mesh(self):
return self._mesh
@property
def session(self):
return self._session
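# Usage sketch (illustrative, comment only; assumes numpy is imported as np by
# the caller and that TF1 graph-mode behavior is enabled):
#   converter = NumpyConverter()
#   x = np.arange(6).reshape(2, 3)
#   x_mtf = converter.convert_np_array_to_mtf_tensor(x, dim_names=["row", "col"])
#   x_back = converter.convert_mtf_tensor_to_np_array(x_mtf)  # round-trips to x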
| mesh-master | mesh_tensorflow/test_utils.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
class UtilsTest(tf.test.TestCase):
def test_variable_placer(self):
sizes = [100, 0, 0, 0]
device_list = ['cpu:0', 'cpu:1', 'cpu:2', 'cpu:3']
with tf.Graph().as_default() as g:
var_placer = mtf.utils.BalancedVariablePlacer(device_list, sizes)
graph = mtf.Graph()
mesh = mtf.Mesh(graph, 'my_mesh', var_placer)
hidden_dim = mtf.Dimension('hidden', 10)
output_dim = mtf.Dimension('output_feature', 10)
for i in xrange(5):
# Each variable takes 400 Bytes, and will be placed from cpu:1.
mtf.get_variable(mesh, 'w{}'.format(i), [hidden_dim, output_dim])
for i in xrange(5):
var = g.get_tensor_by_name('w{}:0'.format(i))
device = (i + 1) % len(device_list)
self.assertEqual('cpu:{}'.format(device), var.device)
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
| mesh-master | mesh_tensorflow/utils_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mesh TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mesh_tensorflow.ops import * # pylint: disable=wildcard-import
from mesh_tensorflow.ops import mtf_abs as abs # pylint: disable=redefined-builtin,unused-import
from mesh_tensorflow.ops import mtf_pow as pow # pylint: disable=redefined-builtin,unused-import
from mesh_tensorflow.ops import mtf_range as range # pylint: disable=redefined-builtin,unused-import
from mesh_tensorflow.ops import mtf_slice as slice # pylint: disable=redefined-builtin,unused-import
# TODO(trandustin): Seal module.
# from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=line-too-long
#
# _allowed_symbols = None
#
# remove_undocumented(__name__, _allowed_symbols)
| mesh-master | mesh_tensorflow/ops_with_redefined_builtins.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SIMD Mesh implementation (for TPU/XLA)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import gin
from mesh_tensorflow import ops_with_redefined_builtins as mtf
from mesh_tensorflow import tpu_variables
from mesh_tensorflow import utils
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
from tensorflow.python.tpu.ops import tpu_ops # pylint: disable=g-direct-tensorflow-import
@gin.configurable
class SimdMeshImpl(mtf.MeshImpl):
"""Mesh implementation for TPU using SIMD and MPI operations."""
def __init__(self,
shape,
layout,
devices=None,
device_assignment=None,
logical_to_physical=None,
allreduce_in_bfloat16_max_group_size=8):
"""Create a SimdMeshImpl.
Args:
shape: an input to mtf.convert_to_shape()
layout: an input to mtf.convert_to_layout_rules()
devices: deprecated
device_assignment: a tf.tpu.experimental.DeviceAssignment -
        devices must be assigned in lexicographic order
logical_to_physical: an optional permutation representing the mapping
from logical cores to "physical" cores, where the physical cores are
listed in lexicographic order in the physical mesh, and the logical
cores are listed in lexicographic order in the logical mesh.
Default is lexicographic order.
allreduce_in_bfloat16_max_group_size: an integer. Allreduces of bfloat16
tensors are done in float32 if the group size exceeds this value.
"""
super(SimdMeshImpl, self).__init__(shape, layout)
if devices is not None:
tf.logging.warning("SimdMeshImpl ignoring devices %s" % devices)
self._device_assignment = device_assignment
tf.logging.info("SimdMeshImpl init: {0} {1}".format(shape, layout))
tf.logging.info("Device Assignment: {0}".format(device_assignment))
if logical_to_physical is None:
# TODO(noam): maybe use auto_logical_to_physical_tpu() here
logical_to_physical = list(range(self.size))
if sorted(logical_to_physical) != list(range(self.size)):
raise ValueError(
"logical_to_physical must be a permutation on range(shape.size)"
" shape=%s logical_to_physical=%s" % (shape, logical_to_physical))
self._logical_to_physical = logical_to_physical
self._physical_to_logical = [None] * self.size
for logical, physical in enumerate(self._logical_to_physical):
self._physical_to_logical[physical] = logical
self._pnum_tensor = None
self.graph_device_function_stacks = []
self.copy_master_to_slice_ops = []
self._allreduce_in_bfloat16_max_group_size = (
allreduce_in_bfloat16_max_group_size)
@property
def pnum_tensor(self):
if self._pnum_tensor is not None:
return self._pnum_tensor
with utils.outside_all_rewrites():
tf.logging.info("Create pnum_tensor")
self._pnum_tensor = tpu_ops.tpu_replicated_input(
self._physical_to_logical, name="pnum_constants")
return self._pnum_tensor
def l2p(self, logical_pnum):
return self._logical_to_physical[logical_pnum]
def p2l(self, physical_pnum):
return self._physical_to_logical[physical_pnum]
class LaidOutTensor(object):
"""One Slice."""
def __init__(self, tensor_list):
assert isinstance(tensor_list, list)
self._tensor_list = tensor_list
def __repr__(self):
return "[" + ",".join([str(t) for t in self._tensor_list]) + "]"
@property
def tensor_list(self):
return self._tensor_list
@property
def one_slice(self):
return self._tensor_list[0]
@classmethod
def from_tensor_list(cls, tensor_list):
return cls(tensor_list)
@property
def all_slices(self):
return self._tensor_list
@property
def slice_shape(self):
return self.one_slice.shape.as_list()
def to_laid_out_tensor(self):
return self
class LaidOutVariable(object):
"""Maintains slice-variables and copy operations."""
def __init__(self, variable, mesh_impl):
"""Create a LaidOutVariable.
Args:
variable: a Variable (Operation)
mesh_impl: a MeshImpl
"""
self._variable = variable
self._mesh_impl = mesh_impl
shape = variable.outputs[0].shape
slice_shape = mesh_impl.slice_shape(shape)
base_name = variable.name
slices = []
slices_with_master_dtype = []
with tf.device(variable.master_device), utils.outside_all_rewrites():
zero_tensor = tf.zeros(slice_shape, dtype=variable.slice_dtype)
# pylint: disable=protected-access
init_device_stack = tf.get_default_graph()._device_function_stack
if not mesh_impl.graph_device_function_stacks:
for pnum in xrange(mesh_impl.size):
tpu_device = mesh_impl.device_assignment.tpu_device(replica=pnum)
with tf.device(tpu_device):
mesh_impl.graph_device_function_stacks.append(
tf.get_default_graph()._device_function_stack.copy())
for physical_pnum in xrange(mesh_impl.size):
slice_var_name = base_name + "_slice_%d" % physical_pnum
          # Use tf.Variable instead of tf.get_variable since the latter adds
          # lots of useless operations to the TF graph. Use tf.get_variable
          # only if we are in an AUTO_REUSE scope.
          # Note: repeatedly entering 'with tf.device():' scopes slows down
          # graph construction, so we directly use the cached device_stack
          # here.
tf.get_default_graph()._device_function_stack = (
mesh_impl.graph_device_function_stacks[physical_pnum])
if tf.get_variable_scope().reuse == tf.AUTO_REUSE:
slice_var = tf.get_variable(
initializer=zero_tensor,
trainable=self._variable.trainable,
collections=["TPU_VAR"],
dtype=variable.slice_dtype,
name=slice_var_name)
else:
slice_var = tf.Variable(
initial_value=zero_tensor,
trainable=self._variable.trainable,
collections=["TPU_VAR"],
dtype=variable.slice_dtype,
name=slice_var_name,
expected_shape=slice_shape)
slices.append(slice_var)
# Restore the initial stack
tf.get_default_graph()._device_function_stack = init_device_stack
# pylint: enable=protected-access
self._laid_out_tensor = mesh_impl.LaidOutTensor(
[tpu_variables.ReplicatedVariable(base_name, slices)])
with tf.device(variable.master_device), utils.outside_all_rewrites():
if os.environ.get("MTF_SEQUENCE_MODE", "") == "1":
if mesh_impl.copy_master_to_slice_ops:
with tf.control_dependencies(
[mesh_impl.copy_master_to_slice_ops[-1]]):
self._copy_master_to_slices = self._gen_copy_master_to_slices_op(
variable.get_master(), shape, slices, slice_shape)
else:
self._copy_master_to_slices = self._gen_copy_master_to_slices_op(
variable.get_master(), shape, slices, slice_shape)
mesh_impl.copy_master_to_slice_ops.append(self._copy_master_to_slices)
else:
self._copy_master_to_slices = self._gen_copy_master_to_slices_op(
variable.get_master(), shape, slices, slice_shape)
slices_with_master_dtype = [
tf.cast(s, variable.master_dtype) for s in slices]
slices_with_master_dtype = [
slices_with_master_dtype[mesh_impl.l2p(logical_pnum)]
for logical_pnum in range(mesh_impl.size)]
self._copy_slices_to_master = variable.assign_to_master(
mesh_impl.combine_slices(slices_with_master_dtype, shape,
device=variable.master_device))
def _gen_copy_master_to_slices_op(self, master_variable, master_shape,
slices, slice_shape):
"""Generate ops which slices master and assign to slices.
Args:
master_variable: The master variable.
master_shape: The shape of master variable.
slices: The list of slice-variables in physical order.
slice_shape: The shape of the slice variable.
Returns:
A grouped tf.assign ops.
"""
mesh_impl = self._mesh_impl
master_layout = mesh_impl.tensor_layout(master_shape)
# For handling case: master is float32 and slices are bfloat16.
if master_variable.dtype != slices[0].dtype:
master_variable = tf.cast(master_variable, slices[0].dtype)
assign_ops = []
if master_layout.is_fully_replicated:
assign_ops = [tf.assign(t, master_variable) for t in slices]
else:
slice_dict = {}
for logical_pnum in xrange(len(slices)):
slice_begin = mesh_impl.slice_begin(master_shape, logical_pnum)
slice_begin_tuple = tuple(slice_begin)
# Reuse the same slice if slice_begin doesn't change.
if slice_begin_tuple not in slice_dict:
slice_dict[slice_begin_tuple] = tf.slice(master_variable,
slice_begin, slice_shape)
physical_pnum = mesh_impl.l2p(logical_pnum)
assign_ops.append(
tf.assign(slices[physical_pnum], slice_dict[slice_begin_tuple]))
return tf.group(assign_ops)
def assign_to_slices(self, assign_fn, values, assign_to_tensor_list=None):
"""Assign to the slice variables.
Args:
assign_fn: a function from
(mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation
values: a list of tf.Tensor
assign_to_tensor_list: an optional list of tf.Variable
Returns:
a tf.operation
"""
if assign_to_tensor_list is None:
assign_to_tensor_list = self._laid_out_tensor.all_slices
# Handle both N -> 1 and N -> N cases.
num_slices = min(len(assign_to_tensor_list), len(values))
devices = [""] * num_slices
return tf.group(
mtf.parallel(devices, assign_fn,
[self._variable] * len(devices),
assign_to_tensor_list[:num_slices],
values[:num_slices]))
@property
def laid_out_tensor(self):
return self._laid_out_tensor
@property
def copy_master_to_slices(self):
return self._copy_master_to_slices
@property
def copy_slices_to_master(self):
return self._copy_slices_to_master
def laid_out_pnum(self):
"""Returns a LaidOutTensor containing the logical processor number.
Returns:
a LaidOutTensor where each slice is an integer scalar
"""
return self.LaidOutTensor([self.pnum_tensor])
def _create_group_assignment(self, mesh_axes):
"""Create group assignment for XLA cross replica ops (physical pnums)."""
partitioning = {}
for logical_pnum in xrange(self.size):
group = mtf.pnum_to_group(self.shape, mesh_axes, logical_pnum)
if group not in partitioning:
partitioning[group] = []
partitioning[group].append(self.l2p(logical_pnum))
group_assignment = []
for group, physical_pnums in partitioning.items():
group_assignment.append(physical_pnums)
return group_assignment
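  # A minimal sketch of the grouping above (assuming a 2x2 mesh such as
  # "rows:2;cols:2" with an identity logical-to-physical mapping; values are
  # illustrative, not produced by running this file):
  #
  #   impl._create_group_assignment([1])  # -> [[0, 1], [2, 3]]
  #   impl._create_group_assignment([0])  # -> [[0, 2], [1, 3]]
  #
  # Each inner list contains the physical processor numbers that participate
  # in one collective (e.g. one cross_replica_sum group).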
def allreduce(self, x, mesh_axes, reduction_fn_string):
"""Grouped allreduce, (summed across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers
reduction_fn_string: "SUM"
Returns:
a LaidOutTensor
Raises:
ValueError: if the reduction is not yet implemented.
"""
if not mesh_axes:
return x
x = x.to_laid_out_tensor()
if reduction_fn_string == "SUM":
group_assignment = self._create_group_assignment(mesh_axes)
group_size = len(group_assignment[0])
tf_in = x.one_slice
dtype = tf_in.dtype
if dtype == tf.float32:
cast_to_float32 = False
elif dtype == tf.bfloat16:
cast_to_float32 = (
group_size > self._allreduce_in_bfloat16_max_group_size)
else:
tf.logging.info("Casting %s to float32 for allreduce" % tf_in.dtype)
cast_to_float32 = True
if cast_to_float32:
tf_in = tf.cast(tf_in, tf.float32)
tf_out = tpu_ops.cross_replica_sum(tf_in, group_assignment)
if cast_to_float32:
tf_out = tf.cast(tf_out, dtype)
return self.LaidOutTensor([tf_out])
else:
for axis in mesh_axes:
x = self.allconcat(x, axis, 0, stack=True)
x = self.LaidOutTensor(
[mtf.reduction_fn(reduction_fn_string)(x.one_slice, 0)])
return x
def allconcat(self, x, mesh_axis, concat_axis, stack=False):
"""Grouped allconcat (like MPI allgather followed by concat).
    TODO(noam): inefficient - replace with an XLA allconcat when available
Args:
x: a LaidOutTensor
mesh_axis: an integer - the mesh axis along which to group
concat_axis: an integer (the Tensor axis along which to concatenate)
stack: a boolean - whether to stack instead of concat
Returns:
a LaidOutTensor
"""
x = x.to_laid_out_tensor()
coord = self.laid_out_pcoord(mesh_axis)
t = x.one_slice
old_shape = t.shape.as_list()
num_parts = self.shape[mesh_axis].size
t = tf.expand_dims(t, concat_axis)
t *= tf.reshape(
tf.one_hot(coord.one_slice, num_parts, dtype=t.dtype),
[num_parts if i == concat_axis else 1
for i in xrange(len(old_shape) + 1)])
if not stack:
new_shape = old_shape[:]
new_shape[concat_axis] *= num_parts
t = tf.reshape(t, new_shape)
return self.allreduce(self.LaidOutTensor([t]), [mesh_axis], "SUM")
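  # Sketch of the allconcat trick above (illustrative, not executable on its
  # own): each core expands its slice with a new axis and multiplies by a
  # one-hot of its own coordinate, so its data lands in its own slot and the
  # grouped SUM allreduce acts as an allgather. With two cores holding x0 and
  # x1 along mesh_axis, core 0 contributes [x0, 0] and core 1 contributes
  # [0, x1]; after the allreduce both cores hold [x0, x1].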
def alltoall(self, x, mesh_axis, split_axis, concat_axis):
"""Grouped alltoall (like MPI alltoall with splitting and concatenation).
Args:
x: a LaidOutTensor
mesh_axis: an integer the mesh axis along which to group
split_axis: an integer (the Tensor axis along which to split)
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor
"""
x = x.to_laid_out_tensor()
t = x.one_slice
group_assignment = self._create_group_assignment([mesh_axis])
dtype = t.dtype
if dtype == tf.float32:
# There seems to be a bug with float32 alltoall.
# Do it in bfloat16 until the bug is fixed.
# TODO(noam): file a bug
t = tf.to_bfloat16(t)
t = tpu_ops.all_to_all(
t,
concat_dimension=concat_axis,
split_dimension=split_axis,
split_count=len(group_assignment[0]),
group_assignment=group_assignment)
t = tf.cast(t, dtype)
x = self.LaidOutTensor([t])
return x
def receive(self, x, mesh_axis, source_pcoord):
"""Collective receive in groups.
Each group contains the processors that differ only in mesh_axis.
```python
group_size = self.shape[mesh_axis].size
```
Args:
x: a LaidOutTensor
mesh_axis: an integer
source_pcoord: a list of optional integers. Each element is either None
or an integer in [0, group_size). If source_pcoord[k] is None, then the
output for the k-th processor in each group is a zero tensor. If
source_pcoord[k] is not None, then the output for the k-th processor in
each group is equal to the input for the source_pcoord[k]-th processor
in that group.
Returns:
a LaidOutTensor
"""
x = x.to_laid_out_tensor()
t = x.one_slice
source_target_pairs = []
for pnum in xrange(self.size):
coord = mtf.pnum_to_processor_coordinates(self.shape, pnum)
k = coord[mesh_axis]
if source_pcoord[k] is not None:
coord[mesh_axis] = source_pcoord[k]
source_pnum = mtf.processor_coordinates_to_pnum(self.shape, coord)
source_target_pairs.append(
[self.l2p(source_pnum),
self.l2p(pnum)])
if not source_target_pairs:
ret = tf.zeros_like(t, t.dtype)
elif t.dtype in [tf.float32, tf.bfloat16, tf.int32]:
ret = tpu_ops.collective_permute(t, source_target_pairs)
else:
# If t is not one of the allowed types, cast and cast back.
ret = tf.cast(tpu_ops.collective_permute(
tf.cast(t, tf.float32), source_target_pairs), t.dtype)
return self.LaidOutTensor([ret])
def slice(self, tf_tensor, tensor_shape):
""""Slice out the corresponding part of tensor given the pnum variable."""
tensor_layout = self.tensor_layout(tensor_shape)
if tensor_layout.is_fully_replicated:
return self.LaidOutTensor([tf_tensor])
else:
slice_shape = self.slice_shape(tensor_shape)
slice_begins = [
self.slice_begin(tensor_shape, pnum) for pnum in xrange(self.size)
]
slice_begins_tensor = tf.stack(slice_begins)
# slice on source device
selected_slice_begin = tf.gather(slice_begins_tensor, self.pnum_tensor)
return self.LaidOutTensor(
[tf.slice(tf_tensor, selected_slice_begin, slice_shape)])
def slicewise(self, fn, *inputs):
"""Execute a function in parallel on all slices.
Args:
fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.
*inputs: a list of inputs. Each input is either a LaidOutTensor or
is convertible to a tf.Tensor.
Returns:
a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.
"""
# convert all inputs to LaidOutTensor where possible
inputs = mtf.convert_args_to_laid_out_tensors(inputs)
ret = fn(*[
x.one_slice if isinstance(x, self.LaidOutTensor) else x
for x in inputs])
if isinstance(ret, tuple):
return tuple([self.LaidOutTensor([t]) for t in ret])
else:
return self.LaidOutTensor([ret])
@property
def device_assignment(self):
return self._device_assignment
@property
def devices(self):
return self._devices
def random(self, shape, tf_fn, kwargs):
"""Call a random tf operation (e.g. random_uniform).
Args:
shape: a Shape
tf_fn: a function such as tf.random.uniform
kwargs: kwargs to pass to tf_fn, except for seed
Returns:
a LaidOutTensor
"""
# TODO(noam): can we make things better with stateless_random?
slice_shape = self.slice_shape(shape)
x = tf_fn(slice_shape, **kwargs)
# TPU does not have seeds enabled. Sync up the
# random choices by zeroing out all but the first core per group of
# identical slices, then allreducing by group.
layout = self.tensor_layout(shape)
# we need to sync across these axes.
mesh_axes = [i for i in xrange(self.ndims)
if i not in layout.tensor_axis_to_mesh_axis]
multiplier = 1.0
for axis in mesh_axes:
multiplier *= tf.cast(
tf.equal(self.laid_out_pcoord(axis).one_slice, 0), x.dtype)
x *= multiplier
x = self.LaidOutTensor([x])
x = self.allreduce(x, mesh_axes, "SUM")
return x
def export_to_tf_tensor(self, x, laid_out_x):
"""Turn a Tensor into a tf.Tensor.
Args:
x: a Tensor
laid_out_x: a LaidOutTensor
Returns:
a tf.Tensor
"""
tensor_layout = self.tensor_layout(x.shape)
if not tensor_layout.is_fully_replicated:
raise NotImplementedError(
"SimdMeshImpl only supports export_to_tf_tensor of fully-replicated "
"Tensors. Try reshaping to new dimension names. "
" x.shape = %s tensor_layout=%s"
% (x.shape, tensor_layout))
return laid_out_x.one_slice
def import_tf_tensor(self, x, tf_x):
"""Import a tf.Tensor, producing a LaidOutTensor.
Args:
x: a Tensor
tf_x: a tf.Tensor
Returns:
a LaidOutTensor
"""
return self.slice(tf_x, x.shape)
@property
def supports_control_dependencies(self):
return False
def einsum(self, equation, *slices):
"""Override this for custom einsum implementation.
Args:
equation: a string
*slices: a list of tf.Tensor
Returns:
a tf.Tensor
"""
return tf.einsum(equation, *slices)
def _ring_2d(m, n):
"""Ring-order of a mxn mesh.
If m and n are both even, then we generate a ring like this:
0 -- 1 -- 2 -- 3
| | | |
15-- 6 -- 5 -- 4
| | | |
14-- 7 -- 8 -- 9
| | | |
13-- 12-- 11-- 10
Args:
m: an integer
n: an integer
Returns:
a list of mxn pairs
"""
if m == 1:
return [(0, i) for i in range(n)]
if n == 1:
return [(i, 0) for i in range(m)]
if m % 2 != 0:
tf.logging.warning("Odd dimension")
return [(i % m, i // m) for i in range(n * m)]
ret = [(0, 0)]
for i in range(m // 2):
for j in range(1, n):
ret.append((2 * i, j))
for j in range(n-1, 0, -1):
ret.append((2 * i + 1, j))
for i in range(m-1, 0, -1):
ret.append((i, 0))
return ret
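# A minimal usage sketch (values derived by hand from the logic above, not
# from a test run): the ring for a 4x4 mesh starts at the origin, snakes
# across pairs of rows, and walks back down the first column, so consecutive
# entries are adjacent chips.
#
#   _ring_2d(4, 4)[:6]   # -> [(0, 0), (0, 1), (0, 2), (0, 3), (1, 3), (1, 2)]
#   _ring_2d(4, 4)[-3:]  # -> [(3, 0), (2, 0), (1, 0)]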
def _logical_1d_to_physical_subspace_auto(sizes_and_strides, physical_shape):
"""Maps logical 1d mesh to subspace of physical nd mesh.
We are mapping a 1d logical mesh to a subspace (a strided slice containing the
  origin) of an n-dimensional physical mesh.
output[i] contains the coordinate-tuple in the physical mesh for the i-th
logical processor.
sizes_and_strides is a list of (size, stride) pairs specifying the dimensions
of the strided slice. For example,
sizes_and_strides=[(2, 16), (4, 1)] would represent the slice containing
[(0, 0), (0, 1), (0, 2), (0, 3),
(16, 0), (16, 1), (16, 2), (16, 3)]
This function heuristically picks an order, with the goal of optimizing
allreduce performance.
Args:
sizes_and_strides: a list of n (size, stride) pairs
physical_shape: ignored
Returns:
a list of coordinate-lists
"""
del physical_shape
ndims = len(sizes_and_strides)
sizes = [p[0] for p in sizes_and_strides]
strides = [p[1] for p in sizes_and_strides]
n = mtf.list_product(sizes)
if ndims >= 2 and sizes[0] > 1 and sizes[1] > 1:
ring = _ring_2d(sizes[0], sizes[1])
ret = []
sizes_combined = [sizes[0] * sizes[1]] + sizes[2:]
for logical_pnum in range(n):
logical_coord = mtf.pnum_to_processor_coordinates(
sizes_combined, logical_pnum)
ret.append(list(ring[logical_coord[0]]) + logical_coord[1:])
else:
ret = [mtf.pnum_to_processor_coordinates(sizes, logical_pnum)
for logical_pnum in range(n)]
# multiply by strides
ret = [[x * stride for x, stride in zip(pcoord, strides)] for pcoord in ret]
return ret
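# Illustrative sketch for the docstring example above (hand-derived, not from
# a test run): with sizes_and_strides=[(2, 16), (4, 1)], the 2x4 subspace is
# visited in ring order and then scaled by the strides, so the logical
# processors map to physical coordinates
#   [0, 0], [0, 1], [0, 2], [0, 3], [16, 3], [16, 2], [16, 1], [16, 0]
# rather than plain lexicographic order.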
def _logical_to_physical_v1(
sizes_and_strides, physical_shape,
fn_1d=_logical_1d_to_physical_subspace_auto):
"""Maps logical m-dimensional mesh to physical n-dimensional mesh.
Also see comments to _logical_1d_to_physical_subspace_auto.
  We are mapping an m-dimensional logical mesh to an n-dimensional physical
  mesh.
output[i] contains the coordinate-tuple in the physical mesh for the i-th
logical processor (if the logical processors are ordered lexicographically).
sizes_and_strides is a list of m lists of n (size, stride) pairs.
sizes_and_strides[i] specifies the subspace (strided slice containing the
origin) of the physical mesh covered by axis i of the logical mesh. See
comments to _logical_1d_to_physical_subspace_auto for more detail.
For example, say we have a physical mesh with shape [4, 4, 2] and a logical
mesh with shape [4, 8]. We want to divide the physical mesh into 4 tiles,
each with shape [2, 2, 2]. The first logical dimension corresponds to which
tile, and the second logical dimension corresponds to position within a tile.
This would correspond to:
physical_shape=[4, 4, 2]
sizes_and_strides=[[(2, 2), (2, 2), (1, 2)], [(2, 1), (2, 1), (2, 1)]]
physical_shape can be inferred from sizes_and_strides, but is passed in for
error checking.
Args:
sizes_and_strides: a list of m list of n (size, stride) pairs
physical_shape: a list of integers
fn_1d: a function like _logical_1d_to_physical_subspace_auto
Returns:
a list of coordinate-lists
"""
pndims = len(physical_shape)
logical_shape = [
mtf.list_product([p[0] for p in l]) for l in sizes_and_strides]
n = mtf.list_product(physical_shape)
if n != mtf.list_product(logical_shape):
raise ValueError(
"logical size and physical size must match "
"- got sizes_and_strides=%s physical_shape=%s"
% (sizes_and_strides, physical_shape))
dimension_layouts = [fn_1d(l, physical_shape) for l in sizes_and_strides]
tf.logging.info("physical_shape: %s" % physical_shape)
tf.logging.info("sizes_and_strides: %s" % sizes_and_strides)
for i, l in enumerate(dimension_layouts):
tf.logging.info("dimension_layout %s: %s" % (i, l))
ret = []
for logical_pnum in range(n):
logical_coordinates = mtf.pnum_to_processor_coordinates(
logical_shape, logical_pnum)
physical_coordinates = [0] * pndims
for logical_axis, logical_coord in enumerate(logical_coordinates):
for physical_axis in range(pndims):
physical_coordinates[physical_axis] += (
dimension_layouts[logical_axis][logical_coord][physical_axis])
ret.append(physical_coordinates)
# verify that we have indeed covered all the processors
l2p = [mtf.processor_coordinates_to_pnum(physical_shape, c) for c in ret]
if sorted(l2p) != list(range(n)):
raise ValueError(
"logical_to_physical produced something that was not a permutation."
" sizes_and_strides=%s physical_shape=%s ret=%s"
% (sizes_and_strides, physical_shape, ret))
return ret
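# A minimal sketch matching the docstring example above (illustrative):
#
#   sizes_and_strides = [[(2, 2), (2, 2), (1, 2)], [(2, 1), (2, 1), (2, 1)]]
#   mapping = _logical_to_physical_v1(sizes_and_strides, [4, 4, 2])
#   # mapping[i] is the [x, y, core] coordinate of the i-th logical processor;
#   # converting each coordinate back with processor_coordinates_to_pnum gives
#   # a permutation of range(32), as verified at the end of the function.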
class HierarchicalTiling(object):
"""One kind of mapping of a logical mesh to a physical mesh."""
def __init__(self, spec, physical_shape):
"""Constructs a HierarchicalTiling.
spec is a list corresponding to the logical dimensions.
spec[i] corresponds to the i-th logical dimension and consists of a name
and a list of integers, the list being the shape of logical axis i when
it is physically projected to the physical mesh and then compacted.
Striding information is omitted. By convention, the earlier dimensions
    get more strided, so the axis corresponding to the last dimension always
gets projected to the tile specified by its shape.
Args:
spec: a list of (string, list-of-integers) pairs
physical_shape: a list of integers
"""
self._names = [p[0] for p in spec]
logical_ndims = len(spec)
physical_ndims = len(physical_shape)
projected_shapes = [p[1] for p in spec]
if logical_ndims > 0 and projected_shapes[0] is None:
# fill in missing value
projected_shapes[0] = list(physical_shape)
for s in projected_shapes[1:]:
for i, x in enumerate(s):
projected_shapes[0][i] //= x
# compute strides, and verify that the spec is valid.
products = [1] * physical_ndims
sizes_and_strides = []
for s in reversed(projected_shapes):
sizes_and_strides.append(
[(size, stride) for size, stride in zip(s, products)])
for i, x in enumerate(s):
products[i] *= x
if products != physical_shape:
raise ValueError("mesh spec multiplies to the wrong size"
"spec=%s physical_shape=%s products=%s" %
(spec, physical_shape, products))
sizes_and_strides.reverse()
self._physical_coordinates = _logical_to_physical_v1(
sizes_and_strides, physical_shape)
self._logical_to_physical = [
mtf.processor_coordinates_to_pnum(physical_shape, c)
for c in self._physical_coordinates]
self._mesh_shape = mtf.Shape(
[mtf.Dimension(name, mtf.list_product(s))
for name, s in zip(self._names, projected_shapes)])
@property
def logical_to_physical(self):
"""List of physical processor numbers."""
return list(self._logical_to_physical)
@property
def mesh_shape(self):
return self._mesh_shape
@classmethod
def spec_to_mesh_shape(cls, spec, num_processors):
"""Compute mesh shape even without knowing the physical shape.
This is useful in cases where the mesh shape must be computed before
you know the physical_shape.
Args:
spec: a list of (string, list-of-integers) pairs
num_processors: an integer
Returns:
a mtf.Shape
"""
logical_ndims = len(spec)
names = [p[0] for p in spec]
sizes = [p[1] for p in spec]
sizes = [None if s is None else mtf.list_product(s) for s in sizes]
if logical_ndims > 0 and sizes[0] is None:
sizes[0] = num_processors // mtf.list_product(sizes[1:])
if mtf.list_product(sizes) != num_processors:
raise ValueError("product of spec must be num_processors"
" spec=%s num_processors=%s"
% (spec, num_processors))
return mtf.Shape(
[mtf.Dimension(name, s) for name, s in zip(names, sizes)])
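# Hypothetical usage sketch for HierarchicalTiling (spec values illustrative):
# on an [8, 8, 2] physical mesh,
#
#   spec = [("dp", None), ("mp", [4, 4, 2])]
#
# gives each "mp" group a 4x4x2 tile of 32 cores and lets the leading "dp"
# dimension fill in the remainder, so HierarchicalTiling(spec, [8, 8, 2])
# yields mesh_shape [dp=4, mp=32], and spec_to_mesh_shape(spec, 128) computes
# the same shape without knowing the physical shape.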
def physical_shape_3d_from_topology_proto_4d(mesh_shape):
"""Convert a 4d shape that we get from TPU estimator to a 3d shape.
Args:
mesh_shape: a list of length 4
Returns:
a list of length 3
"""
if len(mesh_shape) != 4:
raise ValueError("Expected a 4d shape [x, y, z, core]")
return [mesh_shape[1]*mesh_shape[2], mesh_shape[0], mesh_shape[3]]
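# For example (illustrative values): a topology reported as [4, 4, 1, 2] by
# the TPU estimator becomes [4 * 1, 4, 2] == [4, 4, 2] (rows, cols, cores).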
def auto_logical_to_physical_tpu(logical_shape,
physical_shape,
return_coordinates=False):
"""Set up a mapping from logical to physical cores for TPU.
We will try to set up a mapping so that allreduce operations are relatively
fast, prioritizing the later dimensions in the mesh_shape.
Example:
auto_logical_to_physical_tpu(
logical_shape=[16, 8], physical_shape=[8, 8, 1, 2])
Heuristics in this function subject to change.
Args:
logical_shape: a list of integers
physical_shape: a list of integers - typically [X, Y, 1, cores]
return_coordinates: a boolean - return a list of integer lists (coordinates)
instead of a list of processor indices
Returns:
    logical_to_physical: a permutation of range(product(physical_shape))
"""
tf.logging.info("auto_logical_to_physical_tpu "
"logical_shape=%s physical_shape=%s" %
(logical_shape, physical_shape))
if mtf.list_product(logical_shape) != mtf.list_product(physical_shape):
raise ValueError(
"physical and logical shapes must have the same product "
"physical_shape=%s logical_shape=%s" % (physical_shape, logical_shape))
# drop logical dimensions of size 1
logical_shape = [i for i in logical_shape if i != 1]
num_cores = mtf.list_product(logical_shape)
# For physical shapes different from what we are used to [2^a, 2^b, 2],
# return a simple default value (a lexicographic ordering)
def _default_value():
default = list(range(num_cores))
if return_coordinates:
      default = [mtf.pnum_to_processor_coordinates(physical_shape, i)
                 for i in default]
return default
if len(physical_shape) == 4 and physical_shape[2] == 1:
physical_shape = physical_shape_3d_from_topology_proto_4d(physical_shape)
elif len(physical_shape) != 3:
tf.logging.warning("Unrecognized format for tpu physical shape")
return _default_value()
# physical_shape is a triple of rows, cols, cores
p0, p1, p2 = physical_shape
if p2 != 2:
    return _default_value()
for dimsize in [p0, p1]:
# if dimsize not a power of 2, give up
if dimsize & (dimsize - 1):
return _default_value()
# At this point, the physical shape has at least 1x1x2=2 cores, so there
# must be at least one logical dimension.
assert logical_shape
if len(logical_shape) == 1:
# ring of p0 x p1 chips
ring = _ring_2d(p0, p1)
logical_to_physical = []
for logical_pnum in range(num_cores):
core_on_chip = logical_pnum % 2
chip_num = logical_pnum // 2
i, j = ring[chip_num]
logical_to_physical.append((i, j, core_on_chip))
else:
# We have a p0 x p1 rectangle of chips, which we will tile with rectangular
# tiles. The first logical dimension correspond to the number of tiles,
# and the other logical dimensions will correspond to position within a
# tile.
num_tiles = logical_shape[0]
tile_chips = num_cores // num_tiles // p2
# If we can, we make each tile occupy exactly one row or column of chips.
# Otherwise, we make each tile approximately square.
if len(logical_shape) == 2 and tile_chips == p0:
t0, t1 = [tile_chips, 1]
elif len(logical_shape) == 2 and tile_chips == p1:
t0, t1 = [1, tile_chips]
else:
# try to make the tile approximately square
lg_tile_chips = int(math.log(tile_chips, 2))
t0 = 2 ** (lg_tile_chips // 2)
# make sure that the tile fits in the mesh - i.e.
# t0 <= p0
# t1 == tile_chips // t0 <= p1
t0 = min(t0, p0)
t0 = max(t0, tile_chips // p1)
t1 = tile_chips // t0
# recursive call to find mapping for one tile
tile_logical_to_physical = auto_logical_to_physical_tpu(
logical_shape[1:], [t0, t1, p2], return_coordinates=True)
tiles_ring = _ring_2d(p0 // t0, p1 // t1)
logical_to_physical = []
for logical_pnum in range(num_cores):
logical_tile_num = logical_pnum // (t0 * t1 * p2)
logical_pos_in_tile = logical_pnum % (t0 * t1 * p2)
logical_to_physical.append((
tiles_ring[logical_tile_num][0] * t0 +
tile_logical_to_physical[logical_pos_in_tile][0],
tiles_ring[logical_tile_num][1] * t1 +
tile_logical_to_physical[logical_pos_in_tile][1],
tile_logical_to_physical[logical_pos_in_tile][2]))
tf.logging.info("auto_logical_to_physical_tpu logical_to_physical = %s"
% logical_to_physical)
if return_coordinates:
return logical_to_physical
else:
return [mtf.processor_coordinates_to_pnum(physical_shape, coord)
for coord in logical_to_physical]
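# A minimal usage sketch (mirroring the docstring example; the exact
# permutation depends on the heuristics above):
#
#   l2p = auto_logical_to_physical_tpu(
#       logical_shape=[16, 8], physical_shape=[8, 8, 1, 2])
#   # l2p is a permutation of range(128); l2p[i] is the physical core assigned
#   # to logical core i, chosen so the trailing logical dimension maps to a
#   # compact tile of chips for fast allreduces.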
| mesh-master | mesh_tensorflow/simd_mesh_impl.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mesh TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mesh_tensorflow import beam_search
from mesh_tensorflow import layers
from mesh_tensorflow import optimize
from mesh_tensorflow import placement_mesh_impl
from mesh_tensorflow import simd_mesh_impl
from mesh_tensorflow import tpu_variables
from mesh_tensorflow import utils
from mesh_tensorflow.ops_with_redefined_builtins import * # pylint: disable=wildcard-import
# TODO(trandustin): Seal module.
# from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=line-too-long
#
# _allowed_symbols = None
#
# remove_undocumented(__name__, _allowed_symbols)
| mesh-master | mesh_tensorflow/__init__.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributed variable implementation for TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
try:
from tensorflow.python.types import core # pylint:disable=g-import-not-at-top,g-direct-tensorflow-import
TF_23 = True
except ImportError:
TF_23 = False
if TF_23:
VariableBase = core.Tensor
else:
VariableBase = object
@contextlib.contextmanager
def _handle_graph(handle):
with handle.graph.as_default():
yield
def _enclosing_tpu_context():
# pylint: disable=protected-access
context = ops.get_default_graph()._get_control_flow_context()
# pylint: enable=protected-access
while context is not None and not isinstance(
context, control_flow_ops.XLAControlFlowContext):
context = context.outer_context
return context
class ReplicatedVariable(VariableBase):
"""A replicated variable for use on TPUs.
When accessed inside a tpu.replicate() context, this variable acts as if it
is a single variable whose handle is a replicated input to the computation.
Outside a tpu.replicate() context currently this object has pretty murky
semantics, especially with respect to things such as
* initialization
* colocation.
TODO(phawkins): merge this with the TPU DistributionStrategy code.
"""
def __init__(self, name, variables):
self._name = name
self._primary_var = variables[0]
self._vars = variables
self._cached_value = None
self._dtype = variables[0].dtype
@property
def handle(self):
tpu_context = _enclosing_tpu_context()
if tpu_context is None:
return self._primary_var.handle
return tpu_context.get_replicated_var_handle(self._name, self._vars)
@contextlib.contextmanager
def _assign_dependencies(self):
"""Makes assignments depend on the cached value, if any.
This prevents undefined behavior with reads not ordered wrt writes.
Yields:
None.
"""
if self._cached_value is not None:
with ops.control_dependencies([self._cached_value]):
yield
else:
yield
@property
def initializer(self):
return control_flow_ops.group([v.initializer for v in self._vars])
@property
def graph(self):
return self._primary_var.graph
@property
def _shared_name(self):
return self._common_name
@property
def _unique_id(self):
return self._primary_var._unique_id # pylint: disable=protected-access
@property
def name(self):
return self._name
@property
def dtype(self):
return self._primary_var.dtype
@property
def shape(self):
return self._primary_var.shape
def get_shape(self):
return self._primary_var.get_shape()
def to_proto(self, export_scope=None):
return self._primary_var.to_proto(export_scope=export_scope)
@property
def constraint(self):
return None
@property
def op(self):
return self.get().op
def _read_variable_op(self):
if _enclosing_tpu_context() is None:
return self._primary_var.read_value()
v = gen_resource_variable_ops.read_variable_op(self.handle, self._dtype)
return v
def read_value(self):
return self._read_variable_op()
def assign(self, value, use_locking=None, name=None, read_value=False):
del use_locking
with _handle_graph(self.handle), self._assign_dependencies():
value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)
assign_op = gen_resource_variable_ops.assign_variable_op(
self.handle, value_tensor, name=name)
if read_value:
return self._read_variable_op()
return assign_op
def assign_add(self, delta, use_locking=None, name=None, read_value=True):
del use_locking
with _handle_graph(self.handle), self._assign_dependencies():
assign_add_op = gen_resource_variable_ops.assign_add_variable_op(
self.handle,
ops.convert_to_tensor(delta, dtype=self.dtype),
name=name)
if read_value:
return self._read_variable_op()
return assign_add_op
def assign_sub(self, delta, use_locking=None, name=None, read_value=True):
del use_locking
with _handle_graph(self.handle), self._assign_dependencies():
assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op(
self.handle,
ops.convert_to_tensor(delta, dtype=self.dtype),
name=name)
if read_value:
return self._read_variable_op()
return assign_sub_op
def get(self):
return self._primary_var
@property
def _in_graph_mode(self):
return self._primary_var._in_graph_mode # pylint: disable=protected-access
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
pass
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
# pylint: disable=protected-access
if _enclosing_tpu_context() is None:
if hasattr(self._primary_var, '_dense_var_to_tensor'):
return self._primary_var._dense_var_to_tensor(dtype, name, as_ref)
else:
return ops.convert_to_tensor(self._primary_var)
# pylint: enable=protected-access
if dtype is not None and dtype != self.dtype:
return NotImplemented
if as_ref:
return self.handle
else:
return self.read_value()
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion(var, dtype=None, name=None, as_ref=False):
return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access
ops.register_tensor_conversion_function(ReplicatedVariable, _tensor_conversion)
if not TF_23:
ops.register_dense_tensor_like_type(ReplicatedVariable)
| mesh-master | mesh_tensorflow/tpu_variables.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of beam search with penalties."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
from mesh_tensorflow import ops_with_redefined_builtins as mtf
import tensorflow.compat.v1 as tf
# Assuming EOS_ID is 1
EOS_ID = 1
# Default value for INF
INF = 1. * 1e7
def compute_topk_scores_and_seq(sequences, scores, scores_to_gather, flags,
beam_dim, prefix="default"):
"""Given sequences and scores, will gather the top k=beam size sequences.
This function is used to grow alive, and finished. It takes sequences,
scores, and flags, and returns the top k from sequences, scores_to_gather,
and flags based on the values in scores.
This method permits easy introspection using tfdbg. It adds two named ops
that are prefixed by `prefix`:
- _topk_seq: the tensor for topk_seq returned by this method.
- _topk_flags: the tensor for topk_finished_flags returned by this method.
Args:
sequences: Tensor of sequences that we need to gather from.
[batch_size, beam_size, seq_length]
scores: Tensor of scores for each sequence in sequences.
[batch_size, beam_size]. We will use these to compute the topk.
scores_to_gather: Tensor of scores for each sequence in sequences.
[batch_size, beam_size]. We will return the gathered scores from here.
Scores to gather is different from scores because for grow_alive, we will
need to return log_probs, while for grow_finished, we will need to return
the length penalized scores.
flags: Tensor of bools for sequences that say whether a sequence has reached
EOS or not
beam_dim: mtf.Dimension
prefix: an optional string
Returns:
Tuple of
(topk_seq [batch_size, beam_size, decode_length],
topk_gathered_scores [batch_size, beam_size],
topk_finished_flags[batch_size, beam_size],
selector)
"""
unused_batch_dim, old_beam_dim, unused_length_dim = sequences.shape.dims
_, topk_indices = mtf.top_k(scores, old_beam_dim, k_dim=beam_dim)
selector = mtf.one_hot(topk_indices, old_beam_dim, dtype=tf.float32)
# Gather up the highest scoring sequences.
# For each operation added, give it
# a concrete name to simplify observing these operations with tfdbg.
# Clients can capture these tensors by watching these node names.
def gather(tensor, name):
with tf.name_scope(prefix + name):
output_shape = mtf.Shape(
[beam_dim if d == old_beam_dim else d for d in tensor.shape.dims])
return mtf.gather(
tensor, topk_indices, old_beam_dim, output_shape=output_shape)
topk_seq = gather(sequences, "_seq")
topk_flags = gather(flags, "_flags")
topk_gathered_scores = gather(scores_to_gather, "_scores")
return topk_seq, topk_gathered_scores, topk_flags, selector
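# Illustrative sketch (dimension names assumed, not taken from a real model):
# with sequences of shape [batch, beam=4, length], scores/flags of shape
# [batch, beam=4], and beam_dim = mtf.Dimension("new_beam", 2), the call keeps
# the 2 best-scoring candidates per batch element:
#
#   topk_seq, topk_scores, topk_flags, selector = compute_topk_scores_and_seq(
#       sequences, scores, scores_to_gather, flags, beam_dim, prefix="demo")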
@gin.configurable
def beam_search(logits_fn,
initial_ids,
alpha,
states=None,
eos_id=EOS_ID,
stop_early=True,
decode_length=None,
use_tpu=True,
dtype=tf.float32,
layout=None,
mesh_shape=None,
num_prefilter=2):
"""Beam search with length penalties.
Requires a function that can take the currently decoded symbols and return
the logits for the next symbol. The implementation is inspired by
https://arxiv.org/abs/1609.08144.
When running, the beam search steps can be visualized by using tfdbg to watch
the operations generating the output ids for each beam step. These operations
have the pattern:
(alive|finished)_topk_(seq,scores)
Operations marked `alive` represent the new beam sequences that will be
processed in the next step. Operations marked `finished` represent the
completed beam sequences, which may be padded with 0s if no beams finished.
Operations marked `seq` store the full beam sequence for the time step.
Operations marked `scores` store the sequence's final log scores.
The beam search steps will be processed sequentially in order, so when
  capturing tensors observed from these operations, clients can make
  assumptions about which step is being recorded.
num_prefilter is a theoretically lossy shortcut around slow performance of
top_k on TPU on large Tensors and large k. This option should be removed once
  better top_k implementations on TPU are available. If num_prefilter is set to
a nonzero value, then at each step we first compute the top num_prefilter
sequences per beam and then compute the top k sequences overall from among
those. Empirically, there seems to be no quality difference in setting
num_prefilter to 2.
Args:
logits_fn: Interface to the model, to provide logits.
      Should take:
        step_num - mtf Scalar
        ids - mtf Tensor with shape [batch, beam, length]
        states - list of mtf.Tensor
      Should return:
        logits - [batch, beam, vocab_size], dtype=dtype
        new_states - list of mtf.Tensor
initial_ids: a mtf.Tensor with shape [batch_dim, beam_dim, length_dim])
alpha: alpha for length penalty.
states: list of mtf.Tensor
eos_id: ID for end of sentence.
stop_early: a boolean - stop once best sequence is provably determined.
decode_length: a mtf Scalar of dtype tf.int32 - maximum length of decodes
use_tpu: a boolean
dtype: a tf.dtype
layout: an optional string
mesh_shape: an optional string
num_prefilter: an optional integer
Returns:
Tuple of
(decoded beams [batch, beam, length]
decoding probabilities [batch, beam_size])
"""
batch_dim, beam_dim, length_dim = initial_ids.shape.dims
batch_and_beam_dim = mtf.Dimension(
batch_dim.name, batch_dim.size * beam_dim.size)
mesh = initial_ids.mesh
batch_by_beam = mtf.Shape([batch_dim, beam_dim])
initial_log_probs = mtf.broadcast(
mtf.one_hot(
mtf.constant(mesh, 0, dtype=tf.int32),
beam_dim,
on_value=0.0,
off_value=-INF,
dtype=dtype),
batch_by_beam)
length_scalar = mtf.constant(mesh, length_dim.size, dtype=tf.int32)
if decode_length is None:
decode_length = length_scalar
else:
decode_length = mtf.minimum(decode_length, length_scalar)
alive_log_probs = initial_log_probs
alive_seq = initial_ids
# Finished will keep track of all the sequences that have finished so far
# Finished log probs will be negative infinity in the beginning
# finished_flags will keep track of booleans
finished_seq = initial_ids
finished_scores = mtf.constant(mesh, -INF, batch_by_beam, dtype=dtype)
# Setting the scores of the initial to negative infinity.
finished_flags = mtf.constant(mesh, False, batch_by_beam, tf.bool)
def grow_finished(finished_seq, finished_scores, finished_flags, curr_seq,
curr_scores, curr_finished):
"""Given sequences and scores, will gather the top k=beam size sequences.
Args:
finished_seq: Current finished sequences.
[batch, beam, length]
finished_scores: scores for each of these sequences.
[batch, beam]
finished_flags: finished bools for each of these sequences.
[batch, beam]
curr_seq: current topk sequence that has been grown by one position.
[batch, beam, length]
curr_scores: scores for each of these sequences. [batch, beam]
curr_finished: Finished flags for each of these sequences.
[batch, beam]
Returns:
Tuple of
(Topk sequences based on scores,
log probs of these sequences,
Finished flags of these sequences,
None (no states))
"""
# Set the scores of the unfinished seq in curr_seq to large negative
# values
curr_scores += (1. - mtf.cast(curr_finished, curr_scores.dtype)) * -INF
unused_batch_dim, beam_dim, unused_length_dim = finished_seq.shape.dims
# concatenating the sequences and scores along beam axis
def _my_concat(a, b):
a = mtf.rename_dimension(a, "beam", "triple_beam")
b = mtf.rename_dimension(b, "double_beam", "triple_beam")
return mtf.concat([a, b], "triple_beam")
curr_finished_seq = _my_concat(finished_seq, curr_seq)
curr_finished_scores = _my_concat(finished_scores, curr_scores)
curr_finished_flags = _my_concat(finished_flags, curr_finished)
return compute_topk_scores_and_seq(
curr_finished_seq, curr_finished_scores, curr_finished_scores,
curr_finished_flags, beam_dim, "grow_finished")
def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished):
"""Given sequences and scores, will gather the top k=beam size sequences.
Args:
curr_seq: current topk sequence that has been grown by one position.
[batch, beam, length]
curr_scores: scores for each of these sequences. [batch_size, beam_size]
curr_log_probs: log probs for each of these sequences.
[batch, beam]
curr_finished: Finished flags for each of these sequences.
[batch, beam]
Returns:
Tuple of
(Topk sequences based on scores,
log probs of these sequences,
Finished flags of these sequences)
"""
# Set the scores of the finished seq in curr_seq to large negative
# values
curr_scores += mtf.cast(curr_finished, curr_scores.dtype) * -INF
return compute_topk_scores_and_seq(curr_seq, curr_scores, curr_log_probs,
curr_finished, beam_dim,
"grow_alive")
def grow_topk(i, alive_seq, alive_log_probs, states=None):
r"""Inner beam search loop.
This function takes the current alive sequences, and grows them to topk
sequences where k = 2*beam. We use 2*beam because, we could have beam_size
number of sequences that might hit <EOS> and there will be no alive
sequences to continue. With 2*beam_size, this will not happen. This relies
on the assumption the vocab size is > beam size. If this is true, we'll
have at least beam_size non <EOS> extensions if we extract the next top
2*beam words.
    Length penalty is given by ((5 + len(decode)) / 6) ^ alpha. Please refer to
https://arxiv.org/abs/1609.08144.
Args:
i: loop index
alive_seq: Topk sequences decoded so far [batch, beam, length]
alive_log_probs: probabilities of these sequences. [batch, beam]
states: optional list of mtf.Tensor
Returns:
Tuple of
(Topk sequences extended by the next word,
The log probs of these sequences,
The scores with length penalty of these sequences,
Flags indicating which of these sequences have finished decoding,
list of transformed decoding states)
"""
logits, new_states = logits_fn(i, alive_seq, states)
batch_dim, beam_dim, vocab_dim = logits.shape.dims
# Convert logits to normalized log probs
candidate_log_probs = mtf.log_softmax(logits, vocab_dim)
# Multiply the probabilities by the current probabilities of the beam.
# (batch_size, beam_size, vocab_size) + (batch_size, beam_size, 1)
log_probs = candidate_log_probs + alive_log_probs
length_penalty = mtf.pow(((5. + mtf.cast(i + 1, logits.dtype)) / 6.), alpha)
# scores have shape [batch, beam, vocab]
curr_scores = log_probs / length_penalty
# We find the top 2k sequences to make sure we get k alive sequences.
#
# TODO(noam): This is inefficient. We should separately compute the k
# finished sequences (previously alive sequences + EOS), and the top k new
# alive sequences.
double_beam = mtf.Dimension("double_beam", beam_dim.size * 2)
if use_tpu and layout is not None and mesh_shape is not None:
# Do some partial top-k-ing first locally to avoid communication.
# We reshape the logits from:
# [batch, beam, vocab] to
# [batch, beam, major_vocab, minor_vocab]
# We first reduce (locally) across the minor_vocab dimension. This makes
# the thing we need to broadcast smaller.
# This also enables our shortcut of only picking the top num_prefilter
# sequences per beam per major_vocab in the first pass.
major_vocab_size = mtf.tensor_dim_to_mesh_dim_size(
layout, mesh_shape, vocab_dim)
major_vocab = mtf.Dimension(vocab_dim.name, major_vocab_size)
minor_vocab = mtf.Dimension(
"minor_vocab", vocab_dim.size // major_vocab_size)
curr_scores = mtf.reshape(
curr_scores, [batch_dim, beam_dim, major_vocab, minor_vocab])
prefilter = mtf.Dimension("prefilter", num_prefilter or double_beam.size)
# shape = [batch_dim, beam_dim, major_vocab, prefilter]
top_scores, top_minor_vocab_ids = mtf.top_k(
curr_scores, reduced_dim=minor_vocab, k_dim=prefilter)
combined = mtf.Dimension(
"combined", beam_dim.size * major_vocab.size * prefilter.size)
top_scores = mtf.reshape(top_scores, [batch_dim, combined])
top_minor_vocab_ids = mtf.reshape(
top_minor_vocab_ids, [batch_dim, combined])
      # shape = [batch_dim, double_beam]
# ids are indices representing (beam, major_vocab, prefilter)
top_scores, top_combined_ids = mtf.top_k(
top_scores, reduced_dim=combined, k_dim=double_beam)
top_minor_vocab_ids = mtf.gather(
top_minor_vocab_ids, top_combined_ids, combined,
output_shape=[batch_dim, double_beam])
top_beam_index = top_combined_ids // (major_vocab.size * prefilter.size)
top_combined_ids -= top_beam_index * (major_vocab.size * prefilter.size)
top_major_vocab_ids = top_combined_ids // prefilter.size
top_combined_ids -= top_major_vocab_ids * prefilter.size
top_ids = top_major_vocab_ids * minor_vocab.size + top_minor_vocab_ids
else:
beam_and_vocab_dim = mtf.Dimension(
"beam_and_vocab", beam_dim.size * vocab_dim.size)
flat_shape = mtf.Shape([batch_dim, beam_and_vocab_dim])
# Flatten out (beam_size, vocab_size) probs into a list of possibilities
flat_curr_scores = mtf.reshape(
curr_scores, flat_shape, name="flatten_scores")
top_scores, top_ids = mtf.top_k(
flat_curr_scores, reduced_dim=beam_and_vocab_dim, k_dim=double_beam)
# Work out what beam the top probs are in.
top_beam_index = top_ids // vocab_dim.size
top_ids %= vocab_dim.size # Unflatten the ids
# Recovering the log probs because we will need to send them back
top_log_probs = top_scores * length_penalty
selector = mtf.one_hot(top_beam_index, beam_dim, dtype=tf.float32)
def my_gather(tensor):
return mtf.gather(
tensor, top_beam_index, beam_dim,
output_shape=mtf.Shape(
[double_beam if d == beam_dim else d for d in tensor.shape.dims]))
# Gather up the most probable 2*beams both for the ids and finished_in_alive
# bools
top_seq = my_gather(alive_seq)
# Append the most probable alive
top_seq += top_ids * mtf.one_hot(i, length_dim, dtype=tf.int32)
top_finished = mtf.equal(top_ids, eos_id)
return (
top_seq, top_log_probs, top_scores, top_finished, new_states, selector)
def inner_loop(i, alive_seq, alive_log_probs, finished_seq, finished_scores,
finished_flags, *states):
"""Inner beam search loop.
There are three groups of tensors, alive, finished, and topk.
The alive group contains information about the current alive sequences
The topk group contains information about alive + topk current decoded words
the finished group contains information about finished sentences, that is,
the ones that have decoded to <EOS>. These are what we return.
The general beam search algorithm is as follows:
While we haven't terminated (pls look at termination condition)
1. Grow the current alive to get beam*2 topk sequences
2. Among the topk, keep the top beam_size ones that haven't reached EOS
into alive
3. Among the topk, keep the top beam_size ones have reached EOS into
finished
Repeat
    To keep things simple with fixed-size tensors, we will end
    up inserting unfinished sequences into finished in the beginning. To stop
    that, we add -ve INF to the score of the unfinished sequence so that when a
true finished sequence does appear, it will have a higher score than all the
unfinished ones.
Args:
i: loop index
alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]
alive_log_probs: probabilities of the beams. [batch_size, beam_size]
finished_seq: Current finished sequences.
[batch_size, beam_size, i+1]
finished_scores: scores for each of these sequences.
[batch_size, beam_size]
finished_flags: finished bools for each of these sequences.
[batch_size, beam_size]
*states: mtf Tensors
Returns:
Tuple of
(Incremented loop index
New alive sequences,
Log probs of the alive sequences,
New finished sequences,
Scores of the new finished sequences,
        Flags indicating which sequences in finished have reached EOS,
dict of final decoding states)
"""
states = [mtf.replace_dimensions(
state, batch_and_beam_dim, [batch_dim, beam_dim]) for state in states]
# Each inner loop, we carry out three steps:
# 1. Get the current topk items.
# 2. Extract the ones that have finished and haven't finished
# 3. Recompute the contents of finished based on scores.
(top2k_seq, top2k_log_probs, top2k_scores, top2k_finished,
new_states, first_selector) = grow_topk(
i, alive_seq, alive_log_probs, states)
with tf.variable_scope("grow_alive"):
alive_seq, alive_log_probs, _, second_selector = grow_alive(
top2k_seq, top2k_scores, top2k_log_probs, top2k_finished)
with tf.variable_scope("grow_finished"):
finished_seq, finished_scores, finished_flags, _ = grow_finished(
finished_seq, finished_scores, finished_flags, top2k_seq,
top2k_scores, top2k_finished)
old_beam_dim = mtf.Dimension("old_beam", beam_dim.size)
selector = mtf.einsum(
[mtf.rename_dimension(first_selector, beam_dim.name, old_beam_dim.name),
second_selector],
output_shape=[batch_dim, old_beam_dim, beam_dim])
gathered_states = []
if use_tpu and layout is not None and mesh_shape is not None:
# This hack combines the beam dimension with some of the batch dimension.
# It makes gathering faster on TPU.
#
# Instead of multiplying by a [beam, beam] selector matrix, we instead
# multiply by a [minor_batch*beam, minor_batch*beam] selector matrix.
# This is theoretically more FLOPs, but it brings the matrix size closer
# to the magic optimal value of 128.
#
# TODO(noam): file a bug with the XLA team to do this automatically
major_batch_size = mtf.tensor_dim_to_mesh_dim_size(
layout, mesh_shape, batch_dim)
major_batch = mtf.Dimension(batch_dim.name, major_batch_size)
minor_batch = mtf.Dimension(
"minor_batch", batch_dim.size // major_batch.size)
old_minor_batch = mtf.Dimension("old_minor_batch", minor_batch.size)
old_combined = mtf.Dimension(
"old_combined", minor_batch.size * beam_dim.size)
combined = mtf.Dimension(
"new_combined", old_combined.size)
same_minor_batch = mtf.to_float(
mtf.equal(mtf.range(mesh, old_minor_batch, tf.float32),
mtf.range(mesh, minor_batch, tf.float32)))
selector = mtf.reshape(
selector, [major_batch, minor_batch, old_beam_dim, beam_dim])
selector = mtf.einsum(
[selector, same_minor_batch],
output_shape=[major_batch,
old_minor_batch, old_beam_dim,
minor_batch, beam_dim],
reduced_dims=[])
selector = mtf.reshape(selector, [major_batch, old_combined, combined])
for state in new_states:
s = mtf.replace_dimensions(
state, [batch_dim, beam_dim], [major_batch, old_combined])
s = mtf.einsum(
[s, mtf.cast(selector, state.dtype)],
reduced_dims=[old_combined],
output_shape=mtf.replace_dimensions(
state.shape, [batch_dim, beam_dim],
[major_batch, combined]))
gathered_states.append(mtf.replace_dimensions(
s, [major_batch, combined], batch_and_beam_dim))
else:
for state in new_states:
state = mtf.einsum(
[mtf.rename_dimension(state, beam_dim.name, old_beam_dim.name),
mtf.cast(selector, state.dtype)],
reduced_dims=[old_beam_dim], output_shape=state.shape)
state = mtf.replace_dimensions(
state, [batch_dim, beam_dim], batch_and_beam_dim)
gathered_states.append(state)
return (i + 1, alive_seq, alive_log_probs, finished_seq, finished_scores,
finished_flags) + tuple(gathered_states)
def _is_finished(i, unused_alive_seq, alive_log_probs, unused_finished_seq,
finished_scores, finished_in_finished, *unused_states):
"""Checking termination condition.
    We terminate when we have decoded up to decode_length or the lowest
    scoring item in finished has a greater score than the highest-probability
    item in alive divided by the max length penalty.
Args:
i: loop index
alive_log_probs: probabilities of the beams. [batch_size, beam_size]
finished_scores: scores for each of these sequences.
[batch_size, beam_size]
finished_in_finished: finished bools for each of these sequences.
[batch_size, beam_size]
Returns:
Bool.
"""
# TODO(noam): support a different decode length...
# decode_length = mtf.constant(mesh, length_dim.size, dtype=tf.int32)
# del alive_log_probs, finished_scores, finished_in_finished
# return mtf.less(i, length_dim.size)
if not stop_early:
return mtf.less(i, decode_length)
max_length_penalty = mtf.pow(
((5. + mtf.cast(decode_length, finished_scores.dtype)) / 6.), alpha)
# The best possible score of the most likely alive sequence.
lower_bound_alive_scores = mtf.gather(
alive_log_probs, mtf.constant(mesh, 0, dtype=tf.int32),
beam_dim) / max_length_penalty
    # Now compute the lowest score of a finished sequence in finished.
    # If the sequence isn't finished, we multiply its score by 0. Since
    # scores are all negative, taking the min will give us the score of the
    # lowest finished item.
lowest_score_of_finished_in_finished = mtf.reduce_min(
finished_scores * mtf.cast(finished_in_finished, finished_scores.dtype),
reduced_dim=beam_dim)
    # If none of the sequences have finished, then the min will be 0, so we
    # replace it with -INF in that case. The score of any sequence in alive
    # will be much higher than -INF, and the termination condition will not
    # be met.
lowest_score_of_finished_in_finished += (
(1. - mtf.cast(mtf.reduce_any(
finished_in_finished, reduced_dim=beam_dim),
finished_scores.dtype)) * -INF)
bound_is_met = mtf.reduce_all(
mtf.greater(lowest_score_of_finished_in_finished,
lower_bound_alive_scores))
return mtf.logical_and(
mtf.less(i, decode_length), mtf.logical_not(bound_is_met))
initial_step_num = mtf.constant(mesh, 0, dtype=tf.int32)
states = [mtf.replace_dimensions(
state, [batch_dim, beam_dim], batch_and_beam_dim) for state in states]
while_loop_inputs = [
initial_step_num, alive_seq, alive_log_probs, finished_seq,
finished_scores, finished_flags] + states
(_, alive_seq, alive_log_probs, finished_seq, finished_scores,
finished_flags) = mtf.while_loop(
_is_finished, inner_loop, while_loop_inputs,
num_loop_vars=None if use_tpu else 6)[:6]
  # Account for a corner case: it is possible that no sequence in alive for a
  # particular batch item ever reached EOS. In that case, we just copy the
  # contents of alive for that batch item. If reduce_any(finished_flags) over
  # the beam dimension is False, then no sequence for that batch index reached
  # EOS. We do the same for the scores as well.
finished_seq = mtf.where(
mtf.reduce_any(finished_flags, reduced_dim=beam_dim),
finished_seq, alive_seq)
finished_scores = mtf.where(
mtf.reduce_any(finished_flags, reduced_dim=beam_dim),
finished_scores, alive_log_probs)
return finished_seq, finished_scores
@gin.configurable
def greedy_decode(logits_fn,
initial_ids,
temperature=0.0,
initial_states=None,
eos_id=EOS_ID,
forced_ids=None,
use_tpu=True):
"""Greedy decoding.
Args:
logits_fn: Interface to the model, to provide logits.
      Should take:
step_num - mtf Scalar
ids - mtf Tensor with shape [..., length]
states - list of mtf.Tensor
Should return:
logits - [batch, vocab_size]
new_states - list of mtf.Tensor
initial_ids: mtf.Tensor with shape [..., length], containing zeros.
temperature: a float between 0.0 (argmax) and 1.0 (random)
initial_states: list of mtf.Tensor
eos_id: ID for end of sentence.
forced_ids: optional mtf.Tensor with shape [..., length]
use_tpu: a boolean
Returns:
Tensor with shape [..., length]
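  #### Examples
  A minimal call sketch (illustrative only; `my_logits_fn`, `initial_ids` and
  `initial_states` are assumed to be built by the caller):
  ```python
  samples = greedy_decode(
      my_logits_fn, initial_ids, temperature=0.0,
      initial_states=initial_states)
  ```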
"""
length_dim = initial_ids.shape.dims[-1]
mesh = initial_ids.mesh
num_steps = mtf.constant(mesh, length_dim.size, dtype=tf.int32)
def cond_fn(step_num, prev_ids, *unused_states):
"""Should we run another loop iteration."""
overflow = mtf.equal(step_num, num_steps)
has_eos = mtf.reduce_any(
mtf.equal(prev_ids, eos_id), reduced_dim=length_dim)
all_has_eos = mtf.reduce_all(has_eos)
return mtf.logical_not(mtf.logical_or(overflow, all_has_eos))
def body_fn(step_num, ids, *states):
"""Body function for greedy decoding.
Args:
step_num: a mtf.Tensor
ids: a mtf.Tensor
*states: additional mtf.Tensors
Returns:
new_step_num, new_ids, *new_states
"""
logits, new_states = logits_fn(step_num, ids, states)
vocab_dim = logits.shape.dims[-1]
new_ids = mtf.sample_with_temperature(
logits, vocab_dim, temperature)
if forced_ids is not None:
# force the new ids to equal the partial targets where specified
# (positions where partial_targets contain nonzero values)
forced = mtf.gather(forced_ids, step_num, length_dim)
new_ids = forced + new_ids * mtf.to_int32(mtf.equal(forced, 0))
ids += new_ids * mtf.one_hot(step_num, length_dim, dtype=tf.int32)
new_step_num = step_num + 1
return [new_step_num, ids] + new_states
initial_step_num = mtf.constant(mesh, 0, dtype=tf.int32)
while_loop_inputs = [initial_step_num, initial_ids] + initial_states
final_step_num, mtf_samples = mtf.while_loop(
cond_fn, body_fn, while_loop_inputs,
num_loop_vars=None if use_tpu else 2)[:2]
mtf_samples = mtf.Print(mtf_samples, [final_step_num], "output_length")
return mtf_samples
| mesh-master | mesh_tensorflow/beam_search.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mesh TensorFlow ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import itertools
import operator
import os
import re
import gin
from mesh_tensorflow import utils
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops.gen_nn_ops import conv3d_backprop_input_v2
from tensorflow.python.ops.nn_ops import conv3d_backprop_filter_v2
Dimension = collections.namedtuple("Dimension", ["name", "size"])
def convert_to_dimension(d):
"""Converts input to a Dimension.
Args:
d: Dimension, tuple (string, int), or None.
Returns:
Dimension or None.
Raises:
ValueError: If d cannot be converted to a Dimension.
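  #### Examples
  A small illustrative sketch:
  ```python
  convert_to_dimension(("batch", 4))  # -> Dimension(name="batch", size=4)
  convert_to_dimension(None)          # -> None
  ```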
"""
if d is None:
return None
if isinstance(d, Dimension):
if not isinstance(d.name, str) or not isinstance(d.size, int):
raise ValueError("Bad dimension %s" % (d,))
return d
name, size = d
if isinstance(name, str) and isinstance(size, int):
return Dimension(name, size)
else:
raise ValueError("could not convert %s to Dimension" % (d,))
class Shape(object):
"""Shape of a Tensor or Mesh.
#### Examples
```python
# Create shape [4, 8] with names "x" and "y" respectively.
shape = mtf.Shape([mtf.Dimension("x", 4), mtf.Dimension("y", 8)])
```
"""
def __init__(self, dims):
"""Constructs a shape for a Tensor or Mesh.
Args:
dims: List-like of Dimensions.
Raises:
ValueError: If Dimensions are repeated.
"""
self._dims = [convert_to_dimension(d) for d in tuple(dims)]
if len(set(dims)) != len(dims):
raise ValueError("Shape must not have repeated dimensions %s" % dims)
@property
def dims(self):
return list(self._dims)
@property
def ndims(self):
return len(self._dims)
def __repr__(self):
return self.to_string
def __eq__(self, other):
return self.dims == other.dims
def __ne__(self, other):
return self.dims != other.dims
def __add__(self, other):
if isinstance(other, Shape):
other = other.dims
if isinstance(other, Dimension):
other = [other]
return Shape(self.dims + other)
def __sub__(self, other):
if other is None:
return self
if isinstance(other, Shape):
other = other.dims
if isinstance(other, Dimension):
if other not in self.dims:
raise ValueError(
"Subtracting a dimension from a shape requires that the shape"
" contain that dimension. Use shape - [dimension] for the case"
" where the dimension may not be in the shape.")
other = [other]
return Shape([d for d in self.dims if d not in other])
def __len__(self):
return len(self._dims)
def __getitem__(self, key):
return self._dims[key]
def __iter__(self):
return iter(self._dims)
@property
def to_integer_list(self):
return [d.size for d in self.dims]
@property
def size(self):
return list_product(self.to_integer_list)
@property
def to_string(self):
return "Shape[%s]" % ", ".join(
["%s=%d" % (d.name, d.size) for d in self.dims])
@property
def cumprod(self):
"""Cumulative product (exclusive) of Dimension sizes."""
return _cumprod(self.to_integer_list)[:-1]
def cumprod_to_tensor_axis(self, cumprod):
"""Maximum tensor axis i such that self.cumprod[i] == cumprod, or None."""
try:
return len(self) - 1 - self.cumprod[::-1].index(cumprod)
except ValueError:
return None
@property
def dimension_names(self):
return [d.name for d in self.dims]
def rename_dimension(self, old_name, new_name):
"""Returns a copy where one dimension is renamed."""
if old_name not in self.dimension_names:
raise ValueError("Shape %s does not have dimension named %s"
% (self, old_name))
return Shape(
[Dimension(new_name, d.size) if d.name == old_name else d
for d in self.dims])
def resize_dimension(self, name, new_size):
"""Returns a copy where one dimension has a different size."""
if name not in self.dimension_names:
raise ValueError("Shape %s does not have dimension named %s"
% (self, name))
return Shape(
[Dimension(name, new_size) if d.name == name else d
for d in self.dims])
def get_dim_by_name(self, name):
"""Get the Dimension with `name` from this shape.
Args:
name: a string, the name of the dimension we wish to get
Returns:
Dimension with `name`
Raises:
ValueError: if the shape does not contain a dimension with `name`
"""
for d in self.dims:
if d.name == name:
return d
raise ValueError("Dimension {} not found in {}.".format(
name, self.to_string))
def convert_to_shape(x):
"""Converts input to a Shape.
Args:
x: Shape, str, or None.
Returns:
Shape or None.
Raises:
ValueError: If x cannot be converted to a Shape.
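  #### Examples
  An illustrative sketch; the "name:size" pair syntax is the one parsed by
  _parse_string_to_list_of_pairs:
  ```python
  convert_to_shape("batch:4;d_ff:8")  # -> Shape[batch=4, d_ff=8]
  ```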
"""
if x is None:
return None
if isinstance(x, Shape):
return x
if isinstance(x, str):
x = _parse_string_to_list_of_pairs(x, seconds_to_int=True)
return Shape(x)
class LayoutRules(object):
"""Represents layout of a computation.
#### Examples
```python
# Map "d_ff" and "heads" Tensor Dimensions to the "model" Mesh Dimension.
layout_rules = mtf.LayoutRules([("d_ff", "model"), ("heads", "model")])
```
"""
def __init__(self, pairs):
"""Constructs a layout.
Args:
pairs: Set-like of string pairs (tensor_dim_name, mesh_dim_name).
"""
self._pairs = set(pairs)
def __repr__(self):
return "LayoutRules%s" % self._pairs
def tensor_dimension_to_mesh_axis(self, tensor_dimension, mesh_shape):
"""Mesh axis associated with tensor dimension (or None).
Args:
tensor_dimension: Dimension.
mesh_shape: Shape.
Returns:
Integer or None.
Raises:
ValueError: If one Tensor dimension maps to two mesh dimensions.
"""
val = [i for i, mesh_dimension in enumerate(mesh_shape)
if (tensor_dimension.name, mesh_dimension.name) in self._pairs]
if len(val) > 1:
raise ValueError(
"Tensor dimension maps to multiple mesh dimensions"
" tensor_dimension=%s mesh_shape=%s layout=%s"
% (tensor_dimension, mesh_shape, self._pairs))
return val[0] if val else None
def tensor_layout(self, tensor_shape, mesh_shape):
"""Computes TensorLayout given a Tensor Shape and a Mesh Shape.
Args:
tensor_shape: Shape.
mesh_shape: Shape.
Returns:
TensorLayout.
Raises:
ValueError: If two Tensor Dimensions map to the same Mesh Dimensions.
"""
ret = [self.tensor_dimension_to_mesh_axis(d, mesh_shape)
for d in tensor_shape]
not_nones = [a for a in ret if a is not None]
if len(not_nones) != len(set(not_nones)):
raise ValueError(
"Two Tensor Dimensions may not map to the same Mesh Dimension:"
" layout=%s tensor_shape=%s mesh_shape=%s " %
(self, tensor_shape, mesh_shape))
return TensorLayout(ret)
def mesh_dimension_name_to_tensor_dimension_names(self, mesh_dimension_name):
return [tdn for tdn, mdn in self._pairs if mdn == mesh_dimension_name]
def convert_to_layout_rules(x):
"""Converts input to a LayoutRules.
Args:
x: LayoutRules, str, or set-like of string pairs.
Returns:
LayoutRules.
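  #### Examples
  An illustrative sketch using the "tensor_dim:mesh_dim" pair syntax:
  ```python
  convert_to_layout_rules("d_ff:model;heads:model")
  ```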
"""
if isinstance(x, LayoutRules):
return x
if isinstance(x, str):
x = _parse_string_to_list_of_pairs(x)
return LayoutRules(x)
class TensorLayout(object):
"""Injective partial map between Tensor axes and Mesh axes.
TensorLayout is a tuple of optional integers with length tensor.ndims. Each
item is either a unique integer indicating the mesh axis over which that
tensor dimension is split or None, indicating that this tensor dimension is
not split.
#### Examples
```python
# Split first and last Tensor dimensions according to mesh axes 0 and 1.
tensor_layout = mtf.TensorLayout([0, None, 1])
```
"""
def __init__(self, tensor_axis_to_mesh_axis):
"""Creates a TensorLayout.
Args:
tensor_axis_to_mesh_axis: List-like where each element is an int or None.
"""
self._tensor_axis_to_mesh_axis = tuple(tensor_axis_to_mesh_axis)
def __eq__(self, other):
return self.tensor_axis_to_mesh_axis == other.tensor_axis_to_mesh_axis
def __ne__(self, other):
return self.tensor_axis_to_mesh_axis != other.tensor_axis_to_mesh_axis
def __repr__(self):
return "TensorLayout%s" % (self.tensor_axis_to_mesh_axis,)
def __len__(self):
return len(self._tensor_axis_to_mesh_axis)
def __getitem__(self, key):
return self._tensor_axis_to_mesh_axis[key]
def __iter__(self):
return iter(self._tensor_axis_to_mesh_axis)
@property
def tensor_axis_to_mesh_axis(self):
"""Converts to a tuple of optional integers."""
return self._tensor_axis_to_mesh_axis
@property
def is_fully_replicated(self):
"""Whether all tensor dimensions map to None."""
return self.tensor_axis_to_mesh_axis == (None,) * len(self)
def mesh_axis_to_tensor_axis(self, mesh_ndims):
"""For each mesh axis, which Tensor axis maps to it.
Args:
mesh_ndims: int.
Returns:
Tuple of optional integers, with length mesh_ndims.
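    #### Examples
    An illustrative sketch, reusing the layout from the class example:
    ```python
    mtf.TensorLayout([0, None, 1]).mesh_axis_to_tensor_axis(2)  # -> (0, 2)
    ```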
"""
ta2ma = self._tensor_axis_to_mesh_axis
return tuple(
[ta2ma.index(mesh_axis) if mesh_axis in ta2ma else None
for mesh_axis in xrange(mesh_ndims)])
class Graph(object):
"""Mesh-TensorFlow graph."""
def __init__(self):
self._operations = []
self._trainable_variables = []
self._all_variables = []
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
self.name_to_variable = {}
self.captured_variable_scope = tf.get_variable_scope()
def __repr__(self):
return self.to_string
@property
def operations(self):
return self._operations
@property
def trainable_variables(self):
return self._trainable_variables
@property
def all_variables(self):
return self._all_variables
@property
def to_string(self):
return "\n".join([op.to_string for op in self.operations])
def unique_name(self, name, mark_as_used=True):
"""Like tf.Graph.unique_name, returns a unique operation name for `name`.
Args:
name: The name for an operation.
mark_as_used: whether to mark this name as being used.
Returns:
A string to use as the name for the operation.
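    #### Examples
    An illustrative sketch of the TensorFlow-style uniquification (assuming no
    enclosing variable scope):
    ```python
    graph.unique_name("add")  # -> "add"
    graph.unique_name("add")  # -> "add_1"
    ```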
"""
scope_name = tf.get_variable_scope().name
if scope_name:
name = scope_name + "/" + name
# As in TensorFlow, treat names as case insensitive when deciding whether
# they are in use.
name_key = name.lower()
i = self._names_in_use.get(name_key, 0)
if mark_as_used:
self._names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
while name_key in self._names_in_use:
name_key = "%s_%d" % (base_name_key, i)
i += 1
if mark_as_used:
self._names_in_use[name_key] = 1
name = "%s_%d" % (name, i-1)
return name
@gin.configurable
def rewrite_stack_variables(self,
max_combined_variable_size=2 ** 29,
max_combined_slice_size=2 ** 27,
mesh_to_impl=None):
"""Rewrite the current graph to combine variables.
This helps speed up graph construction times in the case of large meshes
and large numbers of variables.
This function should be called after graph construction (it is called by
    default in the Lowering constructor).
When we find a set of variables with the same shape/dtype/etc, we replace
them with one StackedVariable and an "unstack" operation. The
    StackedVariable has multiple master variables (so as to maintain checkpoint
compatibility), but only one slice variable per device. We point the inputs
of later operations to the outputs of the "unstack" operations, instead of
the outputs of the defunct single variables.
In order for variables to be combinable, they must be set in the same Assign
operation(s) - so it is necessary to call mtf.grouped_assign() from the
optimizer instead of many separate calls to mtf.assign(). The assign
operations get rewritten to set the appropriate stacked variables.
    TODO(noam): Combining to larger sizes seems to cause errors on TPU;
    debug this. Perhaps we should try to keep the combined master variables
on the same device.
Args:
max_combined_variable_size: an integer
max_combined_slice_size: an integer
mesh_to_impl: an optional dictionary from Mesh to MeshImpl
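    #### Examples
    An illustrative sketch; normally this is invoked for you by the Lowering
    constructor when autostack=True:
    ```python
    graph.rewrite_stack_variables(mesh_to_impl={mesh: mesh_impl})
    ```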
"""
# pylint: disable=protected-access
all_variables = self._all_variables
operations = self._operations
self._operations = []
self._all_variables = []
self._trainable_variables = []
    # We can only stack variables which share the same set of assignment
# operations.
var_to_assign_ops = collections.defaultdict(str)
for op in operations:
if isinstance(op, Assign):
for v in op._variables:
var_to_assign_ops[v] += op.name + ", "
# Two variables with the same "key" can be stacked together.
def var_key(v):
return str([v.mesh,
v.shape,
str(v.dtype.__dict__),
v.trainable,
var_to_assign_ops[v]])
key_to_vars = collections.defaultdict(collections.deque)
for v in all_variables:
key_to_vars[var_key(v)].append(v)
individual_to_stacked = {}
for op in operations:
if isinstance(op, StackedVariable):
raise ValueError("stack_variables() should not be called twice.")
elif isinstance(op, Variable):
if op.name in individual_to_stacked:
continue
similar_vars = key_to_vars[var_key(op)]
num_to_stack = len(similar_vars)
if max_combined_variable_size is not None:
num_to_stack = min(
num_to_stack, max_combined_variable_size // op.shape.size)
if mesh_to_impl is not None:
mesh_impl = mesh_to_impl[op.mesh]
if mesh_impl.size == 1:
num_to_stack = 1 # no point in stacking for single processors.
slice_size = mesh_impl.slice_size(op.shape)
num_to_stack = min(
num_to_stack, max_combined_slice_size // slice_size)
num_to_stack = max(1, num_to_stack)
to_stack = [similar_vars.popleft() for _ in xrange(num_to_stack)]
if num_to_stack > 1:
stacked_var = StackedVariable(to_stack)
stack_dim = stacked_var.shape.dims[0]
unstacked = unstack(stacked_var.outputs[0], stack_dim)
unstack_op = unstacked[0].operation
# replace the output Tensors of the unstack operation with the
# Tensors which were the outputs of the original variable operations.
# Later operations use these Tensors as inputs.
unstack_op._outputs = [v.outputs[0] for v in to_stack]
for t in unstack_op._outputs:
t._operation = unstack_op
for idx, v in enumerate(to_stack):
individual_to_stacked[v.name] = stacked_var, idx
else:
assert op == to_stack[0]
self._operations.append(op)
self._all_variables.append(op)
if op.trainable:
self._trainable_variables.append(op)
else:
if isinstance(op, Assign):
# Rewrite the grouped assignment to stack up the values and then
# assign to the stacked variables.
new_variables = []
new_values = []
var_to_val = dict(zip([v.name for v in op._variables], op._inputs))
for var, val in zip(op._variables, op._inputs):
if var.name in individual_to_stacked:
stacked_var, pos = individual_to_stacked[var.name]
if pos == 0:
vals = [var_to_val[n] for n in stacked_var.original_names]
new_variables.append(stacked_var)
new_values.append(
stack(vals, stacked_var.shape.dims[0].name, 0))
else:
new_variables.append(var)
new_values.append(val)
op._variables = new_variables
op._inputs = new_values
self._operations.append(op)
# pylint: enable=protected-access
def combine_assignments(self, assignments):
"""Rewrite the current graph to combine "Assign" operations.
Combine similar Assign operations into grouped Assign operations.
This is useful when using the rewrite_stack_variables() optimization,
since variables can only be stacked if they are present in the same set
of Assign operations.
This function takes a list of Assign operations and returns a possibly
shorter list of Assign operations. The input Assignment operations
are removed from the graph and become invalid.
Args:
assignments: a list of Assign objects
Returns:
a list of Assign objects
"""
group_by_fn = collections.defaultdict(list)
for a in assignments:
if not isinstance(a, Assign):
raise ValueError("ops should be instances of mtf.Assign")
group_by_fn[a.assign_fn].append(a)
assignments_set = set(assignments)
self._operations = [
op for op in self._operations if op not in assignments_set]
ret = []
for fn, ops in six.iteritems(group_by_fn):
variables = []
values = []
for a in ops:
variables.extend(a.variables)
values.extend(a.inputs)
ret.append(Assign(variables, values, fn))
return ret
def make_variables_untrainable(self, variables):
"""Makes the variables untrainable.
Args:
variables: a list of Variable objects
"""
variables = set(variables)
for v in variables:
v._trainable = False # pylint: disable=protected-access
self._trainable_variables = [
v for v in self._trainable_variables if v not in variables
]
def clone_operations(self, ops, input_mapping):
"""Clone a portion of the graph, but with different inputs.
    The different inputs are specified by the `input_mapping` dictionary, which
maps from input Tensor in the original operations to input Tensor in the
cloned operations. If an original operation uses an external input that is
not in `input_mapping`, then the original input is used for the cloned
operation.
    The function returns a list of cloned operations as well as an
    `extended_mapping` dictionary which consists of the union of the input
mapping and the map from original-operation-output to
cloned-operation-output.
Variables and Random operations are not cloned.
Args:
ops: a list of operations
input_mapping: a dictionary from Tensor to Tensor
Returns:
cloned_operations: a list of operations
extended_mapping: a dictionary from Tensor to Tensor
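    #### Examples
    An illustrative sketch (`build_layer`, `x` and `new_x` are assumed to be
    defined by the caller):
    ```python
    y, captured_ops = graph.capture_operations(lambda: build_layer(x))
    cloned_ops, mapping = graph.clone_operations(captured_ops, {x: new_x})
    new_y = mapping[y]
    ```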
"""
# pylint: disable=protected-access
mapping = copy.copy(input_mapping)
prev_num_operations = len(self.operations)
for op in ops:
if isinstance(op, Variable):
continue
if isinstance(op, RandomOperation):
# The random values will be copied instead of recomputed.
# TODO(noam): Use stateless_random to allow for recompute.
tf.logging.warning(
"Not cloning random operation, so as to ensure the same values.")
continue
new_op = copy.copy(op)
# new_op._name = self.unique_name(op.name)
self._operations.append(new_op)
new_op._inputs = [mapping.get(t, t) for t in op._inputs]
new_op._outputs = []
for i, t in enumerate(op.outputs):
new_t = Tensor(new_op, t.shape, t.dtype, t.name, i)
new_t.usable = True
new_op._outputs.append(new_t)
if t in mapping:
raise ValueError(
"input mapping should not contain any of the outputs"
" of the cloned operations")
mapping[t] = new_t
# pylint: enable=protected-access
return self.operations[prev_num_operations:], mapping
def capture_operations(self, fn):
"""Run a function and capture the list of operations it generates.
Args:
fn: a function taking no arguments
Returns:
fn_output: the function output
captured_operations: a list of Operation
"""
n = len(self.operations)
y = fn()
return y, self.operations[n:]
class Lowering(object):
"""Lowering of a Graph from Mesh-TensorFlow to TensorFlow.
#### Examples
Below we form a Graph with one Tensor and lower it to recover the original
tf.Tensor.
```python
from mesh_tensorflow import placement_mesh_impl
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
inputs = tf.constant(0.)
mtf_inputs = mtf.import_tf_tensor(mesh,
inputs=inputs,
shape=mtf.Shape([]))
mesh_impl = placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
outputs = lowering.export_to_tf_tensor(mtf_inputs) # tf.constant(0.)
```
"""
def __init__(self, graph, mesh_to_impl, autostack=True, log_file=None):
"""Creates a Lowering of a Graph.
Args:
graph: Graph.
mesh_to_impl: {Mesh: MeshImpl}. Keys are the Mesh's in the graph and
their values are MeshImpl's, which map Tensor Dimension names to
Mesh Dimension names.
autostack: a boolean. If True, then the graph gets rewritten to
reduce the number of variables (see rewrite_stack_variables()).
This is a helpful performance optimization for large meshes.
For more fine-grained control, you can call
graph.rewrite_stack_variables() yourself before creating the Lowering.
log_file: an optional string. If provided, information about the variables
and operations will also be logged to this file.
"""
# tf.logging.info("LOWERING GRAPH:\n%s" % graph.to_string)
self.mesh_to_impl = mesh_to_impl # {Mesh: MeshImpl}
self.graph = graph
if autostack:
self.autostack()
self._counters = []
self.tensors = {} # {Tensor: Mesh.LaidOutTensor}
self.operations = {} # {Operation: tf.Operation}
self.variables = {} # {Variable: LaidOutVariable}
for op in graph.operations:
# tf.logging.info("Lowering operation %s" % op.to_string)
with tf.name_scope(op.name):
op.lower(self)
for out in op.outputs:
self.add_counter(
"output/%s" % type(op).__name__, self.laid_out_size(out))
self.add_counter("output_unique/%s" % type(op).__name__, out.size)
def log_info(f=None):
"""Log the variables and operations, possibly to file `f` as well."""
log_variable_sizes(
graph.trainable_variables,
"Trainable Variables",
verbose=True,
mesh_to_impl=self.mesh_to_impl,
log_file=f)
log_variable_sizes(
graph.all_variables,
"All Variables",
verbose=False,
mesh_to_impl=self.mesh_to_impl,
log_file=f)
_log_info_also_to_file(
"Counters:\n" + pretty_print_counters(self._counters), log_file=f)
if log_file:
with tf.io.gfile.GFile(log_file, mode="w") as f:
log_info(f)
else:
log_info()
def mesh_impl(self, m):
if not isinstance(m, Mesh):
m = m.mesh
return self.mesh_to_impl[m]
def export_to_tf_tensor(self, x):
"""Turn a Tensor into a tf.Tensor.
Args:
x: Tensor.
Returns:
tf.Tensor.
"""
mesh_impl = self.mesh_impl(x)
return mesh_impl.export_to_tf_tensor(
x, self.tensors[x].to_laid_out_tensor())
def lowered_operation(self, op):
return self.operations[op]
def copy_masters_to_slices(self):
if os.environ.get("MTF_SEQUENCE_MODE", "") == "1":
mesh_impls = [impl for impl in six.itervalues(self.mesh_to_impl)]
assert len(mesh_impls) == 1
mesh_impl = mesh_impls[0]
return mesh_impl.copy_master_to_slice_ops[-1]
else:
return tf.group(
[v.copy_master_to_slices for v in six.itervalues(self.variables)])
def copy_slices_to_masters(self):
return tf.group(
[v.copy_slices_to_master for v in six.itervalues(self.variables)])
def add_counter(self, key, value):
assert isinstance(value, int)
self._counters.append((key, value))
@property
def counters(self):
return self._counters
def laid_out_size(self, tensor):
"""Total size of all slices.
Args:
tensor: Tensor.
Returns:
int.
"""
return self.mesh_impl(tensor).laid_out_size(tensor.shape)
def set_tensor_lowering(self, tensor, laid_out_tensor):
self.verify_slice_shapes(tensor, laid_out_tensor)
self.tensors[tensor] = laid_out_tensor
def verify_slice_shapes(self, tensor, laid_out_tensor):
mesh_impl = self.mesh_impl(tensor)
correct_shape = mesh_impl.slice_shape(tensor.shape)
actual_shape = laid_out_tensor.slice_shape
if actual_shape != correct_shape:
raise ValueError(
"Wrong slice shape: correct_shape = %s actual shape = %s"
% (correct_shape, actual_shape))
def autostack(self):
"""Rewrite graph to combine similarly-shaped variables (faster startup)."""
num_slices = 0
for v in self.graph.all_variables:
num_slices += self.mesh_to_impl[v.mesh].size
if num_slices >= 2 ** 16:
# Startup times are slow with lots of variable slices.
# Perform more aggressive stacking
max_combined_slice_size = 2 ** 27
else:
# Stacking hurts memory utilization - only stack small variables.
max_combined_slice_size = 2 ** 16
self.graph.rewrite_stack_variables(
mesh_to_impl=self.mesh_to_impl,
max_combined_slice_size=max_combined_slice_size)
class Mesh(object):
"""A placeholder with no functionality.
A Graph is built with each Tensor assigned to a Mesh. The Mesh does not
know its shape or its implementation.
A Lowering assigns each Mesh to a MeshImpl.
"""
def __init__(self, graph, name, variable_placer=None):
self._graph = graph
self._name = name
self._variable_placer = variable_placer
@property
def graph(self):
return self._graph
@property
def variable_placer_fn(self):
if self._variable_placer is not None:
return self._variable_placer.device_function
else:
return "cpu:0"
class MeshImpl(object):
"""Implementation of a Mesh.
Unlike Mesh, MeshImpl carries Shape and LayoutRules. Subclasses of MeshImpl
also carry devices.
#### Examples
```python
shape = mtf.Shape([mtf.Dimension("batch", 4),
mtf.Dimension("model", 8)])
layout_rules = mtf.LayoutRules([("batch", "batch"),
("d_ff", "model"),
("heads", "model")])
mesh_impl = mtf.MeshImpl(shape=shape, layout_rules=layout_rules)
```
"""
def __init__(self, shape, layout_rules):
"""Creates a mesh implementation.
Args:
shape: Shape.
layout_rules: LayoutRules.
"""
self._shape = convert_to_shape(shape)
self._layout_rules = convert_to_layout_rules(layout_rules)
@property
def shape(self):
return self._shape
@property
def ndims(self):
return len(self._shape)
@property
def layout_rules(self):
return self._layout_rules
@property
def size(self):
return self.shape.size
@property
def supports_control_dependencies(self):
return True
def tensor_dimension_to_mesh_axis(self, tensor_dimension):
"""Mesh axis associated with tensor dimension (or None).
Args:
tensor_dimension: Dimension.
Returns:
int or None.
"""
return self.layout_rules.tensor_dimension_to_mesh_axis(
tensor_dimension, self.shape)
def tensor_layout(self, arg):
"""Compute TensorLayout for a Tensor or a Shape.
Args:
arg: Tensor or Shape.
Returns:
TensorLayout.
"""
if isinstance(arg, Tensor):
arg = arg.shape
return self.layout_rules.tensor_layout(arg, self.shape)
def mesh_axis_to_cumprod(self, tensor_shape):
"""For each mesh axis, give the product of previous tensor axes.
Args:
tensor_shape: Shape.
Returns:
list with length self.ndims where each element is an integer or None.
"""
tensor_layout = self.tensor_layout(tensor_shape)
ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims)
ta2cumprod = tensor_shape.cumprod
return [None if ta is None else ta2cumprod[ta] for ta in ma2ta]
def slice_shape(self, tensor_shape):
"""Shape of each slice of the Tensor.
Args:
tensor_shape: Shape.
Returns:
list of integers with length tensor_shape.ndims.
Raises:
ValueError: If a Tensor dimension is not divisible by the corresponding
Mesh dimension.
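    #### Examples
    An illustrative sketch, reusing the mesh_impl from the class example
    (mesh shape batch=4, model=8; layout batch->batch, d_ff->model):
    ```python
    tensor_shape = mtf.Shape([mtf.Dimension("batch", 8),
                              mtf.Dimension("d_ff", 1024)])
    mesh_impl.slice_shape(tensor_shape)  # -> [2, 128]
    ```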
"""
tensor_layout = self.tensor_layout(tensor_shape)
ret = []
for tensor_dim, mesh_axis in zip(
tensor_shape, tensor_layout.tensor_axis_to_mesh_axis):
if mesh_axis is None:
ret.append(tensor_dim.size)
else:
mesh_dim = self.shape[mesh_axis]
if tensor_dim.size % mesh_dim.size != 0:
raise ValueError(
"Tensor dimension size not divisible by mesh dimension size:"
" tensor_shape=%s tensor_layout=%s"
% (tensor_shape, tensor_layout))
ret.append(tensor_dim.size // mesh_dim.size)
return ret
def slice_begin(self, tensor_shape, pnum):
"""Begin position for the tensor slice for the given processor.
Args:
tensor_shape: Shape.
pnum: int <= self.size.
Returns:
list of integers with length tensor_shape.ndims.
"""
tensor_layout = self.tensor_layout(tensor_shape)
coordinates = pnum_to_processor_coordinates(self.shape, pnum)
ret = []
for dim_size, mesh_axis in zip(
tensor_shape.to_integer_list, tensor_layout.tensor_axis_to_mesh_axis):
if mesh_axis is None:
ret.append(0)
else:
ret.append(
dim_size // self.shape[mesh_axis].size * coordinates[mesh_axis])
return ret
def slice_size(self, tensor_shape):
return list_product(self.slice_shape(tensor_shape))
def laid_out_size(self, tensor_shape):
"""Total size of all slices.
Args:
tensor_shape: Shape.
Returns:
int.
"""
return list_product(self.slice_shape(tensor_shape)) * self.size
def slicewise(self, fn, *inputs):
"""Executes a function in parallel on all slices.
Args:
fn: function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.
*inputs: list of inputs. Each input is either a LaidOutTensor or
has a to_laid_out_tensor method or is convertible to a tf.Tensor.
Returns:
LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.
"""
raise NotImplementedError("Slicewise not implemented")
def slicewise_delay_allreduce(self, fn, *inputs):
"""If all the arguments are compatible LazyAllreduceSums, then stay lazy.
Args:
fn: function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.
*inputs: list of inputs. Each input is either a LaidOutTensor or
        has a to_laid_out_tensor method or is convertible to a tf.Tensor.
Returns:
LaidOutTensor or LazyAllreduceSum
"""
if compatible_lazy_allreduce_sums(inputs):
return LazyAllreduceSum(
self,
self.slicewise(
fn, *[x.laid_out_input for x in inputs]),
inputs[0].mesh_axes,
add_counter_fn=inputs[0].add_counter_fn)
else:
return self.slicewise(fn, *inputs)
def Print(self, x, data, message, **kwargs): # pylint: disable=invalid-name
"""Calls tf.Print.
Args:
x: LaidOutTensor.
data: list of LaidOutTensor.
message: str.
**kwargs: keyword arguments to tf.print.
Returns:
LaidOutTensor.
"""
del data, message, kwargs
tf.logging.warning("Warning - mtf.Print not implemented for this mesh type")
return x
def allreduce(self, x, mesh_axes, reduction_fn_string):
"""Grouped allreduce, (summed across the given dimensions).
Args:
x: LaidOutTensor.
mesh_axes: list of integers, the mesh dimensions to be reduced.
reduction_fn_string: "SUM" or "MAX".
Returns:
LaidOutTensor.
"""
raise NotImplementedError("Allreduce not implemented")
def allsplit(self, x, mesh_axis, split_axis, which=None):
"""Inverse of allconcat - split each slice and keep only one piece of it.
The number of ways to split is the number of processors in the group.
The part that is kept corresponds to the processor's index in the group.
Args:
x: LaidOutTensor.
mesh_axis: int, the mesh axis along which to split.
split_axis: int, the Tensor axis along which to split.
      which: an optional LaidOutTensor of integer scalars. Selects the slice
        to keep, instead of the coordinate.
Returns:
LaidOutTensor.
"""
if which is None:
which = self.laid_out_pcoord(mesh_axis)
num_splits = self.shape[mesh_axis].size
def my_fn(x, which):
slice_begin = [
dimsize // num_splits * which if i == split_axis else 0
for i, dimsize in enumerate(x.shape.as_list())]
slice_size = [
dimsize // num_splits if i == split_axis else dimsize
for i, dimsize in enumerate(x.shape.as_list())]
return tf.slice(x, slice_begin, slice_size)
return self.slicewise(my_fn, x, which)
def allconcat(self, x, mesh_axis, concat_axis):
"""Grouped allconcat (like MPI allgather followed by concat).
Args:
x: LaidOutTensor.
mesh_axis: int, the mesh axis along which to group.
concat_axis: int, the Tensor axis along which to concatenate.
Returns:
LaidOutTensor.
"""
raise NotImplementedError("Allconcat not implemented")
def alltoall(self, x, mesh_axis, split_axis, concat_axis):
"""Grouped alltoall (like MPI alltoall with splitting and concatenation).
Args:
x: LaidOutTensor.
mesh_axis: int, the mesh axis along which to group.
split_axis: int, the Tensor axis along which to split.
concat_axis: int, the Tensor axis along which to concatenate.
Returns:
LaidOutTensor.
"""
raise NotImplementedError("Alltoall not implemented")
def receive(self, x, mesh_axis, source_pcoord):
"""Collective receive in groups.
Each group contains the processors that differ only in mesh_axis.
```python
group_size = self.shape[mesh_axis].size
```
Args:
x: a LaidOutTensor
mesh_axis: an integer
source_pcoord: a list of optional integers. Each element is either None
or an integer in [0, group_size). If source_pcoord[k] is None, then the
output for the k-th processor in each group is a zero tensor. If
source_pcoord[k] is not None, then the output for the k-th processor in
each group is equal to the input for the source_pcoord[k]-th processor
in that group.
Returns:
a LaidOutTensor
"""
raise NotImplementedError("Receive not implemented")
def shift_by_n_processors(self, x, mesh_axis, offset, wrap):
"""Receive the slice from processor pcoord - offset.
Args:
x: a LaidOutTensor
mesh_axis: an integer
offset: an integer
wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.
Returns:
a LaidOutTensor
"""
n = self.shape[mesh_axis].size
source_pcoord = []
for i in xrange(n):
c = i - offset
if c != c % n:
if wrap:
c = c % n
else:
c = None
source_pcoord.append(c)
return self.receive(x, mesh_axis, source_pcoord)
def laid_out_pnum(self):
"""Returns a LaidOutTensor containing the processor number.
Returns:
LaidOutTensor where each slice is an integer scalar.
"""
raise NotImplementedError("laid_out_pnum not implemented")
def laid_out_pcoord(self, mesh_axis):
"""Returns a LaidOutTensor containing the processor coordinate.
Args:
mesh_axis: int.
Returns:
LaidOutTensor where each slice is an integer scalar.
"""
divisor = list_product(self.shape.to_integer_list[mesh_axis + 1:])
modulus = self.shape[mesh_axis].size
def my_fn(pnum):
# TODO(noam): casting to float32 for the floordiv masks a bug.
# document and file the bug.
return tf.cast((tf.cast(pnum, tf.float32) // divisor), tf.int32) % modulus
return self.slicewise(my_fn, self.laid_out_pnum())
def laid_out_slice_num(self, tensor_shape):
"""A LaidOutTensor with an int32 scalar, identical for identical slices.
This is useful for synchronizing random operations.
Args:
tensor_shape: a TensorShape
Returns:
a LaidOutTensor where each slice is an integer scalar.
"""
ret = self.slicewise(lambda: tf.to_int32(0))
tensor_layout = self.tensor_layout(tensor_shape)
for mesh_axis in tensor_layout.tensor_axis_to_mesh_axis:
if mesh_axis is not None:
def my_fn(x, pcoord, mesh_dim_size):
return x * mesh_dim_size + pcoord
ret = self.slicewise(
my_fn, ret, self.laid_out_pcoord(mesh_axis),
self.shape[mesh_axis].size)
return ret
def broadcast_impl(self, old_slices, old_shape, new_shape):
"""Implementation of a broadcast operation.
Args:
old_slices: LaidOutTensor.
old_shape: Shape.
new_shape: Shape.
Returns:
LaidOutTensor.
"""
new_slice_shape = self.slice_shape(new_shape)
def tf_fn(x):
return (tf.zeros(new_slice_shape, dtype=x.dtype) +
_expand_dims(x, old_shape, new_shape))
return self.slicewise(tf_fn, old_slices)
def make_slices(self, tf_tensor, tensor_shape):
"""Turns a single tf.Tensor into a list of slices, one for each processor.
Args:
tf_tensor: tf.Tensor.
tensor_shape: Shape.
Returns:
list of tf.tensor with length self.size.
"""
tensor_layout = self.tensor_layout(tensor_shape)
slice_shape = self.slice_shape(tensor_shape)
def my_fn(pnum):
if tensor_layout.is_fully_replicated:
return tf_tensor
else:
slice_begin = self.slice_begin(tensor_shape, pnum)
return tf.slice(tf_tensor, slice_begin, slice_shape)
return parallel([tf_tensor.device] * self.size, my_fn,
list(xrange(self.size)))
def combine_slices(self, slices, tensor_shape, device=None):
"""Turns a set of slices into a single tensor.
Args:
slices: list of tf.Tensor with length self.size.
tensor_shape: Shape.
device: optional str. If absent, we use the devices of the slices.
Returns:
tf.Tensor.
"""
if tensor_shape.ndims == 0:
return slices[0]
ret = slices[:]
tensor_layout = self.tensor_layout(tensor_shape)
for mesh_dim, tensor_axis in zip(
self.shape, tensor_layout.mesh_axis_to_tensor_axis(self.ndims)):
slice_size = len(ret) // mesh_dim.size
if tensor_axis is None:
ret = ret[:slice_size]
else:
if device:
devices = [device] * slice_size
else:
devices = [ret[i].device for i in xrange(slice_size)]
concat_inputs = []
for i in xrange(slice_size):
concat_inputs.append(
[ret[i + slice_size * j] for j in xrange(mesh_dim.size)])
ret = parallel(
devices, tf.concat, concat_inputs,
axis=[tensor_axis] * len(devices))
assert len(ret) == 1
return ret[0]
def export_to_tf_tensor(self, x, laid_out_x):
"""Turns a Tensor into a tf.Tensor.
Args:
x: Tensor.
laid_out_x: LaidOutTensor.
Returns:
tf.Tensor.
"""
raise NotImplementedError("export_to_tf_tensor not implemented")
def import_tf_tensor(self, x, tf_x):
"""Imports a tf.Tensor, producing a LaidOutTensor.
Args:
x: Tensor.
tf_x: tf.Tensor.
Returns:
LaidOutTensor.
"""
raise NotImplementedError("Import not implemented")
def einsum(self, equation, *slices):
"""Override this for custom einsum implementation.
Args:
equation: a string
*slices: a list of tf.Tensor
Returns:
a Tensor
"""
return tf.einsum(equation, *slices)
class LazyAllreduceSum(object):
"""Represents a LaidOutTensor with a lazy allreduce.
The purpose of delaying allreduce is that it saves bandwidth to first add
and then allreduce, as opposed to the other way around.
"""
def __init__(self,
mesh_impl,
laid_out_input,
mesh_axes,
add_counter_fn=None):
"""Create a LazyAllreduceSum.
Args:
mesh_impl: a mesh_impl
laid_out_input: a LaidOutTensor
mesh_axes: a list of mesh axes
add_counter_fn: a function taking no arguments which calls
lowering.add_counter if and when the allreduce executes.
Returns:
a LazyAllreduceSum
"""
self.mesh_impl = mesh_impl
self.laid_out_input = laid_out_input
self.mesh_axes = mesh_axes
self.add_counter_fn = add_counter_fn
self._reduced = None
def to_laid_out_tensor(self):
if not self._reduced:
self._reduced = self.mesh_impl.allreduce(
self.laid_out_input, self.mesh_axes, "SUM")
if self.add_counter_fn:
self.add_counter_fn()
return self._reduced
@property
def slice_shape(self):
return self.laid_out_input.slice_shape
def compatible_lazy_allreduce_sums(xs):
""""Are xs all compatible LazyAllreduceSum objects.
Args:
xs: a list
Returns:
a boolean
"""
if not xs:
return False
if not all([isinstance(x, LazyAllreduceSum) for x in xs]):
return False
x = xs[0]
for y in xs[1:]:
if x.mesh_impl != y.mesh_impl:
return False
if x.mesh_axes != y.mesh_axes:
return False
return True
def convert_args_to_laid_out_tensors(xs):
"""Convert list elements to laid-out-tensors when possible.
Args:
xs: a list
Returns:
a list
"""
ret = []
for x in xs:
if hasattr(x, "to_laid_out_tensor"):
ret.append(x.to_laid_out_tensor())
else:
ret.append(x)
return ret
class Tensor(object):
"""A Distributed Tensor."""
def __init__(self, operation, shape, dtype, name=None, index=0):
"""Create a Tensor.
Args:
operation: the Operation that outputs this tensor
shape: a Shape
dtype: a tf.DType
name: an optional string
index: optional integer, the index among operation's output tensors
"""
if not isinstance(shape, Shape):
raise ValueError("shape must be a Shape got %s" % shape.to_string)
if not isinstance(dtype, tf.DType):
raise ValueError("dtype must be a tf.DType got %s" % dtype)
self._mesh = operation.mesh
self._operation = operation
self._shape = shape
self._dtype = dtype
if name is None:
name = self.operation.name + ":" + str(index)
self._name = name
# A flag that we can turn off to assert that no one uses the tensor
# as the input to an operation.
self.usable = True
@property
def shape(self):
return self._shape
@property
def size(self):
return self.shape.size
@property
def mesh(self):
return self._mesh
@property
def graph(self):
return self._mesh.graph
@property
def operation(self):
return self._operation
@property
def dtype(self):
return self._dtype
@property
def name(self):
return self._name
def __repr__(self):
return self.to_string
def __add__(self, other):
return add(self, other)
def __radd__(self, other):
return add(self, other)
def __sub__(self, other):
return sub(self, other)
def __rsub__(self, other):
return sub(other, self)
def __mul__(self, other):
return multiply(self, other)
def __rmul__(self, other):
return multiply(self, other)
def __neg__(self):
return negative(self)
def __truediv__(self, other):
return divide(self, other)
def __rtruediv__(self, other):
return divide(other, self)
def __floordiv__(self, other):
return floordiv(self, other)
def __rfloordiv__(self, other):
return floordiv(other, self)
def __mod__(self, other):
return mod(self, other)
def __rmod__(self, other):
return mod(other, self)
@property
def to_string(self):
return "Tensor[%s, %s, %s]" % (self.name, self.shape.to_string, self.dtype)
class Operation(object):
"""A Distributed Operation."""
def __init__(self, inputs, mesh=None, name=None):
"""Initializer.
Args:
inputs: a list of Tensor
mesh: an optional Mesh (if unspecified, will be inferred from first input)
name: a string, which will get uniquified (in TensorFlow style)
Raises:
ValueError: mesh was not provided and there were no inputs to infer from.
"""
if mesh is None:
if not inputs:
raise ValueError("mesh must be specified if no inputs")
mesh = inputs[0].mesh
self._inputs = inputs[:]
self._outputs = []
self._mesh = mesh
# In a default operation, all dimensions are splittable.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
assert name is not None
self._name = mesh.graph.unique_name(name)
mesh.graph.operations.append(self)
for t in inputs:
if not t.usable:
raise ValueError("Operation %s has unusable input %s" % (self, t))
@property
def graph(self):
return self._mesh.graph
@property
def mesh(self):
return self._mesh
@property
def name(self):
return self._name
@property
def inputs(self):
return self._inputs[:]
@property
def outputs(self):
return self._outputs[:]
@property
def splittable_dims(self):
"""Frozenset of the names of dims safe to split when lowering this op."""
return self._splittable_dims
@property
def unsplittable_dims(self):
"""Frozenset of the names of dims unsafe to split when lowering this op."""
return self._unsplittable_dims
@property
def to_string(self):
return "%s[Inputs=(%s) Outputs=(%s)]" % (
type(self).__name__,
", ".join([t.to_string for t in self.inputs]),
", ".join([t.to_string for t in self.outputs]))
@property
def has_gradient(self):
return (
[t for t in self.inputs if t.dtype.is_floating] and
[t for t in self.outputs if t.dtype.is_floating])
def gradient(self, unused_grad_ys):
raise NotImplementedError("Gradient not implemented")
def lower(self, lowering):
raise NotImplementedError("Lower not implemented")
def _initialize_splittable_and_unsplittable_dims(
self, default_splittability, exception_dims_iterable=None):
"""Initializer for splittable_dims and unsplittable_dims.
Helper method to categorize all dimensions in the input/output tensors as
either splittable or unsplittable.
Args:
default_splittability: a string which is either "splittable" or
"unsplittable".
exception_dims_iterable: an optional iterable of names of dimensions
which are exceptions to the default splittability.
Returns:
splittable_dims and unsplittable_dims, two frozensets of names of
dimensions (strings)
Raises:
ValueError: default_splittability is not one of "splittable" or
"unsplittable".
"""
default_dims = set()
exception_dims = set()
if exception_dims_iterable:
exception_dims.update(exception_dims_iterable)
for t in itertools.chain(self.inputs, self.outputs):
for dim_name in t.shape.dimension_names:
if dim_name not in exception_dims:
default_dims.add(dim_name)
if default_splittability == "splittable":
return frozenset(default_dims), frozenset(exception_dims)
elif default_splittability == "unsplittable":
return frozenset(exception_dims), frozenset(default_dims)
else:
raise ValueError("default_splittability should be either \"splittable\" "
"or \"unsplittable\" but was {}"
.format(default_splittability))
def _initialize_all_dimensions_as_splittable(self):
"""Helper init for the most common case: all dimensions may be split."""
return self._initialize_splittable_and_unsplittable_dims("splittable")
class SlicewiseOperation(Operation):
"""Apply any tensorflow function slice-wise.
Calls the Tensorflow function on each slice of the inputs to produce the
corresponding slice of the outputs. Gradients are computed through
tensorflow.
The user must specify "splittable_dims": a list of Dimensions which can
be split while still keeping this computation valid. For example, for
component-wise functions, all the dimensions are splittable, but if the
function is a reduction, the reduced dimensions are not splittable.
"""
def __init__(self,
tf_fn,
inputs,
output_shapes,
output_dtypes,
splittable_dims,
grad_function=None,
name=None):
"""Create a SlicewiseOperation.
grad_function is a python function taking this operation and a gradients
Tensor and producing input gradients tensors.
e.g.
def _square_grad(op, dy):
return [dy * op.inputs[0] * 2]
Args:
tf_fn: a function taking n tf.Tensors and returning a tf.Tensor
inputs: a list of n Tensors
output_shapes: a list of Shapes
output_dtypes: a list of dtypes
splittable_dims: a list of Dimensions which are ok to split
      grad_function: an optional python function. Defaults to using
        tf.gradients. Pass in the number 0 to indicate no gradient.
name: an optional string
"""
super(SlicewiseOperation, self).__init__(inputs, name=name or "slicewise")
self._tf_fn = tf_fn
self._outputs = [Tensor(self, shape, dtype) for (shape, dtype)
in zip(output_shapes, output_dtypes)]
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"unsplittable", [dim.name for dim in splittable_dims]))
self._grad_function = grad_function
@property
def has_gradient(self):
if self._grad_function == 0:
return False
return super(SlicewiseOperation, self).has_gradient
def gradient(self, grad_ys):
if self._grad_function is not None:
return self._grad_function(self, *grad_ys)
return GenericGradOperation(self, grad_ys).outputs
def lower(self, lowering):
# Check that only splittable dims are split
mesh_impl = lowering.mesh_impl(self)
for t in self.inputs + self.outputs:
layout = mesh_impl.tensor_layout(t)
for d, mesh_axis in zip(t.shape.dims, layout.tensor_axis_to_mesh_axis):
if mesh_axis is not None and d.name not in self._splittable_dims:
raise ValueError("dimension %s is not declared as splittable" % d)
values = mesh_impl.slicewise(
self._tf_fn, *[lowering.tensors[x] for x in self.inputs])
if len(self.outputs) == 1:
values = values,
for output, value in zip(self.outputs, values):
lowering.set_tensor_lowering(output, value)
def slicewise(tf_fn,
xs,
output_shape=None,
output_dtype=None,
splittable_dims=None,
grad_function=None,
name=None):
"""Slice-wise call to any tensorflow function.
The output shape and dtype default to those of the first input.
splittable_dims is a list of Dimensions which can be split while keeping the
computation valid.
Args:
tf_fn: a function taking n tf.Tensors and returning a tf.Tensor
xs: a list of n Tensors
output_shape: a Shape (or list of shapes)
output_dtype: a dtype (or list of dtypes)
splittable_dims: a list of Dimensions which are ok to split
    grad_function: an optional gradients function. If None, use tf.gradients.
name: an optional string
Returns:
a Tensor (or a tuple of Tensors)
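  #### Examples
  An illustrative sketch: an elementwise maximum of two same-shape Tensors
  `x` and `y` (assumed defined by the caller), safe to split on any dimension:
  ```python
  z = mtf.slicewise(
      tf.maximum, [x, y], splittable_dims=x.shape.dims, name="maximum")
  ```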
"""
multiple_outputs = isinstance(output_dtype, list)
output_shapes = output_shape if multiple_outputs else [output_shape]
output_dtypes = output_dtype if multiple_outputs else [output_dtype]
op = SlicewiseOperation(
tf_fn,
xs,
[convert_to_shape(shape) or xs[0].shape for shape in output_shapes],
[dtype or xs[0].dtype for dtype in output_dtypes],
splittable_dims,
grad_function,
name=name)
return tuple(op.outputs) if multiple_outputs else op.outputs[0]
def cwise(tf_fn, xs, output_dtype=None, grad_function=None, name=None):
"""Component-wise operation with no broadcasting.
Args:
tf_fn: a component-wise function taking n tf.Tensor inputs and producing
a tf.Tensor output
xs: n Tensors
output_dtype: an optional dtype
grad_function: an optional python function
name: an optional string
Returns:
a Tensor
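  #### Examples
  An illustrative sketch; gradients fall back to tf.gradients since no
  grad_function is given:
  ```python
  y = mtf.cwise(tf.math.erf, [x], name="erf")
  ```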
"""
return slicewise(
tf_fn, xs, output_dtype=output_dtype, splittable_dims=xs[0].shape.dims,
grad_function=grad_function, name=name or "cwise")
def identity(x, name="identity"):
return cwise(tf.identity, [x], name=name)
def sin(x, name="sin"):
return cwise(tf.sin, [x], name=name)
def cos(x, name="cos"):
return cwise(tf.cos, [x], name=name)
def square(x, name="square"):
return cwise(
tf.square, [x], name=name,
grad_function=lambda op, dy: [dy * op.inputs[0] * 2])
def sqrt(x, name="sqrt"):
return cwise(
tf.sqrt, [x], name=name,
grad_function=lambda op, dy: [dy * 0.5 / op.outputs[0]])
def _rsqrt_grad(op, dy):
return [dy * -0.5 * op.outputs[0] * op.outputs[0] * op.outputs[0]]
def rsqrt(x, name="rsqrt"):
return cwise(
tf.math.rsqrt, [x], name=name, grad_function=_rsqrt_grad)
def log(x, name="log"):
return cwise(
tf.math.log, [x], name=name,
grad_function=lambda op, dy: [dy / op.inputs[0]])
def exp(x, name="exp"):
return cwise(tf.exp, [x], name=name,
grad_function=lambda op, dy: [dy * op.outputs[0]])
def sigmoid(x, name="sigmoid"):
def grad_function(op, dy):
y = op.outputs[0]
return [y * (1.0 - y) * dy]
return cwise(tf.sigmoid, [x], name=name, grad_function=grad_function)
def tanh(x, name="tanh"):
def grad_function(op, dy):
y = op.outputs[0]
return [(1.0 - square(y)) * dy]
return cwise(tf.tanh, [x], name=name, grad_function=grad_function)
def mtf_pow(x, y):
"""Call externally as mtf.pow()."""
return exp(log(x) * y)
def negative(x, name="negative"):
return cwise(tf.negative, [x], name=name,
grad_function=lambda op, dy: [negative(dy)])
def logical_not(x, name="logical_not"):
return cwise(tf.logical_not, [x], name=name)
def swish(x):
"""Swish activation from https://arxiv.org/abs/1710.05941 ."""
return x * sigmoid(x)
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * x * x * x))))
return x * cdf
def elu(x):
"""Exponential Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1511.07289
Args:
x: float Tensor to perform activation.
Returns:
'x' with the ELU activation applied.
"""
return cwise(tf.nn.elu, [x], name="elu")
def selu(x):
"""Scaled Exponential Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1706.02515
Args:
x: float Tensor to perform activation.
Returns:
'x' with the SELU activation applied.
"""
return cwise(tf.nn.selu, [x], name="selu")
def softplus(x):
"""Softplus activation."""
return cwise(tf.math.softplus, [x], name="softplus")
def reciprocal(x, name="reciprocal"):
return cwise(
tf.math.reciprocal, [x], name=name,
grad_function=lambda op, dy: [negative(dy * square(op.outputs[0]))])
def _relu_grad(op, dy):
return [dy * cast(greater(op.inputs[0], 0), op.inputs[0].dtype)]
def relu(x, name="relu"):
return cwise(tf.nn.relu, [x], name=name, grad_function=_relu_grad)
def leaky_relu(x, alpha=0.2, name="leaky_relu"):
def forward_function(x):
return tf.nn.leaky_relu(x, alpha)
def grad_function(op, dy):
return [dy * cast(greater(op.inputs[0], 0), op.inputs[0].dtype) + \
dy * cast(less_equal(op.inputs[0], 0), op.inputs[0].dtype) * alpha]
return cwise(forward_function, [x], name=name, grad_function=grad_function)
def sign(x, name="sign"):
ret = cwise(tf.sign, [x], name=name, grad_function=0)
return ret
def mtf_abs(x):
"""Call externally as mtf.abs()."""
return x * sign(x)
def cast(x, dtype, name="cast"):
if dtype == x.dtype:
return x
return cwise(
lambda x: tf.cast(x, dtype), [x], output_dtype=dtype, name=name,
grad_function=lambda op, dy: [cast(dy, op.inputs[0].dtype)])
def to_float(x, name="to_float"):
return cast(x, tf.float32, name=name)
def to_bfloat16(x, name="to_bfloat16"):
return cast(x, tf.bfloat16, name=name)
def to_int32(x, name="to_int32"):
return cast(x, tf.int32, name=name)
class GenericGradOperation(Operation):
"""Gradients that follow regular TF.
Calling tf.gradients multiple times seems really slow in python.
TODO(noam): can we speed this up using functions or some other method?
"""
def __init__(self, forward_op, grad_ys, name=None):
# tf.logging.info("forward inp %s, operations %s, grad_ys: %s",
# forward_op.inputs, forward_op.outputs, grad_ys)
super(GenericGradOperation, self).__init__(
forward_op.inputs + forward_op.outputs + grad_ys,
name=name or "generic_grad")
self._grad_ys = grad_ys
self._forward_op = forward_op
self._outputs = [Tensor(self, x.shape, x.dtype, index=i)
for i, x in enumerate(forward_op.inputs)]
def lower(self, lowering):
# lists of lists of tf.Tensor
all_ys = transpose_list_of_lists(
[lowering.tensors[y].tensor_list for y in self._forward_op.outputs])
all_xs = transpose_list_of_lists(
[lowering.tensors[x].tensor_list for x in self._forward_op.inputs])
all_grad_ys = transpose_list_of_lists(
[lowering.tensors[dy].tensor_list for dy in self._grad_ys])
all_grad_xs = [
tf.gradients( # pylint: disable=g-complex-comprehension
ys=ys,
xs=xs,
grad_ys=grad_ys,
unconnected_gradients=tf.UnconnectedGradients.ZERO)
for ys, xs, grad_ys in zip(all_ys, all_xs, all_grad_ys)
]
grad_xs = transpose_list_of_lists(all_grad_xs)
for out, grad_x in zip(self.outputs, grad_xs):
lowering.set_tensor_lowering(
out,
lowering.mesh_impl(self).LaidOutTensor.from_tensor_list(grad_x))
class ScalarMultiplyOperation(Operation):
"""Multiply by a tf Scalar (no backprop to scalar)."""
def __init__(self, x, scalar, name=None):
super(ScalarMultiplyOperation, self).__init__(
[x], name=name or "scalar_mul")
self._outputs = [Tensor(self, x.shape, x.dtype)]
self._scalar = scalar
def gradient(self, grad_ys):
dy = grad_ys[0]
return [dy * self._scalar]
def lower(self, lowering):
lowering.set_tensor_lowering(
self.outputs[0],
lowering.mesh_impl(self).slicewise(
lambda x: x * self._scalar, lowering.tensors[self.inputs[0]]))
class ScalarAddOperation(Operation):
"""Add a tf Scalar (no backprop to scalar)."""
def __init__(self, x, scalar, name=None):
super(ScalarAddOperation, self).__init__([x], name=name or "scalar_add")
self._outputs = [Tensor(self, x.shape, x.dtype)]
self._scalar = scalar
def gradient(self, grad_ys):
return grad_ys
def lower(self, lowering):
lowering.set_tensor_lowering(
self.outputs[0],
lowering.mesh_impl(self).slicewise(
lambda x: x + self._scalar, lowering.tensors[self.inputs[0]]))
class BinaryOpWithBroadcasting(Operation):
"""Binary operation with broadcasting."""
def __init__(self, tf_fn, x1, x2, output_shape, output_dtype, name=None):
super(BinaryOpWithBroadcasting, self).__init__(
[x1, x2], name=name or "binary_op")
if x1.dtype != x2.dtype:
# If there is ever a binary operation with different operand types, then
# we should add an argument allow_different_operand_dtypes=False.
raise ValueError("Dtypes must be equal- got %s and %s"
% (x1.dtype, x2.dtype))
assert isinstance(output_dtype, tf.DType)
self._outputs = [Tensor(self, output_shape, output_dtype)]
self._tf_fn = tf_fn
# Rerun to take the new output into account.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
def gradient(self, unused_grad_ys):
raise ValueError("Gradient not implememnted")
def lower(self, lowering):
x1 = self.inputs[0]
x2 = self.inputs[1]
output = self.outputs[0]
laid_out_x1 = lowering.tensors[x1]
laid_out_x2 = lowering.tensors[x2]
mesh_impl = lowering.mesh_impl(self)
if x1.shape != output.shape:
laid_out_x1 = mesh_impl.slicewise(
_expand_dims, laid_out_x1, x1.shape, output.shape)
if x2.shape != output.shape:
laid_out_x2 = mesh_impl.slicewise(
_expand_dims, laid_out_x2, x2.shape, output.shape)
if self._tf_fn == tf.add:
out = mesh_impl.slicewise_delay_allreduce(
self._tf_fn, laid_out_x1, laid_out_x2)
else:
out = mesh_impl.slicewise(self._tf_fn, laid_out_x1, laid_out_x2)
lowering.set_tensor_lowering(self.outputs[0], out)
def binary_arguments_to_tensors(x1, x2):
"""Convert argument of a binary operation to Tensors.
Args:
x1: a Tensor or something convertible to a tf Scalar
x2: a Tensor or something convertible to a tf Scalar
Returns:
new_x1: a Tensor
new_x2: a Tensor
Raises:
ValueError: on failure
"""
if not isinstance(x1, Tensor) and not isinstance(x2, Tensor):
raise ValueError("at least one of x1 and x2 must be an mtf Tensor")
elif isinstance(x1, Tensor) and isinstance(x2, Tensor):
return x1, x2
elif isinstance(x1, Tensor):
return x1, import_tf_tensor(
x1.mesh, tf.convert_to_tensor(x2, dtype=x1.dtype), Shape([]))
else:
return import_tf_tensor(x2.mesh, tf.convert_to_tensor(x1, dtype=x2.dtype),
Shape([])), x2
def binary_op_with_broadcasting(
tf_fn, x1, x2, output_shape=None, output_dtype=None):
x1, x2 = binary_arguments_to_tensors(x1, x2)
output_shape = _infer_binary_broadcast_shape(x1.shape, x2.shape, output_shape)
output_dtype = output_dtype or x1.dtype
assert isinstance(output_dtype, tf.DType)
return BinaryOpWithBroadcasting(
tf_fn, x1, x2, convert_to_shape(output_shape),
output_dtype).outputs[0]
def less(x1, x2, output_shape=None):
return binary_op_with_broadcasting(
tf.less, x1, x2, output_dtype=tf.bool, output_shape=output_shape)
def greater(x1, x2, output_shape=None):
return binary_op_with_broadcasting(
tf.greater, x1, x2, output_dtype=tf.bool, output_shape=output_shape)
def less_equal(x1, x2, output_shape=None):
return binary_op_with_broadcasting(
tf.less_equal, x1, x2, output_dtype=tf.bool, output_shape=output_shape)
def greater_equal(x1, x2, output_shape=None):
return binary_op_with_broadcasting(
tf.greater_equal, x1, x2, output_dtype=tf.bool, output_shape=output_shape)
def equal(x1, x2, output_shape=None):
return binary_op_with_broadcasting(
tf.equal, x1, x2, output_dtype=tf.bool, output_shape=output_shape)
def not_equal(x1, x2, output_shape=None):
return binary_op_with_broadcasting(
tf.not_equal, x1, x2, output_dtype=tf.bool, output_shape=output_shape)
def logical_and(x1, x2, output_shape=None):
return binary_op_with_broadcasting(
tf.logical_and, x1, x2, output_dtype=tf.bool, output_shape=output_shape)
def logical_or(x1, x2, output_shape=None):
return binary_op_with_broadcasting(
tf.logical_or, x1, x2, output_dtype=tf.bool, output_shape=output_shape)
def floordiv(x1, x2, output_shape=None):
output_dtype = x1.dtype if isinstance(x1, Tensor) else x2.dtype
return binary_op_with_broadcasting(
tf.floordiv, x1, x2, output_dtype=output_dtype, output_shape=output_shape)
def mod(x1, x2, output_shape=None):
output_dtype = x1.dtype if isinstance(x1, Tensor) else x2.dtype
return binary_op_with_broadcasting(
tf.mod, x1, x2, output_dtype=output_dtype, output_shape=output_shape)
class AddOperation(BinaryOpWithBroadcasting):
"""Binary addition with broadcasting."""
def __init__(self, x1, x2, output_shape, name=None):
super(AddOperation, self).__init__(
tf.add, x1, x2, output_shape, x1.dtype, name=name or "add")
def gradient(self, grad_ys):
dy = grad_ys[0]
return [reduce_sum(dy, output_shape=self.inputs[0].shape),
reduce_sum(dy, output_shape=self.inputs[1].shape)]
class MinMaxOperation(BinaryOpWithBroadcasting):
"""Binary minimum/maximum with broadcasting."""
def __init__(self, tf_fn, x1, x2, output_shape, name=None):
super(MinMaxOperation, self).__init__(
        tf_fn, x1, x2, output_shape, x1.dtype, name=name or "minmax")
def gradient(self, grad_ys):
dy = grad_ys[0]
return [dy * cast(equal(self.inputs[0], self.outputs[0]), dy.dtype),
dy * cast(equal(self.inputs[1], self.outputs[0]), dy.dtype)]
def minimum(x1, x2, output_shape=None, name=None):
"""Binary minimum with broadcsting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
with tf.name_scope(name, default_name="minimum"):
x1, x2 = binary_arguments_to_tensors(x1, x2)
return MinMaxOperation(
tf.minimum, x1, x2, output_shape=_infer_binary_broadcast_shape(
x1.shape, x2.shape, output_shape)).outputs[0]
def maximum(x1, x2, output_shape=None, name=None):
"""Binary maximum with broadcsting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
with tf.name_scope(name, default_name="maximum"):
x1, x2 = binary_arguments_to_tensors(x1, x2)
return MinMaxOperation(
tf.maximum, x1, x2, output_shape=_infer_binary_broadcast_shape(
x1.shape, x2.shape, output_shape)).outputs[0]
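# Illustrative sketch (not part of the original library code):
# MinMaxOperation.gradient above routes dy to whichever input equals the
# output, so a tie sends the gradient to both operands. A small numpy check
# of that rule, assuming numpy is available; the helper name is hypothetical.
def _example_minmax_gradient_rule():
  """Check the equality-mask subgradient used by MinMaxOperation."""
  import numpy as np  # local import; illustration only
  x1 = np.array([1.0, 5.0, 2.0, 4.0])
  x2 = np.array([3.0, 2.0, 2.0, 7.0])
  y = np.maximum(x1, x2)
  dy = np.array([10.0, 20.0, 30.0, 40.0])
  dx1 = dy * (x1 == y)
  dx2 = dy * (x2 == y)
  # x1 wins at index 1, x2 wins at indices 0 and 3; index 2 ties, so both
  # operands receive the gradient there.
  assert np.array_equal(dx1, [0.0, 20.0, 30.0, 0.0])
  assert np.array_equal(dx2, [10.0, 0.0, 30.0, 40.0])
  return dx1, dx2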
class BroadcastOperation(Operation):
"""Broadcast - output dims are a superset of input dims, in any order."""
def __init__(self, x, output_shape, name=None):
super(BroadcastOperation, self).__init__([x], name=name or "broadcast")
self._outputs = [Tensor(self, output_shape, x.dtype)]
# Rerun to take the new output into account.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
def gradient(self, grad_ys):
return [reduce_sum(grad_ys[0], output_shape=self.inputs[0].shape)]
def lower(self, lowering):
ret = lowering.mesh_impl(self).broadcast_impl(
lowering.tensors[self.inputs[0]], self.inputs[0].shape,
self.outputs[0].shape)
lowering.set_tensor_lowering(self.outputs[0], ret)
def broadcast(x, new_shape):
new_shape = convert_to_shape(new_shape)
if x.shape == new_shape:
return x
return BroadcastOperation(x, new_shape).outputs[0]
def _reduce_helper(input_shape,
output_shape,
input_tensor_layout,
reduction_fn_string="SUM"):
"""Returns slicewise function and reduced mesh dimensions.
Args:
input_shape: a Shape
output_shape: a Shape
input_tensor_layout: a TensorLayout
reduction_fn_string: "SUM" or "MAX"
Returns:
reduce_slice_fn: a function from tf.Tensor to tf.Tensor
reduced_mesh_axes: a list of integers
"""
reduce_dims_indices = [
i for i, d in enumerate(input_shape.dims) if d not in output_shape.dims]
reduced_input_shape = Shape([
d for d in input_shape.dims if d in output_shape.dims])
perm = [reduced_input_shape.dims.index(d) for d in output_shape.dims]
def reduce_slice_fn(xslice):
ret = xslice
if reduce_dims_indices:
ret = reduction_fn(reduction_fn_string)(xslice, reduce_dims_indices)
if perm != list(xrange(len(perm))):
ret = tf.transpose(ret, perm)
return ret
reduced_mesh_axes = []
for i in reduce_dims_indices:
mesh_axis = input_tensor_layout[i]
if mesh_axis is not None:
reduced_mesh_axes.append(mesh_axis)
return reduce_slice_fn, reduced_mesh_axes
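# Illustrative sketch (not part of the original library code):
# _reduce_helper sums out the input dimensions that are absent from the
# output shape and then permutes the survivors into the output order. The
# same two steps in numpy, tracking dimensions by name; assumes numpy is
# available and the helper name is hypothetical.
def _example_reduce_then_permute():
  """Mirror the reduce-then-transpose logic of _reduce_helper in numpy."""
  import numpy as np  # local import; illustration only
  input_dims = ["batch", "heads", "length"]   # shape (2, 3, 4)
  output_dims = ["length", "batch"]           # "heads" gets reduced away
  x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
  reduce_axes = tuple(i for i, d in enumerate(input_dims)
                      if d not in output_dims)
  kept_dims = [d for d in input_dims if d in output_dims]
  ret = x.sum(axis=reduce_axes)               # shape (2, 4): batch, length
  perm = [kept_dims.index(d) for d in output_dims]
  ret = ret.transpose(perm)                   # shape (4, 2): length, batch
  assert np.allclose(ret, x.sum(axis=1).T)
  return ret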
class ReduceOperation(Operation):
"""Reduction - output dims are a subset of input dims, in any order."""
def __init__(self, x, output_shape, reduction_fn_string, name=None):
super(ReduceOperation, self).__init__([x], name=name or "reduce")
self._outputs = [Tensor(self, output_shape, x.dtype)]
self._reduction_fn_string = reduction_fn_string
def gradient(self, grad_ys):
if self._reduction_fn_string == "SUM":
return [broadcast(grad_ys[0], self.inputs[0].shape)]
elif (self._reduction_fn_string == "MAX" or
self._reduction_fn_string == "MIN"):
return [cast(equal(self.inputs[0], self.outputs[0]), self.inputs[0].dtype)
* grad_ys[0]]
else:
raise ValueError("Gradients to other reductions not implemented")
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
slicewise_fn, reduced_mesh_axes = _reduce_helper(
self.inputs[0].shape, self.outputs[0].shape,
mesh_impl.tensor_layout(self.inputs[0]),
self._reduction_fn_string)
y = mesh_impl.slicewise(slicewise_fn, lowering.tensors[self.inputs[0]])
if reduced_mesh_axes:
def add_counter_fn():
lowering.add_counter("allreduce/%s/reduce_op" % reduced_mesh_axes,
lowering.laid_out_size(self.outputs[0]))
if self._reduction_fn_string == "SUM":
y = LazyAllreduceSum(
mesh_impl, y, reduced_mesh_axes, add_counter_fn=add_counter_fn)
else:
y = mesh_impl.allreduce(
y, reduced_mesh_axes, self._reduction_fn_string)
add_counter_fn()
lowering.set_tensor_lowering(self.outputs[0], y)
def _pool_helper(ksize,
strides,
pool_fn_string="MAX_2D"):
"""Returns slicewise function and reduced mesh dimensions.
Args:
ksize: kernel size, a tuple or list.
strides: a tuple or list.
pool_fn_string: "MAX" or "AVERAGE"
Returns:
pool_slice_fn: a function from tf.Tensor to tf.Tensor
"""
def pool_slice_fn(xslice):
ret = pool_fn(pool_fn_string)(xslice, ksize, strides, "VALID")
return ret
return pool_slice_fn
def _tf_upscale(x, dim_idx_start, dim_idx_end, xscales):
"""Upscale the tf.Tensor x.
N-dimensional version of tf.image.resize_images with NEAREST interpolation.
Similar to: https://github.com/tensorflow/tensorflow/issues/2169
Args:
x: a tf.Tensor
dim_idx_start: the index of starting dimension
dim_idx_end: the index of ending dimension
xscales: an integer list of upscaling factors
Returns:
    a tf Tensor. Dimensions in [dim_idx_start, dim_idx_end - 1] will be
      upscaled by a factor of xscales[i].
"""
xscales = list(xscales)
if dim_idx_start < 0:
dim_idx_start += len(x.get_shape().as_list())
def _tf_upscale_one_trailing_dim(x_1tdim):
"""Upscaling with dim_idx_end = -1."""
x_shape = x_1tdim.get_shape().as_list()
x_scaled_shape = [ori_size * scale for ori_size, scale \
in zip(x_shape[dim_idx_start:-1], xscales)]
dim_idx_len = len(x_shape[dim_idx_start:-1])
x_1tdim = tf.reshape(x_1tdim, [-1] + x_shape[-dim_idx_len:])
for dim_idx in range(dim_idx_len, 0, -1):
x_1tdim = tf.concat([x_1tdim] * xscales.pop(), dim_idx)
output_shape = x_shape[:dim_idx_start] + x_scaled_shape + x_shape[-1:]
x_1tdim = tf.reshape(x_1tdim, output_shape)
return x_1tdim
x_shape = x.get_shape().as_list()
trailing_shape = x_shape[dim_idx_end:]
x = tf.reshape(x, x_shape[:dim_idx_end] + [-1])
x = _tf_upscale_one_trailing_dim(x)
x = tf.reshape(x, x.shape.as_list()[:-1] + trailing_shape)
return x
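# Illustrative sketch (not part of the original library code): the
# concat-then-reshape trick used by _tf_upscale is nearest-neighbor
# repetition along the scaled dimension. A minimal numpy version of the
# single-axis case, assuming numpy is available; the helper name is
# hypothetical.
def _example_upscale_by_stack_and_reshape(scale=2):
  """Stacking copies on a new axis then reshaping equals np.repeat."""
  import numpy as np  # local import; illustration only
  x = np.arange(6).reshape(2, 3)              # upscale axis 0 by `scale`
  stacked = np.stack([x] * scale, axis=1)     # shape (2, scale, 3)
  upscaled = stacked.reshape(2 * scale, 3)    # each row now duplicated
  assert np.array_equal(upscaled, np.repeat(x, scale, axis=0))
  return upscaled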
class PoolOperation(Operation):
"""Pooling - average or max pool data along HW (2D) or DHW (3D) dimensions.
For the current implementation of backpropagation, we only handle cases
when strides == ksize and the input dimensions are divisible by ksize.
"""
def __init__(self, x, ksize, strides, pool_fn_string, name=None):
super(PoolOperation, self).__init__([x], name=name or "pool")
assert ksize == strides
if "2D" in pool_fn_string:
assert len(ksize) == 2
else:
assert "3D" in pool_fn_string
assert len(ksize) == 3
self._ksize = ksize
self._strides = strides
self._pool_fn_string = pool_fn_string
if "2D" in pool_fn_string:
batch_dims = x.shape.dims[:-3]
spatial_dims = x.shape.dims[-3:-1]
channel_dim = x.shape.dims[-1:]
else:
batch_dims = x.shape.dims[:-4]
spatial_dims = x.shape.dims[-4:-1]
channel_dim = x.shape.dims[-1:]
# Compute output_shape and allocate output Tensor.
output_spatial_dims = []
for spatial_dim, kernel_size, stride_size in zip(
spatial_dims, ksize, strides):
output_dim_size = (spatial_dim.size - kernel_size) // stride_size + 1
output_spatial_dim = Dimension(spatial_dim.name, output_dim_size)
output_spatial_dims.append(output_spatial_dim)
output_shape = Shape(batch_dims + output_spatial_dims + channel_dim)
self._outputs = [Tensor(self, output_shape, x.dtype)]
# Claim unsplittable dims.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"splittable", [dim.name for dim in spatial_dims]))
def gradient(self, grad_ys):
"""Returns the gradient to input, for unoverlapping pooling."""
x = self.inputs[0]
y = self.outputs[0]
dy = grad_ys[0]
dx = pool_backprop(x, y, dy,
self._ksize, self._strides, self._pool_fn_string)
return [dx]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
slicewise_fn = _pool_helper(
self._ksize, self._strides, self._pool_fn_string)
x = lowering.tensors[self.inputs[0]]
y = mesh_impl.slicewise(slicewise_fn, x)
lowering.set_tensor_lowering(self.outputs[0], y)
class PoolBackPropOperation(Operation):
"""Pooling backpropagation.
For the current implementation, we only handle cases when
strides == ksize and the input dimensions are divisible by ksize.
"""
def __init__(self, x, y, dy,
ksize, strides, pool_fn_string, name=None):
super(PoolBackPropOperation, self).__init__(
[x, y, dy], name=name or "pool_backprop")
assert ksize == strides
if "2D" in pool_fn_string:
assert len(ksize) == 2
else:
assert "3D" in pool_fn_string
assert len(ksize) == 3
self._ksize = ksize
self._strides = strides
self._pool_fn_string = pool_fn_string
self._outputs = [Tensor(self, x.shape, dy.dtype)]
def lower(self, lowering):
"""Returns the gradient to input, for unoverlapping pooling."""
mesh_impl = lowering.mesh_impl(self)
if self._pool_fn_string == "MAX_2D":
def slicewise_fn(x, y, dy):
y_scaled_back = _tf_upscale(y, -3, -1, self._strides)
dy_scaled_back = _tf_upscale(dy, -3, -1, self._strides)
return tf.cast(tf.equal(x, y_scaled_back), x.dtype) * dy_scaled_back
elif self._pool_fn_string == "MAX_3D":
def slicewise_fn(x, y, dy):
y_scaled_back = _tf_upscale(y, -4, -1, self._strides)
dy_scaled_back = _tf_upscale(dy, -4, -1, self._strides)
return tf.cast(tf.equal(x, y_scaled_back), x.dtype) * dy_scaled_back
elif self._pool_fn_string == "AVG_2D":
def slicewise_fn(x, y, dy):
del y
dy_scaled_back = _tf_upscale(dy, -3, -1, self._strides)
return dy_scaled_back / tf.constant(
self._strides[0] * self._strides[1], dtype=x.dtype)
elif self._pool_fn_string == "AVG_3D":
def slicewise_fn(x, y, dy):
del y
dy_scaled_back = _tf_upscale(dy, -4, -1, self._strides)
return dy_scaled_back / tf.constant(
self._strides[0] * self._strides[1] * self._strides[2],
dtype=x.dtype)
else:
raise ValueError("Pooling %s is not implemented." % self._pool_fn_string)
dx = mesh_impl.slicewise(
slicewise_fn, *[lowering.tensors[x] for x in self.inputs])
lowering.set_tensor_lowering(self.outputs[0], dx)
def pool_backprop(x, y, dy, ksize, strides, pool_fn_string, name=None):
return PoolBackPropOperation(x, y, dy,
ksize, strides, pool_fn_string,
name).outputs[0]
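# Illustrative sketch (not part of the original library code): for
# non-overlapping max pooling (strides == ksize), the backprop above
# upscales y and dy back to the input resolution and routes dy to the
# positions where the input equals the pooled maximum. A numpy rendition
# for a 2x2 pool, assuming numpy is available; the helper name is
# hypothetical.
def _example_maxpool_backprop_equality_mask():
  """Route dy to the argmax positions via the x == upscaled(y) mask."""
  import numpy as np  # local import; illustration only
  x = np.array([[1., 2., 0., 3.],
                [4., 0., 1., 2.],
                [0., 1., 5., 0.],
                [2., 3., 0., 6.]])
  # Non-overlapping 2x2 max pool with stride 2.
  y = x.reshape(2, 2, 2, 2).max(axis=(1, 3))            # shape (2, 2)
  dy = np.array([[10., 20.],
                 [30., 40.]])
  upscale = lambda a: np.repeat(np.repeat(a, 2, axis=0), 2, axis=1)
  dx = (x == upscale(y)).astype(x.dtype) * upscale(dy)
  expected = np.zeros_like(x)
  expected[1, 0] = 10.   # max 4 in the top-left window
  expected[0, 3] = 20.   # max 3 in the top-right window
  expected[3, 1] = 30.   # max 3 in the bottom-left window
  expected[3, 3] = 40.   # max 6 in the bottom-right window
  assert np.array_equal(dx, expected)
  return dx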
class ConcatOperation(Operation):
"""tf.concat.
All inputs have the same shape, except for the size of the dimension named
dim_name.
"""
def __init__(self, xs, concat_dim_name, name=None):
super(ConcatOperation, self).__init__(xs, name=name or "concat")
# verify that the shapes are all compatible
dim_names = [dim.name for dim in xs[0].shape.dims]
self._concat_dim_name = concat_dim_name
if concat_dim_name not in dim_names:
raise ValueError("xs[0] does not contain a dimension named dim_name")
self._axis = dim_names.index(concat_dim_name)
should_be_equal = [
x.shape.resize_dimension(concat_dim_name, 0) for x in xs]
if not all(s == should_be_equal[0] for s in should_be_equal):
raise ValueError("shapes are not compatible %s" % xs)
self._input_sizes = [x.shape.dims[self._axis].size for x in xs]
output_size = sum(self._input_sizes)
self._outputs = [
Tensor(self, xs[0].shape.resize_dimension(concat_dim_name, output_size),
xs[0].dtype)]
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"splittable", [concat_dim_name]))
def gradient(self, grad_ys):
dy = grad_ys[0]
return split(dy, self.outputs[0].shape.dims[self._axis], self._input_sizes)
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
if mesh_impl.tensor_dimension_to_mesh_axis(
Dimension(self._concat_dim_name, 0)) is not None:
raise ValueError("can't concat along split axis")
def slicewise_fn(*args):
return tf.concat(args, axis=self._axis, name="concat")
y = mesh_impl.slicewise(
slicewise_fn, *[lowering.tensors[x] for x in self._inputs])
lowering.set_tensor_lowering(self.outputs[0], y)
def concat(xs, concat_dim_name, name=None):
"""Like tf.concat.
All inputs must have equal shape except for the sizes in the concatenated
dimension. The dimension names should be the same, even that of the
concatenated dimension.
Args:
xs: a list of Tensors
concat_dim_name: a string
name: an optional string
Returns:
a Tensor
"""
return ConcatOperation(xs, concat_dim_name, name).outputs[0]
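# Illustrative usage sketch (not part of the original library code): build
# a tiny graph and concatenate two Tensors along the named "width"
# dimension. This only constructs the graph; evaluating it would further
# require a mesh implementation and a Lowering. Assumes the Graph, Mesh,
# Dimension, Shape classes and import_tf_tensor defined elsewhere in this
# module, plus the module's tensorflow import; the helper name is
# hypothetical.
def _example_concat_usage():
  """Concatenate a [batch=2, width=3] and a [batch=2, width=5] Tensor."""
  graph = Graph()
  mesh = Mesh(graph, "example_mesh")
  batch = Dimension("batch", 2)
  a = import_tf_tensor(mesh, tf.ones([2, 3]),
                       Shape([batch, Dimension("width", 3)]))
  b = import_tf_tensor(mesh, tf.zeros([2, 5]),
                       Shape([batch, Dimension("width", 5)]))
  c = concat([a, b], "width")
  assert c.shape.dimension_names == ["batch", "width"]
  assert c.shape.dims[1].size == 8
  return c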
class SplitOperation(Operation):
"""like tf.split.
TODO(noam, nikip): this code has never been run. Run it and test it.
"""
def __init__(self, x, split_dim, num_or_size_splits, name=None):
super(SplitOperation, self).__init__([x], name=name or "split")
self._split_dim = split_dim
if split_dim not in x.shape.dims:
raise ValueError("%s does not contain dimension %s" % (x, split_dim))
self._axis = x.shape.dims.index(split_dim)
if isinstance(num_or_size_splits, list):
self._output_sizes = num_or_size_splits
if sum(num_or_size_splits) != split_dim.size:
raise ValueError(
"Sizes do not add up %s %s" % (num_or_size_splits, split_dim))
else:
assert isinstance(num_or_size_splits, int)
assert split_dim.size % num_or_size_splits == 0
self._output_sizes = (
[split_dim.size // num_or_size_splits] * num_or_size_splits)
self._outputs = [
Tensor(self, x.shape.resize_dimension(split_dim.name, output_size),
x.dtype, index=i)
for i, output_size in enumerate(self._output_sizes)]
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"splittable", [split_dim.name]))
def gradient(self, grad_ys):
grad_ys = [g or zeros_like(o) for g, o in zip(grad_ys, self._outputs)]
return [concat(grad_ys, self._split_dim.name)]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
if mesh_impl.tensor_dimension_to_mesh_axis(self._split_dim) is not None:
raise ValueError("can't split along split axis")
def slicewise_fn(x):
# Since we return a tuple of tf.Tensor, slicewise will collate the
# outputs and return a tuple of LaidOutTensors.
return tuple(tf.split(x, self._output_sizes, axis=self._axis))
values = mesh_impl.slicewise(
slicewise_fn, lowering.tensors[self.inputs[0]])
for t, v in zip(self._outputs, values):
lowering.set_tensor_lowering(t, v)
def split(x, split_dim, num_or_size_splits, name=None):
"""Like tf.split.
Args:
x: a Tensor
split_dim: a Dimension in x.shape.dims
num_or_size_splits: either an integer dividing split_dim.size
or a list of integers adding up to split_dim.size
name: an optional string
Returns:
a list of Tensors.
"""
return SplitOperation(x, split_dim, num_or_size_splits, name=name).outputs
class StackOperation(Operation):
"""Like tf.stack."""
def __init__(self, xs, dim_name, axis, name=None):
super(StackOperation, self).__init__(xs, name=name or "stack")
self._axis = axis
self._new_dim = Dimension(dim_name, len(xs))
input_shape = xs[0].shape
for x in xs:
if x.shape != xs[0].shape:
raise ValueError(
"inputs to stack must have the same shape, got %s" % xs)
output_shape = Shape(
        input_shape.dims[:axis] + [self._new_dim] + input_shape.dims[axis:])
self._outputs = [Tensor(self, output_shape, xs[0].dtype)]
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"splittable", [dim_name]))
def gradient(self, grad_ys):
return unstack(grad_ys[0], self._new_dim)
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
if mesh_impl.tensor_dimension_to_mesh_axis(self._new_dim) is not None:
raise ValueError("can't stack along split axis")
inputs = [lowering.tensors[t] for t in self._inputs]
def slicewise_fn(*args):
return tf.stack(args, axis=self._axis)
ret = mesh_impl.slicewise(slicewise_fn, *inputs)
lowering.set_tensor_lowering(self.outputs[0], ret)
def stack(xs, dim_name, axis=0, name=None):
"""Stack multiple Tensors to make a new dimension.
Args:
xs: a list of Tensors with identical shapes.
dim_name: a string (name of the new dimension)
axis: an integer (index of the new dimension in the output shape)
name: an optional string
Returns:
a Tensor
"""
if axis < 0:
axis = xs[0].shape.ndims + 1 + axis
ret = StackOperation(xs, dim_name, axis, name).outputs[0]
return ret
class UnstackOperation(Operation):
"""Split into multiple Tensors, eliminating a dimension."""
def __init__(self, x, dim, name=None):
super(UnstackOperation, self).__init__([x], name=name or "unstack")
self._dim = dim
self._axis = x.shape.dims.index(dim)
output_shape = x.shape - dim
self._outputs = [
Tensor(self, output_shape, x.dtype, index=i) for i in xrange(dim.size)]
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"splittable", [dim.name]))
def gradient(self, grad_ys):
return [stack(grad_ys, self._dim.name, self._axis)]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
if mesh_impl.tensor_dimension_to_mesh_axis(self._dim) is not None:
raise ValueError("can't unstack along split axis")
def slicewise_fn(x):
return tuple(tf.unstack(x, num=self._dim.size, axis=self._axis))
output_values = mesh_impl.slicewise(
slicewise_fn, lowering.tensors[self._inputs[0]])
for t, v in zip(self.outputs, list(output_values)):
lowering.set_tensor_lowering(t, v)
def unstack(x, dim, name=None):
"""Split into multiple Tensors, eliminating a dimension.
Args:
x: a Tensor
dim: a Dimension
name: an optional string
Returns:
a list of dim.size Tensors, each with shape (x.shape - dim)
"""
return UnstackOperation(x, dim, name).outputs
def cumsum(x, dim, exclusive=False):
"""Cumulative sum.
Args:
x: a Tensor
dim: a Dimension
exclusive: a boolean
Returns:
a Tensor with the same shape as x.
"""
with tf.variable_scope("cumsum"):
new_name = "tmp_dim_cumsum"
new_dim = Dimension(new_name, dim.size)
new_shape = x.shape.rename_dimension(dim.name, new_name)
comparator = less if exclusive else less_equal
m = cast(
comparator(mtf_range(x.mesh, dim, dtype=tf.float32),
mtf_range(x.mesh, new_dim, dtype=tf.float32)), x.dtype)
ret = einsum([x, m], output_shape=new_shape)
return reshape(ret, x.shape)
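# Illustrative sketch (not part of the original library code): cumsum above
# is an einsum of x against a triangular 0/1 mask built by comparing the
# original and renamed position ranges. The same identity in plain numpy,
# assuming numpy is available; the helper name is hypothetical.
def _example_cumsum_as_masked_einsum(exclusive=False):
  """cumsum(x)[j] == sum_i x[i] * [i <= j]  (i < j when exclusive)."""
  import numpy as np  # local import; illustration only
  x = np.array([1.0, 2.0, 3.0, 4.0])
  i = np.arange(x.size)
  mask = i[:, None] < i[None, :] if exclusive else i[:, None] <= i[None, :]
  ret = np.einsum("i,ij->j", x, mask.astype(x.dtype))
  expected = np.cumsum(x) - (x if exclusive else 0.0)
  assert np.allclose(ret, expected)
  return ret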
def _einsum_helper(input_shapes, output_shape, mesh_impl):
"""Returns slicewise function and reduced mesh dimensions.
Assumes the output shape contains no new dimensions.
Args:
input_shapes: a list of Shapes
output_shape: a Shape
mesh_impl: a MeshImpl
Returns:
einsum_slice_fn: a function from tf.Tensors to tf.Tensor
reduced_mesh_axes: a list of integers
"""
input_shape_union = _shape_union(input_shapes)
total_num_dims = input_shape_union.ndims
# list of input shapes that contain all dimensions.
full_shapes = [
s for s in input_shapes + [output_shape] if s.ndims == total_num_dims]
full_shape = full_shapes[0] if full_shapes else input_shape_union
reduce_slice_fn, reduced_mesh_axes = _reduce_helper(
full_shape, output_shape, mesh_impl.tensor_layout(full_shape))
def einsum_slice_fn_naive(*slices):
# naive einsum implementation where we broadcast all inputs to the full
# shape, multiply componentwise, then reduce.
return reduce_slice_fn(functools.reduce(tf.multiply, [
_expand_dims(x, input_shape, full_shape)
for x, input_shape in zip(slices, input_shapes)]))
if full_shapes:
# it is not wasteful of space to broadcast fully and then reduce.
# this helps to avoid some inefficient GPU implementations.
einsum_slice_fn = einsum_slice_fn_naive
else:
# call tf.einsum
equation = _einsum_equation(input_shapes, output_shape)
def einsum_slice_fn(*slices):
if slices[0].dtype.is_floating:
return mesh_impl.einsum(equation, *slices)
else:
return einsum_slice_fn_naive(*slices)
return einsum_slice_fn, reduced_mesh_axes
class EinsumOperation(Operation):
"""Einstein summation (matmul, etc).
The equation follows the dimensions in the input and output shapes.
Every dimension must occur in at least two of the input/output Tensors.
i.e. no new dimensions in the output, and no reduction of dimensions that
occur in only one input.
"""
def __init__(self, inputs, output_shape, name=None):
super(EinsumOperation, self).__init__(inputs, name=name or "einsum")
if not inputs:
raise ValueError("Einsum needs at least one input")
for x in inputs:
if x.dtype != inputs[0].dtype:
raise ValueError("Input dtypes must be equal got %s"
% ([y.dtype for y in inputs],))
self._outputs = [Tensor(self, output_shape, inputs[0].dtype)]
def gradient(self, grad_ys):
dy = grad_ys[0]
xs = self.inputs
ret = []
for i in xrange(len(self.inputs)):
ret.append(
einsum([dy] + [xs[j] for j in xrange(len(xs)) if j != i], xs[i].shape)
)
return ret
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
xs = self.inputs
input_shape_set = set(sum([x.shape.dims for x in xs], []))
output_shape = self.outputs[0].shape
intersection_shape = Shape(
[d for d in output_shape.dims if d in input_shape_set])
einsum_slice_fn, reduced_mesh_axes = _einsum_helper(
[x.shape for x in self.inputs], intersection_shape, mesh_impl)
y = mesh_impl.slicewise(
einsum_slice_fn, *[lowering.tensors[x] for x in self.inputs])
if reduced_mesh_axes:
def add_counter_fn():
lowering.add_counter(
"allreduce/%s/einsum_op" % reduced_mesh_axes,
mesh_impl.laid_out_size(intersection_shape))
y = LazyAllreduceSum(
mesh_impl, y, reduced_mesh_axes, add_counter_fn=add_counter_fn)
# broadcast from intersection_shape to output_shape
if intersection_shape != output_shape:
y = mesh_impl.broadcast_impl(y, intersection_shape, output_shape)
lowering.set_tensor_lowering(self.outputs[0], y)
computation_shape = Shape(list(input_shape_set))
lowering.add_counter("einsum", mesh_impl.laid_out_size(computation_shape))
lowering.add_counter("einsum_unique", computation_shape.size)
class Conv2dOperation(Operation):
"""like tf.nn.conv2d.
  Always uses data format "NHWC" and a dilation rate of 1.
  padding: "SAME" or "VALID"
  TODO(nikip): support dilations.
  TODO(noam): implement more options.
"""
def __init__(self, conv_input, conv_filter, strides, padding, name=None):
super(Conv2dOperation, self).__init__(
[conv_input, conv_filter], name=name or "conv2d")
self._padding = padding
self._batch_dims = conv_input.shape.dims[:-3]
self._in_h_dim, self._in_w_dim, self._in_dim = conv_input.shape.dims[-3:]
self._fh_dim, self._fw_dim = conv_filter.shape.dims[:2]
f_in_dim, self._out_dim = conv_filter.shape.dims[2:]
if f_in_dim != self._in_dim:
raise ValueError("Dimensions do not match input=%s filter=%s"
% (conv_input, conv_filter))
out_h = self._in_h_dim.size
out_w = self._in_w_dim.size
if padding == "VALID":
out_h -= (self._fh_dim.size - 1)
out_w -= (self._fw_dim.size - 1)
self._strides = strides
if strides is not None:
out_h //= strides[1]
out_w //= strides[2]
self._out_h_dim = Dimension(self._in_h_dim.name, out_h)
self._out_w_dim = Dimension(self._in_w_dim.name, out_w)
output_shape = Shape(
self._batch_dims + [self._out_h_dim, self._out_w_dim, self._out_dim])
self._outputs = [Tensor(self, output_shape, conv_input.dtype)]
unsplittable_dims = [self._in_h_dim, self._in_w_dim, self._fh_dim,
self._fw_dim]
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"splittable", [dim.name for dim in unsplittable_dims]))
def gradient(self, grad_ys):
dy = grad_ys[0]
conv_input, conv_filter = self.inputs
return [
conv2d_backprop_input(self._inputs[0].shape,
conv_filter,
dy,
self._strides,
self._padding),
conv2d_backprop_filter(conv_input,
self._inputs[1].shape,
dy,
self._strides,
self._padding)]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
conv_input, conv_filter = self.inputs
if mesh_impl.tensor_dimension_to_mesh_axis(self._in_h_dim) is not None:
raise ValueError("can't slice along dimension h")
if mesh_impl.tensor_dimension_to_mesh_axis(self._in_w_dim) is not None:
raise ValueError("can't slice along dimension w")
if mesh_impl.tensor_dimension_to_mesh_axis(self._fh_dim) is not None:
raise ValueError("can't slice along dimension fh")
if mesh_impl.tensor_dimension_to_mesh_axis(self._fw_dim) is not None:
raise ValueError("can't slice along dimension fw")
def tf_fn(tf_input, tf_filter):
output = tf.nn.conv2d(
_tf_flatten_batch_dims(tf_input, 3),
tf_filter, self._strides, self._padding)
return _tf_restore_batch_dims(output, 3, tf_input)
y = mesh_impl.slicewise(
tf_fn, lowering.tensors[conv_input], lowering.tensors[conv_filter])
# reducing out input channels - may need to allreduce
in_mesh_axis = mesh_impl.tensor_dimension_to_mesh_axis(self._in_dim)
if in_mesh_axis is not None:
def add_counter_fn():
lowering.add_counter(
"allreduce/%s/conv2d_op" % [in_mesh_axis],
mesh_impl.laid_out_size(self.outputs[0].shape))
y = LazyAllreduceSum(mesh_impl, y, [in_mesh_axis], add_counter_fn)
lowering.set_tensor_lowering(self.outputs[0], y)
computation_shape = _shape_union([conv_filter.shape, self.outputs[0].shape])
lowering.add_counter("conv2d/forward",
mesh_impl.laid_out_size(computation_shape))
lowering.add_counter("conv2d_unique/forward", computation_shape.size)
class Conv2or3dBackpropInputOperation(Operation):
"""like tf.nn.conv2d/conv3d_backprop_input."""
def __init__(self, conv_dimension, is_transpose,
input_shape, conv_filter, dy, strides, padding, name=None):
assert conv_dimension in [2, 3]
self._trans = "_trans" if is_transpose else ""
default_name = "conv%dd%s_backprop" % (conv_dimension, self._trans)
super(Conv2or3dBackpropInputOperation, self).__init__(
[dy, conv_filter], name=name or default_name)
self._conv_dimension = conv_dimension
self._is_transpose = is_transpose
self._padding = padding
self._strides = strides
self._input_shape = input_shape
self._outputs = [Tensor(self, input_shape, dy.dtype)]
self._num_nonbatch_dims = conv_dimension + 1
# Rerun to take the new output into account.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
dy, conv_filter = self.inputs
input_sizes = mesh_impl.slice_shape(self.outputs[0].shape)
input_sizes = [list_product(input_sizes[:-self._num_nonbatch_dims])] + (
input_sizes[-self._num_nonbatch_dims:])
if self._is_transpose:
if self._conv_dimension == 2:
backprop_fn = tf.nn.conv2d
else:
backprop_fn = tf.nn.conv3d
def tf_fn(tf_dy, tf_filter):
return _tf_restore_batch_dims(
backprop_fn(
_tf_flatten_batch_dims(tf_dy, self._num_nonbatch_dims),
tf_filter,
self._strides, self._padding),
self._num_nonbatch_dims, tf_dy)
dx = mesh_impl.slicewise(
tf_fn, lowering.tensors[dy], lowering.tensors[conv_filter])
else: # if not self._is_transpose:
if self._conv_dimension == 2:
backprop_fn = tf.nn.conv2d_backprop_input
else:
backprop_fn = conv3d_backprop_input_v2
def tf_fn(tf_dy, tf_filter):
return _tf_restore_batch_dims(
backprop_fn(
input_sizes, tf_filter,
_tf_flatten_batch_dims(tf_dy, self._num_nonbatch_dims),
self._strides, self._padding),
self._num_nonbatch_dims, tf_dy)
dx = mesh_impl.slicewise(
tf_fn, lowering.tensors[dy], lowering.tensors[conv_filter])
# reducing out output channels - may need to allreduce
out_mesh_axis = mesh_impl.tensor_dimension_to_mesh_axis(dy.shape.dims[-1])
if out_mesh_axis is not None:
def add_counter_fn():
lowering.add_counter(
"allreduce/%s/conv%dd%s_op" % (
[out_mesh_axis], self._conv_dimension, self._trans),
mesh_impl.laid_out_size(self.outputs[0].shape))
dx = LazyAllreduceSum(mesh_impl, dx, [out_mesh_axis], add_counter_fn)
lowering.set_tensor_lowering(self.outputs[0], dx)
computation_shape = _shape_union([conv_filter.shape, dy.shape])
lowering.add_counter(
"conv%dd%s/backprop_input" % (self._conv_dimension, self._trans),
mesh_impl.laid_out_size(computation_shape))
lowering.add_counter(
"conv%dd%s_unique/backprop_input" % (self._conv_dimension, self._trans),
computation_shape.size)
def conv2d_backprop_input(input_shape,
conv_filter,
dy,
strides,
padding, name=None):
return Conv2or3dBackpropInputOperation(2, False,
input_shape,
conv_filter,
dy,
strides,
padding,
name=name).outputs[0]
class Conv2or3dBackpropFilterOperation(Operation):
"""Like tf.nn.conv2d_backprop_filter."""
def __init__(self, conv_dimension, is_transpose,
conv_input, filter_shape, dy, strides, padding, name=None):
assert conv_dimension in [2, 3]
self._trans = "_trans" if is_transpose else ""
default_name = "conv%dd%s_backprop_filter" % (conv_dimension, self._trans)
super(Conv2or3dBackpropFilterOperation, self).__init__(
[conv_input, dy], name=name or default_name)
self._conv_dimension = conv_dimension
self._is_transpose = is_transpose
self._padding = padding
self._strides = strides
self._filter_shape = filter_shape
self._outputs = [Tensor(self, filter_shape, dy.dtype)]
self._num_nonbatch_dims = conv_dimension + 1
# Rerun to take the new output into account.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
conv_input, dy = self.inputs
filter_sizes = mesh_impl.slice_shape(self.outputs[0].shape)
if self._conv_dimension == 2:
backprop_fn = tf.nn.conv2d_backprop_filter
else:
backprop_fn = conv3d_backprop_filter_v2
def tf_fn(tf_input, tf_dy):
if self._is_transpose:
y, x = tf_input, tf_dy
else:
x, y = tf_input, tf_dy
return backprop_fn(
_tf_flatten_batch_dims(x, self._num_nonbatch_dims),
filter_sizes,
_tf_flatten_batch_dims(y, self._num_nonbatch_dims),
self._strides,
self._padding)
df = mesh_impl.slicewise(
tf_fn, lowering.tensors[conv_input], lowering.tensors[dy])
# reducing out batch dimensions - may need to allreduce
reduced_mesh_axes = [
mesh_impl.tensor_dimension_to_mesh_axis(d)
for d in dy.shape.dims[:-self._num_nonbatch_dims]]
reduced_mesh_axes = [a for a in reduced_mesh_axes if a is not None]
if reduced_mesh_axes:
def add_counter_fn():
lowering.add_counter(
"allreduce/%s/conv%dd%s_backprop_filter" % (
reduced_mesh_axes, self._conv_dimension, self._trans),
mesh_impl.laid_out_size(self.outputs[0].shape))
df = LazyAllreduceSum(mesh_impl, df, reduced_mesh_axes, add_counter_fn)
lowering.set_tensor_lowering(self.outputs[0], df)
computation_shape = _shape_union([self.outputs[0].shape, dy.shape])
lowering.add_counter("conv%dd%s/backprop_filter" % (self._conv_dimension,
self._trans),
mesh_impl.laid_out_size(computation_shape))
lowering.add_counter(
"conv%dd%s_unique/backprop_filter" % (self._conv_dimension,
self._trans),
computation_shape.size)
def conv2d_backprop_filter(conv_input,
filter_shape,
dy,
strides,
padding, name=None):
return Conv2or3dBackpropFilterOperation(2, False,
conv_input,
filter_shape,
dy,
strides,
padding,
name=name).outputs[0]
class Conv3dOperation(Operation):
"""like tf.nn.conv3d.
Currently we assume that the data format is always "NDHWC".
  Always uses a dilation rate of 1.
  padding: "SAME" or "VALID"
  TODO(lehou): support more options such as dilation.
"""
def __init__(self, conv_input, conv_filter, strides, padding, name=None):
super(Conv3dOperation, self).__init__(
[conv_input, conv_filter], name=name or "conv3d")
self._padding = padding
self._batch_dims = conv_input.shape.dims[:-4]
self._in_d_dim, self._in_h_dim, self._in_w_dim, self._in_dim = (
conv_input.shape.dims[-4:])
self._fd_dim, self._fh_dim, self._fw_dim = conv_filter.shape.dims[:3]
f_in_dim, self._out_dim = conv_filter.shape.dims[3:]
if f_in_dim != self._in_dim:
raise ValueError("Dimensions do not match input=%s filter=%s"
% (conv_input, conv_filter))
out_d = self._in_d_dim.size
out_h = self._in_h_dim.size
out_w = self._in_w_dim.size
if padding == "VALID":
out_d -= (self._fd_dim.size - 1)
out_h -= (self._fh_dim.size - 1)
out_w -= (self._fw_dim.size - 1)
self._strides = strides
if strides is not None:
out_d //= strides[1]
out_h //= strides[2]
out_w //= strides[3]
self._out_d_dim = Dimension(self._in_d_dim.name, out_d)
self._out_h_dim = Dimension(self._in_h_dim.name, out_h)
self._out_w_dim = Dimension(self._in_w_dim.name, out_w)
output_shape = Shape(
self._batch_dims + [self._out_d_dim, self._out_h_dim,
self._out_w_dim, self._out_dim])
self._outputs = [Tensor(self, output_shape, conv_input.dtype)]
unsplittable_dims = [self._in_d_dim, self._in_h_dim, self._in_w_dim,
self._fd_dim, self._fh_dim, self._fw_dim]
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"splittable", [dim.name for dim in unsplittable_dims]))
def gradient(self, grad_ys):
dy = grad_ys[0]
conv_input, conv_filter = self.inputs
return [
conv3d_backprop_input(self._inputs[0].shape,
conv_filter,
dy,
self._strides,
self._padding),
conv3d_backprop_filter(conv_input,
self._inputs[1].shape,
dy,
self._strides,
self._padding)]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
conv_input, conv_filter = self.inputs
if mesh_impl.tensor_dimension_to_mesh_axis(self._in_d_dim) is not None:
raise ValueError("can't slice along dimension d")
if mesh_impl.tensor_dimension_to_mesh_axis(self._in_h_dim) is not None:
raise ValueError("can't slice along dimension h")
if mesh_impl.tensor_dimension_to_mesh_axis(self._in_w_dim) is not None:
raise ValueError("can't slice along dimension w")
if mesh_impl.tensor_dimension_to_mesh_axis(self._fd_dim) is not None:
raise ValueError("can't slice along dimension fd")
if mesh_impl.tensor_dimension_to_mesh_axis(self._fh_dim) is not None:
raise ValueError("can't slice along dimension fh")
if mesh_impl.tensor_dimension_to_mesh_axis(self._fw_dim) is not None:
raise ValueError("can't slice along dimension fw")
def tf_fn(tf_input, tf_filter):
output = tf.nn.conv3d(
_tf_flatten_batch_dims(tf_input, 4),
tf_filter, self._strides, self._padding)
return _tf_restore_batch_dims(output, 4, tf_input)
y = mesh_impl.slicewise(
tf_fn, lowering.tensors[conv_input], lowering.tensors[conv_filter])
# reducing out input channels - may need to allreduce
in_mesh_axis = mesh_impl.tensor_dimension_to_mesh_axis(self._in_dim)
if in_mesh_axis is not None:
def add_counter_fn():
lowering.add_counter(
"allreduce/%s/conv3d_op" % [in_mesh_axis],
mesh_impl.laid_out_size(self.outputs[0].shape))
y = LazyAllreduceSum(mesh_impl, y, [in_mesh_axis], add_counter_fn)
lowering.set_tensor_lowering(self.outputs[0], y)
computation_shape = _shape_union([conv_filter.shape, self.outputs[0].shape])
lowering.add_counter("conv3d/forward",
mesh_impl.laid_out_size(computation_shape))
lowering.add_counter("conv3d_unique/forward", computation_shape.size)
def conv3d_backprop_input(input_shape,
conv_filter,
dy,
strides,
padding, name=None):
return Conv2or3dBackpropInputOperation(3, False,
input_shape,
conv_filter,
dy,
strides,
padding,
name=name).outputs[0]
def conv3d_backprop_filter(conv_input,
filter_shape,
dy,
strides,
padding, name=None):
return Conv2or3dBackpropFilterOperation(3, False,
conv_input,
filter_shape,
dy,
strides,
padding,
name=name).outputs[0]
class Conv2dTransposeOperation(Operation):
"""like tf.nn.conv2d_transpose.
Currently we assume that the data format is always "NHWC".
  Always uses a dilation rate of 1.
  padding: "SAME" or "VALID"
  TODO(lehou): support more options such as dilation.
"""
def __init__(self, conv_input, conv_filter, strides, padding, name=None):
super(Conv2dTransposeOperation, self).__init__(
[conv_input, conv_filter], name=name or "conv2d_transpose")
self._padding = padding
self._batch_dims = conv_input.shape.dims[:-3]
self._in_h_dim, self._in_w_dim, self._in_dim = conv_input.shape.dims[-3:]
self._fh_dim, self._fw_dim = conv_filter.shape.dims[:2]
# Filter shape is transposed.
self._out_dim, f_in_dim = conv_filter.shape.dims[2:]
if f_in_dim != self._in_dim:
raise ValueError("Dimensions do not match input=%s filter=%s"
% (conv_input, conv_filter))
# compute output shape.
    # For now, assume the padding doesn't change the output shape.
# TODO(lehou): work out the output shape in general cases.
out_h = self._in_h_dim.size
out_w = self._in_w_dim.size
self._strides = strides
if strides is not None:
out_h *= strides[1]
out_w *= strides[2]
# name output shape.
self._out_h_dim = Dimension(self._in_h_dim.name, out_h)
self._out_w_dim = Dimension(self._in_w_dim.name, out_w)
output_shape = Shape(self._batch_dims + [
self._out_h_dim, self._out_w_dim, self._out_dim])
self._outputs = [Tensor(self, output_shape, conv_input.dtype)]
unsplittable_dims = [self._in_h_dim, self._in_w_dim,
self._fh_dim, self._fw_dim]
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"splittable", [dim.name for dim in unsplittable_dims]))
def gradient(self, grad_ys):
dy = grad_ys[0]
conv_input, conv_filter = self.inputs
return [
conv2d_transpose_backprop_input(self._inputs[0].shape,
conv_filter,
dy,
self._strides,
self._padding),
conv2d_transpose_backprop_filter(conv_input,
self._inputs[1].shape,
dy,
self._strides,
self._padding)]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
conv_input, conv_filter = self.inputs
if mesh_impl.tensor_dimension_to_mesh_axis(self._in_h_dim) is not None:
raise ValueError("can't slice along dimension h")
if mesh_impl.tensor_dimension_to_mesh_axis(self._in_w_dim) is not None:
raise ValueError("can't slice along dimension w")
if mesh_impl.tensor_dimension_to_mesh_axis(self._fh_dim) is not None:
raise ValueError("can't slice along dimension fh")
if mesh_impl.tensor_dimension_to_mesh_axis(self._fw_dim) is not None:
raise ValueError("can't slice along dimension fw")
# run conv2d_transpose in each slice.
def tf_fn(tf_input, tf_filter):
"""conv2d_transpose in tensorflow."""
# Get the output shape.
# Here, we compute flattened batch size from tf_input, since there can be
# split along batch dimensions.
flattened_batch_size = 1
for dim in tf_input.shape[:-3]:
flattened_batch_size *= dim
flattened_output_shape = [
flattened_batch_size, self._out_h_dim.size,
self._out_w_dim.size, self._out_dim.size]
output = tf.nn.conv2d_backprop_input(
flattened_output_shape, tf_filter,
_tf_flatten_batch_dims(tf_input, 3),
self._strides, self._padding)
return _tf_restore_batch_dims(output, 3, tf_input)
y = mesh_impl.slicewise(
tf_fn, lowering.tensors[conv_input], lowering.tensors[conv_filter])
# reducing out input channels - may need to allreduce
in_mesh_axis = mesh_impl.tensor_dimension_to_mesh_axis(self._in_dim)
if in_mesh_axis is not None:
def add_counter_fn():
lowering.add_counter(
"allreduce/%s/conv2d_transpose_op" % [in_mesh_axis],
mesh_impl.laid_out_size(self.outputs[0].shape))
y = LazyAllreduceSum(mesh_impl, y, [in_mesh_axis], add_counter_fn)
lowering.set_tensor_lowering(self.outputs[0], y)
computation_shape = _shape_union([conv_filter.shape, self.outputs[0].shape])
lowering.add_counter("conv2d_transpose/forward",
mesh_impl.laid_out_size(computation_shape))
lowering.add_counter("conv2d_transpose_unique/forward",
computation_shape.size)
def conv2d_transpose_backprop_input(input_shape,
conv_filter,
dy,
strides,
padding, name=None):
return Conv2or3dBackpropInputOperation(2, True,
input_shape,
conv_filter,
dy,
strides,
padding,
name=name).outputs[0]
def conv2d_transpose_backprop_filter(conv_input,
filter_shape,
dy,
strides,
padding, name=None):
return Conv2or3dBackpropFilterOperation(2, True,
conv_input,
filter_shape,
dy,
strides,
padding,
name=name).outputs[0]
class Conv3dTransposeOperation(Operation):
"""like tf.nn.conv3d_transpose.
Currently we assume that the data format is always "NDHWC".
  Always uses a dilation rate of 1.
  padding: "SAME" or "VALID"
  TODO(lehou): support more options such as dilation.
"""
def __init__(self, conv_input, conv_filter, strides, padding, name=None):
super(Conv3dTransposeOperation, self).__init__(
[conv_input, conv_filter], name=name or "conv3d_transpose")
self._padding = padding
self._batch_dims = conv_input.shape.dims[:-4]
self._in_d_dim, self._in_h_dim, self._in_w_dim, self._in_dim = (
conv_input.shape.dims[-4:])
self._fd_dim, self._fh_dim, self._fw_dim = conv_filter.shape.dims[:3]
# Filter shape is transposed.
self._out_dim, f_in_dim = conv_filter.shape.dims[3:]
if f_in_dim != self._in_dim:
raise ValueError("Dimensions do not match input=%s filter=%s"
% (conv_input, conv_filter))
# compute output shape.
    # For now, assume the padding doesn't change the output shape.
# TODO(lehou): work out the output shape in general cases.
out_d = self._in_d_dim.size
out_h = self._in_h_dim.size
out_w = self._in_w_dim.size
self._strides = strides
if strides is not None:
out_d *= strides[1]
out_h *= strides[2]
out_w *= strides[3]
# name output shape.
self._out_d_dim = Dimension(self._in_d_dim.name, out_d)
self._out_h_dim = Dimension(self._in_h_dim.name, out_h)
self._out_w_dim = Dimension(self._in_w_dim.name, out_w)
output_shape = Shape(self._batch_dims + [self._out_d_dim, self._out_h_dim,
self._out_w_dim, self._out_dim])
self._outputs = [Tensor(self, output_shape, conv_input.dtype)]
unsplittable_dims = [self._in_d_dim, self._in_h_dim, self._in_w_dim,
self._fd_dim, self._fh_dim, self._fw_dim]
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"splittable", [dim.name for dim in unsplittable_dims]))
def gradient(self, grad_ys):
dy = grad_ys[0]
conv_input, conv_filter = self.inputs
return [
conv3d_transpose_backprop_input(self._inputs[0].shape,
conv_filter,
dy,
self._strides,
self._padding),
conv3d_transpose_backprop_filter(conv_input,
self._inputs[1].shape,
dy,
self._strides,
self._padding)]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
conv_input, conv_filter = self.inputs
if mesh_impl.tensor_dimension_to_mesh_axis(self._in_d_dim) is not None:
raise ValueError("can't slice along dimension d")
if mesh_impl.tensor_dimension_to_mesh_axis(self._in_h_dim) is not None:
raise ValueError("can't slice along dimension h")
if mesh_impl.tensor_dimension_to_mesh_axis(self._in_w_dim) is not None:
raise ValueError("can't slice along dimension w")
if mesh_impl.tensor_dimension_to_mesh_axis(self._fd_dim) is not None:
raise ValueError("can't slice along dimension fd")
if mesh_impl.tensor_dimension_to_mesh_axis(self._fh_dim) is not None:
raise ValueError("can't slice along dimension fh")
if mesh_impl.tensor_dimension_to_mesh_axis(self._fw_dim) is not None:
raise ValueError("can't slice along dimension fw")
# run conv3d_transpose in each slice.
def tf_fn(tf_input, tf_filter):
"""conv3d_transpose in tensorflow."""
# Get the output shape.
# Here, we compute flattened batch size from tf_input, since there can be
# split along batch dimensions.
flattened_batch_size = 1
for dim in tf_input.shape[:-4]:
flattened_batch_size *= dim
flattened_output_shape = [flattened_batch_size,
self._out_d_dim.size, self._out_h_dim.size,
self._out_w_dim.size, self._out_dim.size]
output = conv3d_backprop_input_v2(
flattened_output_shape, tf_filter,
_tf_flatten_batch_dims(tf_input, 4),
self._strides, self._padding)
return _tf_restore_batch_dims(output, 4, tf_input)
y = mesh_impl.slicewise(
tf_fn, lowering.tensors[conv_input], lowering.tensors[conv_filter])
# reducing out input channels - may need to allreduce
in_mesh_axis = mesh_impl.tensor_dimension_to_mesh_axis(self._in_dim)
if in_mesh_axis is not None:
def add_counter_fn():
lowering.add_counter(
"allreduce/%s/conv3d_transpose_op" % [in_mesh_axis],
mesh_impl.laid_out_size(self.outputs[0].shape))
y = LazyAllreduceSum(mesh_impl, y, [in_mesh_axis], add_counter_fn)
lowering.set_tensor_lowering(self.outputs[0], y)
computation_shape = _shape_union([conv_filter.shape, self.outputs[0].shape])
lowering.add_counter("conv3d_transpose/forward",
mesh_impl.laid_out_size(computation_shape))
lowering.add_counter("conv3d_transpose_unique/forward",
computation_shape.size)
def conv3d_transpose_backprop_input(input_shape,
conv_filter,
dy,
strides,
padding, name=None):
return Conv2or3dBackpropInputOperation(3, True,
input_shape,
conv_filter,
dy,
strides,
padding,
name=name).outputs[0]
def conv3d_transpose_backprop_filter(conv_input,
filter_shape,
dy,
strides,
padding, name=None):
return Conv2or3dBackpropFilterOperation(3, True,
conv_input,
filter_shape,
dy,
strides,
padding,
name=name).outputs[0]
class ShiftOperation(Operation):
"""Shift by a static offset in one dimension."""
def __init__(self, x, offset, dim, wrap, name=None):
"""Create a shift operation.
Shift x right by +offset in dimension dim.
If offset is negative, shift left.
If wrap is true then wrap-around. Else, pad with zeros.
Args:
x: a Tensor
offset: an integer
dim: a Dimension of x
wrap: a boolean - whether to wrap or pad.
name: an optional string
"""
super(ShiftOperation, self).__init__([x], name=name or "shift")
self._dim = dim
self._axis = x.shape.dims.index(dim)
self._offset = offset
self._wrap = wrap
self._outputs = [Tensor(self, x.shape, x.dtype)]
def gradient(self, grad_ys):
return [shift(grad_ys[0], -self._offset, self._dim, self._wrap)]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
mesh_axis = mesh_impl.tensor_dimension_to_mesh_axis(self._dim)
inputs = self._inputs[0]
ndims = self._inputs[0].shape.ndims
axis = self._axis
dim = self._dim
lowered_x = lowering.tensors[inputs]
if not self._wrap and abs(self._offset) >= dim.size:
lowering.set_tensor_lowering(
self.outputs[0],
mesh_impl.slicewise(tf.zeros_like, lowered_x))
return
def my_slice(x, start, size):
assert size >= 0
begin = [0] * axis + [start] + [0] * (ndims - axis - 1)
size = [-1] * axis + [size] + [-1] * (ndims - axis - 1)
return tf.slice(x, begin, size)
if mesh_axis is None:
def slicewise_fn(x):
"""Slicewise function."""
def my_pad(s, begin_pad, end_pad):
paddings = ([[0, 0]] * axis + [[begin_pad, end_pad]]
+ [[0, 0]] * (ndims - axis - 1))
return tf.pad(s, paddings)
if self._wrap:
offset = self._offset % dim.size
return tf.concat([my_slice(x, dim.size - offset, offset),
my_slice(x, 0, dim.size - offset)], axis=axis)
elif self._offset > 0:
return my_pad(
my_slice(x, 0, dim.size - self._offset), self._offset, 0)
else:
neg_offset = -self._offset
return my_pad(
my_slice(x, neg_offset, dim.size - neg_offset), 0, neg_offset)
lowered_y = mesh_impl.slicewise(slicewise_fn, lowered_x)
else:
mesh_dim_size = mesh_impl.shape.dims[mesh_axis].size
tensor_dim_size = self._dim.size
block_size = tensor_dim_size // mesh_dim_size
odiv = self._offset // block_size
omod = self._offset % block_size
laid_out_size = mesh_impl.laid_out_size(inputs.shape)
if omod == 0:
# shift by an integral number of processors.
lowered_y = mesh_impl.shift_by_n_processors(
lowered_x, mesh_axis, odiv, self._wrap)
lowering.add_counter("shift[%d]" % odiv, laid_out_size)
else:
# shift by odiv processors + omod positions
sliced = mesh_impl.slicewise(
lambda x: my_slice(x, 0, block_size - omod), lowered_x)
second_part = mesh_impl.shift_by_n_processors(
sliced, mesh_axis, odiv, self._wrap)
lowering.add_counter(
"shift[%d]" % odiv,
laid_out_size * (block_size - omod) // block_size)
sliced = mesh_impl.slicewise(
lambda x: my_slice(x, block_size - omod, omod), lowered_x)
first_part = mesh_impl.shift_by_n_processors(
sliced, mesh_axis, odiv + 1, self._wrap)
lowered_y = mesh_impl.slicewise(
lambda a, b: tf.concat([a, b], axis), first_part, second_part)
lowering.add_counter(
"shift[%d]" % (odiv + 1), laid_out_size * omod // block_size)
lowering.set_tensor_lowering(self.outputs[0], lowered_y)
def shift(x, offset, dim, wrap, name=None):
"""Shift operation.
Shift x right by +offset in dimension dim.
Args:
x: a Tensor
offset: an integer. If negative, shift left instead of right.
dim: a Dimension of x
wrap: a boolean - whether to wrap (True) or pad with zeros (False).
name: an optional string
Returns:
a Tensor with the same shape and dtype as x
"""
return ShiftOperation(x, offset, dim, wrap, name=name).outputs[0]
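# Illustrative sketch (not part of the original library code): on a single
# slice, a wrapped shift is a roll, and an unwrapped shift zero-pads the
# vacated positions. A numpy rendition of both cases for a positive offset,
# assuming numpy is available; the helper name is hypothetical.
def _example_shift_semantics(offset=2):
  """Wrapped shift == np.roll; unwrapped shift zero-pads the vacated slots."""
  import numpy as np  # local import; illustration only
  x = np.array([1, 2, 3, 4, 5])
  wrapped = np.concatenate([x[-offset:], x[:-offset]])
  assert np.array_equal(wrapped, np.roll(x, offset))
  padded = np.concatenate([np.zeros(offset, dtype=x.dtype), x[:-offset]])
  assert np.array_equal(padded, [0, 0, 1, 2, 3])
  return wrapped, padded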
def dynamic_shift(x, offset, dim, wrap):
"""Shift with dynamic offset.
Shift x right by +offset in dimension dim.
Args:
x: a Tensor
    offset: a Tensor whose shape is a subset of x.shape.dims - [dim]
dim: a Dimension of x
wrap: a boolean - whether to wrap (True) or pad with zeros (False).
Returns:
a Tensor with the same shape and dtype as x
"""
if dim not in x.shape.dims:
raise ValueError("dim must be a dimension of x")
if dim in offset.shape.dims:
raise ValueError("dim may not appear in offset")
for d in offset.shape.dims:
if d not in x.shape.dims:
raise ValueError("offset.shape %s must be a subset of x.shape %s"
% (offset.shape, x.shape))
tmp_dim = Dimension("dynamic_shift_tmp", dim.size)
x_reshaped = replace_dimensions(x, dim, tmp_dim)
dim_range = mtf_range(x.mesh, dim, dtype=tf.int32)
tmp_dim_range = mtf_range(x.mesh, tmp_dim, dtype=tf.int32)
tmp_dim_range_offset = tmp_dim_range + offset
if wrap:
tmp_dim_range_offset = mod(tmp_dim_range_offset, dim.size)
perm = cast(equal(dim_range, tmp_dim_range_offset), x.dtype)
return einsum([x_reshaped, perm], output_shape=x.shape)
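# Illustrative sketch (not part of the original library code): dynamic_shift
# builds a 0/1 permutation matrix by comparing an output-position range with
# the offset input-position range, then applies it with einsum. The same
# idea in numpy for a scalar offset with wrap-around, assuming numpy is
# available; the helper name is hypothetical.
def _example_dynamic_shift_as_einsum(offset=2):
  """Shift right by `offset` (with wrap) via a one-hot permutation matrix."""
  import numpy as np  # local import; illustration only
  x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
  n = x.size
  out_pos = np.arange(n)                    # positions in the output dim
  in_pos = np.arange(n)                     # positions in the temporary dim
  perm = (out_pos[:, None] == (in_pos[None, :] + offset) % n).astype(x.dtype)
  shifted = np.einsum("ij,j->i", perm, x)
  assert np.array_equal(shifted, np.roll(x, offset))
  return shifted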
class SliceOperation(Operation):
"""tf.slice.
We support the slice operation along one axis. Similar to tf.slice, specify
the begin and size values for the slice_dim.
"""
def __init__(self, x, begin, size, slice_dim_name, name=None):
super(SliceOperation, self).__init__([x], name=name or "slice")
dim_names = x.shape.dimension_names
self._axis = axis = dim_names.index(slice_dim_name)
self._begin = begin
self._slice_dim = Dimension(slice_dim_name, size)
input_shape = self._inputs[0].shape
output_shape = Shape(
input_shape.dims[:axis] + [self._slice_dim] + input_shape.dims[axis+1:])
self._outputs = [Tensor(self, output_shape, x.dtype)]
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"splittable", [slice_dim_name]))
def gradient(self, grad_ys):
actual_size = self._inputs[0].shape.dims[self._axis].size
return [
pad(grad_ys[0],
[self._begin, actual_size - self._slice_dim.size - self._begin],
self._slice_dim.name)]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
if mesh_impl.tensor_dimension_to_mesh_axis(self._slice_dim) is not None:
raise ValueError("can't slice along split axis")
inputs = self._inputs[0]
ndims = self._inputs[0].shape.ndims
axis = self._axis
begin = [0] * axis + [self._begin] + [0] * (ndims - axis - 1)
size = [-1] * axis + [self._slice_dim.size] + [-1] * (ndims - axis - 1)
def slicewise_fn(x, begin, size):
return tf.slice(x, begin, size, name="slice")
y = mesh_impl.slicewise(
slicewise_fn, lowering.tensors[inputs], begin, size)
lowering.set_tensor_lowering(self.outputs[0], y)
class PadOperation(Operation):
"""tf.pad.
Similar to tf.pad but we only pad along one axis given by pad_dim_name
with values specified by paddings. paddings is a list of two
values, giving the padding value before and after pad_dim.
"""
def __init__(self, x, paddings, pad_dim_name, name=None):
super(PadOperation, self).__init__([x], name=name or "pad")
assert len(paddings) == 2
input_shape = self._inputs[0].shape
dim_names = [dim.name for dim in x.shape.dims]
if pad_dim_name not in dim_names:
raise ValueError("Padding dim name %s not found in input." % pad_dim_name)
self._paddings = paddings
self._axis = axis = dim_names.index(pad_dim_name)
output_size = input_shape.dims[axis].size + sum(paddings)
self._output_dim = Dimension(pad_dim_name, output_size)
output_shape = Shape(
input_shape.dims[:axis] +
[self._output_dim] + input_shape.dims[axis+1:])
self._outputs = [Tensor(self, output_shape, x.dtype)]
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"splittable", [pad_dim_name]))
def gradient(self, grad_ys):
slice_dim_name = self._output_dim.name
slice_size = self._inputs[0].shape.dims[self._axis].size
return [mtf_slice(grad_ys[0], self._paddings[0],
slice_size, slice_dim_name)]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
if mesh_impl.tensor_dimension_to_mesh_axis(self._output_dim) is not None:
raise ValueError("can't pad along split axis")
inputs = self._inputs[0]
ndims = self._inputs[0].shape.ndims
axis = self._axis
    paddings = ([[0, 0]] * axis + [self._paddings] +
                [[0, 0]] * (ndims - axis - 1))
def slicewise_fn(x, paddings):
return tf.pad(x, paddings, name="pad")
y = mesh_impl.slicewise(
slicewise_fn, lowering.tensors[inputs], paddings)
lowering.set_tensor_lowering(self.outputs[0], y)
class OneHotOperation(Operation):
"""Like tf.one_hot.
"""
def __init__(self, indices, output_dim, on_value, off_value, dtype,
name=None):
super(OneHotOperation, self).__init__([indices], name=name or "one_hot")
if not indices.dtype.is_integer:
raise ValueError("indices requires an integer dtype got %s" % indices)
self._output_dim = output_dim
self._on_value = on_value
self._off_value = off_value
self._dtype = dtype
output_shape = Shape(indices.shape.dims + [output_dim])
self._outputs = [Tensor(self, output_shape, dtype)]
# Rerun to take the new output into account.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
indices = self.inputs[0]
output_shape = self.outputs[0].shape
output_slice_shape = mesh_impl.slice_shape(output_shape)
mesh_axis = mesh_impl.tensor_dimension_to_mesh_axis(self._output_dim)
depth = output_slice_shape[-1]
if mesh_axis is None:
offset = 0
else:
offset = mesh_impl.slicewise(
tf.multiply, mesh_impl.laid_out_pcoord(mesh_axis), depth)
def slicewise_fn(indices_slice, offset):
return tf.one_hot(indices_slice - offset,
depth,
on_value=tf.cast(self._on_value, self._dtype),
off_value=tf.cast(self._off_value, self._dtype),
dtype=self._dtype)
y = mesh_impl.slicewise(
slicewise_fn, lowering.tensors[indices], offset)
lowering.set_tensor_lowering(self.outputs[0], y)
class ImportOperation(Operation):
"""Import a tf.Tensor onto a mesh."""
def __init__(self, mesh, tf_tensor, shape, name=None):
super(ImportOperation, self).__init__([], mesh=mesh, name=name or "import")
tf_tensor = tf.convert_to_tensor(tf_tensor)
if not tf_tensor.shape.is_compatible_with(shape.to_integer_list):
raise ValueError("Incompatible Shape - trying to import %s with shape %s"
% (tf_tensor, shape))
self._outputs = [Tensor(self, shape, tf_tensor.dtype)]
self._tf_tensor = tf_tensor
# Rerun to take the new output into account.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
lowering.set_tensor_lowering(
self.outputs[0],
mesh_impl.import_tf_tensor(self.outputs[0], self._tf_tensor))
class ImportLaidOutTensorOperation(Operation):
"""Import LaidOutTensor."""
def __init__(self, mesh, laid_out_tensor, shape, name=None):
super(ImportLaidOutTensorOperation, self).__init__([],
mesh=mesh,
name=name or "import")
dtype = laid_out_tensor.tensor_list[0].dtype
self._outputs = [Tensor(self, shape, dtype)]
self._laid_out_tensor = laid_out_tensor
# For this operation, it doesn't make sense to talk about the splittability
# of dimensions, because laid_out_tensor depends on a particular layout.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims("unsplittable"))
def lower(self, lowering):
lowering.set_tensor_lowering(self.outputs[0], self._laid_out_tensor)
def anonymous_shape(shape):
shape = convert_to_shape(shape)
return Shape([Dimension("_anonymous_%i" % i, d.size)
for i, d in enumerate(shape)])
def anonymize(x):
return reshape(x, anonymous_shape(x.shape))
def import_tf_tensor(mesh, tf_tensor, shape=None, name=None):
tf_tensor = tf.convert_to_tensor(tf_tensor)
if shape is None:
shape = Shape([])
assert not tf_tensor.shape.as_list()
return ImportOperation(
mesh, tf_tensor, convert_to_shape(shape), name=name).outputs[0]
def import_laid_out_tensor(mesh, laid_out_tensor, shape, name=None):
"""Import a laid_out_tensor.
For expert users.
  The input must already be laid out appropriately for the eventual
  MeshImpl and layout.
Args:
mesh: a Mesh
laid_out_tensor: a LaidOutTensor
shape: a mtf.Shape
name: an optional string
Returns:
a mtf.Tensor
"""
return ImportLaidOutTensorOperation(
mesh, laid_out_tensor, convert_to_shape(shape), name=name).outputs[0]
def import_fully_replicated(mesh, tf_tensor, shape, name=None):
return reshape(import_tf_tensor(
mesh, tf_tensor, anonymous_shape(shape), name), shape)
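# Illustrative sketch (not part of the original library): importing a plain
# tf.Tensor onto a mesh. The Graph/Mesh construction below follows the usual
# mesh-tensorflow pattern, but those constructor signatures are assumed here
# rather than shown in this file; dimension names and sizes are arbitrary.
def _example_import_tf_tensor():
  graph = Graph()
  mesh = Mesh(graph, "example_mesh")  # assumed signature: Mesh(graph, name)
  batch = Dimension("batch", 2)
  d_model = Dimension("d_model", 4)
  # The tf shape [2, 4] must be compatible with Shape([batch, d_model]).
  return import_tf_tensor(mesh, tf.zeros([2, 4]), Shape([batch, d_model]))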
class LazyLaidOutTensor(object):
"""Computes a function later to create a LaidOutTensor.
The given to_laid_out_tensor_fn() is called every time
the to_laid_out_tensor() method is called. Really, we should not need this
class, since XLA rematerialization should do it all for us.
"""
def __init__(self, to_laid_out_tensor_fn, slice_shape):
self._to_laid_out_tensor_fn = to_laid_out_tensor_fn
self._slice_shape = slice_shape
def to_laid_out_tensor(self):
return self._to_laid_out_tensor_fn()
@property
def slice_shape(self):
return self._slice_shape
class VariableDType(object):
"""Class containing datatype information for a variable.
A variable has three datatypes.
master_dtype:
the datatype used for storing the variable to checkpoints
slice_dtype:
the datatype used for maintaining and updating the value during training
activation_dtype:
the datatype used for computation. Calls to get_variable return a Tensor
with this datatype.
If slice_dtype=tf.bfloat16 during training, then repeated roundoff errors
interfere with model quality - use tf.float32 instead. Otherwise, tf.bfloat16
can help reduce memory usage and checkpoint size. It is necessary to keep
master_dtype the same between training/inference/evaluation in order to read
and write checkpoints.
We will later extend this functionality to allow for custom quantization code.
"""
def __init__(self,
master_dtype=tf.float32,
slice_dtype=None,
activation_dtype=None):
self._master_dtype = master_dtype
self._slice_dtype = slice_dtype or master_dtype
self._activation_dtype = activation_dtype or master_dtype
@property
def master_dtype(self):
return self._master_dtype
@property
def slice_dtype(self):
return self._slice_dtype
@property
def activation_dtype(self):
return self._activation_dtype
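# Illustrative sketch (not part of the original library): one common way to
# combine the three dtypes described above. Using bfloat16 activations with
# float32 master/slice copies is an assumption, not a library default.
def _example_variable_dtype():
  return VariableDType(
      master_dtype=tf.float32,       # written to checkpoints
      slice_dtype=tf.float32,        # used for optimizer updates
      activation_dtype=tf.bfloat16)  # returned by get_variable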
class Variable(Operation):
"""Variable."""
def __init__(
self, mesh, name, shape, dtype, initializer, trainable, **kwargs):
super(Variable, self).__init__([], mesh, name="name_will_be_set_later")
if not isinstance(dtype, VariableDType):
raise ValueError("dtype must be a VariableDType got %s" % dtype)
self._dtype = dtype
self._trainable = trainable
if not isinstance(self, StackedVariable):
with tf.device(mesh.variable_placer_fn), utils.outside_all_rewrites():
self._master = tf.get_variable(
name,
shape.to_integer_list,
dtype=self.master_dtype,
initializer=initializer,
trainable=trainable,
**kwargs)
self._name = self._master.name[:self._master.name.find(":")]
self._outputs = [Tensor(self, shape, dtype.activation_dtype)]
# Rerun to take the new output into account.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
self.graph.all_variables.append(self)
if trainable:
self.graph.trainable_variables.append(self)
def __repr__(self):
return "Variable(%s)" % self.value
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
with utils.outside_all_rewrites():
sv = mesh_impl.LaidOutVariable(self, mesh_impl)
lowering.variables[self] = sv
lowering.set_tensor_lowering(
self.outputs[0],
mesh_impl.slicewise(
tf.cast, sv.laid_out_tensor, self.activation_dtype))
if self._trainable:
lowering.add_counter("variables/trainable", self.outputs[0].size)
else:
lowering.add_counter("variables/untrainable", self.outputs[0].size)
@property
def value(self):
return self.outputs[0]
@property
def shape(self):
return self.value.shape
@property
def size(self):
return self.shape.size
@property
def dtype(self):
return self._dtype
@property
def master_dtype(self):
return self._dtype.master_dtype
@property
def slice_dtype(self):
return self._dtype.slice_dtype
@property
def activation_dtype(self):
return self._dtype.activation_dtype
@property
def trainable(self):
return self._trainable
@property
def master_device(self):
return self._master.device
def get_master(self):
return self._master
def assign_to_master(self, val):
return tf.assign(self._master, val)
class StackedVariable(Variable):
"""A Variable which combines many variables into one.
This is a performance optimization to reduce the time associated with large
numbers of slice variables. See Graph.rewrite_stack_variables() for usage.
"""
def __init__(self, vs):
"""Create a StackedVariable.
Args:
vs: a list of Variables
"""
shape = Shape([Dimension("stacked", len(vs))] + vs[0].shape.dims)
name = "stacked/" + vs[0].name
# TODO(noam): verify that vs are the same shape, etc.
super(StackedVariable, self).__init__(
vs[0].mesh, name, shape, vs[0].dtype, None, vs[0].trainable)
self._name = name
self._masters = [v.get_master() for v in vs]
self._original_names = [v.name for v in vs]
# Rerun to take the new output into account.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
@property
def original_names(self):
return self._original_names
@property
def master_device(self):
return self._masters[0].device
def get_master(self):
with tf.device(self.master_device):
return tf.stack(self._masters)
def assign_to_master(self, val):
return tf.group([
tf.assign(var_slice, val_slice) for var_slice, val_slice
in zip(self._masters, tf.unstack(val))])
class ReadVariable(Operation):
"""Read a variable."""
def __init__(self, var, name=None):
super(ReadVariable, self).__init__(
var.outputs, name=name or "read_variable")
self._var = var
self._outputs = [Tensor(self, var.shape, var.activation_dtype)]
def gradient(self, grad_ys):
return grad_ys
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
sv = lowering.variables[self._var]
lowering.set_tensor_lowering(
self.outputs[0], mesh_impl.slicewise(
tf.cast, sv.laid_out_tensor, self._var.activation_dtype))
def get_variable(mesh, name, shape, dtype=tf.float32,
master_dtype=None, slice_dtype=None, activation_dtype=None,
initializer=None, trainable=True,
**kwargs):
"""Create a new variable or retrieve an already-created one.
Args:
mesh: a Mesh
name: a string (uses the existing tf.variable_scope())
shape: a Shape
dtype: a VariableDType or a tf.DType
master_dtype: an optional tf.DType (deprecated - use dtype arg)
slice_dtype: an optional tf.DType (deprecated - use dtype arg)
activation_dtype: an optional tf.DType (deprecated - use dtype arg)
initializer: an optional tf initializer function
trainable: a boolean
**kwargs: additional keyword arguments to tf.get_variable
Returns:
a Tensor with the given shape and dtype equal to dtype.activation_dtype
"""
if dtype is None:
dtype = VariableDType(master_dtype, slice_dtype, activation_dtype)
elif isinstance(dtype, tf.DType):
dtype = VariableDType(
master_dtype or dtype, slice_dtype or dtype, activation_dtype or dtype)
elif not isinstance(dtype, VariableDType):
raise ValueError("dtype should be a tf.dtype or a mtf.VariableDType")
scope_name = tf.get_variable_scope().name
if scope_name:
full_name = scope_name + "/" + name
else:
full_name = name
if initializer is None:
tf.logging.warning(
"Using default tf glorot_uniform_initializer for variable %s "
" The initialzer will guess the input and output dimensions "
" based on dimension order." % full_name)
if full_name in mesh.graph.name_to_variable:
var = mesh.graph.name_to_variable[full_name]
else:
var = Variable(
mesh, name, convert_to_shape(shape), dtype, initializer, trainable,
**kwargs)
if var.name != full_name:
raise ValueError(
"Expected var.name == full_name. %s vs %s" % (var.name, full_name))
mesh.graph.name_to_variable[full_name] = var
return var.outputs[0]
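# Illustrative sketch (not part of the original library): creating a
# trainable weight matrix on a mesh. The dimension names, sizes and the
# zeros initializer are arbitrary choices for illustration; the return value
# is a Tensor with dtype equal to dtype.activation_dtype.
def _example_get_variable(mesh):
  d_in = Dimension("d_in", 16)
  d_out = Dimension("d_out", 32)
  return get_variable(
      mesh, "w", Shape([d_in, d_out]), dtype=tf.float32,
      initializer=tf.zeros_initializer())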
def read_variable(var):
return ReadVariable(var).outputs[0]
def assign_slice(variable, slice_var, val):
return tf.assign(
slice_var,
tf.cast(val, variable.slice_dtype))
def assign_add_slice(variable, slice_var, val):
val = tf.cast(val, variable.slice_dtype)
return tf.assign(slice_var, slice_var + val)
def assign_sub_slice(variable, slice_var, val):
val = tf.cast(val, variable.slice_dtype)
return tf.assign(slice_var, slice_var - val)
class Assign(Operation):
"""Assign to one or more variables."""
def __init__(self, variables, new_values, assign_fn=assign_slice, name=None):
super(Assign, self).__init__(
new_values, variables[0].mesh, name=name or "assign")
self._variables = variables
self._assign_fn = assign_fn
self._outputs = []
def lower(self, lowering):
ops = []
for var, val in zip(self._variables, self.inputs):
ops.append(lowering.variables[var].assign_to_slices(
self._assign_fn,
lowering.tensors[val].to_laid_out_tensor().all_slices))
lowering.operations[self] = tf.group(ops)
@property
def assign_fn(self):
return self._assign_fn
@property
def variables(self):
return self._variables
def assign(var, new_val, assign_fn=assign_slice, name=None):
"""Assign a new value to a variable.
Args:
var: either a Variable operation or its output Tensor,
or the output of a chain of unary operations starting with a Variable.
new_val: a Tensor
assign_fn: a function from
(mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation
name: a string for the Assign op.
Returns:
an Operation
Raises:
ValueError: if var is not a Variable and var.operation is not a Variable
"""
# find the original Variable operation.
if isinstance(var, Tensor):
var = var.operation
while not isinstance(var, Variable) and len(var.inputs) == 1:
var = var.inputs[0].operation
if not isinstance(var, Variable):
raise ValueError("var must be a mtf.Variable or its output Tensor.")
return Assign([var], [new_val], assign_fn=assign_fn, name=name)
def assign_add(var, new_val):
return assign(var, new_val, assign_fn=assign_add_slice)
def assign_sub(var, new_val):
return assign(var, new_val, assign_fn=assign_sub_slice)
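# Illustrative sketch (not part of the original library): incrementing a
# scalar variable. assign_add accepts the variable's output Tensor and walks
# back to the underlying Variable operation, as described in assign() below.
def _example_assign_add(mesh):
  counter = get_variable(
      mesh, "counter", Shape([]), dtype=tf.float32,
      initializer=tf.zeros_initializer())
  return assign_add(counter, constant(mesh, 1.0))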
class Depend(Operation):
"""Control dependency."""
def __init__(self, x, dependencies, name=None):
super(Depend, self).__init__([x], x.mesh, name=name or "depend")
for d in dependencies:
if not isinstance(d, Operation) and not isinstance(d, Tensor):
raise ValueError("dependencies must be mtf.Operations or mtf.Tensor."
"got %s" % d)
self._dependencies = dependencies
self._outputs = [Tensor(self, x.shape, x.dtype)]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
if not mesh_impl.supports_control_dependencies:
raise ValueError("Mesh does not suppport control dependencies.")
control_inputs = []
for d in self._dependencies:
if isinstance(d, Operation):
control_inputs.append(lowering.operations[d])
else:
control_inputs.append(lowering.tensors[d].tensor_list)
with tf.control_dependencies(tf.nest.flatten(control_inputs)):
lowering.set_tensor_lowering(
self.outputs[0],
mesh_impl.slicewise(tf.identity,
lowering.tensors[self.inputs[0]]))
def gradient(self, grad_ys):
return grad_ys
def depend(x, dependencies):
"""Identity of Tensor x that depends on operation dependencies.
Args:
x: a Tensor
dependencies: a list of Operations or Tensors
Returns:
    a Tensor
"""
return Depend(x, dependencies).outputs[0]
class Constant(Operation):
"""A tensor where every element is the same constant value."""
def __init__(self, mesh, value, shape, dtype, name=None):
super(Constant, self).__init__([], mesh, name=name or "constant")
self._outputs = [Tensor(self, shape, dtype)]
self._value = value
# Rerun to take the new output into account.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
slice_shape = mesh_impl.slice_shape(self.outputs[0].shape)
def tf_fn():
return tf.constant(value=self._value,
dtype=self.outputs[0].dtype,
shape=slice_shape)
lowering.set_tensor_lowering(self.outputs[0], mesh_impl.slicewise(tf_fn))
def constant(mesh, value, shape=None, dtype=tf.float32):
shape = convert_to_shape(shape)
return Constant(mesh, value,
shape if shape is not None else Shape([]),
dtype).outputs[0]
def zeros(mesh, shape, dtype=tf.float32):
return constant(mesh, 0, shape=convert_to_shape(shape), dtype=dtype)
def zeros_like(t):
return zeros(t.mesh, t.shape, dtype=t.dtype)
def ones(mesh, shape, dtype=tf.float32):
return constant(mesh, 1, shape=convert_to_shape(shape), dtype=dtype)
def ones_like(t):
return ones(t.mesh, t.shape, dtype=t.dtype)
class StopGradient(Operation):
"""Similar to tf.stop_gradient."""
def __init__(self, x, name=None):
super(StopGradient, self).__init__(
[x], x.mesh, name=name or "stop_gradient")
self._outputs = [Tensor(self, x.shape, x.dtype)]
def lower(self, lowering):
lowering.set_tensor_lowering(self.outputs[0],
lowering.tensors[self.inputs[0]])
@property
def has_gradient(self):
return False
def stop_gradient(x):
return StopGradient(x).outputs[0]
class ScalarSummaryOperation(Operation):
"""Similar to tf.Print."""
def __init__(self, name, x):
super(ScalarSummaryOperation, self).__init__(
[x], x.mesh, name=name)
if x.shape.dims:
raise ValueError("ScalarSummaryOperation takes a scalar")
self._outputs = [Tensor(self, x.shape, x.dtype)]
def lower(self, lowering):
lowered_input = lowering.tensors[self.inputs[0]].to_laid_out_tensor()
tf.add_to_collection(utils.SCALAR_SUMMARIES_COLLECTION_KEY,
(self.name, lowered_input.tensor_list[0]))
lowering.set_tensor_lowering(
self.outputs[0], lowered_input)
def gradient(self, grad_ys):
return grad_ys
def scalar_summary(name, x):
"""Call tf.summary.scalar.
Caveat - summaries do not generally work on TPU - they need to be rewritten
into a host call.
TODO(noam): provide a pointer to code for this.
Args:
name: a string
x: a 0-dimensional Tensor
Returns:
a Tensor which is identical in value to x
"""
return ScalarSummaryOperation(name, x)
class PrintOperation(Operation):
"""Similar to tf.Print."""
def __init__(self, x, data, message, name=None, **kwargs):
super(PrintOperation, self).__init__(
[x], x.mesh, name=name or "Print")
self._outputs = [Tensor(self, x.shape, x.dtype)]
self._data = data
self._message = message
self._kwargs = kwargs
def lower(self, lowering):
lowering.set_tensor_lowering(
self.outputs[0],
lowering.mesh_impl(self).Print(
lowering.tensors[self.inputs[0]],
[lowering.tensors[d].to_laid_out_tensor() for d in self._data],
self._message, **self._kwargs))
def gradient(self, grad_ys):
return grad_ys
def Print(x, data, message, **kwargs): # pylint: disable=invalid-name
"""Call tf.Print.
Args:
x: a Tensor.
data: a list of Tensor
message: a string
**kwargs: keyword arguments to tf.Print
Returns:
a Tensor which is identical in value to x
"""
message += " %s" % data
return PrintOperation(x, data, message, **kwargs).outputs[0]
class ReshapeOperation(Operation):
"""Similar to tf.stop_gradient."""
def __init__(self, x, new_shape, name=None):
super(ReshapeOperation, self).__init__([x], x.mesh, name=name or "reshape")
if x.shape.size != new_shape.size:
raise ValueError("Cannot reshape Tensor %s to shape %s - sizes differ."
% (x, new_shape))
self._outputs = [Tensor(self, new_shape, x.dtype)]
# Rerun to take the new output into account.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
def lower(self, lowering):
"""Lower the ReshapeOperation.
Reshaping can require collective communication between processors.
We haven't yet implemented all possible reshapes. We try to handle the
common cases here - otherwise we raise a NotImplementedError.
Args:
lowering: a Lowering
Raises:
NotImplementedError: if we haven't covered this case
"""
old_shape = self.inputs[0].shape
new_shape = self.outputs[0].shape
mesh_impl = lowering.mesh_impl(self)
slices = lowering.tensors[self.inputs[0]]
mesh_axis_to_cumprod_old = mesh_impl.mesh_axis_to_cumprod(old_shape)
mesh_axis_to_cumprod_new = mesh_impl.mesh_axis_to_cumprod(new_shape)
# Figure out what needs to be done for different mesh-axes
mesh_axes_allsplit = []
mesh_axes_allconcat = []
mesh_axes_alltoall = []
for mesh_axis, (old_cumprod, new_cumprod) in enumerate(
zip(mesh_axis_to_cumprod_old, mesh_axis_to_cumprod_new)):
if new_cumprod != old_cumprod:
if old_cumprod is None:
# split in new layout but not in old layout - we need an allsplit
mesh_axes_allsplit.append(mesh_axis)
elif new_cumprod is None:
# split in old layout but not in new layout - we need an allconcat
mesh_axes_allconcat.append(mesh_axis)
else:
# split differently in old and new layouts - we need an alltoall
mesh_axes_alltoall.append(mesh_axis)
laid_out_size = mesh_impl.laid_out_size(old_shape)
# list of (mesh_axis, tensor_axis) pairs to allsplit after the reshape
# typically we do the allsplit before the reshape, to save communication,
# but sometimes we need to delay it.
allsplit_after_reshape = []
for mesh_axis in mesh_axes_allsplit:
tensor_axis = old_shape.cumprod_to_tensor_axis(
mesh_axis_to_cumprod_new[mesh_axis])
if tensor_axis is None:
# delay allsplit until after reshape
tensor_axis = new_shape.cumprod_to_tensor_axis(
mesh_axis_to_cumprod_new[mesh_axis])
allsplit_after_reshape.append((mesh_axis, tensor_axis))
else:
slices = mesh_impl.allsplit(slices, mesh_axis, tensor_axis)
laid_out_size //= mesh_impl.shape[mesh_axis].size
for mesh_axis in mesh_axes_alltoall:
split_tensor_axis = old_shape.cumprod_to_tensor_axis(
mesh_axis_to_cumprod_new[mesh_axis])
if split_tensor_axis is None:
# TODO(noam): try to handle this case
raise NotImplementedError(
"Try first reshaping to insert a new tf dimension,"
" then changing layout. input_shape=%s output_shape=%s"
% (self.inputs[0].shape, self.outputs[0].shape))
concat_tensor_axis = old_shape.cumprod_to_tensor_axis(
mesh_axis_to_cumprod_old[mesh_axis])
assert concat_tensor_axis is not None
slices = mesh_impl.alltoall(
slices, mesh_axis, split_tensor_axis, concat_tensor_axis)
lowering.add_counter(
"alltoall/%s/reshape_op" % mesh_axis, laid_out_size)
for mesh_axis in mesh_axes_allconcat:
tensor_axis = old_shape.cumprod_to_tensor_axis(
mesh_axis_to_cumprod_old[mesh_axis])
assert tensor_axis is not None
slices = mesh_impl.allconcat(slices, mesh_axis, tensor_axis)
laid_out_size *= mesh_impl.shape[mesh_axis].size
lowering.add_counter(
"allconcat/%s/reshape_op" % mesh_axis, laid_out_size)
# now reshape the slices
new_slice_shape = mesh_impl.slice_shape(new_shape)
for mesh_axis, tensor_axis in allsplit_after_reshape:
new_slice_shape[tensor_axis] *= mesh_impl.shape[mesh_axis].size
def reshape_fn(x):
return tf.reshape(x, new_slice_shape)
slices = mesh_impl.slicewise_delay_allreduce(reshape_fn, slices)
for mesh_axis, tensor_axis in allsplit_after_reshape:
slices = mesh_impl.allsplit(slices, mesh_axis, tensor_axis)
lowering.set_tensor_lowering(self.outputs[0], slices)
def gradient(self, grad_ys):
return [reshape(grad_ys[0], self.inputs[0].shape)]
def reshape(x, new_shape, name="reshape"):
return ReshapeOperation(x, convert_to_shape(new_shape), name=name).outputs[0]
def transpose(x, new_shape, name="transpose"):
new_shape = convert_to_shape(new_shape)
if set(x.shape.dims) != set(new_shape.dims):
raise ValueError("x must have the same dimensions as new_shape %s vs %s"
% (x, new_shape))
return einsum([x], output_shape=new_shape, name=name)
def rename_dimension(x, old_name, new_name):
"""Reshape a Tensor, renaming one dimension.
Args:
x: a Tensor
old_name: a string
new_name: a string
Returns:
a Tensor
"""
return reshape(x, x.shape.rename_dimension(old_name, new_name))
def replace_dimensions(tensor_or_shape, old_dim_or_dims, new_dim_or_dims):
"""Replace dimensions in a Tensor or Shape.
old_dim_or_dims consists of a single dimension or a list of dimensions
that must occur consecutively in the input shape. They are replaced
by the dimensions in new_dim_or_dims.
Args:
tensor_or_shape: a Tensor or a Shape
old_dim_or_dims: a Dimension or a list of Dimensions
new_dim_or_dims: a Dimensions or a list of Dimensions
Returns:
a new Tensor or a Shape
"""
if isinstance(tensor_or_shape, Tensor):
return reshape(tensor_or_shape, replace_dimensions(
tensor_or_shape.shape, old_dim_or_dims, new_dim_or_dims))
if not isinstance(tensor_or_shape, Shape):
raise ValueError(
"tensor_or_shape must be a Tensor or Shape got %s" % (tensor_or_shape,))
in_dims = tensor_or_shape.dims
if isinstance(old_dim_or_dims, Dimension):
old_dim_or_dims = [old_dim_or_dims]
if isinstance(new_dim_or_dims, Dimension):
new_dim_or_dims = [new_dim_or_dims]
if not isinstance(old_dim_or_dims, list) or not old_dim_or_dims:
raise ValueError(
"old_dim_or_dims must be a Dimension or a list of Dimension got %s"
% (old_dim_or_dims,))
if not isinstance(new_dim_or_dims, list) or not new_dim_or_dims:
raise ValueError(
"new_dim_or_dims must be a Dimension or a list of Dimension got %s"
% (new_dim_or_dims,))
try:
positions = [in_dims.index(d) for d in old_dim_or_dims]
pos = positions[0]
if positions != list(range(pos, pos + len(positions))):
raise ValueError()
except ValueError:
raise ValueError(
"old_dim_or_dims must be a subsequence of the input's dimensions"
" old_dim_or_dims=%s input's dimensions=%s" %
(old_dim_or_dims, in_dims))
return Shape(in_dims[:pos] + new_dim_or_dims +
in_dims[pos + len(old_dim_or_dims):])
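# Illustrative sketch (not part of the original library): splitting one
# combined dimension into two, e.g. for a multi-head attention reshape. The
# dimension names and sizes are arbitrary; x is assumed to contain a
# "heads_and_size" dimension of size 64 in its shape.
def _example_replace_dimensions(x):
  combined = Dimension("heads_and_size", 64)
  heads = Dimension("heads", 8)
  size = Dimension("size", 8)
  return replace_dimensions(x, combined, [heads, size])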
def einsum(xs, output_shape=None, reduced_dims=None, name=None):
"""Einstein summation.
einsum(xs, output_shape) is equivalent to broadcasting all inputs
to the union of all of their shapes, multiplying them componentwise,
and finally reduce_summing down to output_shape.
One common case of this is matrix multiplication:
x has shape [a, b]
y has shape [b, c]
matmul(x, y) == einsum([x, y], output_shape=[a, c])
We provide a few options for specifying the output shape:
If neither output_shape nor reduced_dims is specified, then the output
  shape is set to contain all dimensions that appear exactly once in the
inputs, in order of appearance.
  If output_shape is not specified, then the output shape is set to contain
all dimensions that appear in xs but not in reduced_dims, in the order
that they appear in xs. If reduced_dims is also not specified, then
reduced_dims is set to the set of all dimensions that appear at least twice in
xs.
If both output_shape and reduced_dims are specified, then we check that
reduced_dims matches the set of dimensions present in xs but not in
output_shape, and throw an exception if it does not. This helps to reduce
bugs.
Args:
xs: a list of Tensors
output_shape: an optional Shape.
reduced_dims: an optional list of Dimensions.
name: an optional string
Returns:
a Tensor
Raises:
ValueError: if reduced_dims contradicts output_shape
"""
output_shape = convert_to_shape(output_shape)
input_dim_count = collections.defaultdict(int)
input_dims = []
for x in xs:
for d in x.shape.dims:
if d not in input_dim_count:
input_dims.append(d)
input_dim_count[d] += 1
if reduced_dims is not None:
for d in reduced_dims:
if not isinstance(d, Dimension):
raise ValueError("reduced_dims must be a list of Dimensions. Got %s."
% (reduced_dims,))
if output_shape is None:
if reduced_dims is None:
reduced_dims = [d for d, c in six.iteritems(input_dim_count) if c > 1]
output_shape = Shape([d for d in input_dims if d not in reduced_dims])
elif reduced_dims is not None:
computed_reduced_dims = [
d for d in input_dims if d not in output_shape.dims]
if set(computed_reduced_dims) != set(reduced_dims):
raise ValueError(
"Specified reduced_dims and output_shape do not match."
" xs=%s output_shape=%s reduced_dims=%s " % (
xs, output_shape, reduced_dims))
return EinsumOperation(xs, output_shape, name=name).outputs[0]
def matmul(a, b, output_shape=None, reduced_dims=None, name=None):
"""Alias for einsum([a, b])."""
return einsum(
[a, b], output_shape=output_shape, reduced_dims=reduced_dims, name=name)
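# Illustrative sketch (not part of the original library): the matrix
# multiplication case from the einsum docstring above, written out with
# named dimensions. The dimension names/sizes and random inputs are
# arbitrary; contraction happens over the shared dimension "b".
def _example_einsum_matmul(mesh):
  a_dim = Dimension("a", 4)
  b_dim = Dimension("b", 8)
  c_dim = Dimension("c", 16)
  x = random_uniform(mesh, Shape([a_dim, b_dim]))
  y = random_uniform(mesh, Shape([b_dim, c_dim]))
  return einsum([x, y], output_shape=Shape([a_dim, c_dim]))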
def _reduction_output_shape(x, output_shape, reduced_dim):
"""Helper function to reduce_sum, etc."""
if output_shape is None:
if reduced_dim is None:
return Shape([])
else:
if reduced_dim not in x.shape.dims:
raise ValueError(
"reduced_dim=%s not in x.shape.dims=%s" % (reduced_dim, x.shape))
return x.shape - reduced_dim
if reduced_dim is not None:
if [reduced_dim] != [d for d in x.shape.dims if d not in output_shape.dims]:
raise ValueError(
"reduced_dim contradicts output_shape:"
"x=%s output_shape=%s reduced_dim=%s" %
(x, output_shape, reduced_dim))
return output_shape
def reduce_sum(x,
disable_positional_args=None,
output_shape=None,
reduced_dim=None,
name=None):
"""Reduction on 1 or more axes.
If reduced_dim is present, then only that dimension is reduced out.
Alternatively, specify output_shape.
Do not specify both reduced_dim and output_shape.
If neither is specified, then all dimensions are reduced out.
Args:
x: a Tensor
disable_positional_args: None
output_shape: an optional Shape. Must be a subsequence of x.shape.
reduced_dim: a mtf.Dimension
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
reduced_dim = convert_to_dimension(reduced_dim)
assert disable_positional_args is None
output_shape = _reduction_output_shape(x, output_shape, reduced_dim)
if output_shape == x.shape:
return x
return ReduceOperation(x, output_shape, "SUM", name=name).outputs[0]
def reduce_mean(x,
disable_positional_args=None,
output_shape=None,
reduced_dim=None,
name=None):
"""Reduction on 1 or more axes.
If reduced_dim is present, then only that dimension is reduced out.
Alternatively, specify output_shape.
Do not specify both reduced_dim and output_shape.
If neither is specified, then all dimensions are reduced out.
Args:
x: a Tensor
disable_positional_args: None
output_shape: an optional Shape. Must be a subsequence of x.shape.
reduced_dim: a mtf.Dimension
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
reduced_dim = convert_to_dimension(reduced_dim)
assert disable_positional_args is None
output_shape = _reduction_output_shape(x, output_shape, reduced_dim)
with tf.variable_scope(name, default_name="reduce_mean"):
if output_shape == x.shape:
return x
return reduce_sum(
x, output_shape=output_shape) * (output_shape.size / x.shape.size)
def reduce_max(x,
disable_positional_args=None,
output_shape=None,
reduced_dim=None,
name=None):
"""Reduction on 1 or more axes.
Args:
x: a Tensor
disable_positional_args: None
output_shape: an optional Shape. Must be a subsequence of x.shape.
reduced_dim: an optional Dimension
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
reduced_dim = convert_to_dimension(reduced_dim)
assert disable_positional_args is None
output_shape = _reduction_output_shape(x, output_shape, reduced_dim)
if output_shape is None:
output_shape = Shape([])
if output_shape == x.shape:
return x
return ReduceOperation(
x, output_shape, "MAX", name=name or "reduce_max").outputs[0]
def reduce_min(x,
disable_positional_args=None,
output_shape=None,
reduced_dim=None,
name=None):
"""Reduction on 1 or more axes.
Args:
x: a Tensor
disable_positional_args: None
output_shape: an optional Shape. Must be a subsequence of x.shape.
reduced_dim: an optional Dimension
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
reduced_dim = convert_to_dimension(reduced_dim)
assert disable_positional_args is None
output_shape = _reduction_output_shape(x, output_shape, reduced_dim)
if output_shape is None:
output_shape = Shape([])
if output_shape == x.shape:
return x
return ReduceOperation(
x, output_shape, "MIN", name=name or "reduce_min").outputs[0]
def reduce_all(x,
disable_positional_args=None,
output_shape=None,
reduced_dim=None,
name=None):
output_shape = convert_to_shape(output_shape)
reduced_dim = convert_to_dimension(reduced_dim)
return cast(reduce_min(to_float(x),
disable_positional_args=disable_positional_args,
output_shape=output_shape,
reduced_dim=reduced_dim,
name=name or "reduce_all"), tf.bool)
def reduce_any(x,
disable_positional_args=None,
output_shape=None,
reduced_dim=None,
name=None):
output_shape = convert_to_shape(output_shape)
reduced_dim = convert_to_dimension(reduced_dim)
return cast(reduce_max(to_float(x),
disable_positional_args=disable_positional_args,
output_shape=output_shape,
reduced_dim=reduced_dim,
name=name or "reduce_any"), tf.bool)
class TopKOperation(Operation):
"""Compute top k indices and values - see comment on top_k() below."""
def __init__(self, x, reduced_dim, k_dim, name=None):
super(TopKOperation, self).__init__([x], name=name or "top_k")
self._value_dtype = x.dtype
if reduced_dim not in x.shape.dims:
raise ValueError("reduced dim %s must be in x.shape %s"
% (reduced_dim, x.shape))
if k_dim.size > reduced_dim.size:
raise ValueError("k_dim.size must be <= reduced_dim.size: %s vs %s"
% (k_dim, reduced_dim))
output_shape = x.shape - reduced_dim + k_dim
self._outputs = [Tensor(self, output_shape, x.dtype),
Tensor(self, output_shape, tf.int32),]
self._reduced_dim = reduced_dim
self._k_dim = k_dim
self._splittable_dims, self._unsplittable_dims = (
self._initialize_splittable_and_unsplittable_dims(
"splittable", [self._k_dim.name]))
def gradient(self, grad_ys):
dvalue = grad_ys[0]
indices = self.outputs[1]
mapping = one_hot(indices, self._reduced_dim, dtype=self._value_dtype)
return [einsum([dvalue, mapping], output_shape=self.inputs[0].shape)]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
x = self.inputs[0]
ndims = x.shape.ndims
reduced_axis = x.shape.dims.index(self._reduced_dim)
reduced_mesh_axis = mesh_impl.tensor_dimension_to_mesh_axis(
self._reduced_dim)
if reduced_mesh_axis is not None:
reduced_dim_per_shard = (
self._reduced_dim.size // mesh_impl.shape[reduced_mesh_axis].size)
else:
reduced_dim_per_shard = self._reduced_dim.size
def _slicewise_top_k(t):
t = tf.transpose(
t, [i for i in range(ndims) if i != reduced_axis] + [reduced_axis])
if self._k_dim.size == 1:
# top_k seems to be slow on TPU - use reduce_max and argmax instead
return (tf.expand_dims(tf.math.reduce_max(t, -1), -1),
tf.expand_dims(tf.cast(tf.math.argmax(t, -1), tf.int32), -1))
else:
return tf.math.top_k(t, min(self._k_dim.size, reduced_dim_per_shard))
values, indices = mesh_impl.slicewise(_slicewise_top_k, lowering.tensors[x])
if reduced_mesh_axis is not None:
# indices are now indices within a shard. Make them global indices.
indices = mesh_impl.slicewise(
lambda idxs, pcoord: idxs + pcoord * reduced_dim_per_shard,
indices, mesh_impl.laid_out_pcoord(reduced_mesh_axis))
# concatenate values and indices across processors,
# duplicating the result across mesh axis `reduced_mesh_axis`.
values = mesh_impl.allconcat(values, reduced_mesh_axis, ndims - 1)
indices = mesh_impl.allconcat(indices, reduced_mesh_axis, ndims - 1)
# final reduction to find top k among all shards
def _global_top_k(vals, global_indices):
vals, local_indices = tf.math.top_k(vals, self._k_dim.size)
return vals, tf.gather(global_indices,
local_indices,
batch_dims=ndims-1)
values, indices = mesh_impl.slicewise(_global_top_k, values, indices)
lowering.set_tensor_lowering(self.outputs[0], values)
lowering.set_tensor_lowering(self.outputs[1], indices)
def top_k(x, reduced_dim, k_dim, name=None):
"""Like tf.math.top_k.
This operation returns two tensors with the same shape. The output shape
is identical to the shape of x, except that reduced_dim is removed and
k_dim is inserted at the end.
Args:
x: a Tensor
reduced_dim: a Dimension in x.shape.dims.
k_dim: a Dimension. The size determines k.
name: optional string.
Returns:
values: a Tensor with same type as x.
indices: a Tensor with dtype tf.int32
"""
if k_dim.size > 1 and k_dim.size < 5:
return _iterative_top_k(x, reduced_dim, k_dim, name=name)
else:
op = TopKOperation(x, reduced_dim, k_dim, name=name)
return op.outputs[0], op.outputs[1]
def _iterative_top_k(x, reduced_dim, k_dim, name=None):
"""Like tf.top_k.
Iterative implementation of top_k.
This is faster for small k on TPU for now, since the implementation of
tf.nn.top_k() seems to use sorting.
Args:
x: a Tensor
reduced_dim: a Dimension in x.shape.dims.
k_dim: a Dimension. The size determines k.
name: optional string.
Returns:
values: a Tensor with same type as x.
indices: a Tensor with dtype tf.int32
"""
reduced_dim = convert_to_dimension(reduced_dim)
k_dim = convert_to_dimension(k_dim)
indices = []
values = []
k = k_dim.size
with tf.name_scope(name, default_name="top_k"):
for i in xrange(k):
max_val, max_index = top_1(x, reduced_dim)
indices.append(max_index)
values.append(max_val)
if i + 1 < k:
x += one_hot(max_index, reduced_dim, on_value=-1e9, dtype=x.dtype)
return stack(values, k_dim.name, -1), stack(indices, k_dim.name, -1)
def top_1(x, reduced_dim, name=None):
"""Max and Argmax.
Args:
x: a Tensor
reduced_dim: a Dimension in x.shape.dims
name: an optional string
Returns:
values: Tensor equal to mtf.reduce_max(x, reduced_dim=reduced_dim)
indices: a Tensor with dtype tf.int32
"""
one_dim = Dimension("_one", 1)
values, indices = top_k(x, reduced_dim, one_dim, name=name)
values = reshape(values, values.shape - one_dim)
indices = reshape(indices, indices.shape - one_dim)
return values, indices
def argmax(x, reduced_dim, name=None):
"""Compute argmax.
Args:
x: a Tensor
reduced_dim: a Dimension in x.shape.dims
name: an optional string
Returns:
A Tensor with shape x.shape - reduced_dim and dtype tf.int32.
"""
reduced_dim = convert_to_dimension(reduced_dim)
return top_1(x, reduced_dim, name=name)[1]
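# Illustrative sketch (not part of the original library): taking the best
# two entries along a vocabulary dimension, and the single best index. k is
# given by the size of k_dim; the dimension name "topk" is arbitrary.
def _example_top_k(logits, vocab_dim):
  k_dim = Dimension("topk", 2)
  values, indices = top_k(logits, reduced_dim=vocab_dim, k_dim=k_dim)
  best_index = argmax(logits, vocab_dim)
  return values, indices, best_index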
def sample_with_temperature(logits, dim, temperature=1.0, name=None):
"""Sample from a probability distribution.
If temperature=0.0, then we compute argmax(logits, dim)
If temperature=1.0, then we sample with probability proportional to
  exp(logits). So you can pass in the log(probability) as the logits.
`dim` is one the dimension of `logits` which represents the set of choices.
The other dimensions of `logits` are treated as batch-dimensions.
Args:
logits: a Tensor.
dim: a Dimension in logits.shape.dims
temperature: a float 0.0=argmax 1.0=random
name: an optional string
Returns:
a Tensor with type tf.int32 and shape (logits.shape - dim)
"""
dim = convert_to_dimension(dim)
with tf.name_scope(name, default_name="sample_with_temperature"):
if temperature != 0.0:
# gumbel trick.
# Note: we don't want to generate 0 or 1 because:
# * -log(-log(0)) is -infinity
# * -log(-log(1)) is +infinity.
# The numerics may be weird in bfloat16 - use float32.
logits = cast(logits, tf.float32)
tiny_val = 1e-9
g = -log(-log(
random_uniform(
logits.mesh,
logits.shape,
minval=tiny_val,
maxval=1.,
dtype=logits.dtype)))
logits += g * temperature
return argmax(logits, dim, name)
def add(x1, x2, output_shape=None, name=None):
"""Binary addition with broadcsting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
if not isinstance(x2, Tensor):
return ScalarAddOperation(x1, x2).outputs[0]
with tf.name_scope(name, default_name="add"):
x1, x2 = binary_arguments_to_tensors(x1, x2)
return AddOperation(
x1, x2, output_shape=_infer_binary_broadcast_shape(
x1.shape, x2.shape, output_shape)).outputs[0]
def add_n(xs):
if not xs:
return 0
return functools.reduce(add, xs)
def sub(x1, x2, output_shape=None, name=None):
"""Binary subtraction with broadcsting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
if not isinstance(x2, Tensor):
return ScalarAddOperation(x1, -x2).outputs[0]
with tf.name_scope(name, default_name="sub"):
x1, x2 = binary_arguments_to_tensors(x1, x2)
return add(x1, negative(x2), output_shape=output_shape)
def multiply(x1, x2, output_shape=None, name=None):
"""Binary multiplication with broadcasting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor
"""
if not isinstance(x2, Tensor):
return ScalarMultiplyOperation(x1, x2).outputs[0]
with tf.name_scope(name, default_name="mul"):
x1, x2 = binary_arguments_to_tensors(x1, x2)
return einsum(
[x1, x2],
output_shape=_infer_binary_broadcast_shape(
x1.shape, x2.shape, output_shape))
def divide(x1, x2, output_shape=None, name=None):
"""Binary division with broadcasting.
Args:
x1: a Tensor
x2: a Tensor
output_shape: an optional Shape
name: an optional string
Returns:
a Tensor
"""
output_shape = convert_to_shape(output_shape)
if not isinstance(x2, Tensor):
return ScalarMultiplyOperation(x1, 1.0 / x2).outputs[0]
with tf.name_scope(name, default_name="divide"):
x1, x2 = binary_arguments_to_tensors(x1, x2)
return multiply(x1, reciprocal(x2), output_shape=output_shape)
def mtf_slice(x, begin, size, slice_dim_name, name=None):
"""Slice operation.
Call externally as mtf.slice()
Args:
    x: a Tensor
begin: integer, where to begin slicing from along the axis
size: integer, size to slice from axis.
slice_dim_name: string, dimension name of slicing axis.
name: an optional string
Returns:
a Tensor
"""
return SliceOperation(
x, begin, size, slice_dim_name, name=name).outputs[0]
def pad(x, paddings, dim_name, name=None):
"""Pad operation.
Args:
x: a Tensor
paddings: list of integers of size 2, padding size before and after for dim.
dim_name: string, name for the padding dim
name: an optional string
Returns:
a Tensor
"""
return PadOperation(
x, paddings, dim_name, name=name).outputs[0]
def one_hot(indices, output_dim, on_value=1.0,
off_value=0.0, dtype=tf.float32, name=None):
"""One hot operation.
TODO(noam): Is there a good reason we need a special mtf.Operation here?
We could just use some code like this:
cast(equal(indices, mtf_range(indices.mesh, output_dim, dtype=indices.dtype)),
dtype)
Args:
indices: a Tensor
output_dim: a Dimension
on_value: Value taken when indices are on at a location, default 1
off_value: Value taken when indices are off at a location, default 0
dtype: a tf.DType
name: an optional string
Returns:
a Tensor with shape extended by output_dim for the last axis.
"""
return OneHotOperation(
indices, output_dim, on_value, off_value, dtype, name=name).outputs[0]
def gather(weights, indices, dim, output_shape=None):
"""Shorthand for einsum([one_hot(indices, dim)], weights, reduced_dims=[dim]).
Args:
weights: a Tensor
indices: a Tensor with integer type
dim: a Dimension
output_shape: an optional mtf.Shape
Returns:
a Tensor
"""
dim = convert_to_dimension(dim)
output_shape = convert_to_shape(output_shape)
if not isinstance(indices, Tensor):
# TODO(noam): when `indices` is an integer, gather can be implemented
# more directly with mtf_slice() and reshape()
indices = constant(weights.mesh, indices, dtype=tf.int32)
if weights.dtype == tf.bool:
return cast(gather(to_float(weights), indices, dim, output_shape), tf.bool)
return einsum([one_hot(indices, dim, dtype=weights.dtype), weights],
reduced_dims=[dim], output_shape=output_shape)
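# Illustrative sketch (not part of the original library): an embedding
# lookup phrased as gather. embeddings is assumed to have shape
# [vocab_dim, model_dim] and token_ids to be an integer Tensor of ids in
# [0, vocab_dim.size); the result has token_ids' shape plus model_dim.
def _example_embedding_lookup(embeddings, token_ids, vocab_dim):
  return gather(embeddings, token_ids, vocab_dim)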
def gradients(ys, xs, grad_ys=None, operations=None):
"""Compute gradients in dtf.
Args:
ys: a list of Tensors
xs: a list of Tensors
grad_ys: an optional list of Tensors
    operations: a list of Operations through which to back-propagate
      gradients; defaults to ys[0].graph.operations
Returns:
grad_xs: a list of Tensors
"""
if operations is None:
operations = ys[0].graph.operations
if not grad_ys:
grad_ys = [Constant(y.mesh, 1.0, y.shape, y.dtype).outputs[0] for y in ys]
# figure out what Tensors are downstream of xs
downstream = set(xs)
for op in operations:
if op.has_gradient:
if set(op.inputs) & downstream:
downstream |= set(op.outputs)
tensor_to_gradient = {y: g for y, g in zip(ys, grad_ys) if g is not None}
with tf.variable_scope(ys[0].graph.captured_variable_scope):
for op in operations[::-1]:
grad_outputs = [tensor_to_gradient.get(out) for out in op.outputs]
if (op.has_gradient and any(grad_outputs)
and (set(op.inputs) & downstream)):
with tf.variable_scope(op.name + "/gradients"):
input_grads = op.gradient(grad_outputs)
for inp, grad in zip(op.inputs, input_grads):
if inp in downstream and grad is not None:
if inp in tensor_to_gradient:
tensor_to_gradient[inp] += grad
else:
tensor_to_gradient[inp] = grad
return [tensor_to_gradient.get(x, None) for x in xs]
def _infer_binary_broadcast_shape(shape1, shape2, given_output_shape=None):
"""Infer shape of the output of a binary op with broadcasting.
If the output shape is not given with given_output_shape, then we check
to see if one of the shapes is a subsequence of the other one, and we
return the one that is the supersequence. Otherwise, we list the dimensions
of shape1, followed by all new dimensions in shape2.
Args:
shape1: a Shape
shape2: a Shape
given_output_shape: an optional Shape
Returns:
a Shape
"""
shape1 = convert_to_shape(shape1)
shape2 = convert_to_shape(shape2)
given_output_shape = convert_to_shape(given_output_shape)
if given_output_shape is not None:
return given_output_shape
if is_subsequence(shape1.dims, shape2.dims):
return shape2
if is_subsequence(shape2.dims, shape1.dims):
return shape1
return Shape(
shape1.dims + [d for d in shape2.dims if d not in shape1.dims])
def _expand_dims(x, input_shape, output_shape):
"""Expand dimensions and transpose if necessary.
Args:
x: a tf.Tensor
input_shape: a Shape
output_shape: a Shape whose dimensions are a superset of
those in input_shape
Returns:
a tf.Tensor
"""
verify_no_new_dims([output_shape], input_shape)
if input_shape == output_shape or input_shape.ndims == 0:
return x
perm = [input_shape.dims.index(d) for d in output_shape.dims
if d in input_shape.dims]
x = tf.transpose(x, perm)
for i, d in enumerate(output_shape.dims):
if d not in input_shape.dims:
x = tf.expand_dims(x, i)
return x
def _einsum_equation(input_shapes, output_shape):
"""Turn shapes into an einsum equation.
e.g. "ij,jk->ik"
Args:
input_shapes: a list of Shapes
output_shape: a Shape
Returns:
a string
"""
ret = []
next_letter = ord("a")
dim_to_letter = {}
for shape_num, shape in enumerate(input_shapes + [output_shape]):
if shape_num == len(input_shapes):
ret.append("->")
elif shape_num > 0:
ret.append(",")
for d in shape.dims:
if d not in dim_to_letter:
dim_to_letter[d] = chr(next_letter)
next_letter += 1
ret.append(dim_to_letter[d])
return "".join(ret)
def is_subsequence(short_seq, long_seq):
"""Is short_seq a subsequence of long_seq."""
if not short_seq:
return True
pos = 0
for x in long_seq:
if pos == len(short_seq):
return True
if short_seq[pos] == x:
pos += 1
if pos == len(short_seq):
return True
return False
def verify_no_new_dims(input_shapes, output_shape):
"""Verifies that all dimensions in the output are in at least one input.
Args:
input_shapes: a list of Shapes
output_shape: a Shape
Raises:
ValueError: if there are new dimensions in the output.
"""
all_input_dims = set(sum([s.dims for s in input_shapes], []))
all_output_dims = set(output_shape.dims)
if not all_output_dims.issubset(all_input_dims):
raise ValueError(
"No new dimensions allowed in output"
" input_shapes = %s output_shape= %s"
% ([s.dims for s in input_shapes], output_shape.dims))
def pnum_to_processor_coordinates(mesh_shape, pnum):
"""Coordinates of a processor in the mesh.
Args:
mesh_shape: a Shape or a list of integers
    pnum: an integer less than the product of mesh_shape
Returns:
a list of integers with length len(mesh_shape)
"""
if isinstance(mesh_shape, Shape):
mesh_shape = mesh_shape.to_integer_list
if not isinstance(mesh_shape, list):
raise ValueError("mesh_shape must be a Shape or a list of integers")
ret = []
for dimsize in mesh_shape[::-1]:
ret.append(pnum % dimsize)
pnum //= dimsize
return ret[::-1]
def processor_coordinates_to_pnum(mesh_shape, coord):
"""Inverse of pnum_to_processor_coordinates.
Args:
mesh_shape: a Shape or a list of integers
coord: a list of integers with length len(mesh_shape)
Returns:
    an integer less than the product of mesh_shape
"""
if isinstance(mesh_shape, Shape):
mesh_shape = mesh_shape.to_integer_list
if not isinstance(mesh_shape, list):
raise ValueError("mesh_shape must be a Shape or a list of integers")
ret = 0
multiplier = 1
for c, d in zip(coord[::-1], mesh_shape[::-1]):
ret += multiplier * c
multiplier *= d
return ret
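# Illustrative sketch (not part of the original library): round-tripping a
# processor number through mesh coordinates for a 2x3 mesh. Coordinates are
# row-major, so pnum 4 maps to [1, 1] and back to 4.
def _example_processor_coordinates():
  coord = pnum_to_processor_coordinates([2, 3], 4)     # [1, 1]
  pnum = processor_coordinates_to_pnum([2, 3], coord)  # 4
  return coord, pnum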
def pnum_to_group(mesh_shape, group_dims, pnum):
"""Group number for grouped allreduce.
Args:
mesh_shape: a Shape
group_dims: a list of integers (the dimensions reduced over)
pnum: an integer
Returns:
an integer
"""
coord = pnum_to_processor_coordinates(mesh_shape, pnum)
remaining_shape = Shape(
[d for i, d in enumerate(mesh_shape) if i not in group_dims])
remaining_coord = [d for i, d in enumerate(coord) if i not in group_dims]
return processor_coordinates_to_pnum(remaining_shape, remaining_coord)
def processor_groups(mesh_shape, group_dims):
"""Groups of processors which differ only in the given dimensions.
Args:
mesh_shape: a Shape
group_dims: a list of integers
Returns:
a list of lists of integers (processor numbers)
"""
group_numbers = [
pnum_to_group(mesh_shape, group_dims, pnum)
for pnum in xrange(mesh_shape.size)]
ret = []
for pnum, g in enumerate(group_numbers):
while len(ret) <= g:
ret.append([])
ret[g].append(pnum)
return ret
def list_product(l):
return functools.reduce(operator.mul, l, 1)
def reduce_logsumexp(x, reduced_dim, extra_logit=None, name=None):
"""Numerically stable version of log(reduce_sum(exp(x))).
Unlike other reductions, the output has the same shape as the input.
Note: with a minor change, we could allow multiple reduced dimensions.
Args:
x: a Tensor
reduced_dim: a dimension in x
extra_logit: an optional Tensor broadcastable to (x.shape - reduced_dim)
name: an optional string
Returns:
a Tensor with the same shape and dtype as x.
"""
reduced_dim = convert_to_dimension(reduced_dim)
with tf.variable_scope(name, default_name="reduce_logsumexp"):
reduced_shape = x.shape - reduced_dim
max_logit = reduce_max(stop_gradient(x), output_shape=reduced_shape)
if extra_logit is not None:
if isinstance(extra_logit, Tensor):
extra_logit = stop_gradient(extra_logit)
max_logit = maximum(max_logit, extra_logit)
x -= max_logit
exp_x = exp(x)
sum_exp_x = reduce_sum(exp_x, output_shape=reduced_shape)
if extra_logit is not None:
sum_exp_x += exp(extra_logit - max_logit)
return log(sum_exp_x) + max_logit
def log_softmax(x, reduced_dim, extra_logit=None, name=None):
"""log(softmax(x)).
Args:
x: a Tensor whose shape contains vocab_dim
reduced_dim: a Dimension
extra_logit: an optional Tensor broadcastable to (x.shape - reduced_dim)
name: an optional string
Returns:
a Tensor with the same shape as x
"""
return x - reduce_logsumexp(
x, reduced_dim, extra_logit=extra_logit, name=name)
def softmax(x, reduced_dim, extra_logit=None, name=None):
with tf.variable_scope(name, default_name="softmax"):
return exp(log_softmax(x, reduced_dim, extra_logit=extra_logit))
class RangeOperation(Operation):
"""tf.range."""
def __init__(self, mesh, dim, dtype, name=None):
super(RangeOperation, self).__init__([], mesh, name=name or "range")
dim = convert_to_dimension(dim)
self._mesh = mesh
self._dim = dim
self._dtype = dtype
self._outputs = [Tensor(self, Shape([dim]), dtype)]
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
with tf.variable_scope(self.name, default_name="range"):
if self._dtype == tf.bfloat16:
# tf.range(dtype=bfloat16) gives the wrong shape.
# TODO(noam): report the bug.
tf_range = tf.cast(tf.range(self._dim.size), tf.bfloat16)
else:
tf_range = tf.range(self._dim.size, dtype=self._dtype)
lowering.set_tensor_lowering(
self.outputs[0],
mesh_impl.import_tf_tensor(self.outputs[0], tf_range))
def mtf_range(mesh, dim, dtype, name=None):
"""Create a 1d mesh tensor with a range from [0, dim.size).
Call externally as mtf.range()
Args:
mesh: a Mesh
dim: a Dimension
dtype: a tf.DType
name: an optional string
Returns:
a Tensor
"""
return RangeOperation(mesh, dim, dtype, name).outputs[0]
def pretty_print_counters(counters):
"""print counters hierarchically.
Each counter is a pair of a string and a number.
The string can have slashes, meaning that the number also counts towards
each prefix. e.g. "parameters/trainable" counts towards both "parameters"
and "parameters/trainable".
Args:
counters: a list of (string, number) pairs
Returns:
a string
"""
totals = collections.defaultdict(int)
for (name, val) in counters:
prefixes = [name[:i] for i in xrange(len(name)) if name[i] == "/"] + [name]
for p in prefixes:
totals[p] += val
parts = []
for name, val in sorted(six.iteritems(totals)):
parts.append(" " * name.count("/") + "%s: %.3g" % (name, val))
return "\n".join(parts)
def _parse_string_to_list_of_pairs(s, seconds_to_int=False):
r"""Parses a string into a list of pairs.
In the input string, each pair is separated by a colon, and the delimiters
between pairs are any of " ,.;".
e.g. "rows:32,cols:32"
Args:
s: str to parse.
seconds_to_int: Boolean. If True, then the second elements are returned
as integers; otherwise they are strings.
Returns:
List of tuple pairs.
Raises:
ValueError: Badly formatted string.
"""
ret = []
for p in [s.split(":") for s in re.sub("[,.;]", " ", s).split()]:
if len(p) != 2:
raise ValueError("bad input to _parse_string_to_list_of_pairs %s" % s)
if seconds_to_int:
ret.append((p[0], int(p[1])))
else:
ret.append(tuple(p))
return ret
def parallel(devices, fn, *args, **kwargs):
"""Call a function once on each device.
Args:
devices: a list of n devices
fn: a function
*args: arguments, each of which is a list of length n
**kwargs: keyword-args, each of which is a list of length n
Returns:
a list of length n
Raises:
ValueError: if the arguments are not all lists of length n
"""
if not isinstance(devices, list):
raise ValueError("devices must be a list")
for x in list(args) + list(six.itervalues(kwargs)):
if not isinstance(x, list) or len(x) != len(devices):
raise ValueError(
"Argument not a list with same length as devices "
"arg=%s devices=%s" % (x, devices))
ret = []
for i, device in enumerate(devices):
with tf.device(device):
with tf.variable_scope("parallel_%d" % i):
my_args = [x[i] for x in args]
my_kwargs = {k: v[i] for k, v in six.iteritems(kwargs)}
ret.append(fn(*my_args, **my_kwargs))
return ret
def transpose_list_of_lists(lol):
"""Transpose a list of equally-sized python lists.
Args:
lol: a list of lists
Returns:
a list of lists
Raises:
ValueError: if list is empty
"""
if not lol:
raise ValueError("cannot transpose the empty list")
return [list(x) for x in zip(*lol)]
def binary_reduction_fn(reduction_fn_string):
if reduction_fn_string == "SUM":
return tf.add
elif reduction_fn_string == "MAX":
return tf.maximum
elif reduction_fn_string == "MIN":
return tf.minimum
else:
raise ValueError("Unknown reduction_fn_string %s" % reduction_fn_string)
def reduction_fn(reduction_fn_string):
if reduction_fn_string == "SUM":
return tf.reduce_sum
elif reduction_fn_string == "MAX":
return tf.reduce_max
elif reduction_fn_string == "MIN":
return tf.reduce_min
else:
raise ValueError("Unknown reduction_fn_string %s" % reduction_fn_string)
def pool_fn(pool_fn_string):
"""Converts a string function name to actual function."""
def avg_pool2d_fn(x, ksize, strides, padding):
return _tf_restore_batch_dims(
tf.nn.avg_pool2d(_tf_flatten_batch_dims(x, 3), ksize, strides, padding),
3, x)
def avg_pool3d_fn(x, ksize, strides, padding):
return _tf_restore_batch_dims(
tf.nn.avg_pool3d(_tf_flatten_batch_dims(x, 4), ksize, strides, padding),
4, x)
def max_pool2d_fn(x, ksize, strides, padding):
return _tf_restore_batch_dims(
tf.nn.max_pool2d(_tf_flatten_batch_dims(x, 3), ksize, strides, padding),
3, x)
def max_pool3d_fn(x, ksize, strides, padding):
return _tf_restore_batch_dims(
tf.nn.max_pool3d(_tf_flatten_batch_dims(x, 4), ksize, strides, padding),
4, x)
if pool_fn_string == "AVG_2D":
return avg_pool2d_fn
elif pool_fn_string == "AVG_3D":
return avg_pool3d_fn
elif pool_fn_string == "MAX_2D":
return max_pool2d_fn
elif pool_fn_string == "MAX_3D":
return max_pool3d_fn
else:
raise ValueError("Unknown pool_fn_string %s" % pool_fn_string)
class MtfCheckpointSaverListener(tf.estimator.CheckpointSaverListener):
"""Copy slices to masters before saving."""
def __init__(self, lowering):
self._op = lowering.copy_slices_to_masters()
def begin(self):
# You can add ops to the graph here.
tf.logging.info("Starting the session.")
def before_save(self, session, global_step_value):
# assigns
tf.logging.info("Before Save.")
session.run(self._op)
tf.logging.info("About to write a checkpoint")
def after_save(self, session, global_step_value):
tf.logging.info("Done writing checkpoint.")
def end(self, session, global_step_value):
tf.logging.info("Done with the session.")
class MtfRestoreHook(tf.estimator.SessionRunHook):
"""Copy masters to slices after restoring."""
def __init__(self, lowering):
self._lowering = lowering
def begin(self):
# This namescope is useful in adding the hook operation when the graph is
# constructed. It's also necessary to call the op when the exported model is
# loaded in another session.
with tf.name_scope("mtf_restore_hook"):
self._op = self._lowering.copy_masters_to_slices()
def after_create_session(self, session, coord):
tf.logging.info("Before copy master to slices.")
session.run(self._op)
tf.logging.info("Done with copy master to slices.")
class RandomOperation(Operation):
"""Random operation such as tf.random.uniform."""
def __init__(self, mesh, shape, tf_fn, **kwargs):
super(RandomOperation, self).__init__(
[], mesh=mesh, name=kwargs.get("name", "random"))
self._tf_fn = tf_fn
self._kwargs = kwargs
self._outputs = [Tensor(self, shape, kwargs.get("dtype", tf.float32))]
# Rerun to take the new output into account.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
output_shape = self.outputs[0].shape
lowering.set_tensor_lowering(self.outputs[0], (
mesh_impl.random(output_shape, self._tf_fn, self._kwargs)))
def random_uniform(mesh, shape, **kwargs):
"""Random uniform.
Args:
mesh: a Mesh
shape: a Shape
**kwargs: keyword args for tf.random.uniform, except seed
Returns:
a Tensor
"""
shape = convert_to_shape(shape)
return RandomOperation(mesh, shape, tf.random.uniform, **kwargs).outputs[0]
def random_normal(mesh, shape, **kwargs):
"""Random uniform.
Args:
mesh: a Mesh
shape: a Shape
**kwargs: keyword args for tf.random.normal, except seed
Returns:
a Tensor
"""
shape = convert_to_shape(shape)
return RandomOperation(mesh, shape, tf.random.normal, **kwargs).outputs[0]
def dropout(x, keep_prob=None, rate=None, noise_shape=None, name=None):
"""Randomly set some elements to 0 and scale up the rest.
Dropout rate should be specified in exactly one of two ways:
rate - the fraction to drop
keep_prob - the fraction to keep
If x has floating-point type, then kept values are scaled up by
  a factor of (1.0 / keep_prob). If x has integer type, the kept values
are not scaled up.
Args:
x: a Tensor
keep_prob: a float between 0.0 and 1.0
rate: a float between 0.0 and 1.0
noise_shape: an optional Shape (a subset of x.shape)
name: an optional string
Returns:
a Tensor
"""
if (keep_prob is None) == (rate is None):
raise ValueError("exactly one of keep_prob and rate should be set")
if keep_prob is None:
keep_prob = 1.0 - rate
noise_shape = convert_to_shape(noise_shape)
if noise_shape is None:
noise_shape = x.shape
with tf.variable_scope(name, default_name="dropout"):
if keep_prob == 1.0:
return x
noise = cast(less(random_uniform(
x.mesh, noise_shape,
dtype=(x.dtype if x.dtype.is_floating else tf.float32)),
keep_prob), x.dtype)
if x.dtype.is_floating:
noise /= keep_prob
return x * noise
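# Illustrative usage sketch (not part of the library): the dimension names and
# rate below are hypothetical. Omitting "d_model" from noise_shape drops whole
# positions along "length", so entire feature vectors are kept or dropped
# together; kept values are scaled by 1 / keep_prob, so the expectation of the
# output matches the input.
def _example_dropout_usage(x):
  """x is assumed to have shape [batch, length, d_model]."""
  batch_dim, length_dim, _ = x.shape.dims
  return dropout(x, rate=0.1, noise_shape=Shape([batch_dim, length_dim]))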
def _cumprod(l):
"""Cumulative product of a list.
Args:
l: a list of integers
Returns:
a list with one more element (starting with 1)
"""
ret = [1]
for item in l:
ret.append(ret[-1] * item)
return ret
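# Illustrative example (not part of the library):
#   _cumprod([2, 3, 4]) == [1, 2, 6, 24]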
def log_variable_sizes(var_list,
tag,
verbose=True,
mesh_to_impl=None,
log_file=None):
"""Log the sizes and shapes of variables, and the total size.
Args:
var_list: a list of variables; defaults to trainable_variables
tag: a string; defaults to "Trainable Variables"
verbose: bool, if True, log every weight; otherwise, log total size only.
mesh_to_impl: an optional map from Mesh to MeshImpl
log_file: an optional tf.io.gfile.GFile. If provided, information about
the variables will also be logged to this file.
"""
if not var_list:
return
name_to_var = {v.name: v for v in var_list}
total_size = 0
total_slice_size = 0
for v_name in sorted(list(name_to_var)):
v = name_to_var[v_name]
v_size = v.shape.size
if mesh_to_impl is not None:
slice_size = mesh_to_impl[v.mesh].slice_size(v.shape)
else:
slice_size = 0
total_slice_size += slice_size
if verbose:
_log_info_also_to_file(
"Variable %s size %s slice_size %s %s",
v.name.ljust(60),
str(v_size).ljust(12),
str(slice_size).ljust(12),
str(v.shape).ljust(60),
log_file=log_file)
if isinstance(v, StackedVariable):
for n in v.original_names:
_log_info_also_to_file(" " + n, log_file=log_file)
total_size += v_size
_log_info_also_to_file(
"%s count: %s Total size: %s Total slice_size: %s",
tag.ljust(30),
str(len(var_list)).ljust(6),
str(total_size).ljust(15),
str(total_slice_size).ljust(15),
log_file=log_file)
def _log_info_also_to_file(format_str, *args, **kw_args):
"""Logs at the info level and writes to file if one is provided.
Args:
format_str: a string; will be logged and can contain things such as %s.
*args: arguments to the format_str.
**kw_args: keyword arguments. May contain optional tf.io.gfile.GFile keyed
by "log_file", where the message will also be appended to this file. Other
arguments will be ignored.
"""
tf.logging.info(format_str, *args)
log_file = kw_args.get("log_file", None)
if log_file:
log_file.write(format_str % args)
log_file.write("\n")
class WhileLoopOperation(Operation):
"""While loop, like tf.while_loop."""
def __init__(self, cond_fn, body_fn, inputs,
tf_kwargs=None, has_accumulators=False, name="while_loop"):
"""Create a WhileLoopOperation.
A few differences from tf.while_loop:
- gradients are not yet supported
- inputs must be a list of tensors, as opposed to an arbitrary nested
structure. cond_fn and body_fn take an argument list
- we support optional "accumulators" which are additional outputs
returned by body_fn. These are summed across all iterations and
      returned as additional outputs of the while-loop. To use accumulators,
the has_accumulators argument must be True. For better performance,
we delay allreduce on the accumulators until after the loop, so that it
only needs to happen once. This is useful, for example, if the
accumulators are summing gradients for many mini-batches.
Args:
cond_fn: a function from n mtf Tensors to mtf Scalar
body_fn: a function from n mtf Tensors to sequence of mtf Tensors
inputs: list of n mtf Tensors
tf_kwargs: a dictionary of arguments for tf.while_loop
has_accumulators: a boolean
name: a string
Returns:
a WhileLoopOperation
"""
super(WhileLoopOperation, self).__init__(
inputs, mesh=inputs[0].mesh, name=name)
self._cond_fn = cond_fn
self._body_fn = body_fn
self._tf_kwargs = tf_kwargs or {}
assert not self._tf_kwargs.get("back_prop", False)
ops = self.graph.operations
# remove self from the graph's operations
ops.pop()
before = len(ops)
def make_placeholders(name):
return [Tensor(self, t.shape, t.dtype, name="%s:%d" % (name, i))
for i, t in enumerate(inputs)]
self._cond_inputs = make_placeholders("cond_input")
self._cond_output = self._cond_fn(*self._cond_inputs)
self._cond_ops = ops[before:]
del ops[before:]
self._body_inputs = make_placeholders("body_input")
self._body_outputs = self._body_fn(*self._body_inputs)
if len(self._body_outputs) < len(inputs):
raise ValueError("body_fn produces fewer outputs than inputs")
if len(self._body_outputs) > len(inputs) and not has_accumulators:
raise ValueError("body_fn produces more outputs than inputs")
for (i, (inp, body_out)) in enumerate(
zip(inputs, self._body_outputs[:len(inputs)])):
if inp.shape != body_out.shape:
raise ValueError(
"shape mismatch i=%d inp=%s body_out=%s" % (i, inp, body_out))
# Pull new variables outside the loop.
added_ops = ops[before:]
del ops[before:]
self._body_ops = []
for op in added_ops:
if isinstance(op, Variable):
ops.append(op)
else:
self._body_ops.append(op)
# re-add self to graph's operations
ops.append(self)
self._outputs = [
Tensor(self, t.shape, t.dtype, name="output:%d" % i)
for i, t in enumerate(self._body_outputs)]
# Rerun to take the new output into account.
self._splittable_dims, self._unsplittable_dims = (
self._initialize_all_dimensions_as_splittable())
def lower(self, lowering):
mesh_impl = lowering.mesh_impl(self)
def tf_cond_fn(*tf_inputs):
for tf_inp, mtf_inp in zip(
tf_inputs[:len(self._cond_inputs)], self._cond_inputs):
lowering.tensors[mtf_inp] = mesh_impl.LaidOutTensor(tf_inp)
for op in self._cond_ops:
with tf.name_scope(op.name):
op.lower(lowering)
lowered_output = lowering.tensors[self._cond_output]
ret = lowered_output.to_laid_out_tensor().tensor_list[0]
return ret
# This array keeps track of which lowered body-outputs have type
# LazyAllreduceSum. We treat these specially - instead of
# immediately converting to LaidOutTensor (executing the allreduce)
# we sum across iterations first, then allreduce at the end.
# When one of the body outputs is a LazyAllreduceSum, we put the
# LazyAllreduceSum object into this array for future reference.
is_lazyallreducesum = [None] * len(self._outputs)
def tf_body_fn(*tf_inputs):
"""Body function for tf.while_loop.
Args:
*tf_inputs: a list of tf.Tensor
Returns:
a list of tf.Tensor
"""
for tf_inp, mtf_inp in zip(
tf_inputs[:len(self._inputs)], self._body_inputs):
lowering.tensors[mtf_inp] = mesh_impl.LaidOutTensor(tf_inp)
for op in self._body_ops:
with tf.name_scope(op.name):
op.lower(lowering)
ret = []
for i, mtf_out in enumerate(self._body_outputs):
lowered_out = lowering.tensors[mtf_out]
if isinstance(lowered_out, LazyAllreduceSum):
is_lazyallreducesum[i] = lowered_out
ret.append(lowered_out.laid_out_input.tensor_list)
else:
ret.append(lowered_out.to_laid_out_tensor().tensor_list)
# accumulators
for i in range(len(self._inputs), len(self._outputs)):
ret[i] = [x + y for x, y in zip(ret[i], tf_inputs[i])]
return ret
lowered_inputs = []
for t in self.inputs:
lowered_inputs.append(
lowering.tensors[t].to_laid_out_tensor().tensor_list)
# accumulators get initial value 0
for t in self._body_outputs[len(self.inputs):]:
def slice_fn():
return tf.zeros(mesh_impl.slice_shape(t.shape), dtype=t.dtype)
lowered_inputs.append(mesh_impl.slicewise(slice_fn).tensor_list)
tf_outs = tf.while_loop(tf_cond_fn,
tf_body_fn,
lowered_inputs,
back_prop=False,
**self._tf_kwargs)
for i, (tf_out, mtf_out) in enumerate(zip(tf_outs, self._outputs)):
out = mesh_impl.LaidOutTensor(tf_out)
lazy = is_lazyallreducesum[i]
if lazy:
out = LazyAllreduceSum(
mesh_impl, out, lazy.mesh_axes, lazy.add_counter_fn)
lowering.set_tensor_lowering(mtf_out, out)
def while_loop(cond_fn, body_fn, inputs, num_loop_vars=None,
has_accumulators=False, **kwargs):
"""While Loop.
See comments above for WhileLoopOperation
num_loop_vars is a hack for the multi-gpu setup. In this case, loops
are generally slow, as all loop variables are placed on device. By setting
num_loop_vars=k, then all of the loop variables except for the first k
are handled as mtf Variables instead of loop variables, using explicit
updates and control dependencies. In this case, we only return the
  first num_loop_vars outputs. Do not use this option on TPU, since it
  is unnecessary there and also produces incorrect results, because XLA does
  not respect control dependencies.
Args:
cond_fn: a function from n Tensors to scalar boolean Tensor
body_fn: a function from n Tensors to list of n Tensors
inputs: a list of n Tensors
num_loop_vars: an optional integer.
has_accumulators: a boolean
**kwargs: additional kwargs passed to tf.while_loop
Returns:
a list of n Tensors.
"""
if num_loop_vars is None:
return WhileLoopOperation(cond_fn, body_fn, inputs, tf_kwargs=kwargs,
has_accumulators=has_accumulators).outputs
# Turn all loop vars except for the first ones into non-loop vars.
# see comments in docstring.
assert num_loop_vars > 0
extra_inputs = inputs[num_loop_vars:]
my_vars = []
for i, x in enumerate(extra_inputs):
my_vars.append(get_variable(
x.mesh, "loop_var_%d" % i,
x.shape, initializer=tf.zeros_initializer(),
dtype=x.dtype,
collections=[tf.GraphKeys.LOCAL_VARIABLES]))
my_vars = tuple(my_vars)
first_input = depend(
inputs[0], [assign(var, x) for var, x in zip(my_vars, extra_inputs)])
inputs = [first_input] + inputs[1:num_loop_vars]
def my_cond_fn(*inputs):
return cond_fn(*(inputs + my_vars))
def my_body_fn(*inputs):
outputs = tuple(body_fn(*(inputs + my_vars)))
extra_outputs = outputs[num_loop_vars:]
first_output = depend(
outputs[0], [assign(var, x) for var, x in zip(my_vars, extra_outputs)])
outputs = (first_output,) + outputs[1:num_loop_vars]
return outputs
return WhileLoopOperation(
my_cond_fn, my_body_fn, inputs, tf_kwargs=kwargs,
has_accumulators=has_accumulators).outputs
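# Illustrative sketch (not part of the library): a loop that doubles x ten
# times while also summing x across iterations via the optional accumulator
# (the extra body output with no corresponding input). Names are hypothetical.
def _example_while_loop_with_accumulator(mesh, x):
  def cond_fn(i, unused_x):
    return less(i, 10)
  def body_fn(i, x):
    new_x = x * 2
    # The third output has no matching input, so it is treated as an
    # accumulator and summed over all iterations (allreduce is deferred until
    # after the loop).
    return [i + 1, new_x, new_x]
  i0 = constant(mesh, 0, dtype=tf.int32)
  _, final_x, accumulated = while_loop(
      cond_fn, body_fn, [i0, x], has_accumulators=True)
  return final_x, accumulated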
class CustomGradientOperation(Operation):
"""Operation to implement custom gradients.
See comments on custom_gradient() below.
"""
def __init__(self,
explicit_inputs,
all_inputs,
fn_outputs,
grad_fn,
forward_operations,
name=None):
super(CustomGradientOperation, self).__init__(
all_inputs + fn_outputs, name=name or "custom_gradient")
self._explicit_inputs = explicit_inputs
self._all_inputs = all_inputs
self._grad_fn = grad_fn
self._fn_outputs = fn_outputs
self._outputs = [Tensor(self, x.shape, x.dtype, index=i)
for i, x in enumerate(fn_outputs)]
self._forward_operations = forward_operations
def lower(self, lowering):
for fn_output, output in zip(
self._fn_outputs, self._outputs):
lowering.set_tensor_lowering(output,
lowering.tensors[fn_output])
def gradient(self, grad_ys):
graph = self._inputs[0].graph
old_num_vars = len(graph.all_variables)
grads = self._grad_fn(
explicit_inputs=self._explicit_inputs,
all_inputs=self._all_inputs,
forward_operations=self._forward_operations,
outputs=self._fn_outputs,
output_grads=grad_ys)
new_num_vars = len(graph.all_variables)
if new_num_vars != old_num_vars:
raise ValueError(
"new variables created by custom gradient."
"Maybe a problem with scope. %s" % (
graph.all_variables[old_num_vars:],))
for g, t in zip(grads, self._all_inputs):
if g is None:
tf.logging.warning("No gradient on input %s" % t)
return list(grads) + [None] * len(self._fn_outputs)
def custom_gradient(fn, grad_fn, explicit_inputs):
"""Execute a function and call a custom gradient fn on the backward pass.
`fn` takes positional Tensor arguments and returns a Tensor or a tuple of
Tensors.
`explicit_inputs` is a list of tensors to be passed as positional arguments
to the function `fn`.
`grad_fn` has the following signature:
Args:
explicit_inputs: the list of Tensors passed to this function and to `fn`
all_inputs: a list of tensors beginning with explicit_inputs, but also
containing external Tensors used by fn.
forward_operations: a list of Operation. (the operations created on the
        forward pass)
outputs: the outputs of `fn` from the forward pass
output_grads: the gradient Tensors corresponding to those outputs.
Returns
a list of Tensor/None with the same length as `all_inputs`
Args:
fn: a function taking positional Tensor arguments
grad_fn: a function (see above)
explicit_inputs: list of Tensors
Returns:
a list of outputs
"""
graph = explicit_inputs[0].graph
outputs, forward_operations = graph.capture_operations(
lambda: fn(*explicit_inputs))
returns_tuple = isinstance(outputs, tuple)
new_outputs = set()
new_inputs = set()
for op in forward_operations:
new_inputs.update(set(op.inputs))
if not isinstance(op, Variable):
new_outputs.update(set(op.outputs))
external_inputs = list(new_inputs - new_outputs - set(explicit_inputs))
external_inputs = [t for t in external_inputs if t.dtype.is_floating]
all_inputs = explicit_inputs + external_inputs
if not returns_tuple:
outputs = outputs,
ret = CustomGradientOperation(explicit_inputs,
all_inputs,
list(outputs),
grad_fn,
forward_operations).outputs
# Make sure no one uses the internals of this function, since the gradients
# will probably not work correctly.
for t in new_outputs - set(outputs):
t.usable = False
return ret if returns_tuple else ret[0]
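# Illustrative sketch (not part of the library): a straight-through style
# estimator in which the forward pass truncates to integers but the backward
# pass behaves as if the op were the identity. Everything here is hypothetical
# and only meant to show the grad_fn signature.
def _example_straight_through(x):
  def fn(t):
    return cast(cast(t, tf.int32), t.dtype)
  def grad_fn(explicit_inputs, all_inputs, forward_operations, outputs,
              output_grads):
    del explicit_inputs, forward_operations, outputs
    # One entry per element of all_inputs: pass the output gradient straight
    # through to floating-point inputs, None otherwise.
    return [output_grads[0] if t.dtype.is_floating else None
            for t in all_inputs]
  return custom_gradient(fn, grad_fn, [x])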
def _recompute_grad_grad(explicit_inputs,
all_inputs,
forward_operations,
outputs,
output_grads,
control_dependencies):
"""Gradient function used with recompute_grad."""
graph = forward_operations[0].graph
input_mapping = {t: t for t in all_inputs}
if control_dependencies:
# we need to outsmart XLA here to force a control dependency
zero_with_control_dependency = reduce_sum(output_grads[0] * 1e-30)
for t in explicit_inputs:
if t.dtype.is_floating:
input_mapping[t] += cast(zero_with_control_dependency, t.dtype)
mapped_inputs = [input_mapping[t] for t in all_inputs]
recomputed_operations, mapping = graph.clone_operations(
forward_operations, input_mapping)
recomputed_outputs = [mapping[t] for t in outputs]
input_grads = gradients(
ys=recomputed_outputs,
xs=mapped_inputs,
grad_ys=output_grads,
operations=recomputed_operations)
for x, g in zip(all_inputs, input_grads):
if x.dtype.is_floating and g is None:
raise ValueError("_recompute_grad_grad: no gradient for %s" % x)
return input_grads
def recompute_grad(fn, explicit_inputs, control_dependencies=True):
"""Execute a function and recompute it on the backwards pass.
Args:
fn: a function taking positional arguments and returning a Tensor or tuple
of Tensors.
explicit_inputs: inputs to the function
control_dependencies: a boolean - whether to force the recomputation to
happen after the output gradients.
Returns:
a Tensor or tuple of Tensors
"""
return custom_gradient(
fn,
functools.partial(_recompute_grad_grad,
control_dependencies=control_dependencies),
explicit_inputs)
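# Illustrative usage sketch (not part of the library): `block` stands in for an
# expensive sub-network whose activations we would rather recompute on the
# backward pass than keep in memory.
def _example_recompute_grad(x):
  def block(t):
    return t * t + t
  return recompute_grad(block, [x])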
def where(condition, if_true, if_false, output_shape=None):
dtype = if_true.dtype
return (
multiply(if_true, cast(condition, dtype), output_shape=output_shape) +
multiply(if_false,
cast(logical_not(condition), dtype), output_shape=output_shape))
def _shape_union(shapes):
"""A shape containing the union of all dimensions in the input shapes.
Args:
shapes: a list of Shapes
Returns:
a Shape
"""
return Shape(sorted(list(set(sum([s.dims for s in shapes], [])))))
def _tf_flatten_batch_dims(x, num_nonbatch_dims):
"""Flatten all but last num_nonbatch_dims into one dimension.
Args:
x: a tf.Tensor:
num_nonbatch_dims: an integer
Returns:
a tf.Tensor with 1 + num_nonbatch_dims dimensions.
"""
shape = x.shape.as_list()
assert None not in shape
new_shape = ([list_product(shape[:-num_nonbatch_dims])]
+ shape[-num_nonbatch_dims:])
if new_shape != shape:
x = tf.reshape(x, new_shape)
return x
def _tf_restore_batch_dims(x, num_nonbatch_dims, prototype):
"""Reverse op of _tf_flatten_batch_dims.
Un-flatten the first dimension of x to match all but the last
num_nonbatch_dims dimensions of prototype.
Args:
x: a tf.Tensor with 1 + num_nonbatch_dims dimensions
num_nonbatch_dims: an integer
prototype: a tf.Tensor
Returns:
a tf.Tensor
"""
assert x.shape.ndims == 1 + num_nonbatch_dims
new_shape = (
prototype.shape.as_list()[:-num_nonbatch_dims] + x.shape.as_list()[1:])
assert None not in new_shape
if new_shape != x.shape.as_list():
x = tf.reshape(x, new_shape)
return x
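# Illustrative example (not part of the library): for x with tf shape
# [2, 3, 5, 7] and num_nonbatch_dims=2, _tf_flatten_batch_dims(x, 2) reshapes
# to [6, 5, 7]; _tf_restore_batch_dims(y, 2, x) reshapes a [6, a, b] tensor y
# back to [2, 3, a, b].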
def halo_exchange(x, blocks_dim, block_size_dim, halo_size, wrap=False):
"""Concat each block with the margins of adjacent blocks.
Get left and right blocks_dim and concatenate along block_size_dim.
Args:
x: a Tensor.
blocks_dim: a Dimension in x.shape
block_size_dim: a Dimension in x.shape
halo_size: an integer
wrap: a boolean
Returns:
a Tensor with the same shape as x, other than in block_size_dim, whose
size is increased by 2*halo_size.
"""
if halo_size == 0:
return x
block_size = block_size_dim.size
partial_size = halo_size % block_size
num_complete_blocks = halo_size // block_size
parts = [x]
for i in xrange(1, num_complete_blocks + 1):
parts = ([shift(x, i, blocks_dim, wrap)] + parts +
[shift(x, -i, blocks_dim, wrap)])
if partial_size > 0:
left_margin = mtf_slice(x, 0, partial_size, block_size_dim.name)
right_margin = mtf_slice(
x, block_size_dim.size - partial_size, partial_size,
block_size_dim.name)
parts = (
[shift(right_margin, num_complete_blocks + 1, blocks_dim, wrap)]
+ parts +
[shift(left_margin, -(num_complete_blocks + 1), blocks_dim, wrap)])
return concat(parts, block_size_dim.name)
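# Illustrative example (not part of the library): with block_size_dim of size
# 8 and halo_size 3 (so num_complete_blocks == 0 and partial_size == 3), each
# block is prepended with the last 3 positions of its left neighbor and
# appended with the first 3 positions of its right neighbor, giving an output
# block_size of 8 + 2 * 3 = 14.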
def left_halo_exchange(x, blocks_dim, block_size_dim, halo_size, wrap=False):
"""Concat each block with the margins of adjacent blocks from the left.
Get left blocks_dim and concatenate along block_size_dim.
Args:
x: a Tensor.
blocks_dim: a Dimension in x.shape
block_size_dim: a Dimension in x.shape
halo_size: an integer
wrap: a boolean
Returns:
a Tensor with the same shape as x, other than in block_size_dim, whose
size is increased by halo_size.
"""
if halo_size == 0:
return x
block_size = block_size_dim.size
partial_size = halo_size % block_size
num_complete_blocks = halo_size // block_size
parts = [x]
for i in xrange(1, num_complete_blocks + 1):
parts = ([shift(x, i, blocks_dim, wrap)] + parts)
if partial_size > 0:
right_margin = mtf_slice(
x, block_size_dim.size - partial_size, partial_size,
block_size_dim.name)
parts = ([shift(right_margin, num_complete_blocks + 1, blocks_dim, wrap)]
+ parts)
return concat(parts, block_size_dim.name)
def tensor_dim_to_mesh_dim_size(layout, mesh_shape, tensor_dim):
"""How many ways does a tensor dimension get split.
This is used to "cheat" when building the mtf graph and peek at how a
tensor dimension will be split. Returns 1 if the tensor dimension is not
split.
Args:
layout: an input to convert_to_layout_rules
mesh_shape: an input to convert_to_shape
tensor_dim: a Dimension
Returns:
an integer
"""
layout_rules = convert_to_layout_rules(layout)
mesh_shape = convert_to_shape(mesh_shape)
mesh_axis = layout_rules.tensor_dimension_to_mesh_axis(tensor_dim, mesh_shape)
if mesh_axis is None:
return 1
else:
return mesh_shape.dims[mesh_axis].size
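# Illustrative example (not part of the library), with a hypothetical layout
# and mesh shape:
#   tensor_dim_to_mesh_dim_size("batch:rows;d_ff:cols", "rows:8;cols:4",
#                               Dimension("d_ff", 1024))    # returns 4
#   tensor_dim_to_mesh_dim_size("batch:rows;d_ff:cols", "rows:8;cols:4",
#                               Dimension("length", 256))   # returns 1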
def tensor_dim_to_size_per_split(layout, mesh_shape, tensor_dim):
mesh_dim_size = tensor_dim_to_mesh_dim_size(layout, mesh_shape, tensor_dim)
if tensor_dim.size % mesh_dim_size:
raise ValueError("Mesh dimension (%s) must divide tensor dimension (%s)"
% (mesh_dim_size, tensor_dim))
return tensor_dim.size // mesh_dim_size
def combined_dimension(dims, name=None):
if not dims:
raise ValueError("dims must be a list of one or more Dimensions")
return Dimension(name or dims[0].name, Shape(dims).size)
def serialize_training_step(features, model_fn, batch_dim, num_splits):
"""Break the training batch into multiple microbatches.
Returns two structures:
grads - a list of Tensors corresponding to the gradients on
graph.trainable_variables. These are summed across all microbatches
outputs - a dictionary of Tensors corresponding to the output dictionary of
model_fn. Each value is either summed across all microbatches (if it
has no batch-dimension), or concatenated across all microbatches to
represent the original batch (if it does have a batch-dimension).
Args:
features: a dictionary of Tensors, each with a batch_dim dimension
model_fn: a function from feature dictionary to output dictionary
output_dictionary must contain "loss"
batch_dim: a Dimension
num_splits: an integer dividing batch_dim.size
Returns:
grads: a list of Tensors corresponding to the gradients on
graph.trainable_variables
outputs: dictionary of output Tensors summed across microbatches
"""
for v in features.values():
mesh = v.mesh
graph = v.graph
microbatch_dim = Dimension("microbatch", num_splits)
smaller_batch_dim = Dimension(batch_dim.name, batch_dim.size // num_splits)
cache = {}
def select(t, microbatch_num):
return gather(
replace_dimensions(t, batch_dim, [smaller_batch_dim, microbatch_dim]),
microbatch_num, microbatch_dim)
def cond_fn(microbatch_num):
return less(microbatch_num, num_splits)
def body_fn(microbatch_num):
"""Body function for mtf.while_loop.
Args:
microbatch_num: a mtf Scalar
Returns:
a list of mtf Tensors
"""
my_features = {}
for k, v in six.iteritems(features):
my_features[k] = select(v, microbatch_num)
outputs = model_fn(my_features)
grads = gradients(
[outputs["loss"]], [v.outputs[0] for v in graph.trainable_variables])
if None in grads:
for var, var_grad in zip(graph.trainable_variables, grads):
if var_grad is None:
tf.logging.error(
"None gradient for trainable variable %s." % var.outputs[0])
raise ValueError("Fond trainable variable(s) with None gradient. "
"Check if there are trainable variables(s) "
"disconnected from the graph.")
output_keys = outputs.keys()
cache["output_keys"] = output_keys
ret = []
ret.append(microbatch_num + 1)
# The rest of the returned values are "accumulators" that get summed
# across all microbatches.
for t in outputs.values():
if smaller_batch_dim in t.shape:
# The output contains a batch dimension, so we want to concatenate
# across microbatches.
# Here we pad the tensor for each microbatch - summing will complete
# the concatenation.
t = einsum(
[t, one_hot(microbatch_num, microbatch_dim, dtype=t.dtype)],
output_shape=replace_dimensions(
t.shape, smaller_batch_dim,
[smaller_batch_dim, microbatch_dim]))
t = replace_dimensions(
t, [smaller_batch_dim, microbatch_dim], batch_dim)
ret.append(t)
else:
# There is no batch dimension. Sum across all microbatches.
ret.append(t)
# we also want to sum the gradients.
ret.extend(grads)
return ret
while_out = while_loop(
cond_fn, body_fn, [constant(mesh, 0, dtype=tf.int32)],
has_accumulators=True)
num_outputs = len(cache["output_keys"])
combined_outputs = {}
for k, v in zip(cache["output_keys"], while_out[1:1 + num_outputs]):
combined_outputs[k] = v
combined_grads = while_out[1 + num_outputs:]
return combined_grads, combined_outputs
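# Illustrative usage sketch (not part of the library): `features` and
# `model_fn` are hypothetical; model_fn must return a dictionary containing
# "loss".
#
#   batch_dim = Dimension("batch", 128)
#   grads, outputs = serialize_training_step(
#       features, model_fn, batch_dim, num_splits=4)
#
# Each microbatch has batch size 128 // 4 = 32. `grads` line up with
# graph.trainable_variables and are already summed over the microbatches, so
# they can be passed directly to an optimizer; outputs["loss"] is likewise
# summed across microbatches.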
def nth_largest_element(x, n, reduced_dim, name=None):
"""Nth-largest reduction on specified axis.
Note that n is zero-indexed.
Args:
x: a Tensor
n: an integer
reduced_dim: a Dimension
name: an optional string
Returns:
a Tensor
"""
# Compute the top k=n+1 values, then take the last one.
k_dim = Dimension("_top_k_", n + 1)
values, _ = top_k(x, reduced_dim=reduced_dim, k_dim=k_dim, name=name)
return gather(values, n, k_dim)
def nth_smallest_element(x, n, reduced_dim, name=None):
return -nth_largest_element(-x, n, reduced_dim, name=name)
def pool_tensor_1d(tensor, pool_dim, reduce_fn=reduce_mean, pool_size=2):
"""Apply 1D pooling to a tensor.
There can be multiple batch dims and other dims. The only constraint is that
  pool_dim.size is divisible by pool_size.
Here is an example with pool_size = 2 and reduce_fn = reduce_mean.
[2, 5, 9, 15] reshape -> [[2, 5], [9, 15]] reduce -> [3.5, 12.0]
Here is another example with pool_size = 2 and reduce_fn = reduce_first, which
  selects the first sequence element and drops the rest.
[2, 5, 9, 15] reshape -> [[2, 5], [9, 15]] reduce -> [2, 9]
The input tensor is first reshaped and the reduce function is applied to the
temporary dim (`low_dim`).
Args:
tensor: a Tensor with shape [<batch_dims>, length_dim, <other_dims>]
    pool_dim: a Dimension, the dimension along which to apply pooling.
    reduce_fn: a callable, a reduce function with a signature `reduce_fn(tensor,
      reduced_dim)` where reduced_dim is a keyword arg.
pool_size: an int specifying the pooling size.
Returns:
a Tensor with shape [<batch_dims>, pooled_length_dim, <other_dims>]
"""
high_dim = Dimension(pool_dim.name, pool_dim.size // pool_size)
low_dim = Dimension("_low", pool_size)
reshaped = replace_dimensions(tensor, pool_dim, [high_dim, low_dim])
return reduce_fn(reshaped, reduced_dim=low_dim)
def stride_tensor_1d(tensor, pool_dim, pool_size=2):
"""Apply 1D stride operation to a tensor.
  1D stride operation is a special case of `pool_tensor_1d` with reduce_fn =
reduce_first, which reduces a tensor to the first element along the
`pool_dim`. See the docstring of pool_tensor_1d for more detail and an
example.
Args:
tensor: a Tensor with shape [<batch_dims>, length_dim, <other_dims>]
    pool_dim: a Dimension, the dimension along which to apply pooling.
pool_size: an int specifying the pooling size.
Returns:
a Tensor with shape [<batch_dims>, strided_length_dim, <other_dims>]
"""
return pool_tensor_1d(
tensor, pool_dim, reduce_fn=reduce_first, pool_size=pool_size)
def reduce_first(tensor, reduced_dim):
"""Reduce the tensor to the first element along the `reduce_dim`.
An example with `reduced_dim` corresponding to the dimension with axis=1
[[2, 5], [9, 15]] -> [2, 9]
Args:
tensor: a Tensor with shape [<batch_dims>, length_dim, <other_dims>]
reduced_dim: a Dimension, the dimension to be reduced.
Returns:
a Tensor with shape [<batch_dims>, <other_dims>]
"""
r = mtf_range(tensor.mesh, reduced_dim, dtype=tf.int32)
first_element_filter = cast(equal(r, 0), tensor.dtype)
return reduce_sum(tensor * first_element_filter, reduced_dim=reduced_dim)
| mesh-master | mesh_tensorflow/ops.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for Mesh TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import heapq
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.tpu.topology import Topology
@contextlib.contextmanager
def outside_all_rewrites():
with ops.control_dependencies(None):
yield
class BalancedVariablePlacer(object):
"""Place the variable on different device and balance the memory usage."""
def __init__(self, devices, init_usage=None):
init_usage = init_usage if init_usage else [0] * len(devices)
assert len(devices) == len(init_usage)
self._mem_device_heap = list(zip(init_usage, devices))
heapq.heapify(self._mem_device_heap)
self._last_device = devices[0]
def device_function(self, var):
"""Choose a device for the input variable.
Args:
      var: a Variable.
Returns:
The device for placing the var.
"""
if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'):
tf.logging.debug('Place {} on last device: {}.'.format(
var.name, self._last_device))
return self._last_device
shape = tf.TensorShape(var.get_attr('shape'))
assert shape.num_elements() is not None
size = var.get_attr('dtype').size
mem, device = heapq.heappop(self._mem_device_heap)
mem += shape.num_elements() * size
heapq.heappush(self._mem_device_heap, (mem, device))
tf.logging.debug('Place variable {} on {} and consumes {} Bytes.'.format(
var.name, device, mem))
self._last_device = device
return device
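# Illustrative usage sketch (not part of the library): device strings and the
# model-building function are hypothetical. Each variable is created on
# whichever device currently holds the least variable memory.
#
#   placer = BalancedVariablePlacer(['/gpu:0', '/gpu:1'])
#   with tf.device(placer.device_function):
#     build_model()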
SCALAR_SUMMARIES_COLLECTION_KEY = 'mtf_scalar_summaries'
def create_host_call(model_dir):
"""Construct a host_call writing scalar summaries.
Borrowed from t2t.
TODO(noam): remove this code once there is a better way to get summaries on
TPU.
Args:
model_dir: String containing path to train
Returns:
(fn, args) Pair to be called by TPUEstimator as the host_call.
"""
graph = tf.get_default_graph()
# a list of (name, lowered tensor) tuples
summaries = graph.get_collection(SCALAR_SUMMARIES_COLLECTION_KEY)
def maybe_cast(tensor):
assert tensor.shape.is_compatible_with([]), tensor.name
if tensor.dtype == tf.int64:
return tf.to_int32(tensor)
if tensor.dtype == tf.bfloat16:
return tf.cast(tensor, tf.float32)
return tensor
reshaped_tensors = [tf.reshape(maybe_cast(t), [1]) for _, t in summaries]
  # When no supported summaries are found, don't create host_call. Otherwise,
  # the TPU outfeed queue would enqueue global_step while host_call doesn't
  # dequeue it, eventually causing a hang.
if not reshaped_tensors:
return None
def host_call_fn(global_step, *args):
"""Training host call. Creates scalar summaries for training metrics."""
# This function is executed on the CPU and should not directly reference
# any Tensors in the rest of the `model_fn`. To pass Tensors from the
# model to the `model_fn`, provide as part of the `host_call`.
global_step = tf.cast(global_step[0], tf.int64)
with tf2.summary.create_file_writer(model_dir).as_default():
# We cannot directly use any tensor from summaries, because each
# tensor here must be a concat of multiple tensors from all shards.
      # Therefore, we rely on the assumption that args will have the same
      # length as summaries, and all tensors in args will be in the same
      # order as summaries.
assert len(args) == len(summaries)
for i, tensor in enumerate(args):
name = summaries[i][0]
tf2.summary.scalar(
name, tf.reduce_mean(tensor), step=global_step)
return tf.summary.all_v2_summary_ops()
global_step_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1])
return host_call_fn, [global_step_t] + reshaped_tensors
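# Illustrative usage sketch (not part of the library): wiring the host call
# into a TPUEstimatorSpec inside a model_fn. `loss` and `train_op` are
# hypothetical.
#
#   host_call = create_host_call(model_dir)
#   remove_summaries()  # see below; the summaries now flow via the host call
#   return tf.estimator.tpu.TPUEstimatorSpec(
#       mode=tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op,
#       host_call=host_call)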
def topology_rank(topology):
# Deserialize the Topology proto, if it is a string.
if isinstance(topology, bytes):
topology = Topology(serialized=topology)
if not isinstance(topology, Topology):
raise ValueError('`topology` is not a Topology object; got {}'.format(
type(topology)))
return len(topology.mesh_shape)
def remove_summaries():
"""Remove summaries from the default graph."""
g = tf.get_default_graph()
key = 'mtf_scalar_summaries'
tf.logging.debug('Remove summaries %s' % str(g.get_collection(key)))
del g.get_collection_ref(key)[:]
assert not g.get_collection(key)
| mesh-master | mesh_tensorflow/utils.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test import."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
class ImportTest(tf.test.TestCase):
def test_import(self):
pass
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
| mesh-master | mesh_tensorflow/import_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Mesh TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import mesh_tensorflow as mtf
from mesh_tensorflow import test_utils as mtf_test_utils
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.framework import test_util # pylint:disable=g-direct-tensorflow-import
class LaidOutTensor(object):
"""LaidOutTensor (see placement_mesh_impl.py, simd_mesh_impl.py) for tests."""
def __init__(self, tensor_list):
self.tensor_list = tensor_list
class MeshTensorFlowTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters(
(mtf.Dimension("x", 5),),
(("x", 5),),
)
def testConvertToDimension(self, inputs):
dimension = mtf.convert_to_dimension(inputs)
self.assertEqual(dimension.name, "x")
self.assertEqual(dimension.size, 5)
def testConvertToDimensionGenericInputs(self):
dimension = mtf.convert_to_dimension(None)
self.assertEqual(dimension, None)
with self.assertRaises(TypeError):
mtf.convert_to_dimension(5)
@parameterized.parameters(
(mtf.Shape([mtf.Dimension("x", 4),
mtf.Dimension("y", 8)]),),
("x:4;y:8",),
("x:4.y:8",),
("x:4 y:8",),
("x:4,y:8",),
)
def testConvertToShape(self, inputs):
shape = mtf.convert_to_shape(inputs)
self.assertEqual(shape, mtf.Shape([mtf.Dimension("x", 4),
mtf.Dimension("y", 8)]))
def testConvertToShapeGenericInputs(self):
shape = mtf.convert_to_shape([])
self.assertEqual(shape.dims, [])
shape = mtf.convert_to_shape(None)
self.assertEqual(shape, None)
with self.assertRaises(ValueError):
mtf.convert_to_shape("x;4")
@parameterized.parameters(
(mtf.LayoutRules([("d_ff", "model"), ("heads", "model")]),),
("d_ff:model;heads:model",),
("d_ff:model.heads:model",),
("d_ff:model heads:model",),
("d_ff:model,heads:model",),
([("d_ff", "model"), ("heads", "model")],),
)
def testConvertToLayoutRules(self, inputs):
layout_rules = mtf.convert_to_layout_rules(inputs)
self.assertEqual(
layout_rules._pairs,
mtf.LayoutRules([("d_ff", "model"), ("heads", "model")])._pairs)
def testConvertToLayoutRulesGenericInputs(self):
with self.assertRaises(ValueError):
mtf.convert_to_layout_rules("d_ff;heads")
def testTensorLayout(self):
tensor_layout = mtf.TensorLayout([0, 2, 1])
self.assertEqual(tensor_layout.mesh_axis_to_tensor_axis(0), ())
self.assertEqual(tensor_layout.mesh_axis_to_tensor_axis(1), (0,))
self.assertEqual(tensor_layout.mesh_axis_to_tensor_axis(2), (0, 2))
tensor_layout = mtf.TensorLayout([None, 0])
self.assertFalse(tensor_layout.is_fully_replicated)
tensor_layout = mtf.TensorLayout([None, None, None])
self.assertTrue(tensor_layout.is_fully_replicated)
def testGraph(self):
graph = mtf.Graph()
self.assertEmpty(graph.operations)
self.assertEmpty(graph.trainable_variables)
self.assertEmpty(graph.all_variables)
mesh = mtf.Mesh(graph, "mesh_test")
_ = mtf.import_tf_tensor(mesh,
tf_tensor=tf.constant(0.),
shape=mtf.Shape([]))
self.assertLen(graph.operations, 1)
self.assertEmpty(graph.trainable_variables)
self.assertEmpty(graph.all_variables)
_ = mtf.get_variable(mesh, "variable_0", mtf.Shape([]), trainable=True)
self.assertLen(graph.operations, 2)
self.assertLen(graph.trainable_variables, 1)
self.assertLen(graph.all_variables, 1)
_ = mtf.get_variable(mesh, "variable_1", mtf.Shape([]), trainable=False)
self.assertLen(graph.operations, 3)
self.assertLen(graph.trainable_variables, 1)
self.assertLen(graph.all_variables, 2)
def testGraphNames(self):
# Standard Usage.
graph = mtf.Graph()
self.assertEqual(graph.unique_name("a"), "a")
self.assertEqual(graph.unique_name("a"), "a_1")
self.assertEqual(graph.unique_name("a"), "a_2")
# Edge cases, the user may choose the name "a_1".
graph = mtf.Graph()
self.assertEqual(graph.unique_name("a"), "a")
self.assertEqual(graph.unique_name("a"), "a_1")
self.assertEqual(graph.unique_name("a_1"), "a_1_1")
graph = mtf.Graph()
self.assertEqual(graph.unique_name("a"), "a")
self.assertEqual(graph.unique_name("a_1"), "a_1")
self.assertEqual(graph.unique_name("a"), "a_2")
# Case insensitive.
graph = mtf.Graph()
self.assertEqual(graph.unique_name("a"), "a")
self.assertEqual(graph.unique_name("A"), "A_1")
@test_util.run_in_graph_and_eager_modes()
def testLowering(self):
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
inputs = tf.constant(0.)
mtf_inputs = mtf.import_tf_tensor(mesh,
tf_tensor=inputs,
shape=mtf.Shape([]))
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
outputs = lowering.export_to_tf_tensor(mtf_inputs)
inputs_value, outputs_value = self.evaluate([inputs, outputs])
self.assertEqual(inputs_value, outputs_value)
# Check that methods run without error.
_ = lowering.copy_masters_to_slices()
_ = lowering.copy_slices_to_masters()
def testMesh(self):
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
self.assertEqual(mesh.graph, graph)
def testMeshImpl(self):
shape = mtf.Shape([mtf.Dimension("batch", 4),
mtf.Dimension("model", 8)])
layout_rules = mtf.LayoutRules([("batch", "batch"),
("d_ff", "model"),
("heads", "model")])
mesh_impl = mtf.MeshImpl(shape=shape, layout_rules=layout_rules)
self.assertEqual(mesh_impl.shape, shape)
self.assertLen(shape, mesh_impl.ndims)
self.assertEqual(mesh_impl.layout_rules, layout_rules)
self.assertEqual(mesh_impl.size, shape.size)
self.assertTrue(mesh_impl.supports_control_dependencies)
batch = mtf.Dimension("batch", 128)
length = mtf.Dimension("length", 500)
d_ff = mtf.Dimension("d_ff", 2048)
heads = mtf.Dimension("heads", 8)
self.assertEqual(mesh_impl.tensor_dimension_to_mesh_axis(batch), 0)
self.assertEqual(mesh_impl.tensor_dimension_to_mesh_axis(d_ff), 1)
self.assertEqual(mesh_impl.tensor_dimension_to_mesh_axis(heads), 1)
self.assertEqual(mesh_impl.tensor_layout(mtf.Shape([batch, length, d_ff])),
mtf.TensorLayout([0, None, 1]))
@parameterized.parameters({
"pool_fn": np.mean,
"pool_fn_mtf": mtf.reduce_mean
}, {
"pool_fn": np.max,
"pool_fn_mtf": mtf.reduce_max
}, {
"pool_fn": np.min,
"pool_fn_mtf": mtf.reduce_min
})
def testPoolTensor1d(self, pool_fn, pool_fn_mtf):
converter = mtf_test_utils.NumpyConverter()
pool_size = 2
x = np.random.randn(2, 3, 4, 5)
expected = np.empty(shape=[2, 3, 2, 5])
expected[:, :, 0, :] = pool_fn(x[:, :, 0:2, :], axis=2)
expected[:, :, 1, :] = pool_fn(x[:, :, 2:4, :], axis=2)
x_mtf = converter.convert_np_array_to_mtf_tensor(x, dtype=tf.float32)
pooled_mtf = mtf.pool_tensor_1d(
x_mtf,
pool_dim=x_mtf.shape.dims[2],
reduce_fn=pool_fn_mtf,
pool_size=pool_size)
actual = converter.convert_mtf_tensor_to_np_array(pooled_mtf)
self.assertAllClose(expected, actual)
@parameterized.parameters({"pool_size": 2}, {"pool_size": 3})
def testStrideTensor1d(self, pool_size):
converter = mtf_test_utils.NumpyConverter()
x = np.random.randint(0, 100, size=[2, 3, 6, 5])
x_mtf = converter.convert_np_array_to_mtf_tensor(x)
expected = x[:, :, range(0, x.shape[2], pool_size), :]
strided_mtf = mtf.stride_tensor_1d(
x_mtf, pool_dim=x_mtf.shape.dims[2], pool_size=pool_size)
actual = converter.convert_mtf_tensor_to_np_array(strided_mtf)
self.assertAllEqual(expected, actual)
def testReduceFirst(self):
converter = mtf_test_utils.NumpyConverter()
x = np.random.randint(0, 100, size=[2, 3, 6, 5])
x_mtf = converter.convert_np_array_to_mtf_tensor(x)
expected = x[:, :, 0, :]
reduced_mtf = mtf.reduce_first(x_mtf, reduced_dim=x_mtf.shape.dims[2])
actual = converter.convert_mtf_tensor_to_np_array(reduced_mtf)
self.assertAllEqual(expected, actual)
class OperationSplittabilityTest(tf.test.TestCase):
def setUp(self):
super(OperationSplittabilityTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, "my_mesh")
self.a_dim = mtf.Dimension("a", 5)
self.b_dim = mtf.Dimension("b", 10)
self.c_dim = mtf.Dimension("c", 15)
self.ab_shape = mtf.Shape([self.a_dim, self.b_dim])
self.x = mtf.zeros(self.mesh, self.ab_shape)
self.batch_dim = mtf.Dimension("batch", 100)
self.grid_h_dim = mtf.Dimension("grid_h", 10)
self.grid_w_dim = mtf.Dimension("grid_w", 10)
self.filter_h_dim = mtf.Dimension("filter_h", 5)
self.filter_w_dim = mtf.Dimension("filter_w", 5)
self.in_dim = mtf.Dimension("in", 10)
self.out_dim = mtf.Dimension("out", 10)
self.image = mtf.zeros(self.mesh, [self.batch_dim, self.grid_h_dim,
self.grid_w_dim, self.in_dim])
def testOperation(self):
operation = mtf.Operation([self.x], name="operation")
# Everything is splittable.
self.assertEqual(
operation._initialize_all_dimensions_as_splittable(),
(frozenset(["a", "b"]), frozenset()))
# Everything is unsplittable.
self.assertEqual(
operation._initialize_splittable_and_unsplittable_dims("unsplittable"),
(frozenset(), frozenset(["a", "b"])))
# Everything is unsplittable except dimension "b".
self.assertEqual(
operation._initialize_splittable_and_unsplittable_dims(
"unsplittable", ["b"]),
(frozenset(["b"]), frozenset(["a"])))
self.assertRaises(
ValueError,
operation._initialize_splittable_and_unsplittable_dims,
"invalid")
def testSlicewiseOperationAndGenericGradOperation(self):
slicewise_operation = mtf.SlicewiseOperation(
tf.exp,
[self.x],
[self.x.shape],
[self.x.dtype],
splittable_dims=[self.a_dim], # pretend only dim "a" can be split.
grad_function=lambda op, dy: [dy * op.outputs[0]],
name="component-wise exp")
self.assertEqual(slicewise_operation.splittable_dims, frozenset(["a"]))
self.assertEqual(slicewise_operation.unsplittable_dims, frozenset(["b"]))
generic_grad_operation = mtf.GenericGradOperation(slicewise_operation,
[self.x])
self.assertEqual(generic_grad_operation.splittable_dims,
frozenset(["a", "b"]))
self.assertEqual(generic_grad_operation.unsplittable_dims,
frozenset())
def testScalarMultiplyOperationandScalarAddOperation(self):
scalar = 2.0
scalar_multiply_operation = mtf.ScalarMultiplyOperation(self.x, scalar)
self.assertEqual(scalar_multiply_operation.splittable_dims,
frozenset(["a", "b"]))
self.assertEqual(scalar_multiply_operation.unsplittable_dims, frozenset())
scalar_add_operation = mtf.ScalarAddOperation(self.x, scalar)
self.assertEqual(scalar_add_operation.splittable_dims,
frozenset(["a", "b"]))
self.assertEqual(scalar_add_operation.unsplittable_dims, frozenset())
def testBinaryOpWithBroadcasting(self):
x2 = mtf.zeros(self.mesh, mtf.Shape([self.a_dim, self.c_dim]))
binary_op_with_broadcasting = mtf.BinaryOpWithBroadcasting(
tf.less,
self.x,
x2,
mtf.Shape([self.a_dim, self.b_dim, self.c_dim]),
tf.bool,
name="less with broadcasting")
self.assertEqual(binary_op_with_broadcasting.splittable_dims,
frozenset(["a", "b", "c"]))
self.assertEqual(binary_op_with_broadcasting.unsplittable_dims, frozenset())
def testBroadcastOperation(self):
broadcast_operation = mtf.BroadcastOperation(
self.x, mtf.Shape([self.b_dim, self.c_dim, self.a_dim]))
self.assertEqual(broadcast_operation.splittable_dims,
frozenset(["a", "b", "c"]))
self.assertEqual(broadcast_operation.unsplittable_dims, frozenset())
def testReduceOperation(self):
reduce_operation = mtf.ReduceOperation(self.x, mtf.Shape([self.b_dim]),
"sum")
self.assertEqual(reduce_operation.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(reduce_operation.unsplittable_dims, frozenset())
def testPoolOperation(self):
reduce_operation = mtf.PoolOperation(self.image, [2, 2], [2, 2], "AVG_2D")
self.assertEqual(reduce_operation.splittable_dims,
frozenset(["batch", "in"]))
self.assertEqual(reduce_operation.unsplittable_dims,
frozenset(["grid_h", "grid_w"]))
def testConcatOperation(self):
concat_dim1 = mtf.Dimension("concat", 5)
concat_dim2 = mtf.Dimension("concat", 7)
x1 = mtf.zeros(self.mesh, mtf.Shape([self.a_dim, self.b_dim, concat_dim1]))
x2 = mtf.zeros(self.mesh, mtf.Shape([self.a_dim, self.b_dim, concat_dim2]))
concat_operation = mtf.ConcatOperation([x1, x2], "concat")
self.assertEqual(concat_operation.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(concat_operation.unsplittable_dims, frozenset(["concat"]))
def testSplitOperation(self):
split_operation = mtf.SplitOperation(self.x, self.b_dim, [3, 7])
self.assertEqual(split_operation.splittable_dims, frozenset(["a"]))
self.assertEqual(split_operation.unsplittable_dims, frozenset(["b"]))
def testStackOperation(self):
stack_operation = mtf.StackOperation([self.x, self.x], "stack", axis=0)
self.assertEqual(stack_operation.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(stack_operation.unsplittable_dims, frozenset(["stack"]))
def testUnstackOperation(self):
unstack_operation = mtf.UnstackOperation(self.x, self.b_dim)
self.assertEqual(unstack_operation.splittable_dims, frozenset(["a"]))
self.assertEqual(unstack_operation.unsplittable_dims, frozenset(["b"]))
def testEinsumOperation(self):
x2 = mtf.zeros(self.mesh, mtf.Shape([self.a_dim, self.c_dim]))
einsum_operation = mtf.EinsumOperation([self.x, x2],
mtf.Shape([self.b_dim, self.c_dim]))
self.assertEqual(einsum_operation.splittable_dims,
frozenset(["a", "b", "c"]))
self.assertEqual(einsum_operation.unsplittable_dims, frozenset())
def testConv2dOperations(self):
conv_input = mtf.zeros(
self.mesh,
mtf.Shape([self.batch_dim, self.grid_h_dim, self.grid_w_dim,
self.in_dim]))
conv_filter = mtf.zeros(
self.mesh,
mtf.Shape([self.filter_h_dim, self.filter_w_dim, self.in_dim,
self.out_dim]))
strides = [1, 1, 1, 1]
padding = "SAME"
conv2d_operation = mtf.Conv2dOperation(conv_input, conv_filter, strides,
padding)
self.assertEqual(conv2d_operation.splittable_dims,
frozenset(["batch", "in", "out"]))
self.assertEqual(conv2d_operation.unsplittable_dims,
frozenset(["filter_h", "filter_w", "grid_h", "grid_w"]))
output = conv2d_operation.outputs[0]
d_output = mtf.zeros(self.mesh, output.shape)
conv2d_backprop_input_operation = mtf.Conv2or3dBackpropInputOperation(
2, False, conv_input.shape, conv_filter, d_output, strides, padding)
self.assertEqual(conv2d_backprop_input_operation.splittable_dims,
frozenset(["batch", "filter_h", "filter_w", "grid_h",
"grid_w", "in", "out"]))
self.assertEqual(conv2d_backprop_input_operation.unsplittable_dims,
frozenset())
conv2d_backprop_filter_operation = mtf.Conv2or3dBackpropFilterOperation(
2, False, conv_input, conv_filter.shape, d_output, strides, padding)
self.assertEqual(conv2d_backprop_filter_operation.splittable_dims,
frozenset(["batch", "filter_h", "filter_w", "grid_h",
"grid_w", "in", "out"]))
self.assertEqual(conv2d_backprop_filter_operation.unsplittable_dims,
frozenset())
def testShiftOperation(self):
shift_operation = mtf.ShiftOperation(self.x, -5, self.b_dim, wrap=True)
self.assertEqual(shift_operation.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(shift_operation.unsplittable_dims, frozenset())
def testSliceOperation(self):
slice_operation = mtf.SliceOperation(self.x, begin=3, size=4,
slice_dim_name="b")
self.assertEqual(slice_operation.splittable_dims, frozenset(["a"]))
self.assertEqual(slice_operation.unsplittable_dims, frozenset(["b"]))
def testPadOperation(self):
pad_operation = mtf.PadOperation(self.x, [7, 2], "a")
self.assertEqual(pad_operation.splittable_dims, frozenset(["b"]))
self.assertEqual(pad_operation.unsplittable_dims, frozenset(["a"]))
def testOneHotOperation(self):
x = mtf.zeros(self.mesh, self.ab_shape, dtype=tf.int32)
one_hot_operation = mtf.OneHotOperation(x, self.c_dim, 1, 0, dtype=tf.bool)
self.assertEqual(one_hot_operation.splittable_dims,
frozenset(["a", "b", "c"]))
self.assertEqual(one_hot_operation.unsplittable_dims, frozenset())
def testImportOperation(self):
tf_x = tf.zeros([5, 10])
import_operation = mtf.ImportOperation(self.mesh, tf_x, self.ab_shape)
self.assertEqual(import_operation.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(import_operation.unsplittable_dims, frozenset())
def testImportLaidOutTensorOperation(self):
laid_out_x = LaidOutTensor([self.x])
import_laid_out_tensor_operation = mtf.ImportLaidOutTensorOperation(
self.mesh, laid_out_x, self.ab_shape)
self.assertEqual(import_laid_out_tensor_operation.splittable_dims,
frozenset())
self.assertEqual(import_laid_out_tensor_operation.unsplittable_dims,
frozenset(["a", "b"]))
def testVariableOperations(self):
var = mtf.Variable(self.mesh,
"test_variable",
self.ab_shape,
mtf.VariableDType(tf.int32, tf.int32, tf.int32),
initializer=tf.zeros_initializer(),
trainable=True)
self.assertEqual(var.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(var.unsplittable_dims, frozenset())
read_variable = mtf.ReadVariable(var)
self.assertEqual(read_variable.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(read_variable.unsplittable_dims, frozenset())
assign = mtf.Assign([var], [self.x])
self.assertEqual(assign.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(assign.unsplittable_dims, frozenset())
depend = mtf.Depend(read_variable.outputs[0], [assign])
self.assertEqual(depend.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(depend.unsplittable_dims, frozenset())
def testConstant(self):
constant = mtf.Constant(self.mesh, 0, self.ab_shape, dtype=tf.int32)
self.assertEqual(constant.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(constant.unsplittable_dims, frozenset())
def testStopGradient(self):
stop_gradient = mtf.StopGradient(self.x)
self.assertEqual(stop_gradient.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(stop_gradient.unsplittable_dims, frozenset())
def testPrintOperation(self):
print_operation = mtf.PrintOperation(self.x, [self.x], "Tensor x: ")
self.assertEqual(print_operation.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(print_operation.unsplittable_dims, frozenset())
def testReshapeOperation(self):
reshape_operation = mtf.ReshapeOperation(
self.x, mtf.Shape([mtf.Dimension("x", 25), mtf.Dimension("y", 2)]))
self.assertEqual(reshape_operation.splittable_dims,
frozenset(["a", "b", "x", "y"]))
self.assertEqual(reshape_operation.unsplittable_dims, frozenset())
def testRandomOperation(self):
random_operation = mtf.RandomOperation(self.mesh, self.ab_shape,
tf.random_uniform)
self.assertEqual(random_operation.splittable_dims, frozenset(["a", "b"]))
self.assertEqual(random_operation.unsplittable_dims, frozenset())
def testWhileLoopOperation(self):
# This test case implements the following:
# for i in range(10):
# x = x * 2
i = mtf.constant(self.mesh, 0, mtf.Shape([]))
cond_fn = lambda i, x: mtf.less(i, 10)
body_fn = lambda i, x: [mtf.add(i, 1), mtf.multiply(x, 2)]
while_loop_operation = mtf.WhileLoopOperation(cond_fn, body_fn, [i, self.x])
self.assertEqual(while_loop_operation.splittable_dims,
frozenset(["a", "b"]))
self.assertEqual(while_loop_operation.unsplittable_dims, frozenset())
class NthSmallestTest(tf.test.TestCase):
def testNthLargest(self):
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
a_dim = mtf.Dimension("a", 6)
b_dim = mtf.Dimension("b", 2)
inputs = tf.constant([[1, 10],
[2, 9],
[3, 8],
[4, 7],
[5, 6],
[6, 5]])
n = 1 # find second largest element (since n is zero-indexed)
reduced_dim = a_dim
expected_outputs = tf.constant([5, 9])
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([a_dim, b_dim]))
mtf_outputs = mtf.nth_largest_element(
mtf_inputs, n, reduced_dim, "test_nth_largest")
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape="all:2", layout="a:all", devices=["", ""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
self.assertAllEqual(self.evaluate(actual_outputs),
self.evaluate(expected_outputs))
def testNthSmallestReduceSecondDim(self):
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
a_dim = mtf.Dimension("a", 6)
b_dim = mtf.Dimension("b", 2)
inputs = tf.constant([[1, 10],
[2, 9],
[3, 8],
[4, 7],
[5, 6],
[6, 5]])
n = 0 # find smallest element (n is zero-indexed)
reduced_dim = b_dim
expected_outputs = tf.constant([1, 2, 3, 4, 5, 5])
mtf_inputs = mtf.import_tf_tensor(
mesh, inputs, shape=mtf.Shape([a_dim, b_dim]))
mtf_outputs = mtf.nth_smallest_element(
mtf_inputs, n, reduced_dim, "test_nth_smallest")
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape="all:2", layout="a:all", devices=["", ""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_outputs = lowering.export_to_tf_tensor(mtf_outputs)
self.assertAllEqual(self.evaluate(actual_outputs),
self.evaluate(expected_outputs))
class TopKTest(tf.test.TestCase):
def testTopK(self):
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
a_dim = mtf.Dimension("a", 6)
b_dim = mtf.Dimension("b", 2)
inputs = tf.constant([[1, 10],
[2, 9],
[3, 8],
[4, 7],
[5, 6],
[6, 5]],
dtype=tf.float32)
k_dim = mtf.Dimension("k", 2)
d_values = tf.constant([[11, 12], [13, 14]], dtype=tf.float32)
reduced_dim = a_dim
expected_values = tf.constant([[6, 5], [10, 9]], dtype=tf.float32)
expected_indices = tf.constant([[5, 4], [0, 1]])
expected_d_inputs = tf.constant([[0, 13],
[0, 14],
[0, 0],
[0, 0],
[12, 0],
[11, 0]],
dtype=tf.float32)
mtf_inputs = mtf.import_fully_replicated(
mesh, inputs, shape=mtf.Shape([a_dim, b_dim]))
mtf_d_values = mtf.import_tf_tensor(
mesh, d_values, shape=mtf.Shape([b_dim, k_dim]))
mtf_values, mtf_indices = mtf.top_k(mtf_inputs,
reduced_dim=reduced_dim,
k_dim=k_dim,
name="test_nth_smallest")
[mtf_d_inputs] = mtf.gradients([mtf_values], [mtf_inputs], [mtf_d_values])
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape="rows:2,cols:2", layout="a:rows,b:cols", devices=["", "", "", ""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_values = lowering.export_to_tf_tensor(mtf_values)
actual_indices = lowering.export_to_tf_tensor(mtf_indices)
actual_d_inputs = lowering.export_to_tf_tensor(mtf_d_inputs)
actual_inputs = lowering.export_to_tf_tensor(mtf_inputs)
self.assertAllEqual(self.evaluate(actual_inputs),
self.evaluate(inputs))
self.assertAllEqual(self.evaluate(actual_values),
self.evaluate(expected_values))
self.assertAllEqual(self.evaluate(actual_indices),
self.evaluate(expected_indices))
self.assertAllEqual(self.evaluate(actual_d_inputs),
self.evaluate(expected_d_inputs))
class RecomputeGradTest(tf.test.TestCase):
def testRecomputeGrad(self):
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
# let's differentiate x^2 + x
# dy/dx = 2x+1
def x_squared_plus_x(x):
return x * x + x
x = tf.constant([5, 10], dtype=tf.float32)
dy = tf.constant([2, 3], dtype=tf.float32)
two = mtf.Dimension("two", 2)
expected_y = tf.constant([30, 110], dtype=tf.float32)
expected_dx = tf.constant([22, 63], dtype=tf.float32)
mtf_x = mtf.import_fully_replicated(
mesh, x, shape=mtf.Shape([two]))
mtf_dy = mtf.import_tf_tensor(
mesh, dy, shape=mtf.Shape([two]))
mtf_y = mtf.recompute_grad(x_squared_plus_x, [mtf_x])
[mtf_dx] = mtf.gradients([mtf_y], [mtf_x], [mtf_dy])
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape="processors:2", layout="two:processors", devices=["", ""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
actual_y = lowering.export_to_tf_tensor(mtf_y)
actual_dx = lowering.export_to_tf_tensor(mtf_dx)
self.assertAllEqual(self.evaluate(actual_y),
self.evaluate(expected_y))
self.assertAllEqual(self.evaluate(actual_dx),
self.evaluate(expected_dx))
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.enable_eager_execution()
tf.test.main()
| mesh-master | mesh_tensorflow/ops_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers implemented in Mesh TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import gin
from mesh_tensorflow import ops_with_redefined_builtins as mtf
import tensorflow.compat.v1 as tf
@gin.configurable
def unit_scaling_convention(value=False):
"""Turn this on with gin to enable the unit-scaling convention.
TODO(noam): turn this comment into a position paper and post to arxiv
Under the unit-scaling convention, all weights are initialized with unit
variance, and the outputs of most contractions (matmul/einsum operations) are
divided by the square-root of the sizes of the contracting dimensions.
  This differs from the typical inverse-square-root weight-initialization
convention often attributed to
http://proceedings.mlr.press/v9/glorot10a.html
in which weights are typically initialized according to a distribution with
mean zero and standard-deviation equal to the inverse-square-root of the
contracting dimension(s).
Under both conventions, the purpose of the inverse-square-root scaling is so
that activations in a layer should be scaled similarly to the activations in
the previous layer. (Typically, models are initialized so that activations in
all layers should have RMS=O(1)).
The difference between the two conventions is whether this scaling happens in
the parameters (their way), or as an explicit multiplier on the activations
(our way).
In our opinion, parameter-scaling (their way) has three main disadvantages:
1. Optimizers need to be aware of differently-scaled parameters. This is
because the learning-rates of adaptive optimizers represent target step-sizes
for the parameters. The desired step size for a parameter logically depends
on the scale of the parameter itself, and so one typically needs to lower the
learning-rate when the layers get bigger and the parameters get consequently
smaller. Under the unit-scaling convention, this is unnecessary, since all
parameters are on the same unit scale.
2. It is often unwieldy from an engineering standpoint to communicate to both
the variable initializers and to the optimizer what the scale of the variable
should be. Typically, the variable initializer guesses this by inferring from
the dimension order which dimension of the variable might represent
contracting dimensions. This is highly error-prone.
3. Sometimes contractions happen without being associated with parameters, as
in neural attention. It may be important here too to divide by the square
root of the contracting dimensions, in order to maintain activation scale.
See the discussion in section 3.2.1 of https://arxiv.org/abs/1706.03762
Being in the habit of scaling the outputs of contractions in this way makes
it more likely to remember to do the same thing in these circumstances.
Note: When switching to the unit-scaling convention, it is probably necessary
to raise the learning rate, since larger parameters need larger updates. An
exception is when using Adafactor, which by default scales the updates
relative to the scale of the current parameter values.
Args:
value: a boolean
Returns:
a boolean
"""
return value
def us_einsum(xs, *args, **kwargs):
"""Einsum with optional unit-scaling convention.
If the unit-scaling convention is enabled, then divide the output by
the square-root of the product of the contracting dimensions.
Args:
xs: a list of mtf.Tensor
*args: arguments to mtf.einsum
**kwargs: keyword arguments to mtf.einsum
Returns:
a mtf.Tensor
"""
y = mtf.einsum(xs, *args, **kwargs)
if unit_scaling_convention():
all_input_dims = set(sum([x.shape.dims for x in xs], []))
reduced_dims = [d for d in all_input_dims if d not in y.shape.dims]
y *= mtf.Shape(reduced_dims).size ** -0.5
return y
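# Worked example (an illustrative sketch, not part of the library API): if the
# unit-scaling convention is enabled and a contraction reduces a single
# dimension of size 1024, us_einsum multiplies the einsum output by
# 1024 ** -0.5, so unit-variance inputs and unit-variance weights again give
# roughly unit-variance outputs.
def _example_us_einsum_scaling_factor():
  """Sketch of the rescaling factor us_einsum applies when enabled."""
  d_model = mtf.Dimension("d_model", 1024)
  # With d_model as the only contracting dimension, the multiplier is:
  return mtf.Shape([d_model]).size ** -0.5  # == 1024 ** -0.5 == 0.03125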
def dense(x,
new_dims,
reduced_dims=None,
expert_dims=None,
use_bias=True,
activation=None,
master_dtype=tf.float32,
slice_dtype=tf.float32,
variable_dtype=None,
kernel_initializer=None,
kernel_weights=None,
name=None):
"""Dense layer doing (kernel*x + bias) computation.
Args:
x: a mtf.Tensor of shape [..., reduced_dims].
new_dims: a list of mtf.Dimension.
reduced_dims: a list of mtf.Dimensions of x to be reduced.
If omitted (deprecated interface), we reduce the last dimension.
expert_dims: an optional list of mtf.Dimension which represent different
experts. Different experts get different weights.
use_bias: a boolean, whether to add bias.
activation: an optional function from mtf.Tensor to mtf.Tensor
master_dtype: a tf.dtype (deprecated - use variable_dtype)
slice_dtype: a tf.dtype (deprecated - use variable_dtype)
variable_dtype: a mtf.VariableDType
kernel_initializer: an initializer for kernel variable.
kernel_weights: mtf.Tensor weights matrix to use for dense computation
name: a string used for tf.variable_scope.
Returns:
a mtf.Tensor of shape [..., new_dims].
"""
if not isinstance(new_dims, list):
new_dims = [new_dims]
if variable_dtype is None:
variable_dtype = mtf.VariableDType(master_dtype, slice_dtype, x.dtype)
if expert_dims is None:
expert_dims = []
if reduced_dims is None:
tf.logging.warning(
"Deprecation warning - it is recommended to pass reduced_dims "
"explicitly to mtf.layers.dense() so as not to depend on dimension "
"order. To silence this warning, explicitly pass "
"reduced_dims=x.shape.dims[-1:] (in scope %s)"
% tf.get_variable_scope().name)
reduced_dims = x.shape.dims[-1:]
# if any reduced dims have the same names as new dims, first change these
# dimension names in the input so as to avoid name conflict in the weight
# matrix.
reduced_dims = reduced_dims[:]
for i in range(len(reduced_dims)):
if reduced_dims[i] in new_dims:
original_name = reduced_dims[i].name
tmp_name = "_" + original_name
reduced_dims[i] = mtf.Dimension(tmp_name, reduced_dims[i].size)
x = mtf.rename_dimension(x, original_name, tmp_name)
output_shape = mtf.Shape([d for d in x.shape.dims if d not in reduced_dims] +
new_dims)
if not kernel_weights:
kernel_weights = get_dense_kernel_weights(x, new_dims, reduced_dims,
expert_dims, kernel_initializer,
name, variable_dtype,
master_dtype, slice_dtype)
with tf.variable_scope(name, default_name="dense"):
y = us_einsum([x, kernel_weights], output_shape)
if use_bias:
b = mtf.get_variable(
x.mesh,
"bias",
mtf.Shape(expert_dims + new_dims),
initializer=tf.zeros_initializer(),
dtype=variable_dtype)
y += b
if activation is not None:
y = activation(y)
return y
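# Usage sketch (illustrative; `x` is assumed to be an activation Tensor of
# shape [batch, length, d_model], and the dimension names are made up):
def _example_dense_usage(x):
  """Minimal sketch of calling dense() with explicit reduced_dims."""
  d_model = x.shape.dims[-1]
  d_ff = mtf.Dimension("d_ff", 4 * d_model.size)
  # Project d_model -> d_ff with a ReLU, naming the reduced dimension
  # explicitly rather than relying on dimension order.
  return dense(x, new_dims=[d_ff], reduced_dims=[d_model],
               activation=mtf.relu, name="example_dense")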
def get_dense_kernel_weights(x,
new_dims,
reduced_dims,
expert_dims,
kernel_initializer,
name=None,
variable_dtype=None,
master_dtype=tf.float32,
slice_dtype=tf.float32):
"""Create w matrix variable.
Args:
x: a mtf.Tensor.
new_dims: a list of mtf.Dimension.
reduced_dims: a list of mtf.Dimensions of x to be reduced.
expert_dims: an optional list of mtf.Dimension which represent different
experts. Different experts get different weights.
kernel_initializer: an initializer for kernel variable.
name: a string used for tf.variable_scope.
variable_dtype: a mtf.VariableDType
master_dtype: a tf.dtype (deprecated - use variable_dtype)
slice_dtype: a tf.dtype (deprecated - use variable_dtype)
Returns:
a mtf.Tensor.
"""
if variable_dtype is None:
variable_dtype = mtf.VariableDType(master_dtype, slice_dtype, x.dtype)
w_shape = mtf.Shape(expert_dims + reduced_dims + new_dims)
with tf.variable_scope(name, default_name="dense"):
if kernel_initializer is None:
kernel_initializer = VarianceScalingInitializer()
if isinstance(kernel_initializer, DenseInitializer):
kernel_initializer = kernel_initializer(reduced_dims, new_dims)
w = mtf.get_variable(
x.mesh,
"kernel",
w_shape,
initializer=kernel_initializer,
dtype=variable_dtype)
w = mtf.cast(w, x.dtype)
return w
def dense_product(x,
reduced_dims,
new_dims,
activation_functions=None,
name="dense_product",
**kwargs):
"""Component-wise product of multiple dense layers.
e.g. if activation_functions=["linear", "sigmoid"], then this implements
Gated Linear Units https://arxiv.org/pdf/1612.08083.pdf
Args:
x: a Tensor
reduced_dims: a list of Dimensions.
new_dims: a list of Dimensions.
activation_functions: a list of activation functions (or a singleton)
      Each can be either:
- a callable function from Tensor to Tensor
        - a string function name from namespace mtf
- None or "linear", meaning no activation function
name: an optional string
**kwargs: additional kwargs for mtf.layers.dense()
"""
if not isinstance(activation_functions, list):
activation_functions = [activation_functions]
num_factors = len(activation_functions)
factors = []
for i, activation in enumerate(activation_functions):
if activation == "linear":
activation = None
elif isinstance(activation, str):
activation = getattr(mtf, activation)
factors.append(
dense(x,
reduced_dims=reduced_dims,
new_dims=new_dims,
activation=activation,
name="%s_%d" % (name, i) if num_factors > 1 else name,
**kwargs))
return functools.reduce(mtf.multiply, factors)
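# Sketch of the Gated Linear Unit mentioned in the docstring above
# (illustrative; `x` is assumed to have a trailing d_model dimension and
# `d_ff` is an assumed output mtf.Dimension):
def _example_gated_linear_unit(x, d_ff):
  """GLU: elementwise product of a linear branch and a sigmoid branch."""
  return dense_product(x,
                       reduced_dims=x.shape.dims[-1:],
                       new_dims=[d_ff],
                       activation_functions=["linear", "sigmoid"],
                       name="example_glu")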
class DenseInitializer(object):
"""Initializer that can be passed to dense().
The __call__ function takes reduced_dims and new_dims and returns a
tf initializer class.
"""
def __call__(self, reduced_dims, new_dims):
raise NotImplementedError("not implemented")
@gin.configurable
class VarianceScalingInitializer(DenseInitializer):
"""Initializer capable of adapting its scale to the shape of weights.
With `distribution="normal"`, samples are drawn from a truncated normal
distribution centered on zero, with `stddev = sqrt(scale / n)` where n is:
1.0 if unit_scaling_convention() is turned on
otherwise:
number of input units in the weight tensor, if mode = "fan_in"
number of output units, if mode = "fan_out"
average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`,
samples are drawn from a uniform distribution
within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
# Arguments
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "normal", "uniform".
seed: A Python integer. Used to seed the random generator.
"""
def __init__(self, scale=1.0,
mode="fan_in",
distribution="normal"):
self.scale = scale
self.mode = mode.lower()
self.distribution = distribution.lower()
def __call__(self, reduced_dims, new_dims):
fan_in = mtf.list_product(d.size for d in reduced_dims)
fan_out = mtf.list_product(d.size for d in new_dims)
scale = self.scale
if self.mode == "fan_in":
if not unit_scaling_convention():
scale /= max(1., fan_in)
elif self.mode == "fan_out":
if unit_scaling_convention():
raise ValueError("Unit scaling convention only works with \"fan_in\"")
scale /= max(1., fan_out)
elif self.mode == "fan_avg":
if unit_scaling_convention():
raise ValueError("Unit scaling convention only works with \"fan_in\"")
scale /= max(1., float(fan_in + fan_out) / 2)
else:
raise ValueError(
"Invalid `mode` argument: "
"expected on of {\"fan_in\", \"fan_out\", \"fan_avg\"} "
"but got %s" % (self.mode,))
stddev = scale ** 0.5
if self.distribution == "normal":
return tf.truncated_normal_initializer(stddev=stddev)
elif self.distribution == "uniform":
limit = stddev * 3. ** 0.5
return tf.random_uniform_initializer(minval=-limit, maxval=limit)
else:
raise ValueError("Invalid `distribution` argument: "
"expected one of {\"normal\", \"uniform\"} "
"but got %s" % (self.distribution,))
def conv1d(x, output_dim, filter_size=3, stride=1, **kw_args):
"""1D Convolution.
x can have multiple batch dims. The last dimension is considered the channel
dimension and the second-last dimension is the width dimension.
This function supports either "SAME" padding or "VALID" padding. The padding
type is specified by kwarg `padding` to conv2d, which transform the input
tensor x as follows:
padding="SAME"
[batch, fake_height, length, d_model]
-> [batch, fake_height, length, output_dim]
padding="VALID"
[batch, fake_height, length, d_model]
-> [batch, fake_height, output_length, output_dim]
Args:
x: a mtf.Tensor of format NWC where N can be multiple batch dimensions.
output_dim: a mtf.Dimension, indicating the output channel dimension.
filter_size: a positive integer, the filter width.
stride: a positive integer, the stride.
**kw_args: optional keyword arguments to mtf.layers.conv2d.
Returns:
a mtf.Tensor of format NWO, where O is the output dimension.
"""
fake_height_dim = mtf.Dimension("fake_height", 1)
x = mtf.reshape(
x, mtf.Shape(x.shape.dims[:-2] + [fake_height_dim] + x.shape.dims[-2:]))
output = conv2d(
x,
output_dim,
filter_size=(1, filter_size),
strides=(1, stride),
**kw_args)
output_length_dim = output.shape.dims[-2]
output_shape = output.shape.dims[:-3] + [output_length_dim] + [output_dim]
output_shape = mtf.Shape(output_shape)
return mtf.reshape(output, output_shape)
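# Usage sketch (illustrative): a length-preserving 1-D convolution over an
# activation Tensor `x` of shape [batch, length, d_model]; `output_dim` is an
# assumed mtf.Dimension for the output channels.
def _example_conv1d_usage(x, output_dim):
  """Minimal sketch of a SAME-padded conv1d with filter width 3."""
  return conv1d(x, output_dim, filter_size=3, stride=1, padding="SAME")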
def _depthwise_conv1d_hack(x,
depth_dim,
length_dim,
min_relative_pos=-1,
max_relative_pos=1,
name=None,
use_bias=True,
initializer_scale=1.0,
kernel_depth_weights=None):
"""Hacky version of a 1d depthwise convolution.
Args:
x: a mtf.Tensor
depth_dim: mtf.Dimension,
length_dim: mtf.Dimension,
min_relative_pos: int, min relative position,
max_relative_pos: int, max relative position,
name: str, variable_scope name,
use_bias: Bool, whether to use bias,
    initializer_scale: a positive float, initializer scale,
kernel_depth_weights: an optional list of kernel weight tensors. The list
contains one element for each relative position in the kernel. Each element
has a width equal to the depth over which the separable conv operation is
being "separated"
Returns:
an mtf.Tensor
"""
ret = 0
kernel_size = max_relative_pos - min_relative_pos + 1
with tf.variable_scope(name, default_name="depthwise_conv_hack"):
for i in range(kernel_size):
relative_pos = min_relative_pos + i
shifted_input = mtf.shift(x, -relative_pos, length_dim, wrap=False)
ret += dense(
shifted_input,
new_dims=[],
reduced_dims=[],
expert_dims=[depth_dim],
kernel_weights=kernel_depth_weights[i]
if kernel_depth_weights else None,
name="depthwise_dense_%d" % i,
use_bias=use_bias and (i == 0),
kernel_initializer=VarianceScalingInitializer(
scale=initializer_scale / kernel_size))
return ret
def separable_conv1d(x,
output_dim,
min_relative_pos=-1,
max_relative_pos=1,
depthwise_filter_initializer_scale=1.0,
pointwise_filter_initializer_scale=1.0,
name=None,
use_bias=True,
kernel_depth_weights=None):
"""1-D convolution with separable filters.
The filter size will be `max_relative_pos - min_relative_pos + 1`.
Args:
x: a mtf.Tensor of format NWC.
output_dim: a mtf.Dimension, indicating the output channel dimension.
    min_relative_pos: an integer, the inclusive minimum relative position of the
depthwise filter, where a relative position of zero means the left end of
the filter aligns with the left end of the input.
max_relative_pos: an integer, the inclusive maximum relative position of the
depthwise filter, where a relative position of zero means the right end of
the filter aligns with the right end of the input.
depthwise_filter_initializer_scale: a positive float, the scale of the
initializer for the depthwise filter.
pointwise_filter_initializer_scale: a positive float, the scale of the
initializer for the pointwise filter.
name: a string used for tf.variable_scope.
use_bias: a bool, whether to use bias in the convolutions.
kernel_depth_weights: an optional list of kernel weight tensors. The list
contains one element for each relative position in the kernel. Each element
has a width equal to the dimension over which the separable conv operation
is being "separated"
Returns:
a mtf.Tensor of format NWO, where O is the output dimension.
"""
depth_dim = x.shape.dims[-1]
length_dim = x.shape.dims[-2]
with tf.variable_scope(name, default_name="separable_conv1d"):
depthwise = _depthwise_conv1d_hack(
x,
depth_dim=depth_dim,
length_dim=length_dim,
min_relative_pos=min_relative_pos,
max_relative_pos=max_relative_pos,
use_bias=use_bias,
initializer_scale=depthwise_filter_initializer_scale,
kernel_depth_weights=kernel_depth_weights)
return dense(
depthwise,
new_dims=[output_dim],
reduced_dims=[depth_dim],
name="pointwise_dense",
use_bias=use_bias,
kernel_initializer=VarianceScalingInitializer(
scale=pointwise_filter_initializer_scale))
def conv2d(x, output_dim, filter_size=(3, 3),
strides=(1, 1), padding="SAME", filter_initializer=None,
variable_dtype=None, name=None):
"""2D Convolution.
Args:
x: a mtf.Tensor of format NHWC.
output_dim: a mtf.Dimension, indicating the output channel dimension.
filter_size: a list or tuple in format [filter_height, filter_width].
strides: a list or tuple in format [stride_height, stride_width].
padding: either "SAME" or "VALID".
filter_initializer: the initializer for tf.get_variable.
variable_dtype: a mtf.VariableDType
name: a string used for tf.variable_scope.
Returns:
a mtf.Tensor.
"""
fh_dim = mtf.Dimension("fh", filter_size[0])
fw_dim = mtf.Dimension("fw", filter_size[1])
input_dim = x.shape[-1]
with tf.variable_scope(name, default_name="conv2d"):
if variable_dtype is None:
variable_dtype = mtf.VariableDType(activation_dtype=x.dtype)
conv_filter = mtf.get_variable(
x.mesh, "kernel", [fh_dim, fw_dim, input_dim, output_dim],
initializer=filter_initializer, dtype=variable_dtype)
# Pad stride in batch and channel dimensions.
strides = [1] + list(strides) + [1]
return mtf.Conv2dOperation(x, conv_filter, strides, padding).outputs[0]
def conv2d_with_blocks(
x, output_dim, filter_size=(3, 3),
strides=(1, 1), padding="SAME",
h_blocks_dim=None, w_blocks_dim=None, filter_initializer=None,
variable_dtype=None, name=None):
"""2D Convolution with spatial partitioning.
Spatial partitioning is implemented by decomposing the image into blocks.
Block dimensions represented as h_blocks_dim and w_blocks_dim can be split
along the mesh axis. If split, then we do a halo exchange where each block
receives the part of the image from its left and right neighbors necessary to
do the convolution. Exchange can involve complete or partial blocks depending
on the filter height and width.
Currently, only "SAME" padding with dilation rate of 1 is supported.
Args:
x: a Tensor of shape
[batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, in_channels_dim]
output_dim: a mtf.Dimension, indicating the output channel dimension.
filter_size: a list or tuple in format [filter_height, filter_width].
strides: a list or tuple in format [stride_height, stride_width].
padding: string, "SAME". The type of padding algorithm to use.
"Valid" is not currently supported.
h_blocks_dim: Dimension representing number of height blocks.
    w_blocks_dim: Dimension representing number of width blocks.
filter_initializer: the initializer for tf.get_variable.
variable_dtype: a mtf.VariableDType
name: a name for the operation (optional).
Returns:
A Tensor of shape
[batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, out_channels_dim]
"""
# If h_blocks_dim and w_blocks_dim are not split, directly call conv2d.
if h_blocks_dim is None and w_blocks_dim is None:
return conv2d(x, output_dim,
filter_size, strides, padding, filter_initializer,
variable_dtype, name)
assert filter_size[0] % 2 == 1
assert filter_size[1] % 2 == 1
# Padding 'VALID' is not supported yet.
if padding != "SAME":
raise NotImplementedError("conv2d_with_blocks requires padding=SAME")
# Halo exchange for h_blocks and w_blocks.
h_dim, w_dim = x.shape.dims[-3:-1]
for blocks_dim, block_size_dim, halo_size in [
(h_blocks_dim, h_dim, filter_size[0] // 2),
(w_blocks_dim, w_dim, filter_size[1] // 2)]:
if halo_size > 0:
if blocks_dim is not None:
x = mtf.halo_exchange(x, blocks_dim, block_size_dim, halo_size)
else:
x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
return conv2d(x, output_dim,
filter_size, strides, "VALID", filter_initializer,
variable_dtype, name)
def conv2d_transpose(x, output_dim,
filter_size=(2, 2), strides=(2, 2),
padding="SAME", filter_initializer=None,
variable_dtype=None, name=None):
"""2D Transposed Convolution.
Args:
x: a mtf.Tensor of format NHWC.
output_dim: a mtf.Dimension, indicating the output channel dimension.
filter_size: a list or tuple in format
[filter_height, filter_width]. Only filter_size of (2, 2) is tested.
strides: a list or tuple in format
[stride_height, stride_width]. Only strides of (2, 2) is tested.
padding: either "SAME" or "VALID".
filter_initializer: the initializer for tf.get_variable.
variable_dtype: a mtf.VariableDType
name: a string used for tf.variable_scope.
Returns:
a mtf.Tensor.
"""
fh_dim = mtf.Dimension("fh", filter_size[0])
fw_dim = mtf.Dimension("fw", filter_size[1])
input_dim = x.shape[-1]
with tf.variable_scope(name, default_name="conv2d_transpose"):
if variable_dtype is None:
variable_dtype = mtf.VariableDType(activation_dtype=x.dtype)
conv_filter = mtf.get_variable(
x.mesh, "kernel", [fh_dim, fw_dim, output_dim, input_dim],
initializer=filter_initializer, dtype=variable_dtype)
# Pad stride in batch and channel dimensions.
strides = [1] + list(strides) + [1]
return mtf.Conv2dTransposeOperation(
x, conv_filter, strides, padding).outputs[0]
def conv2d_transpose_with_blocks(
x, output_dim, filter_size=(2, 2),
strides=(2, 2), padding="SAME",
h_blocks_dim=None, w_blocks_dim=None, filter_initializer=None,
variable_dtype=None, name=None):
"""2D Transposed Convolution with spatial partitioning.
Spatial partitioning is implemented by decomposing the image into blocks.
Block dimensions represented as h_blocks_dim and w_blocks_dim can be split
along the mesh axis. If split, then we do a halo exchange where each block
receives the part of the image from its left and right neighbors necessary to
do the convolution. Exchange can involve complete or partial blocks depending
on the filter depth and height.
Currently, only "SAME" padding with dilation rate of 1 is supported. Only
splitting along the depth and height dimensions are supported.
Args:
x: a Tensor of shape
[batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, in_channel_dim]
output_dim: a mtf.Dimension, indicating the output channel dimension.
filter_size: a list or tuple in format
[filter_height, filter_width]. Only filter_size of (2, 2) is tested.
strides: a list or tuple in format
[stride_height, stride_width]. Only strides of (2, 2) is tested.
padding: string, "SAME". The type of padding algorithm to use.
"Valid" is not currently supported.
h_blocks_dim: Dimension representing number of height blocks.
w_blocks_dim: Dimension representing number of width blocks.
filter_initializer: the initializer for tf.get_variable.
variable_dtype: a mtf.VariableDType
name: a name for the operation (optional).
Returns:
A Tensor of shape
[batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, out_channels_dim]
"""
# If h_blocks_dim and w_blocks_dim are not split, directly call conv2d_trans.
if h_blocks_dim is None and w_blocks_dim is None:
return conv2d_transpose(
x, output_dim, filter_size, strides, padding, filter_initializer,
variable_dtype, name)
# Now only supports even-sized filters.
assert filter_size[0] % 2 == 0
assert filter_size[1] % 2 == 0
# Padding 'VALID' is not supported yet.
if padding != "SAME":
raise NotImplementedError(
"conv2d_transpose_with_blocks requires padding=SAME")
# Halo exchange for h_blocks and w_blocks.
# TODO(lehou): figure out the halo_size in general cases.
h_dim, w_dim = x.shape.dims[-3:-1]
for blocks_dim, block_size_dim, halo_size in [
(h_blocks_dim, h_dim, filter_size[0] // 2 - 1),
(w_blocks_dim, w_dim, filter_size[1] // 2 - 1)]:
if halo_size > 0:
if blocks_dim is not None:
x = mtf.halo_exchange(x, blocks_dim, block_size_dim, halo_size)
else:
x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
return conv2d_transpose(
x, output_dim, filter_size, strides, "VALID", filter_initializer,
variable_dtype, name)
def conv3d(x, output_dim, filter_size=(3, 3, 3),
strides=(1, 1, 1), padding="SAME",
filter_initializer=None,
variable_dtype=None, name=None):
"""3D Convolution.
Args:
x: a mtf.Tensor of format NDHWC.
output_dim: a mtf.Dimension, indicating the output channel dimension.
filter_size: a list or tuple in format
[filter_depth, filter_height, filter_width].
strides: a list or tuple in format
[stride_depth, stride_height, stride_width].
padding: either "SAME" or "VALID".
filter_initializer: the initializer for tf.get_variable.
variable_dtype: a mtf.VariableDType
name: a string used for tf.variable_scope.
Returns:
a mtf.Tensor.
"""
fd_dim = mtf.Dimension("fd", filter_size[0])
fh_dim = mtf.Dimension("fh", filter_size[1])
fw_dim = mtf.Dimension("fw", filter_size[2])
input_dim = x.shape[-1]
with tf.variable_scope(name, default_name="conv3d"):
if variable_dtype is None:
variable_dtype = mtf.VariableDType(activation_dtype=x.dtype)
conv_filter = mtf.get_variable(
x.mesh, "kernel", [fd_dim, fh_dim, fw_dim, input_dim, output_dim],
initializer=filter_initializer, dtype=variable_dtype)
# Pad stride in batch and channel dimensions.
strides = [1] + list(strides) + [1]
return mtf.Conv3dOperation(x, conv_filter, strides, padding).outputs[0]
def conv3d_with_blocks(
x, output_dim, filter_size=(3, 3, 3),
strides=(1, 1, 1), padding="SAME",
d_blocks_dim=None, h_blocks_dim=None, filter_initializer=None,
variable_dtype=None, name=None):
"""3D Convolution with spatial partitioning.
Spatial partitioning is implemented by decomposing the image into blocks.
Block dimensions represented as d_blocks_dim and h_blocks_dim can be split
along the mesh axis. If split, then we do a halo exchange where each block
receives the part of the image from its left and right neighbors necessary to
do the convolution. Exchange can involve complete or partial blocks depending
on the filter depth and height.
Currently, only "SAME" padding with dilation rate of 1 is supported. Only
splitting along the depth and height dimensions are supported.
Args:
x: a Tensor of shape
[batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim]
output_dim: a mtf.Dimension, indicating the output channel dimension.
filter_size: a list or tuple in format
[filter_depth, filter_height, filter_width].
strides: a list or tuple in format
[stride_depth, stride_height, stride_width].
padding: string, "SAME". The type of padding algorithm to use.
"Valid" is not currently supported.
d_blocks_dim: Dimension representing number of depth blocks.
h_blocks_dim: Dimension representing number of height blocks.
filter_initializer: the initializer for tf.get_variable.
variable_dtype: a mtf.VariableDType
name: a name for the operation (optional).
Returns:
A Tensor of shape
[batch, d_blocks_dim, h_blocks_dim, w_blocks_dim,
d_dim, h_dim, w_dim, out_channels_dim]
"""
# If d_blocks_dim and h_blocks_dim are not split, directly call conv3d.
if d_blocks_dim is None and h_blocks_dim is None:
return conv3d(x, output_dim,
filter_size, strides, padding, filter_initializer,
variable_dtype, name)
assert filter_size[0] % 2 == 1
assert filter_size[1] % 2 == 1
assert filter_size[2] % 2 == 1
# Padding 'VALID' is not supported yet.
if padding != "SAME":
raise NotImplementedError("conv3d_with_blocks requires padding=SAME")
# Halo exchange for d_blocks and h_blocks.
d_dim, h_dim, w_dim = x.shape.dims[-4:-1]
for blocks_dim, block_size_dim, halo_size in [
(d_blocks_dim, d_dim, filter_size[0] // 2),
(h_blocks_dim, h_dim, filter_size[1] // 2)]:
if halo_size > 0:
if blocks_dim is not None:
x = mtf.halo_exchange(x, blocks_dim, block_size_dim, halo_size)
else:
x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
# Pad w dimension with zeros.
x = mtf.pad(x, [filter_size[2] // 2, filter_size[2] // 2],
dim_name=w_dim.name, name="conv3d_pad_w_dim")
return conv3d(x, output_dim,
filter_size, strides, "VALID", filter_initializer,
variable_dtype, name)
def conv3d_transpose(x, output_dim,
filter_size=(2, 2, 2), strides=(2, 2, 2),
padding="SAME", filter_initializer=None,
variable_dtype=None, name=None):
"""3D Transposed Convolution.
Args:
x: a mtf.Tensor of format NDHWC.
output_dim: a mtf.Dimension, indicating the output channel dimension.
filter_size: a list or tuple in format
[filter_depth, filter_height, filter_width].
Only filter_size of (2, 2, 2) is tested.
strides: a list or tuple in format
[stride_depth, stride_height, stride_width].
Only strides of (2, 2, 2) is tested.
padding: either "SAME" or "VALID".
filter_initializer: the initializer for tf.get_variable.
variable_dtype: a mtf.VariableDType
name: a string used for tf.variable_scope.
Returns:
a mtf.Tensor.
"""
fd_dim = mtf.Dimension("fd", filter_size[0])
fh_dim = mtf.Dimension("fh", filter_size[1])
fw_dim = mtf.Dimension("fw", filter_size[2])
input_dim = x.shape[-1]
with tf.variable_scope(name, default_name="conv3d_transpose"):
if variable_dtype is None:
variable_dtype = mtf.VariableDType(activation_dtype=x.dtype)
conv_filter = mtf.get_variable(
x.mesh, "kernel", [fd_dim, fh_dim, fw_dim, output_dim, input_dim],
initializer=filter_initializer, dtype=variable_dtype)
# Pad stride in batch and channel dimensions.
strides = [1] + list(strides) + [1]
return mtf.Conv3dTransposeOperation(
x, conv_filter, strides, padding).outputs[0]
def conv3d_transpose_with_blocks(
x, output_dim, filter_size=(2, 2, 2),
strides=(2, 2, 2), padding="SAME",
d_blocks_dim=None, h_blocks_dim=None, filter_initializer=None,
variable_dtype=None, name=None):
"""3D Transposed Convolution with spatial partitioning.
Spatial partitioning is implemented by decomposing the image into blocks.
Block dimensions represented as d_blocks_dim and h_blocks_dim can be split
along the mesh axis. If split, then we do a halo exchange where each block
receives the part of the image from its left and right neighbors necessary to
do the convolution. Exchange can involve complete or partial blocks depending
on the filter depth and height.
Currently, only "SAME" padding with dilation rate of 1 is supported. Only
splitting along the depth and height dimensions are supported.
Args:
x: a Tensor of shape
[batch, d_blocks_dim, h_blocks_dim, d_dim, h_dim, w_dim, in_channel_dim]
output_dim: a mtf.Dimension, indicating the output channel dimension.
filter_size: a list or tuple in format
[filter_depth, filter_height, filter_width].
Only filter_size of (2, 2, 2) is tested.
strides: a list or tuple in format
[stride_depth, stride_height, stride_width].
Only strides of (2, 2, 2) is tested.
padding: string, "SAME". The type of padding algorithm to use.
"Valid" is not currently supported.
d_blocks_dim: Dimension representing number of depth blocks.
h_blocks_dim: Dimension representing number of height blocks.
filter_initializer: the initializer for tf.get_variable.
variable_dtype: a mtf.VariableDType
name: a name for the operation (optional).
Returns:
A Tensor of shape
[batch, d_blocks_dim, h_blocks_dim, w_blocks_dim,
d_dim, h_dim, w_dim, out_channels_dim]
"""
# If d_blocks_dim and h_blocks_dim are not split, directly call conv3d_trans.
if d_blocks_dim is None and h_blocks_dim is None:
return conv3d_transpose(
x, output_dim, filter_size, strides, padding, filter_initializer,
variable_dtype, name)
# Now only supports even-sized filters.
assert filter_size[0] % 2 == 0
assert filter_size[1] % 2 == 0
assert filter_size[2] % 2 == 0
# Padding 'VALID' is not supported yet.
if padding != "SAME":
raise NotImplementedError(
"conv3d_transpose_with_blocks requires padding=SAME")
# Halo exchange for d_blocks and h_blocks.
# TODO(lehou): figure out the halo_size in general cases.
d_dim, h_dim, w_dim = x.shape.dims[-4:-1]
for blocks_dim, block_size_dim, halo_size in [
(d_blocks_dim, d_dim, filter_size[0] // 2 - 1),
(h_blocks_dim, h_dim, filter_size[1] // 2 - 1)]:
if halo_size > 0:
if blocks_dim is not None:
x = mtf.halo_exchange(x, blocks_dim, block_size_dim, halo_size)
else:
x = mtf.pad(x, [halo_size, halo_size], block_size_dim.name)
# Pad w dimension with zeros.
x = mtf.pad(x, [filter_size[2] // 2 - 1, filter_size[2] // 2 - 1],
dim_name=w_dim.name, name="conv3d_trans_pad_w_dim")
return conv3d_transpose(
x, output_dim, filter_size, strides, "VALID", filter_initializer,
variable_dtype, name)
def layer_norm(x, dim, epsilon=1e-6, name="layer_prepostprocess"):
"""Layer normalization over dimension dim.
Args:
x: a mtf.Tensor whose shape contains dim.
dim: a mtf.Dimension
epsilon: a floating point number
name: a string used for tf.variable_scope.
Returns:
a mtf.Tensor with same shape as x.
"""
with tf.variable_scope(name + "/layer_norm"):
scale = mtf.get_variable(
x.mesh,
"layer_norm_scale",
mtf.Shape([dim]),
initializer=tf.ones_initializer(),
activation_dtype=x.dtype)
bias = mtf.get_variable(
x.mesh,
"layer_norm_bias",
mtf.Shape([dim]),
initializer=tf.zeros_initializer(),
activation_dtype=x.dtype)
reduced_shape = x.shape - dim
mean = mtf.reduce_mean(x, output_shape=reduced_shape)
variance = mtf.reduce_mean(mtf.square(x - mean), output_shape=reduced_shape)
norm_x = (x - mean) * mtf.rsqrt(variance + epsilon)
return norm_x * scale + bias
def batch_norm(x, is_training, momentum, epsilon=1e-9,
dims_idx_start=0, dims_idx_end=-1,
init_zero=False, name=None):
"""Batch normalization.
Args:
x: a mtf.Tensor whose shape contains [batch_dim, ..., dim]
is_training: a boolean, whether mode is training.
momentum: a floating point number, specifying batch norm decay value.
epsilon: a floating point number.
dims_idx_start: an integer. Dimension with indices in
[dims_idx_start, dims_idx_end - 1] will be normalized.
dims_idx_end: an integer. Dimension with indices in
[dims_idx_start, dims_idx_end - 1] will be normalized.
init_zero: a boolean, whether to initialize scale with 0's or 1's.
name: a string used for tf.variable_scope.
Returns:
a mtf.Tensor with same shape as x.
"""
with tf.variable_scope(name, default_name="batch_norm", values=[x]):
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
norm_dim = x.shape.dims[dims_idx_start:dims_idx_end]
reduced_shape = x.shape - norm_dim
scale = mtf.get_variable(
x.mesh,
"batch_norm_scale",
reduced_shape,
initializer=gamma_initializer,
activation_dtype=x.dtype)
bias = mtf.get_variable(
x.mesh,
"batch_norm_bias",
reduced_shape,
initializer=tf.zeros_initializer(),
activation_dtype=x.dtype)
moving_mean = mtf.get_variable(
x.mesh, "bn_moving_mean", reduced_shape,
initializer=tf.random_normal_initializer(stddev=1.0),
activation_dtype=x.dtype,
trainable=False)
moving_variance = mtf.get_variable(
x.mesh, "bn_moving_variance",
reduced_shape, initializer=tf.ones_initializer(),
activation_dtype=x.dtype,
trainable=False)
# At training time, calculate mean and variance and normalize across batch
# dim.
if is_training:
mean = mtf.reduce_mean(x, output_shape=reduced_shape)
variance = mtf.reduce_mean(
mtf.square(x - mean), output_shape=reduced_shape)
norm_x = (x - mean) * mtf.rsqrt(variance + epsilon)
# Update running mean and running variance.
# TODO(lehou): do not return update_ops; handle them inside MTF.
bn_stats_update_ops = []
bn_stats_update_ops.append(mtf.assign(
moving_mean, momentum * moving_mean + (1 - momentum) * mean,
name="{}/bn_mean_update".format(name)))
bn_stats_update_ops.append(mtf.assign(
moving_variance,
momentum * moving_variance + (1 - momentum) * variance,
name="{}/bn_var_update".format(name)))
else:
# At eval and test time, use the running mean and variance.
norm_x = (x - moving_mean) * mtf.rsqrt(moving_variance + epsilon)
bn_stats_update_ops = []
return (norm_x * scale) + bias, bn_stats_update_ops
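# Usage sketch (illustrative): batch_norm returns both the normalized tensor
# and a list of mtf.assign ops that update the moving statistics; the caller
# is responsible for running those ops (see the TODO above).
def _example_batch_norm_usage(x, is_training):
  """Minimal sketch of consuming batch_norm's (output, update_ops) pair."""
  y, bn_update_ops = batch_norm(
      x, is_training, momentum=0.99, name="example_bn")
  # After lowering, the update ops would typically be lowered as well and
  # grouped with the train op.
  return y, bn_update_ops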
def softmax_cross_entropy_with_logits(logits, targets, vocab_dim, z_loss=0.0):
"""Per-example softmax loss.
`logits` is a Tensor with floating-point dtype, containing the predicted
relative log probabilities of the classes.
Either hard targets or soft targets are supported.
In the case of hard targets, `targets` is a Tensor with integer dtype whose
values are in the range [0, vocab_dim.size). `targets` should have the same
set of dimensions as `logits`, but without `vocab_dim`.
In the case of soft targets, `targets` is a Tensor with floating point dtype
  and the same dimensions as `logits`. Reducing `targets` along `vocab_dim`
should result in all ones.
if z_loss is nonzero, we add a loss equal to z_loss*log(z)^2, where z is the
partition function. Example value: z_loss=1e-4. Two uses of z_loss are:
- To keep the logits from drifting too far from zero, which can cause
unacceptable roundoff errors in bfloat16.
- To encourage the logits to be normalized log-probabilities.
Args:
logits: a mtf.Tensor whose shape contains vocab_dim
targets: a mtf.Tensor representing hard or soft targets (see comments)
vocab_dim: a mtf.Dimension
z_loss: a float
Returns:
a mtf.Tensor whose shape is equal to logits.shape - vocab_dim
Raises:
ValueError: if the shapes do not match.
"""
if targets.dtype.is_integer:
# hard targets
if (set(targets.shape.dims)
!= set(logits.shape.dims).difference([vocab_dim])):
raise ValueError(
"softmax_cross_entropy_with_logits with hard targets "
"dims in targets=%s should be dims in logits=%s other than "
"vocab_dim=%s" % (targets, logits, vocab_dim))
targets = mtf.one_hot(targets, vocab_dim, dtype=logits.dtype)
elif set(targets.shape.dims) != set(logits.shape.dims):
raise ValueError(
"softmax_cross_entropy_with_logits with soft targets "
"dims in targets=%s should be dims in logits=%s"% (targets, logits))
if vocab_dim not in logits.shape.dims:
raise ValueError("vocab_dim must be in logits.shape.dims")
log_z = mtf.reduce_logsumexp(logits, vocab_dim)
log_softmax = logits - log_z
loss = mtf.negative(
mtf.reduce_sum(log_softmax * targets, reduced_dim=vocab_dim))
if z_loss != 0:
loss += z_loss * mtf.square(log_z)
return loss
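# Usage sketch (illustrative): per-example loss from logits of shape
# [batch, length, vocab] and integer (hard) targets of shape [batch, length],
# with a small z_loss to keep the softmax normalizer close to 1.
def _example_softmax_loss(logits, targets, vocab_dim):
  """Minimal sketch of softmax_cross_entropy_with_logits with hard targets."""
  loss = softmax_cross_entropy_with_logits(
      logits, targets, vocab_dim, z_loss=1e-4)
  # Reduce to a scalar for training, e.g. an unweighted mean over positions.
  return mtf.reduce_mean(loss)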
def sigmoid_cross_entropy_with_logits(logits, targets):
"""Sigmoid cross-entropy loss.
Args:
logits: a mtf.Tensor
targets: a mtf.Tensor with the same shape as logits
Returns:
a mtf.Tensor whose shape is equal to logits.shape
Raises:
ValueError: if the shapes do not match.
"""
if logits.shape != targets.shape:
raise ValueError(
"logits shape must equal targets shape"
"logits=%s targets=%s" % (logits.to_string, targets.to_string))
x = logits
z = targets
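  # This is the numerically stable form of
  #   -z * log(sigmoid(x)) - (1 - z) * log(1 - sigmoid(x)),
  # rewritten as max(x, 0) - x * z + log(1 + exp(-|x|)) so that exp() never
  # overflows for large |x|.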
return mtf.relu(x) - x * z + mtf.log(1 + mtf.exp(-mtf.abs(x)))
def weights_nonzero(targets, dtype=tf.float32):
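  """Returns a Tensor of the given dtype: 1.0 where targets != 0, else 0.0."""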
def my_fn(x):
return tf.cast(tf.not_equal(x, 0), dtype)
return mtf.cwise(my_fn, [targets], output_dtype=dtype, name="weights_nonzero")
def dense_relu_dense(x,
hidden_channels,
dropout=0.0,
dropout_broadcast_dims=None,
master_dtype=tf.float32,
slice_dtype=tf.float32, name=None):
"""Hidden layer with ReLU activation followed by linear projection.
The output has the same number of channels as the input.
Args:
x: a mtf.Tensor
hidden_channels: a mtf.Dimension - channels in the hidden layer
dropout: an optional float
dropout_broadcast_dims: an optional list of mtf.Dimension
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string
Returns:
a mtf.Tensor with the same shape as x.
"""
with tf.variable_scope(name, default_name="dense_relu_dense"):
io_channels = x.shape.dims[-1]
h = dense(x, hidden_channels,
use_bias=False, activation=mtf.relu,
master_dtype=master_dtype, slice_dtype=slice_dtype, name="wi")
if dropout != 0.0:
h = mtf.dropout(h, 1.0 - dropout,
noise_shape=h.shape - dropout_broadcast_dims)
return dense(h, io_channels, use_bias=False, activation=None,
master_dtype=master_dtype, slice_dtype=slice_dtype,
name="wo")
def local_1d_halo_exchange(k, v, num_w_blocks, w_dim, mask_right):
"""Halo exchange for keys and values for Local 1D attention."""
if num_w_blocks is not None:
if mask_right:
k = mtf.left_halo_exchange(k, num_w_blocks, w_dim, w_dim.size)
v = mtf.left_halo_exchange(v, num_w_blocks, w_dim, w_dim.size)
else:
k = mtf.halo_exchange(k, num_w_blocks, w_dim, w_dim.size)
v = mtf.halo_exchange(v, num_w_blocks, w_dim, w_dim.size)
else:
if mask_right:
k = mtf.pad(k, [w_dim, None], w_dim.name)
v = mtf.pad(v, [w_dim, None], w_dim.name)
else:
k = mtf.pad(k, [w_dim, w_dim], w_dim.name)
v = mtf.pad(v, [w_dim, w_dim], w_dim.name)
return k, v
def local_self_attention_spatial_blocks(
query_antecedent,
kv_channels,
heads,
memory_w_dim=None,
mask_right=False,
master_dtype=tf.float32,
slice_dtype=tf.float32,
name=None):
"""Attention to the source position and a neighborhood to the left or right.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
Args:
query_antecedent: a mtf.Tensor with shape
      [batch, num_w_blocks, w_dim, io_channels]
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
memory_w_dim: mtf Dimension, for the memory width block.
mask_right: bool, flag specifying whether we mask out attention to the right
for the decoder.
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
a Tensor of shape
      [batch, num_w_blocks, w_dim, io_channels]
Raises:
ValueError: if channels or depth don't match.
"""
with tf.variable_scope(
name, default_name="multihead_attention",
values=[query_antecedent]):
w_dim, io_channels = query_antecedent.shape.dims[-2:]
batch, num_w_blocks = query_antecedent.shape.dims[:2]
wq, wk, wv, wo = multihead_attention_vars(
query_antecedent.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, query_antecedent.dtype)
# Rename dimensions for the memory height and width.
memory_antecedent = mtf.rename_dimension(
query_antecedent, w_dim.name, "memory_" + w_dim.name)
memory_w_dim = memory_antecedent.shape.dims[-2]
# Call einsum over the query and memory to get query q, keys k and values v.
q = mtf.einsum(
[query_antecedent, wq],
mtf.Shape([batch, heads, num_w_blocks, w_dim, kv_channels]))
k = mtf.einsum(
[memory_antecedent, wk],
mtf.Shape([batch, heads, num_w_blocks, memory_w_dim, kv_channels]))
v = mtf.einsum(
[memory_antecedent, wv],
mtf.Shape([batch, heads, num_w_blocks, memory_w_dim, kv_channels]))
# Halo exchange for memory blocks.
k, v = local_1d_halo_exchange(k, v, num_w_blocks, memory_w_dim, mask_right)
# Calculate the causal mask to avoid peeking into the future. We compute
# this once and reuse it for all blocks since the block_size is known.
mask = None
if mask_right:
mask = attention_bias_local_block(
query_antecedent.mesh, w_dim, memory_w_dim)
output = dot_product_attention(q, k, v, mask=mask)
return mtf.einsum(
[output, wo], mtf.Shape([batch, num_w_blocks, w_dim, io_channels]))
def masked_local_attention_1d(x,
kv_channels,
heads,
window_size=128,
master_dtype=tf.float32,
slice_dtype=tf.float32,
length_per_split=None,
return_kv=None,
params=None,
name=None):
"""Attention to the source position and a neighborhood to the left of it.
Attention for a given query position p can only see memory positions
in the range (p - window_size, p].
Args:
x: a mtf.Tensor with shape batch_dims + [length, io_channels]
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
window_size: an integer
master_dtype: a tf.dtype (deprecated - use params arg)
slice_dtype: a tf.dtype (deprecated - use params arg)
length_per_split: an optional integer indicating the part of the length
dimension per processor. You can omit if the length dimension is not
split.
return_kv: an optional list onto which to append the computed k and v.
params: an optional quadruple of Tensors (see multihead_attention_params())
name: an optional string.
Returns:
a Tensor with the same shape as x
Raises:
ValueError: if channels or depth don't match.
"""
with tf.variable_scope(
name, default_name="masked_local_attention_1d", values=[x]):
batch_dims = x.shape.dims[:-2]
length, io_channels = x.shape.dims[-2:]
if params is None:
wq, wk, wv, wo = multihead_attention_vars(
x.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, x.dtype)
else:
wq, wk, wv, wo = params
# Get query q, keys k and values v.
qkv_shape = mtf.Shape(batch_dims + [heads, length, kv_channels])
q = mtf.einsum([x, wq], qkv_shape)
k = mtf.einsum([x, wk], qkv_shape)
v = mtf.einsum([x, wv], qkv_shape)
if return_kv is not None:
return_kv.extend([k, v])
# Choose a suitable block size.
# We choose the greatest divisor of length_per_split less than or equal
# to max(window_size, 128)
if length_per_split is None:
length_per_split = length.size
block_length = max(window_size, 128)
while length_per_split % block_length != 0:
block_length -= 1
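    # Worked example (illustrative): with window_size=128 and
    # length_per_split=96, block_length starts at max(128, 128) = 128 and is
    # decremented to 96, the greatest divisor of 96 that is <= 128.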
query_block_length = mtf.Dimension("query_block_length", block_length)
memory_block_length = mtf.Dimension("memory_block_length", block_length)
# The num_blocks dimension gets the same name as the length dimension,
# so it will be split in the same way.
num_blocks = mtf.Dimension(length.name, length.size // block_length)
q_shape = batch_dims + [heads, num_blocks, query_block_length, kv_channels]
kv_shape = batch_dims + [
heads, num_blocks, memory_block_length, kv_channels]
q = mtf.reshape(q, q_shape)
k = mtf.reshape(k, kv_shape)
v = mtf.reshape(v, kv_shape)
# augment the keys and values for each block with keys and values for
# the previous window_size timesteps.
k = mtf.left_halo_exchange(k, num_blocks, memory_block_length, window_size)
v = mtf.left_halo_exchange(v, num_blocks, memory_block_length, window_size)
padded_memory_block_length = mtf.Dimension(
"memory_block_length", window_size + block_length)
mpos = mtf.range(x.mesh, padded_memory_block_length, tf.float32)
qpos = mtf.range(x.mesh, query_block_length, tf.float32) + window_size
# prevent looking forward
mask = mtf.cast(mtf.greater(mpos, qpos), x.dtype) * -1e9
# prevent looking >=block_length timesteps backward
mask += mtf.cast(mtf.less_equal(mpos, qpos - block_length), x.dtype) * -1e9
# Note: The first window_size-1 positions can see back into pre-time
# where all the keys and values are zero. We could mask this out, but we
# don't.
o = dot_product_attention(q, k, v, mask=mask)
o = mtf.reshape(o, batch_dims + [heads, length, kv_channels])
return mtf.einsum([o, wo], mtf.Shape(batch_dims + [length, io_channels]))
def masked_local_attention_1d_incremental(x,
prev_k,
prev_v,
step_num,
master_dtype=None,
slice_dtype=None,
params=None,
name=None):
"""Incremental local self-attention (one decode step).
Incremental version of masked_local_attention_1d()
Args:
x: a mtf.Tensor with shape [batch..., io_channels]
prev_k: mtf.Tensor with shape
[batch..., heads, window_length, kv_channels]
prev_v: mtf.Tensor with shape
[batch..., heads, window_length, kv_channels]
step_num: mtf Scalar with dtype tf.int32
master_dtype: a tf.dtype (deprecated)
slice_dtype: a tf.dtype (deprecated)
params: a quadruple of Tensors (see multihead_attention_params())
name: an optional string.
Returns:
y: A mtf.Tensor with shape [batch..., io_channels]
new_k: mtf.Tensor with shape
[batch..., heads, window_length, kv_channels]
new_v: mtf.Tensor with shape
[batch..., heads, window_length, kv_channels]
Raises:
ValueError: if the dimensions do not match.
"""
batch_dims = x.shape.dims[:-1]
io_channels = x.shape.dims[-1]
heads, window_length, kv_channels = prev_k.shape.dims[-3:]
with tf.variable_scope(name, default_name="masked_local_attention_1d"):
if params is None:
wq, wk, wv, wo = multihead_attention_vars(
x.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, x.dtype)
else:
wq, wk, wv, wo = params
q = mtf.einsum([x, wq], mtf.Shape(batch_dims + [heads, kv_channels]))
k = mtf.einsum([x, wk], mtf.Shape(batch_dims + [heads, kv_channels]))
v = mtf.einsum([x, wv], mtf.Shape(batch_dims + [heads, kv_channels]))
current_position = mtf.equal(
mtf.range(x.mesh, window_length, dtype=tf.int32),
mtf.mod(step_num, window_length.size))
k = mtf.where(current_position, k, prev_k, output_shape=prev_k.shape)
v = mtf.where(current_position, v, prev_v, output_shape=prev_v.shape)
o = dot_product_attention(q, k, v, mask=None)
y = mtf.einsum([o, wo], x.shape)
return y, k, v
def local_2d_halo_exchange(k, v, num_h_blocks, h_dim,
num_w_blocks, w_dim, mask_right):
"""Halo exchange for keys and values for Local 2D attention."""
for blocks_dim, block_size_dim, halo_size in [
(num_h_blocks, h_dim, h_dim.size),
(num_w_blocks, w_dim, w_dim.size)]:
# shape of k is [num_h_blocks, num_w_blocks, h_dim, w_dim, kv_channels]
if halo_size > 0:
if blocks_dim is not None:
if mask_right:
k = mtf.left_halo_exchange(k, blocks_dim, block_size_dim, halo_size)
v = mtf.left_halo_exchange(v, blocks_dim, block_size_dim, halo_size)
else:
k = mtf.halo_exchange(k, blocks_dim, block_size_dim, halo_size)
v = mtf.halo_exchange(v, blocks_dim, block_size_dim, halo_size)
else:
if mask_right:
k = mtf.pad(k, [halo_size, None], block_size_dim.name)
v = mtf.pad(v, [halo_size, None], block_size_dim.name)
else:
k = mtf.pad(k, [halo_size, halo_size], block_size_dim.name)
v = mtf.pad(v, [halo_size, halo_size], block_size_dim.name)
return k, v
def local_2d_self_attention_spatial_blocks(query_antecedent,
kv_channels,
heads,
memory_h_dim=None,
memory_w_dim=None,
mask_right=False,
master_dtype=tf.float32,
slice_dtype=tf.float32,
name=None):
"""Attention to the source position and a neighborhood to the left or right.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
Args:
query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks,
num_w_blocks, h_dim, w_dim, io_channels] must have the same size as
query_length, but a different name.
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
memory_h_dim: mtf Dimension, for the memory height block.
memory_w_dim: mtf Dimension, for the memory width block.
mask_right: bool, flag specifying whether we mask out attention to the right
for the decoder.
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
a Tensor of shape
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]
Raises:
ValueError: if channels or depth don't match.
"""
with tf.variable_scope(
name, default_name="multihead_attention", values=[query_antecedent]):
h_dim, w_dim, io_channels = query_antecedent.shape.dims[-3:]
batch, num_h_blocks, num_w_blocks = query_antecedent.shape.dims[:3]
wq, wk, wv, wo = multihead_attention_vars(
query_antecedent.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, query_antecedent.dtype)
# Rename dimensions for the memory height and width.
memory_antecedent = mtf.rename_dimension(query_antecedent, h_dim.name,
"memory_" + h_dim.name)
memory_antecedent = mtf.rename_dimension(memory_antecedent, w_dim.name,
"memory_" + w_dim.name)
memory_h_dim, memory_w_dim = memory_antecedent.shape.dims[-3:-1]
# Call einsum over the query and memory to get query q, keys k and values v.
q = mtf.einsum([query_antecedent, wq],
mtf.Shape([
batch, heads, num_h_blocks, num_w_blocks, h_dim, w_dim,
kv_channels
]))
k = mtf.einsum([memory_antecedent, wk],
mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
memory_h_dim, memory_w_dim, kv_channels]))
v = mtf.einsum([memory_antecedent, wv],
mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
memory_h_dim, memory_w_dim, kv_channels]))
# Halo exchange for memory blocks.
k, v = local_2d_halo_exchange(k, v, num_h_blocks, memory_h_dim,
num_w_blocks, memory_w_dim, mask_right)
# Calculate the causal mask to avoid peeking into the future. We compute
# this once and reuse it for all blocks since the block_size is known.
mask = None
if mask_right:
mask = attention_bias_local_2d_block(query_antecedent.mesh, h_dim, w_dim,
memory_h_dim, memory_w_dim)
output = dot_product_attention(q, k, v, mask=mask)
return mtf.einsum(
[output, wo],
mtf.Shape(
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]))
def rename_length_to_memory_length(
x, length_name="length", memory_length_name="memory_length"):
return mtf.rename_dimension(x, length_name, memory_length_name)
def multihead_attention_vars(
mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, activation_dtype):
"""Deprecated version of multihead_attention_params with combine=True."""
return multihead_attention_params(
mesh, heads, io_channels, kv_channels,
mtf.VariableDType(master_dtype, slice_dtype, activation_dtype),
combine=True)
def multihead_attention_params(mesh, heads, io_channels, kv_channels,
variable_dtype, combine=False):
"""Create Parameters for Multihead Attention.
If the combine flag is set to True, then we create only one variable
which stacks together all of the parameters. Otherwise, we create four
separate variables.
Args:
mesh: a Mesh
heads: a Dimension
io_channels: a Dimension
kv_channels: a Dimension
variable_dtype: a mtf.VariableDType
combine: a boolean
Returns:
wq: a Tensor with shape [heads, io_channels, kv_channels]
wk: a Tensor with shape [heads, io_channels, kv_channels]
wv: a Tensor with shape [heads, io_channels, kv_channels]
wo: a Tensor with shape [heads, io_channels, kv_channels]
"""
qkvo = mtf.Dimension("qkvo", 4)
qk_stddev = (io_channels.size ** -0.5) * (kv_channels.size ** -0.25)
v_stddev = io_channels.size ** -0.5
# TODO(noam): should be: o_stddev = (kv_channels.size * heads.size) ** -0.5
# verify that this still works and change it.
o_stddev = (io_channels.size * heads.size) ** -0.5
if combine:
def qkvo_initializer(shape,
dtype=None,
partition_info=None,
verify_shape=None):
del partition_info, verify_shape
return tf.random_normal(shape, dtype=dtype) * tf.reshape(
tf.cast([qk_stddev, qk_stddev, v_stddev, o_stddev],
dtype or tf.float32), [4, 1, 1, 1])
var = mtf.get_variable(
mesh, "qkvo", mtf.Shape([qkvo, heads, io_channels, kv_channels]),
initializer=qkvo_initializer, dtype=variable_dtype)
return mtf.unstack(var, qkvo)
else:
return [mtf.get_variable( # pylint: disable=g-complex-comprehension
mesh, name, mtf.Shape([heads, io_channels, kv_channels]),
initializer=tf.random_normal_initializer(stddev=stddev),
dtype=variable_dtype) for name, stddev in zip(
["q", "k", "v", "o"],
[qk_stddev, qk_stddev, v_stddev, o_stddev])]
def dot_product_attention(q,
k,
v,
mask,
dropout=0.0,
dropout_broadcast_dims=None,
extra_logit=None):
"""Dot-product attention.
Args:
q: Tensor with shape [...., length_q, depth_k]. Typically leading dimensions
are [batch, heads].
k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
match with q.
v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must
match with q.
mask: mask Tensor (see attention_mask())
dropout: a float.
dropout_broadcast_dims: an optional list of mtf.Dimension
extra_logit: an optional scalar or tensor
Returns:
Tensor with shape [..., length_q, depth_v].
"""
length_kv = k.shape.dims[-2]
logits_shape = mtf.Shape(q.shape.dims[:-1] + [length_kv])
logits = mtf.einsum([q, k], logits_shape)
if mask is not None:
logits += mask
weights = mtf.softmax(logits, length_kv, extra_logit=extra_logit)
if dropout != 0.0:
weights = mtf.dropout(
weights, 1.0 - dropout,
noise_shape=weights.shape - dropout_broadcast_dims)
depth_v = v.shape.dims[-1]
outputs_shape = mtf.Shape(q.shape.dims[:-1] + [depth_v])
outputs = mtf.einsum([weights, v], outputs_shape)
return outputs
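# Shape sketch (illustrative): with q of shape [batch, heads, length_q, d_k],
# k of shape [batch, heads, memory_length, d_k] and v of shape
# [batch, heads, memory_length, d_v], the logits einsum contracts d_k to give
# [batch, heads, length_q, memory_length], the softmax is over memory_length,
# and the final einsum contracts memory_length to give
# [batch, heads, length_q, d_v].
def _example_dot_product_attention_shapes(q, k, v):
  """Minimal sketch of an unmasked attention call and its output shape."""
  outputs = dot_product_attention(q, k, v, mask=None)
  # outputs has q's shape with its last (depth_k) dimension replaced by
  # v's last (depth_v) dimension.
  return outputs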
def multihead_attention(query_antecedent,
memory_antecedent,
mask,
kv_channels,
heads,
dropout=0.0,
dropout_broadcast_dims=None,
master_dtype=tf.float32,
slice_dtype=tf.float32,
name="multihead_attention"):
"""Multihead scaled-dot-product attention with input/output transformations.
In order to use only one variable containing the four weight matrices
packed together, we insist that the query and memory antecedents have the
same dimensionality (io_channels) and that the keys and values have the
same dimensionality (kv_channels).
Args:
query_antecedent: a mtf.Tensor with shape
[<batch_dims>, query_length, io_channels]
memory_antecedent: a mtf.Tensor with shape
[batch, memory_length, io_channels] (optional)
mask: mask Tensor (see attention_mask())
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
dropout: a floating point value
dropout_broadcast_dims: an optional list of mtf.Dimension
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
A mtf.Tensor with shape [batch, query_length, io_channels]
Raises:
ValueError: if the dimensions do not match.
"""
batch_dims = query_antecedent.shape.dims[:-2]
query_length, io_channels = query_antecedent.shape.dims[-2:]
with tf.variable_scope(name,
default_name="multihead_attention",
values=[query_antecedent, memory_antecedent]):
wq, wk, wv, wo = multihead_attention_vars(
query_antecedent.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, query_antecedent.dtype)
if memory_antecedent is None:
memory_antecedent = rename_length_to_memory_length(
query_antecedent, query_length.name)
memory_batch_dims = memory_antecedent.shape.dims[:-2]
memory_length, memory_channels = memory_antecedent.shape.dims[-2:]
if memory_batch_dims != batch_dims:
raise ValueError("memory batch must equal query batch")
if memory_channels != io_channels:
raise ValueError("memory channels must equal query channels")
q = mtf.einsum(
[query_antecedent, wq],
mtf.Shape(batch_dims + [heads, query_length, kv_channels]))
k = mtf.einsum(
[memory_antecedent, wk],
mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
v = mtf.einsum(
[memory_antecedent, wv],
mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
o = dot_product_attention(
q, k, v, mask, dropout, dropout_broadcast_dims)
return mtf.einsum(
[o, wo], mtf.Shape(batch_dims + [query_length, io_channels]))
def multihead_self_attention_incremental(query_antecedent,
prev_k,
prev_v,
step_num,
master_dtype,
slice_dtype,
name="multihead_attention"):
"""Incremental self-attention (one decode step).
In order to use only one variable containing the four weight matrices
packed together, we insist that the query and memory antecedents have the
same dimensionality (io_channels) and that the keys and values have the
same dimensionality (kv_channels).
Args:
query_antecedent: a mtf.Tensor with shape [batch..., io_channels]
prev_k: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
prev_v: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
step_num: mtf Scalar with dtype tf.int32
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
y: A mtf.Tensor with shape [batch..., io_channels]
new_k: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
new_v: mtf.Tensor with shape [batch..., heads, memory_length, kv_channels]
Raises:
ValueError: if the dimensions do not match.
"""
batch_dims = query_antecedent.shape.dims[:-1]
io_channels = query_antecedent.shape.dims[-1]
heads, memory_length, kv_channels = prev_k.shape.dims[-3:]
with tf.variable_scope(name, default_name="multihead_attention"):
wq, wk, wv, wo = multihead_attention_vars(
query_antecedent.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, query_antecedent.dtype)
memory_antecedent = query_antecedent
q = mtf.einsum(
[query_antecedent, wq],
mtf.Shape(batch_dims + [heads, kv_channels]))
k = mtf.einsum(
[memory_antecedent, wk],
mtf.Shape(batch_dims + [heads, kv_channels]))
v = mtf.einsum(
[memory_antecedent, wv],
mtf.Shape(batch_dims + [heads, kv_channels]))
k = prev_k + mtf.multiply(
k, mtf.one_hot(step_num, memory_length, dtype=prev_k.dtype),
output_shape=prev_k.shape)
v = prev_v + mtf.multiply(
v, mtf.one_hot(step_num, memory_length, dtype=prev_v.dtype),
output_shape=prev_v.shape)
mask = mtf.cast(
mtf.greater(mtf.range(
query_antecedent.mesh, memory_length, dtype=tf.int32), step_num),
q.dtype) * -1e9
o = dot_product_attention(q, k, v, mask)
y = mtf.einsum([o, wo], query_antecedent.shape)
return y, k, v
def multihead_encdec_attention_incremental(query_antecedent,
wq, wo, k, v,
mask,
name="multihead_attention"):
"""Incremental attention over encoder (one decode step).
In order to use only one variable containing the four weight matrices
packed together, we insist that the query and memory antecedents have the
same dimensionality (io_channels) and that the keys and values have the
same dimensionality (kv_channels).
memory_dims is a subset of query_dims
Args:
query_antecedent: a mtf.Tensor with shape query_dims + [io_channels]
wq: a mtf.Tensor with shape [heads, io_channels, kv_channels]
wo: a mtf.Tensor with shape [heads, io_channels, kv_channels]
k: memory_dims + [heads, memory_length, kv_channels]
v: memory_dims + [heads, memory_length, kv_channels]
mask: mask Tensor (see attention_mask())
name: an optional string.
Returns:
A mtf.Tensor with shape [batch, qlen, io_channels]
"""
heads, _, kv_channels = k.shape.dims[-3:]
query_dims = query_antecedent.shape.dims[:-1]
with tf.variable_scope(name, default_name="multihead_attention"):
q = mtf.einsum(
[query_antecedent, wq],
mtf.Shape(query_dims + [heads, kv_channels]))
o = dot_product_attention(q, k, v, mask)
return mtf.einsum([o, wo], query_antecedent.shape)
def attention_mask_ignore_padding(inputs, dtype=tf.float32):
"""Bias for encoder-decoder attention.
Args:
inputs: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., memory_length_dim]
"""
inputs = rename_length_to_memory_length(inputs)
return mtf.cast(mtf.equal(inputs, 0), dtype) * -1e9
def attention_mask_autoregressive(query_pos, dtype=tf.float32):
"""Bias for self-attention where attention to the right is disallowed.
Args:
query_pos: a mtf.Tensor with shape [..., length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim]
"""
memory_pos = rename_length_to_memory_length(query_pos)
return mtf.cast(mtf.less(query_pos, memory_pos), dtype) * -1e9
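# Illustrative sketch (not part of the library): for length 4, the autoregressive
# mask above puts -1e9 wherever query_pos < memory_pos, i.e. strictly above the
# diagonal, so each position can only attend to itself and earlier positions.
# The helper below is hypothetical and uses NumPy instead of mtf.
def _np_autoregressive_mask_sketch(length=4):
  import numpy as np
  query_pos = np.arange(length)[:, None]
  memory_pos = np.arange(length)[None, :]
  return (query_pos < memory_pos).astype(np.float32) * -1e9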
def attention_mask_same_segment(
query_segment, memory_segment=None, dtype=tf.float32):
"""Bias for attention where attention between segments is disallowed.
Args:
query_segment: a mtf.Tensor with shape [..., length_dim]
memory_segment: a mtf.Tensor with shape [..., memory_length_dim]
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [..., length_dim, memory_length_dim]
"""
memory_segment = rename_length_to_memory_length(
memory_segment or query_segment)
return mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * -1e9
def attention_bias_local_block(mesh, block_length, memory_length,
dtype=tf.int32):
"""Bias for attention for local blocks where attention to right is disallowed.
Create the bias matrix by using two separate masks, one for the memory part
  which doesn't overlap with the query, and a second which interacts with the
  query and should be disallowed from looking to the right of the current query
  position.
Args:
mesh: a MeshTensorflow object
block_length: a mtf.Dimension
memory_length: a mtf.Dimension
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [block_length, memory_length]
"""
memory_length = mtf.Dimension(memory_length.name, block_length.size)
memory_mask = mtf.zeros(mesh, [block_length, memory_length], dtype=dtype)
mask = mtf.cast(mtf.less(mtf.range(mesh, block_length, dtype=dtype),
mtf.range(mesh, memory_length, dtype=dtype)),
dtype=dtype)
mask = mtf.cast(
mtf.concat([memory_mask, mask], memory_length.name),
dtype=tf.float32) * -1e9
return mask
def attention_bias_local_2d_block(mesh,
h_dim,
w_dim,
memory_h_dim,
memory_w_dim,
dtype=tf.int32):
"""Bias for attention for local blocks where attention to right is disallowed.
Create the bias matrix by using two separate masks, one for the memory part
  which doesn't overlap with the query, and a second which interacts with the
  query and should be disallowed from looking to the right of the current query
  position.
Args:
mesh: a MeshTensorflow object
h_dim: a mtf.Dimension
w_dim: a mtf.Dimension
memory_h_dim: a mtf.Dimension
memory_w_dim: a mtf.Dimension
dtype: a tf.dtype
Returns:
a mtf.Tensor with shape [block_length, memory_length]
"""
memory_height = mtf.Dimension(memory_h_dim.name, h_dim.size)
memory_width = mtf.Dimension(memory_w_dim.name, w_dim.size)
mask_top_visible = mtf.zeros(mesh, [h_dim, memory_height], dtype=dtype)
mask_left_visible = mtf.zeros(mesh, [w_dim, memory_width], dtype=dtype)
mask_query = mtf.greater(
mtf.range(mesh, memory_height, dtype=tf.int32),
mtf.range(mesh, memory_width, dtype=dtype))
width_mask = mtf.concat([mask_left_visible, mask_query], memory_width.name)
mask = mtf.cast(
mtf.concat([mask_top_visible, width_mask], memory_height.name),
dtype=tf.float32) * -1e9
return mask
def multiplicative_jitter(x, epsilon=1e-2):
"""Multiply values by a random number between 1-epsilon and 1+epsilon.
Makes models more resilient to rounding errors introduced by bfloat16.
This seems particularly important for logits.
Args:
x: a mtf.Tensor
epsilon: a floating point value
Returns:
a mtf.Tensor with the same type and shape as x.
"""
if epsilon == 0:
return x
return x * mtf.random_uniform(
x.mesh, x.shape, minval=1.0 - epsilon, maxval=1.0+epsilon, dtype=x.dtype)
def multihead_self_attention_memory_compressed(x,
mask_right,
compression_factor,
kv_channels,
heads,
dropout=0.0,
dropout_broadcast_dims=None,
master_dtype=tf.float32,
slice_dtype=tf.float32,
name="multihead_attention"):
"""Memory-compressed self-attention.
The memory is first average-pooled (strided) to make it shorter by
a factor of compression_factor.
Args:
x: a mtf.Tensor with shape
[<batch_dims>, query_length, io_channels]
mask_right: a boolean
compression_factor: an integer
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
dropout: a floating point value
dropout_broadcast_dims: an optional list of mtf.Dimension
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
A mtf.Tensor with shape [batch, query_length, io_channels]
Raises:
ValueError: if the dimensions do not match.
"""
batch_dims = x.shape.dims[:-2]
length, io_channels = x.shape.dims[-2:]
with tf.variable_scope(name,
default_name="compressed_attention",
values=[x]):
wq, wk, wv, wo = multihead_attention_vars(
x.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, x.dtype)
memory_antecedent = compress_mean(x, length, compression_factor)
memory_antecedent = rename_length_to_memory_length(memory_antecedent)
memory_length = memory_antecedent.shape.dims[-2]
q = mtf.einsum(
[x, wq],
mtf.Shape(batch_dims + [heads, length, kv_channels]))
k = mtf.einsum(
[memory_antecedent, wk],
mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
v = mtf.einsum(
[memory_antecedent, wv],
mtf.Shape(batch_dims + [heads, memory_length, kv_channels]))
if mask_right:
query_pos = mtf.range(x.mesh, length, dtype=tf.int32)
memory_pos = (
mtf.range(x.mesh, memory_length, dtype=tf.int32) * compression_factor
+ (compression_factor - 1))
mask = mtf.cast(mtf.greater(memory_pos, query_pos), x.dtype) * -1e9
else:
mask = None
o = dot_product_attention(
q, k, v, mask, dropout, dropout_broadcast_dims, extra_logit=0.0)
return mtf.einsum(
[o, wo], mtf.Shape(batch_dims + [length, io_channels]))
def compress_mean(x, dim, compression_factor):
"""Compress by taking group means.
Args:
x: a Tensor
dim: a dimension in x.shape
compression_factor: an integer
Returns:
a Tensor
"""
dims = x.shape.dims
pos = dims.index(dim)
compressed_dim = mtf.Dimension(dim.name, dim.size // compression_factor)
compression_factor_dim = mtf.Dimension(
"compression_factor", compression_factor)
new_shape = (
dims[:pos] + [compressed_dim, compression_factor_dim] + dims[pos + 1:])
x = mtf.reshape(x, new_shape)
x = mtf.reduce_mean(x, reduced_dim=compression_factor_dim)
return x
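# Illustrative NumPy sketch (not part of the library) of the reshape-then-mean
# trick used by compress_mean: a dimension of size 6 with compression_factor 3
# becomes a dimension of size 2 holding group means. Names are hypothetical.
def _np_compress_mean_sketch():
  import numpy as np
  x = np.arange(12.0).reshape(2, 6)         # [batch=2, length=6]
  compressed = x.reshape(2, 2, 3).mean(-1)  # [batch=2, length=2], group means
  return compressed                         # e.g. row 0 is [1., 4.]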
def embedding_weights(mesh,
vocab_dim,
output_dim,
variable_dtype,
name="embedding",
ensemble_dim=None,
initializer=None):
"""Embedding weights."""
if not ensemble_dim:
ensemble_dim = []
elif not isinstance(ensemble_dim, list):
ensemble_dim = [ensemble_dim]
shape = mtf.Shape(ensemble_dim) + [vocab_dim, output_dim]
if initializer is None:
initializer = tf.random_normal_initializer()
ret = mtf.get_variable(
mesh, name, shape, dtype=variable_dtype, initializer=initializer)
return ret
def embedding(indices, vocab_dim, output_dim, variable_dtype, name="embedding"):
"""Embedding layer."""
weights = embedding_weights(
indices.mesh, vocab_dim, output_dim, variable_dtype, name)
return mtf.gather(weights, indices, vocab_dim)
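# Illustrative NumPy sketch (not part of the library) of what the embedding
# layer does: mtf.gather over vocab_dim is an ordinary table lookup. The helper
# name and values below are hypothetical.
def _np_embedding_sketch():
  import numpy as np
  weights = np.arange(12.0).reshape(4, 3)   # [vocab=4, output_dim=3]
  indices = np.array([2, 0, 2])             # token ids
  return weights[indices]                   # [3, 3]; rows 2, 0 and 2 of the table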
def max_pool2d(x, ksize=(2, 2), name="max_pool2d"):
"""2D max pooling.
  Pooling is applied on the HW dimensions. We assume the dimensions of x are
  [NHWC]. There can be multiple batch dimensions, e.g., [10, 4, 4, 10, 10, 3].
  Currently we only support non-overlapping pooling: strides == ksize. Also the
input HW dimensions must be divisible by ksize.
Args:
x: a Tensor
ksize: kernel size. A list or tuple
name: an optional string
Returns:
a Tensor
"""
return x if tuple(ksize) == (1, 1) else mtf.PoolOperation(
x, ksize, strides=ksize, pool_fn_string="MAX_2D", name=name).outputs[0]
def max_pool3d(x, ksize=(2, 2, 2), name="max_pool3d"):
"""3D max pooling.
  Pooling is applied on the DHW dimensions. We assume the dimensions of x are
  [NDHWC]. There can be multiple batch dimensions, e.g.,
  [10, 4, 4, 10, 10, 10, 3].
  Currently we only support non-overlapping pooling: strides == ksize. Also the
input DHW dimensions must be divisible by ksize.
Args:
x: a Tensor
ksize: kernel size. A list or tuple
name: an optional string
Returns:
a Tensor
"""
return x if tuple(ksize) == (1, 1, 1) else mtf.PoolOperation(
x, ksize, strides=ksize, pool_fn_string="MAX_3D", name=name).outputs[0]
def avg_pool2d(x, ksize=(2, 2), name="avg_pool2d"):
"""2D average pooling.
  Pooling is applied on the HW dimensions. We assume the dimensions of x are
  [NHWC]. There can be multiple batch dimensions, e.g., [10, 4, 4, 10, 10, 3].
  Currently we only support non-overlapping pooling: strides == ksize. Also the
input HW dimensions must be divisible by ksize.
Args:
x: a Tensor
ksize: kernel size. A list or tuple
name: an optional string
Returns:
a Tensor
"""
return x if tuple(ksize) == (1, 1) else mtf.PoolOperation(
x, ksize, strides=ksize, pool_fn_string="AVG_2D", name=name).outputs[0]
def avg_pool3d(x, ksize=(2, 2, 2), name="avg_pool3d"):
"""3D average pooling.
  Pooling is applied on the DHW dimensions. We assume the dimensions of x are
  [NDHWC]. There can be multiple batch dimensions, e.g.,
  [10, 4, 4, 10, 10, 10, 3].
  Currently we only support non-overlapping pooling: strides == ksize. Also the
input DHW dimensions must be divisible by ksize.
Args:
x: a Tensor
ksize: kernel size. A list or tuple
name: an optional string
Returns:
a Tensor
"""
return x if tuple(ksize) == (1, 1, 1) else mtf.PoolOperation(
x, ksize, strides=ksize, pool_fn_string="AVG_3D", name=name).outputs[0]
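# Illustrative NumPy sketch (not part of the library) of the non-overlapping
# pooling implemented by the PoolOperation wrappers above, shown in 2D with
# ksize == strides == (2, 2). The helper name is hypothetical.
def _np_pool2d_sketch():
  import numpy as np
  x = np.arange(16.0).reshape(4, 4)               # [H=4, W=4]
  blocks = x.reshape(2, 2, 2, 2)                  # split H and W into 2x2 blocks
  return blocks.max(axis=(1, 3)), blocks.mean(axis=(1, 3))   # each [2, 2]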
def _reversible_half_residual_grad(
explicit_inputs, all_inputs, forward_operations, outputs, output_grads):
"""Backpropagation function for a revnet."""
x1, _, x2, _ = explicit_inputs
extra_inputs = all_inputs[len(explicit_inputs):]
_, _, y1, _ = outputs
dy2, dy2_backwards, dy1, dy1_backwards = output_grads
# last operation should be an addition to produce y1
if not isinstance(forward_operations[-1], mtf.AddOperation):
raise ValueError("expected an addition here")
f_ops = forward_operations[:-1]
orig_fx2 = f_ops[-1].outputs[0]
orig_x2 = x2
if dy2_backwards is not None:
x2 = dy2_backwards
if dy1_backwards is not None:
y1 = dy1_backwards
graph = all_inputs[0].graph
f_again_ops, mapping = graph.clone_operations(f_ops, {orig_x2: x2})
fx2 = mapping[orig_fx2]
x1 = y1 - fx2
grads = mtf.gradients(ys=[fx2], xs=[x2] + extra_inputs, grad_ys=[dy1],
operations=f_again_ops)
dx2 = dy2 + grads[0]
extra_inputs_grads = grads[1:]
dx1 = dy1
return [dx1, x1, dx2, x2] + extra_inputs_grads
def _half_residual_and_swap(x1, x1_backwards, x2, x2_backwards, f=None):
return x2, x2_backwards, x1 + f(x2), x1_backwards
def reversible_half_residual_and_swap(x1,
x1_backwards,
x2,
x2_backwards,
f,
recompute_grads=True):
"""Building block of a revnet.
https://arxiv.org/abs/1707.04585
All the inputs and output Tensors have the same shape and dtype.
The forward computation is:
y1 = x1 + f(x2)
y2 = x2
The x1_backwards and x2_backwards tensors are used by backpropagation.
None should be passed for the first layer, then the outputs of each layer
should be passed to the next.
Example usage:
x1, x1_backwards, x2, x2_backwards = x, None, x, None
for f in my_functions:
    x1, x1_backwards, x2, x2_backwards = (
        mtf.layers.reversible_half_residual_and_swap(
            x1, x1_backwards, x2, x2_backwards, f))
y = (x1 + x2) / 2
Args:
x1: a Tensor
x1_backwards: a Tensor or None
x2: a Tensor
x2_backwards: a Tensor or None
f: a function from Tensor to Tensor
recompute_grads: a boolean
Returns:
y2: a Tensor
y2_backwards: a Tensor
y1: a Tensor
y1_backwards: a Tensor
"""
if recompute_grads:
if x1_backwards is None:
x1_backwards = mtf.zeros_like(x1)
if x2_backwards is None:
x2_backwards = mtf.zeros_like(x2)
return mtf.custom_gradient(
functools.partial(_half_residual_and_swap, f=f),
_reversible_half_residual_grad,
[x1, x1_backwards, x2, x2_backwards])
else:
return _half_residual_and_swap(x1, x1_backwards, x2, x2_backwards, f)
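# Illustrative NumPy sketch (not part of the library) of why the half-residual
# block above is reversible: given the outputs (y2, y1) and f, the inputs can be
# recomputed exactly, which is what _reversible_half_residual_grad relies on to
# avoid storing activations. The helper names below are hypothetical.
def _np_revnet_half_step_sketch():
  import numpy as np
  f = lambda t: np.tanh(t)          # any function from Tensor to Tensor
  x1, x2 = np.ones(3), np.arange(3.0)
  y1, y2 = x1 + f(x2), x2           # forward: y1 = x1 + f(x2), y2 = x2
  x2_rec, x1_rec = y2, y1 - f(y2)   # inverse: recover the inputs from the outputs
  return np.allclose(x1, x1_rec) and np.allclose(x2, x2_rec)   # True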
@gin.configurable
def clip_activation_gradient(x, clip_rms_norm=None):
"""Clip activation gradients by rms-norm."""
tf.logging.info("clip_activation_gradient.clip_rms_norm: {}".format(
clip_rms_norm))
def _reduce_rms(t):
return mtf.sqrt(mtf.reduce_mean(mtf.square(t)))
def forward_fn(x):
"""Identity forward pass."""
return mtf.identity(x)
def grad_fn(explicit_inputs, all_inputs, forward_operations, outputs,
output_grads):
del explicit_inputs, all_inputs, outputs, forward_operations
grad_ys = output_grads
if clip_rms_norm:
clipped_grad_ys = []
for g in grad_ys:
rms_norm = _reduce_rms(g)
clipping_denom = mtf.maximum(1.0, rms_norm / clip_rms_norm)
clipped_grad_ys.append(g / clipping_denom)
return clipped_grad_ys
return grad_ys
explicit_inputs = [x]
return mtf.custom_gradient(forward_fn, grad_fn, explicit_inputs)
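# Illustrative NumPy sketch (not part of the library) of the rms-norm clipping
# applied to each gradient in grad_fn above: gradients whose rms norm exceeds
# clip_rms_norm are rescaled down to that norm, others pass through unchanged.
# The helper name is hypothetical.
def _np_clip_by_rms_norm_sketch(g, clip_rms_norm):
  import numpy as np
  rms_norm = np.sqrt(np.mean(np.square(g)))
  return g / max(1.0, rms_norm / clip_rms_norm)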
| mesh-master | mesh_tensorflow/layers.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Placement Mesh Implementation (for CPU/GPU clusters)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import random
from mesh_tensorflow import ops_with_redefined_builtins as mtf
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
class PlacementMeshImpl(mtf.MeshImpl):
"""Mesh implemented using explicit device placement."""
def __init__(self, shape, layout, devices):
super(PlacementMeshImpl, self).__init__(shape, layout)
self._devices = devices
self.copy_master_to_slice_ops = []
class LaidOutTensor(object):
"""One Slice for each processor."""
def __init__(self, tensor_list):
self._tensor_list = tensor_list
def __repr__(self):
return "[" + ",".join([str(t) for t in self._tensor_list]) + "]"
@property
def tensor_list(self):
return self._tensor_list
@classmethod
def from_tensor_list(cls, tensor_list):
return cls(tensor_list)
@property
def all_slices(self):
return self._tensor_list
@property
def slice_shape(self):
return self.tensor_list[0].shape.as_list()
def to_laid_out_tensor(self):
return self
class LaidOutVariable(object):
"""Maintains slice-variables and copy operations."""
def __init__(self, variable, mesh_impl):
"""Create a LaidOutVariable.
Args:
variable: a Variable (Operation)
mesh_impl: a MeshImpl
"""
self._variable = variable
self._mesh_impl = mesh_impl
shape = variable.outputs[0].shape
slice_shape = mesh_impl.slice_shape(shape)
base_name = variable.name
if self.slice_is_master:
slices = [variable.get_master()]
self._laid_out_tensor = mesh_impl.LaidOutTensor(slices)
self._copy_slices_to_master = tf.group([])
self._copy_master_to_slices = tf.group([])
else:
slices = []
slices_with_master_dtype = []
for pnum in xrange(mesh_impl.size):
with tf.device(mesh_impl.devices[pnum]):
slices.append(tf.get_variable(
base_name + "_slice_%d" % pnum,
slice_shape,
dtype=variable.slice_dtype, collections=[]))
slices_with_master_dtype.append(
tf.cast(slices[-1], variable.master_dtype))
self._laid_out_tensor = mesh_impl.LaidOutTensor(slices)
if os.environ.get("MTF_SEQUENCE_MODE", "") == "1":
if mesh_impl.copy_master_to_slice_ops:
with tf.control_dependencies(
[mesh_impl.copy_master_to_slice_ops[-1]]):
self._copy_master_to_slices = self.assign_to_slices(
mtf.assign_slice,
mesh_impl.make_slices(variable.get_master(), shape))
else:
self._copy_master_to_slices = self.assign_to_slices(
mtf.assign_slice,
mesh_impl.make_slices(variable.get_master(), shape))
mesh_impl.copy_master_to_slice_ops.append(self._copy_master_to_slices)
else:
self._copy_master_to_slices = self.assign_to_slices(
mtf.assign_slice,
mesh_impl.make_slices(variable.get_master(), shape))
self._copy_slices_to_master = variable.assign_to_master(
mesh_impl.combine_slices(slices_with_master_dtype, shape))
@property
def slice_is_master(self):
"""Should we avoid creating a slice variable and just use the master."""
if self._mesh_impl.size != 1:
return False
if self._variable.master_dtype != self._variable.slice_dtype:
return False
if isinstance(self._variable, mtf.StackedVariable):
return False
master_device = self._variable.master_device
slice_device = self._mesh_impl.devices[0]
return slice_device == master_device or not slice_device
def assign_to_slices(self, assign_fn, values):
"""Assign to the slice variables.
Args:
assign_fn: a function from
(mtf.Variable, tf.Variable, tf.Tensor) -> tf.Operation
values: a list of tf.Tensor
Returns:
        a tf.Operation
"""
return tf.group(mtf.parallel(
self._mesh_impl.devices, assign_fn, [self._variable] * len(values),
self.laid_out_tensor.all_slices, values))
@property
def laid_out_tensor(self):
return self._laid_out_tensor
@property
def copy_master_to_slices(self):
return self._copy_master_to_slices
@property
def copy_slices_to_master(self):
return self._copy_slices_to_master
def slicewise(self, fn, *inputs):
"""Execute a function in parallel on all slices.
Args:
fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.
*inputs: a list of inputs. Each input is either a LaidOutTensor or
is convertible to a tf.Tensor.
Returns:
a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.
"""
# convert all inputs to LaidOutTensor where possible
inputs = mtf.convert_args_to_laid_out_tensors(inputs)
inputs = [x.tensor_list if isinstance(x, self.LaidOutTensor)
else [x] * len(self.devices) for x in inputs]
ret = mtf.parallel(self.devices, fn, *inputs)
if isinstance(ret[0], tuple):
ret = mtf.transpose_list_of_lists(ret)
return tuple([self.LaidOutTensor(t) for t in ret])
else:
return self.LaidOutTensor(ret)
def Print(self, x, data, message, **kwargs): # pylint: disable=invalid-name
"""call tf.Print.
Args:
x: a LaidOutTensor
data: a list of LaidOutTensor
message: a string
      **kwargs: keyword arguments to tf.Print
Returns:
a LaidOutTensor
"""
tf.logging.info("PlacementMeshImpl::Print")
x = x.to_laid_out_tensor()
new_slices = x.tensor_list[:]
with tf.device(self._devices[0]):
new_slices[0] = tf.Print(
new_slices[0], [t for d in data for t in d.tensor_list],
message, **kwargs)
return self.LaidOutTensor(new_slices)
def allreduce(self, x, mesh_axes, reduction_fn_string):
"""Grouped allreduce, (across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers - the mesh dimensions to be reduced
reduction_fn_string: "SUM" or "MAX"
Returns:
a LaidOutTensor
"""
return self._collective_with_groups(
x, mesh_axes, functools.partial(
allreduce_ring, reduction_fn_string=reduction_fn_string))
def allconcat(self, x, mesh_axis, concat_axis):
"""Grouped allconcat (like MPI allgather followed by concat).
Args:
x: a LaidOutTensor
mesh_axis: an integer - the mesh axis along which to group
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor
"""
return self._collective_with_groups(
x, [mesh_axis],
functools.partial(allconcat_ring, concat_axis=concat_axis))
def alltoall(self, x, mesh_axis, split_axis, concat_axis):
"""Grouped alltoall.
Args:
x: a LaidOutTensor
mesh_axis: an integer the mesh axis along which to group
split_axis: an integer (the Tensor axis along which to split)
concat_axis: an integer (the Tensor axis along which to concatenate)
Returns:
a LaidOutTensor
"""
return self._collective_with_groups(
x, [mesh_axis],
functools.partial(
alltoall_ring, split_axis=split_axis, concat_axis=concat_axis))
def receive(self, x, mesh_axis, source_pcoord):
"""Collective receive in groups.
Each group contains the processors that differ only in mesh_axis.
```python
group_size = self.shape[mesh_axis].size
```
Args:
x: a LaidOutTensor
mesh_axis: an integer
source_pcoord: a list of optional integers. Each element is either None
or an integer in [0, group_size). If source_pcoord[k] is None, then the
output for the k-th processor in each group is a zero tensor. If
source_pcoord[k] is not None, then the output for the k-th processor in
each group is equal to the input for the source_pcoord[k]-th processor
in that group.
Returns:
a LaidOutTensor
"""
x = x.to_laid_out_tensor()
shape = x.tensor_list[0].shape
dtype = x.tensor_list[0].dtype
def _collective_receive(tensor_list, device_list):
ret = []
for pcoord, device in enumerate(device_list):
with tf.device(device):
if source_pcoord[pcoord] is None:
ret.append(tf.zeros(shape, dtype))
else:
ret.append(tf.identity(tensor_list[source_pcoord[pcoord]]))
return ret
return self._collective_with_groups(
x, [mesh_axis], _collective_receive)
def _collective_with_groups(self, x, mesh_axes, collective):
"""Grouped collective, (across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers - the mesh dimensions to be reduced
collective: fn from list(tf.Tensor), list(device) -> list(tf.Tensor)
Returns:
a LaidOutTensor
"""
if not mesh_axes:
return x
x = x.to_laid_out_tensor()
if len(mesh_axes) == self.ndims:
return self.LaidOutTensor(collective(x.tensor_list, self._devices))
else:
groups = mtf.processor_groups(self.shape, mesh_axes)
ret = [None] * self.size
for g in groups:
inputs = [x.tensor_list[pnum] for pnum in g]
devices = [self._devices[pnum] for pnum in g]
reduced = collective(inputs, devices)
for pnum, y in zip(g, reduced):
ret[pnum] = y
return self.LaidOutTensor(ret)
def random(self, shape, tf_fn, kwargs):
"""Call a random tf operation (e.g. tf.random.uniform).
Args:
shape: a Shape
tf_fn: a function such as tf.random.uniform
kwargs: kwargs to pass to tf_fn, except for seed
Returns:
a LaidOutTensor
"""
slice_shape = self.slice_shape(shape)
op_seed = random.random()
def my_fn(pnum):
# seeds are necessary to make sure that slices that should have the
# same values actually do have the same values.
seed = hash("%s,%s" % (op_seed, self.slice_begin(shape, pnum)))
return tf_fn(slice_shape, seed=seed, **kwargs)
return self.slicewise(my_fn, self.laid_out_pnum())
def laid_out_pnum(self):
"""Returns a LaidOutTensor containing the processor number."""
return self.LaidOutTensor(list(range(self.size)))
@property
def devices(self):
return self._devices
def export_to_tf_tensor(self, x, laid_out_x):
"""Turn a Tensor into a tf.Tensor.
Args:
x: a Tensor
laid_out_x: a LaidOutTensor
Returns:
a tf.Tensor
"""
return self.combine_slices(laid_out_x.all_slices, x.shape)
def import_tf_tensor(self, x, tf_x):
"""Import a tf.Tensor, producing a LaidOutTensor.
Args:
x: a Tensor
tf_x: a tf.Tensor
Returns:
a LaidOutTensor
"""
return self.LaidOutTensor(self.make_slices(tf_x, x.shape))
def allreduce_ring_single_shard(xs, devices, reduction_fn_string="SUM"):
"""Compute the reduction of all Tensors and put the result everywhere.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of strings
reduction_fn_string: "SUM" or "MAX"
Returns:
a list of n Tensors
Raises:
ValueError: if devices is not a list of n strings
"""
n = len(xs)
binary_reduction = mtf.binary_reduction_fn(reduction_fn_string)
assert len(devices) == n, "devices must be a list of length len(xs)"
if n == 1:
return xs
result = [None] * n
if n % 2 == 0:
left_center = n // 2 - 1
right_center = left_center + 1
else:
left_center = n // 2
right_center = left_center
left_sum = xs[0]
for i in xrange(1, left_center + 1):
with tf.device(devices[i]):
left_sum = binary_reduction(left_sum, xs[i])
right_sum = xs[n-1]
for i in reversed(xrange(left_center + 1, n - 1)):
with tf.device(devices[i]):
right_sum = binary_reduction(xs[i], right_sum)
with tf.device(devices[left_center]):
result[left_center] = binary_reduction(left_sum, right_sum)
if n % 2 == 0:
with tf.device(devices[right_center]):
result[right_center] = binary_reduction(left_sum, right_sum)
for i in reversed(xrange(left_center)):
with tf.device(devices[i]):
result[i] = tf.identity(result[i + 1])
for i in xrange(right_center + 1, n):
with tf.device(devices[i]):
result[i] = tf.identity(result[i - 1])
return result
def allreduce_ring(xs, devices, reduction_fn_string="SUM"):
"""Compute the reduction of all Tensors and put the result everywhere.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of strings
reduction_fn_string: "SUM" or "MAX"
Returns:
a list of n Tensors
Raises:
ValueError: if devices is not a list of n strings
"""
n = len(xs)
if len(devices) != n:
raise ValueError("devices must be a list of length len(xs)")
if n == 1:
return xs
shape = xs[0].shape.as_list()
# tf.logging.info("allreduce_ring shape = %s" % shape)
size = None if None in shape else mtf.list_product(shape)
if size is None or size < 1024 or size % n != 0:
return allreduce_ring_single_shard(xs, devices, reduction_fn_string)
def _circular_shift(l, n):
n %= len(l)
return l[-n:] + l[:-n]
def _flatten_and_split(x):
# tf.reshape treats [-1] as a special value denoting 1D flattening.
return tf.split(tf.reshape(x, [-1]), n)
def _concat_and_reshape(xs):
return tf.reshape(tf.concat(xs, 0), shape)
# [device, shard]
x_split = mtf.parallel(devices, _flatten_and_split, xs)
x_split_t = mtf.transpose_list_of_lists(x_split)
y_split_t = []
for shard in xrange(n):
shard_xs = _circular_shift(x_split_t[shard], shard)
shard_devices = _circular_shift(devices, shard)
shard_ys = allreduce_ring_single_shard(
shard_xs, shard_devices, reduction_fn_string)
y_split_t.append(_circular_shift(shard_ys, -shard))
y_split = mtf.transpose_list_of_lists(y_split_t)
ys = mtf.parallel(devices, _concat_and_reshape, y_split)
return ys
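# Illustrative sketch (not part of the library) of the semantics allreduce_ring
# implements: every device ends up holding the same elementwise reduction of all
# inputs; the flatten/split/rotate logic above is purely a performance
# optimization for a ring of devices. Plain Python, hypothetical names:
def _allreduce_semantics_sketch(xs, reduction_fn=sum):
  """xs: a list of equal-length lists, one per device."""
  reduced = [reduction_fn(vals) for vals in zip(*xs)]
  return [list(reduced) for _ in xs]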
def allconcat_ring(xs, devices, concat_axis):
"""Concatenate all Tensors everywhere.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of n strings
concat_axis: an integer
Returns:
a list of n Tensors
"""
n = len(xs)
if n == 1:
return xs
# [target, source]
parts = [[xs[target] if target == source else None for source in xrange(n)]
for target in xrange(n)]
for distance in xrange(1, n // 2 + 1):
for target in xrange(n):
source = (target + distance) % n
if parts[target][source] is None:
with tf.device(devices[target]):
parts[target][source] = tf.identity(parts[(target + 1) % n][source])
source = (target - distance) % n
if parts[target][source] is None:
with tf.device(devices[target]):
parts[target][source] = tf.identity(parts[(target - 1) % n][source])
return mtf.parallel(devices, tf.concat, parts, axis=[concat_axis] * n)
def alltoall_pointtwise(xs, devices, split_axis, concat_axis):
"""MPI alltoall operation.
Implementation of alltoall using pointwise communication.
Args:
xs: a list of n tf.Tensors
devices: a list of n strings
split_axis: an integer
concat_axis: an integer
Returns:
a list of n Tensors
"""
n = len(xs)
if n == 1:
return xs
# [target, source]
parts = mtf.transpose_list_of_lists(
mtf.parallel(devices, tf.split, xs, [n] * n, axis=[split_axis] * n))
return mtf.parallel(devices, tf.concat, parts, axis=[concat_axis] * n)
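# Illustrative sketch (not part of the library) of the alltoall semantics:
# device i splits its tensor into n parts and sends part j to device j, which
# then concatenates what it receives. With lists standing in for the split and
# concat axes, this is just a transpose of a list of lists (hypothetical helper):
def _alltoall_semantics_sketch(xs):
  """xs: n lists of n parts; returns, per target device, the parts it receives."""
  n = len(xs)
  return [[xs[source][target] for source in range(n)] for target in range(n)]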
def alltoall_ring(xs, devices, split_axis, concat_axis):
"""MPI alltoall operation.
Performance-optimized for a ring of devices.
Args:
xs: a list of n tf.Tensors
devices: a list of n strings
split_axis: an integer
concat_axis: an integer
Returns:
a list of n Tensors
"""
n = len(xs)
if n == 1:
return xs
# set up
# [target, source]
parts = [[None] * n for i in xrange(n)]
def my_split(x, size_splits):
total_size = tf.shape(x)[split_axis]
part_size = total_size // sum(size_splits)
return tf.split(x, [s * part_size for s in size_splits], axis=split_axis)
forward_message_size = (n - 1) // 2
backward_message_size = (n - 1) - forward_message_size
forward_messages = [None] * n
backward_messages = [None] * n
for i in xrange(n):
with tf.device(devices[i]):
if i >= backward_message_size:
a, b, c, d = my_split(
xs[i], [i - backward_message_size,
backward_message_size, 1, n - i - 1])
backward_messages[i] = b
parts[i][i] = c
forward_messages[i] = tf.concat([d, a], axis=split_axis)
else:
a, b, c, d = my_split(
xs[i], [i, 1, forward_message_size, backward_message_size - i])
backward_messages[i] = tf.concat([d, a], axis=split_axis)
parts[i][i] = b
forward_messages[i] = c
for step in xrange(1, max(forward_message_size, backward_message_size) + 1):
new_forward_messages = [None] * n
new_backward_messages = [None] * n
for i in xrange(n):
with tf.device(devices[i]):
if forward_message_size > 0:
parts[i][(i - step) % n], new_forward_messages[i] = my_split(
forward_messages[(i - 1) % n], [1, forward_message_size - 1])
if backward_message_size > 0:
new_backward_messages[i], parts[i][(i + step) % n] = my_split(
backward_messages[(i + 1) % n], [backward_message_size - 1, 1])
forward_message_size -= 1
backward_message_size -= 1
forward_messages = new_forward_messages
backward_messages = new_backward_messages
return mtf.parallel(devices, tf.concat, parts, axis=[concat_axis] * n)
| mesh-master | mesh_tensorflow/placement_mesh_impl.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Mesh TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf
class SimdMeshImplTest(parameterized.TestCase):
@parameterized.parameters(
([8, 8, 2], [("dp", None)]),
([8, 8, 2], [("dp", None), ("mp", [1, 1, 2])]),
([8, 8, 2], [("dp", [8, 8, 1]), ("mp", [1, 1, 2])]),
([8, 8, 2], [("dp", None), ("mp", [2, 8, 1])]),
([8, 8, 2], [("dp", None), ("mp1", [1, 8, 1]), ("mp2", [8, 1, 2])]),
([8, 8, 2], [("dp", None), ("mp1", [2, 2, 1]), ("mp2", [2, 2, 1])]),
([9, 15, 7], [("d1", [3, 5, 1]), ("d2", [3, 3, 7])]),
)
def testHierarchicalTiling(self, physical_shape, spec):
hierarchical_tiling = mtf.simd_mesh_impl.HierarchicalTiling(
spec, physical_shape)
mesh_shape = hierarchical_tiling.mesh_shape
logical_to_physical = hierarchical_tiling.logical_to_physical
num_cores = physical_shape[0] * physical_shape[1] * physical_shape[2]
expected_mesh_shape = (
mtf.simd_mesh_impl.HierarchicalTiling.spec_to_mesh_shape(
spec, num_cores))
self.assertEqual(mesh_shape, expected_mesh_shape)
self.assertCountEqual(logical_to_physical, list(range(num_cores)))
@parameterized.parameters(
([128], [8, 8, 2]),
([8, 16], [8, 8, 2]),
([32, 4], [8, 8, 2]),
([2, 32, 4], [256]),
([4, 4, 8], [8, 8, 2]),
)
def testLogicalToPhysical(self, physical_shape, logical_shape):
logical_to_physical = mtf.simd_mesh_impl.auto_logical_to_physical_tpu(
physical_shape, logical_shape)
self.assertCountEqual(
logical_to_physical, list(range(mtf.list_product(physical_shape))))
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.test.main()
| mesh-master | mesh_tensorflow/simd_mesh_impl_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mesh_tensorflow.test_utils."""
import mesh_tensorflow as mtf
from mesh_tensorflow import test_utils
import numpy as np
import tensorflow.compat.v1 as tf
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.framework import test_util as tf_test_util
class TestUtilsTest(tf.test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_convert_mtf_tensor_to_np_array(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]])
converter = test_utils.NumpyConverter()
shape = mtf.Shape([mtf.Dimension("dim0", 2), mtf.Dimension("dim1", 3)])
x_mtf = mtf.constant(converter.mesh, x_np, shape=shape, dtype=tf.int32)
actual = converter.convert_mtf_tensor_to_np_array(x_mtf)
self.assertAllEqual(x_np, actual)
@tf_test_util.run_in_graph_and_eager_modes
def test_convert_mtf_tensor_to_np_array_with_trainable_variable(self):
converter = test_utils.NumpyConverter()
shape = mtf.Shape([mtf.Dimension("dim0", 2), mtf.Dimension("dim1", 3)])
x_mtf = mtf.get_variable(
converter.mesh,
name="x",
shape=shape,
dtype=tf.float32,
initializer=tf.zeros_initializer())
actual = converter.convert_mtf_tensor_to_np_array(x_mtf)
self.assertAllClose(np.zeros_like(actual), actual)
def test_convert_mtf_tensor_to_tf_tensor(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]])
converter = test_utils.NumpyConverter()
shape = mtf.Shape([mtf.Dimension("dim0", 2), mtf.Dimension("dim1", 3)])
x_mtf = mtf.constant(converter.mesh, x_np, shape=shape, dtype=tf.int32)
_, x_tf = converter.convert_mtf_tensor_to_tf_tensor(x_mtf)
actual = self.evaluate(x_tf)
self.assertAllEqual(x_np, actual)
if __name__ == "__main__":
tf.test.main()
| mesh-master | mesh_tensorflow/test_utils_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data augmentation lib for the Liver Tumor Segmentation (LiTS) dataset.
A set of data augmentation functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from tensorflow.contrib import image as contrib_image
def _truncated_normal(mean, stddev):
v = tf.random.normal(shape=[], mean=mean, stddev=stddev)
v = tf.clip_by_value(v, -2 * stddev + mean, 2 * stddev + mean)
return v
def _rand_noise(noise_mean, noise_dev, scale, shape):
"""Generate random noise given a particular scale and shape."""
noise_shape = [x // scale for x in shape]
noise_shape = [1 if x == 0 else x for x in noise_shape]
noise = tf.random.normal(
shape=noise_shape, mean=noise_mean, stddev=noise_dev)
noise = tf.clip_by_value(
noise, noise_mean - 2.0 * noise_dev, noise_mean + 2.0 * noise_dev)
if scale != 1:
noise = tf.image.resize_images(
noise, [shape[0], shape[1]])
noise = tf.transpose(noise, [0, 2, 1])
noise = tf.image.resize_images(
noise, [shape[0], shape[2]])
noise = tf.transpose(noise, [0, 2, 1])
return noise
def projective_transform(
image, label, reso, image_translate_ratio, image_transform_ratio,
sampled_2d_slices=False):
"""Apply projective transformation on image and label."""
if image_translate_ratio < 0.000001 and (
image_transform_ratio < 0.000001):
return image, label
def _projective_transform(data, proj_matrix, static_axis, interpolation):
"""Apply projective transformation."""
if static_axis == 2:
data = contrib_image.transform(data, proj_matrix, interpolation)
elif static_axis == 1:
data = tf.transpose(data, [0, 2, 1])
data = contrib_image.transform(data, proj_matrix, interpolation)
data = tf.transpose(data, [0, 2, 1])
else:
data = tf.transpose(data, [2, 1, 0])
data = contrib_image.transform(data, proj_matrix, interpolation)
data = tf.transpose(data, [2, 1, 0])
return data
for static_axis in [0, 1, 2]:
if sampled_2d_slices and static_axis != 2:
continue
a0 = _truncated_normal(1.0, image_transform_ratio)
a1 = _truncated_normal(0.0, image_transform_ratio)
a2 = _truncated_normal(
0.0, image_translate_ratio * reso)
b0 = _truncated_normal(0.0, image_transform_ratio)
b1 = _truncated_normal(1.0, image_transform_ratio)
b2 = _truncated_normal(
0.0, image_translate_ratio * reso)
c0 = _truncated_normal(0.0, image_transform_ratio)
c1 = _truncated_normal(0.0, image_transform_ratio)
proj_matrix = [a0, a1, a2, b0, b1, b2, c0, c1]
image = _projective_transform(image, proj_matrix, static_axis, 'BILINEAR')
label = _projective_transform(label, proj_matrix, static_axis, 'NEAREST')
return image, label
def maybe_add_noise(image, noise_shape, scale0, scale1,
image_noise_probability, image_noise_ratio):
"""Add noise at two scales."""
if image_noise_probability < 0.000001 or (
image_noise_ratio < 0.000001):
return image
noise_list = []
for scale in [scale0, scale1]:
rand_image_noise_ratio = tf.random.uniform(
shape=[], minval=0.0, maxval=image_noise_ratio)
noise_list.append(
_rand_noise(0.0, rand_image_noise_ratio, scale, noise_shape))
skip_noise = tf.greater(tf.random.uniform([]), image_noise_probability)
image = tf.cond(skip_noise,
lambda: image, lambda: image + noise_list[0])
image = tf.cond(skip_noise,
lambda: image, lambda: image + noise_list[1])
return image
def _gen_rand_mask(ratio_mean, ratio_stddev, scale, shape, smoothness=0):
"""Generate a binary mask."""
scale = max(scale, 1)
ratio = tf.random.normal(
shape=[], mean=ratio_mean, stddev=ratio_stddev)
low_bound = tf.maximum(0.0, ratio_mean - 2 * ratio_stddev)
up_bound = tf.minimum(1.0, ratio_mean + 2 * ratio_stddev)
percentil_q = tf.cast(
100.0 * tf.clip_by_value(ratio, low_bound, up_bound),
tf.int32)
pattern = _rand_noise(0.0, 1.0, scale, shape)
if smoothness > 0:
smoothness = int(smoothness) // 2 * 2 + 1
pattern = tf.expand_dims(tf.expand_dims(pattern, 0), -1)
pattern = tf.nn.conv3d(
pattern, filter=tf.ones([smoothness, smoothness, smoothness, 1, 1]),
strides=[1, 1, 1, 1, 1], padding='SAME', dilations=[1, 1, 1, 1, 1])
pattern = tf.reduce_sum(pattern, 0)
pattern = tf.reduce_sum(pattern, -1)
thres = tfp.stats.percentile(pattern, q=percentil_q)
rand_mask = tf.less(pattern, thres)
return rand_mask
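# Illustrative NumPy sketch (not part of the library) of the thresholding idea
# in _gen_rand_mask: draw a noise pattern and keep values below its q-th
# percentile, which yields a binary mask covering roughly that fraction of the
# volume. The helper is hypothetical and skips the low-resolution noise and the
# smoothing convolution used above.
def _np_percentile_mask_sketch(shape=(8, 8), ratio=0.25, seed=0):
  import numpy as np
  rng = np.random.RandomState(seed)
  pattern = rng.normal(size=shape)
  thres = np.percentile(pattern, 100.0 * ratio)
  return pattern < thres                    # True on roughly 25% of the entries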
def maybe_gen_fake_data_based_on_real_data(
image, label, reso, min_fake_lesion_ratio, gen_fake_probability):
"""Remove real lesion and synthesize lesion."""
# TODO(lehou): Replace magic numbers with flag variables.
gen_prob_indicator = tf.random_uniform(
shape=[], minval=0.0, maxval=1.0, dtype=tf.float32)
background_mask = tf.less(label, 0.5)
lesion_mask = tf.greater(label, 1.5)
liver_mask = tf.logical_not(tf.logical_or(background_mask, lesion_mask))
liver_intensity = tf.boolean_mask(image, liver_mask)
lesion_intensity = tf.boolean_mask(image, lesion_mask)
intensity_diff = tf.reduce_mean(liver_intensity) - (
tf.reduce_mean(lesion_intensity))
intensity_diff *= 1.15
intensity_diff = tf.cond(tf.is_nan(intensity_diff),
lambda: 0.0, lambda: intensity_diff)
lesion_liver_ratio = 0.0
lesion_liver_ratio += tf.random.normal(shape=[], mean=0.01, stddev=0.01)
lesion_liver_ratio += tf.random.normal(shape=[], mean=0.0, stddev=0.05)
lesion_liver_ratio = tf.clip_by_value(
lesion_liver_ratio, min_fake_lesion_ratio, min_fake_lesion_ratio + 0.20)
fake_lesion_mask = tf.logical_and(
_gen_rand_mask(ratio_mean=lesion_liver_ratio, ratio_stddev=0.0,
scale=reso // 32, shape=label.shape,
smoothness=reso // 32),
tf.logical_not(background_mask))
liver_mask = tf.logical_not(tf.logical_or(background_mask, fake_lesion_mask))
# Blur the masks
lesion_mask_blur = tf.squeeze(tf.nn.conv3d(
tf.expand_dims(tf.expand_dims(tf.cast(lesion_mask, tf.float32), -1), 0),
filter=tf.ones([reso // 32] * 3 + [1, 1], tf.float32) / (reso // 32) ** 3,
strides=[1, 1, 1, 1, 1],
padding='SAME'))
fake_lesion_mask_blur = tf.squeeze(tf.nn.conv3d(
tf.expand_dims(tf.expand_dims(
tf.cast(fake_lesion_mask, tf.float32), -1), 0),
filter=tf.ones([reso // 32] * 3 + [1, 1], tf.float32) / (reso // 32) ** 3,
strides=[1, 1, 1, 1, 1],
padding='SAME'))
# Remove real lesion and add fake lesion.
  # If the intensity difference is too small (maybe no liver or lesion region
  # is labeled), do not generate fake data.
gen_prob_indicator = tf.cond(
tf.greater(intensity_diff, 0.0001),
lambda: gen_prob_indicator, lambda: 0.0)
# pylint: disable=g-long-lambda
image = tf.cond(
tf.greater(gen_prob_indicator, 1 - gen_fake_probability),
lambda: image + intensity_diff * lesion_mask_blur \
- intensity_diff * fake_lesion_mask_blur,
lambda: image)
label = tf.cond(
tf.greater(gen_prob_indicator, 1 - gen_fake_probability),
lambda: tf.cast(background_mask, tf.float32) * 0 + \
tf.cast(liver_mask, tf.float32) * 1 + \
tf.cast(fake_lesion_mask, tf.float32) * 2,
lambda: label)
# pylint: enable=g-long-lambda
return image, label
def maybe_flip(image, label, flip_axis, flip_indicator=None):
"""Randomly flip the image."""
if flip_indicator is None:
flip_indicator = tf.random_uniform(shape=[])
flip_or_not = tf.greater(flip_indicator, 0.5)
def _maybe_flip(data):
"""Flip or not according to flip_or_not."""
data = tf.cond(tf.logical_and(flip_or_not, tf.equal(flip_axis, 1)),
lambda: tf.transpose(data, [1, 0, 2]),
lambda: data)
data = tf.cond(tf.logical_and(flip_or_not, tf.equal(flip_axis, 2)),
lambda: tf.transpose(data, [2, 1, 0]),
lambda: data)
data = tf.cond(flip_or_not,
lambda: tf.image.flip_up_down(data),
lambda: data)
data = tf.cond(tf.logical_and(flip_or_not, tf.equal(flip_axis, 1)),
lambda: tf.transpose(data, [1, 0, 2]),
lambda: data)
data = tf.cond(tf.logical_and(flip_or_not, tf.equal(flip_axis, 2)),
lambda: tf.transpose(data, [2, 1, 0]),
lambda: data)
return data
return _maybe_flip(image), _maybe_flip(label)
def maybe_rot180(image, label, static_axis, rot180_k=None):
"""Randomly rotate the image 180 degrees."""
if rot180_k is None:
rot180_k = 2 * tf.random_uniform(
shape=[], minval=0, maxval=2, dtype=tf.int32)
rot_or_not = tf.not_equal(rot180_k, 0)
def _maybe_rot180(data):
"""Rotate or not according to rot_or_not."""
data = tf.cond(tf.logical_and(rot_or_not, tf.equal(static_axis, 0)),
lambda: tf.transpose(data, [2, 1, 0]),
lambda: data)
data = tf.cond(tf.logical_and(rot_or_not, tf.equal(static_axis, 1)),
lambda: tf.transpose(data, [0, 2, 1]),
lambda: data)
data = tf.cond(rot_or_not,
lambda: tf.image.rot90(data, k=rot180_k),
lambda: data)
data = tf.cond(tf.logical_and(rot_or_not, tf.equal(static_axis, 0)),
lambda: tf.transpose(data, [2, 1, 0]),
lambda: data)
data = tf.cond(tf.logical_and(rot_or_not, tf.equal(static_axis, 1)),
lambda: tf.transpose(data, [0, 2, 1]),
lambda: data)
return data
return _maybe_rot180(image), _maybe_rot180(label)
def intensity_shift(
image, label, per_class_intensity_scale, per_class_intensity_shift):
"""Perturb intensity in lesion and non-lesion regions."""
if per_class_intensity_scale < 0.000001 and (
per_class_intensity_shift < 0.000001):
return image
# Randomly change (mostly increase) intensity of non-lesion region.
per_class_noise = _truncated_normal(
per_class_intensity_shift, per_class_intensity_scale)
image = image + per_class_noise * (
image * tf.cast(tf.greater(label, 1.5), tf.float32))
# Randomly change (mostly decrease) intensity of lesion region.
per_class_noise = _truncated_normal(
-per_class_intensity_shift, per_class_intensity_scale)
image = image + per_class_noise * (
image * tf.cast(tf.less(label, 1.5), tf.float32))
return image
def image_corruption(
image, label, reso, image_corrupt_ratio_mean, image_corrupt_ratio_stddev):
"""Randomly drop non-lesion pixels."""
if image_corrupt_ratio_mean < 0.000001 and (
image_corrupt_ratio_stddev < 0.000001):
return image
# Corrupt non-lesion region according to keep_mask.
keep_mask = _gen_rand_mask(
1 - image_corrupt_ratio_mean,
image_corrupt_ratio_stddev,
reso // 3, image.shape)
keep_mask = tf.logical_or(tf.greater(label, 1.5), keep_mask)
image *= tf.cast(keep_mask, tf.float32)
return image
| mesh-master | mesh_tensorflow/experimental/data_aug_lib.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MeshTensorflow network of Unet with spatial partition.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import mesh_tensorflow as mtf
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf # tf
# pylint: disable=g-direct-tensorflow-import,g-direct-third-party-import
from mesh_tensorflow.experimental import data_aug_lib
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('sampled_2d_slices', False,
'Whether to build model on 2D CT slices instead of 3D.')
flags.DEFINE_integer('ct_resolution', 128,
'Resolution of CT images along depth, height and '
'width dimensions.')
flags.DEFINE_integer('n_dataset_read_interleave', 16,
'The number of interleave processes.')
flags.DEFINE_integer('n_dataset_processes', 16,
'The number of data augmentation processes.')
flags.DEFINE_integer('batch_size_train', 32, 'Training batch size.')
flags.DEFINE_integer('batch_size_eval', 32, 'Evaluation batch size.')
flags.DEFINE_integer('image_nx_block', 8, 'The number of x blocks.')
flags.DEFINE_integer('image_ny_block', 8, 'The number of y blocks.')
flags.DEFINE_integer('image_c', 1,
'The number of input image channels. '
'If sampled_2d_slices is False, image_c should be 1.')
flags.DEFINE_integer('label_c', 3, 'The number of output classes.')
flags.DEFINE_integer('pred_downsample', 2,
'Down-sampling the results by the factor of '
                     '`pred_downsample`, before outputting the results.')
flags.DEFINE_boolean('output_ground_truth', True,
'Whether to return the ground truth tensor in Unet, '
'in addition to returning the prediction tensor.')
flags.DEFINE_integer('n_base_filters', 32, 'The number of filters.')
flags.DEFINE_integer('network_depth', 4, 'The number of pooling layers.')
flags.DEFINE_integer('n_conv_per_block', 2,
'The number of conv layers between poolings.')
flags.DEFINE_boolean('with_batch_norm', True, 'Whether to use batch norm.')
flags.DEFINE_float('dropout_keep_p', 0.5, 'Probability to keep activations.')
flags.DEFINE_float('xen_liver_weight', 8,
'The weight of liver region pixels, '
'when computing the cross-entropy loss')
flags.DEFINE_float('xen_lesion_weight', 16,
'The weight of lesion region pixels, '
'when computing the cross-entropy loss')
flags.DEFINE_float('dice_loss_weight', 0.2,
'The weight of dice loss, ranges from 0 to 1')
flags.DEFINE_float('dice_epsilon', 0.1,
'A small value that prevents 0 dividing.')
flags.DEFINE_float('image_translate_ratio', 0.0,
'How much you want to translate the image and label, '
'for data augmentation.')
flags.DEFINE_float('image_transform_ratio', 0.0,
'How much you want to sheer the image and label, '
'for data augmentation.')
flags.DEFINE_float('image_noise_probability', 0.0,
'Probability of adding noise during data augmentation.')
flags.DEFINE_float('image_noise_ratio', 0.0,
'How much random noise you want to add to CT images.')
flags.DEFINE_float('image_corrupt_ratio_mean', 0.0,
                   'How much non-liver area you want to block out, on average.')
flags.DEFINE_float('image_corrupt_ratio_stddev', 0.0,
                   'Std-dev of how much non-liver area you want to block out.')
flags.DEFINE_float('per_class_intensity_scale', 0.0,
'How much to scale intensities of lesion/non-lesion areas.')
flags.DEFINE_float('per_class_intensity_shift', 0.0,
'How much to shift intensities of lesion/non-lesion areas.')
flags.DEFINE_string('mtf_dtype', 'bfloat16', 'dtype for MeshTensorflow.')
flags.DEFINE_string('layout',
'batch:cores, image_nx_block:rows, image_ny_block:columns',
'layout rules')
flags.DEFINE_string('train_file_pattern', '',
'Path to CT scan training data.')
flags.DEFINE_string('eval_file_pattern', '',
                    'Path to CT scan evaluation data.')
def get_layout():
return mtf.convert_to_layout_rules(FLAGS.layout)
def get_dataset_creator(dataset_str):
"""Returns a function that creates an unbatched dataset."""
if dataset_str == 'train':
data_file_pattern = FLAGS.train_file_pattern.format(FLAGS.ct_resolution)
shuffle = True
interleave = True
else:
assert dataset_str == 'eval'
data_file_pattern = FLAGS.eval_file_pattern.format(FLAGS.ct_resolution)
shuffle = False
interleave = False
def _dataset_creator():
"""Returns an unbatch dataset."""
def _get_stacked_2d_slices(image_3d, label_3d):
"""Return 2d slices of the 3d scan."""
image_stack = []
label_stack = []
for begin_idx in range(0, FLAGS.ct_resolution - FLAGS.image_c + 1):
slice_begin = [0, 0, begin_idx]
slice_size = [FLAGS.ct_resolution, FLAGS.ct_resolution, FLAGS.image_c]
image = tf.slice(image_3d, slice_begin, slice_size)
slice_begin = [0, 0, begin_idx + FLAGS.image_c // 2]
slice_size = [FLAGS.ct_resolution, FLAGS.ct_resolution, 1]
label = tf.slice(label_3d, slice_begin, slice_size)
spatial_dims_w_blocks = [FLAGS.image_nx_block,
FLAGS.ct_resolution // FLAGS.image_nx_block,
FLAGS.image_ny_block,
FLAGS.ct_resolution // FLAGS.image_ny_block]
image = tf.reshape(image, spatial_dims_w_blocks + [FLAGS.image_c])
label = tf.reshape(label, spatial_dims_w_blocks)
label = tf.cast(label, tf.int32)
label = tf.one_hot(label, FLAGS.label_c)
data_dtype = tf.as_dtype(FLAGS.mtf_dtype)
image = tf.cast(image, data_dtype)
label = tf.cast(label, data_dtype)
image_stack.append(image)
label_stack.append(label)
return tf.stack(image_stack), tf.stack(label_stack)
def _parser_fn(serialized_example):
"""Parses a single tf.Example into image and label tensors."""
features = {}
features['image/ct_image'] = tf.FixedLenFeature([], tf.string)
features['image/label'] = tf.FixedLenFeature([], tf.string)
parsed = tf.parse_single_example(serialized_example, features=features)
spatial_dims = [FLAGS.ct_resolution] * 3
if FLAGS.sampled_2d_slices:
noise_shape = [FLAGS.ct_resolution] * 2 + [FLAGS.image_c]
else:
noise_shape = [FLAGS.ct_resolution] * 3
image = tf.decode_raw(parsed['image/ct_image'], tf.float32)
label = tf.decode_raw(parsed['image/label'], tf.float32)
if dataset_str != 'train':
# Preprocess intensity, clip to 0 ~ 1.
# The training set is already preprocessed.
image = tf.clip_by_value(image / 1024.0 + 0.5, 0, 1)
image = tf.reshape(image, spatial_dims)
label = tf.reshape(label, spatial_dims)
if dataset_str == 'eval' and FLAGS.sampled_2d_slices:
return _get_stacked_2d_slices(image, label)
if FLAGS.sampled_2d_slices:
# Take random slices of images and label
begin_idx = tf.random_uniform(
shape=[], minval=0,
maxval=FLAGS.ct_resolution - FLAGS.image_c + 1, dtype=tf.int32)
slice_begin = [0, 0, begin_idx]
slice_size = [FLAGS.ct_resolution, FLAGS.ct_resolution, FLAGS.image_c]
image = tf.slice(image, slice_begin, slice_size)
label = tf.slice(label, slice_begin, slice_size)
if dataset_str == 'train':
for flip_axis in [0, 1, 2]:
image, label = data_aug_lib.maybe_flip(image, label, flip_axis)
image, label = data_aug_lib.maybe_rot180(image, label, static_axis=2)
image = data_aug_lib.intensity_shift(
image, label,
FLAGS.per_class_intensity_scale, FLAGS.per_class_intensity_shift)
image = data_aug_lib.image_corruption(
image, label, FLAGS.ct_resolution,
FLAGS.image_corrupt_ratio_mean, FLAGS.image_corrupt_ratio_stddev)
image = data_aug_lib.maybe_add_noise(
image, noise_shape, 1, 4,
FLAGS.image_noise_probability, FLAGS.image_noise_ratio)
image, label = data_aug_lib.projective_transform(
image, label, FLAGS.ct_resolution,
FLAGS.image_translate_ratio, FLAGS.image_transform_ratio,
FLAGS.sampled_2d_slices)
if FLAGS.sampled_2d_slices:
# Only get the center slice of label.
label = tf.slice(label, [0, 0, FLAGS.image_c // 2],
[FLAGS.ct_resolution, FLAGS.ct_resolution, 1])
spatial_dims_w_blocks = [FLAGS.image_nx_block,
FLAGS.ct_resolution // FLAGS.image_nx_block,
FLAGS.image_ny_block,
FLAGS.ct_resolution // FLAGS.image_ny_block]
if not FLAGS.sampled_2d_slices:
spatial_dims_w_blocks += [FLAGS.ct_resolution]
image = tf.reshape(image, spatial_dims_w_blocks + [FLAGS.image_c])
label = tf.reshape(label, spatial_dims_w_blocks)
label = tf.cast(label, tf.int32)
label = tf.one_hot(label, FLAGS.label_c)
data_dtype = tf.as_dtype(FLAGS.mtf_dtype)
image = tf.cast(image, data_dtype)
label = tf.cast(label, data_dtype)
return image, label
dataset_fn = functools.partial(tf.data.TFRecordDataset,
compression_type='GZIP')
dataset = tf.data.Dataset.list_files(data_file_pattern,
shuffle=shuffle).repeat()
if interleave:
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
lambda file_name: dataset_fn(file_name).prefetch(1),
cycle_length=FLAGS.n_dataset_read_interleave,
sloppy=True))
else:
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
lambda file_name: dataset_fn(file_name).prefetch(1),
cycle_length=1,
sloppy=False))
if shuffle:
dataset = dataset.shuffle(FLAGS.n_dataset_processes).map(
_parser_fn, num_parallel_calls=FLAGS.n_dataset_processes)
else:
dataset = dataset.map(_parser_fn)
if dataset_str == 'eval' and FLAGS.sampled_2d_slices:
# When evaluating on slices, unbatch slices that belong to one CT scan.
dataset = dataset.unbatch()
return dataset
return _dataset_creator
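# A minimal usage sketch for the dataset creator above (mirrors how
# model_executor.py wires it into the MTF input readers; the iterator handling
# here is illustrative only):
#
#   ds_creator = get_dataset_creator('train')
#   dataset = ds_creator()  # tf.data.Dataset of single (image, label) examples
#   iterator = dataset.make_initializable_iterator()
#   image, label = iterator.get_next()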
def get_input_mtf_shapes(dataset_str):
"""Returns a list of mtf.Shapes of input tensors."""
if dataset_str == 'train':
batch_dim = mtf.Dimension('batch', FLAGS.batch_size_train)
else:
assert dataset_str == 'eval'
batch_dim = mtf.Dimension('batch', FLAGS.batch_size_eval)
image_nx_dim = mtf.Dimension('image_nx_block', FLAGS.image_nx_block)
image_ny_dim = mtf.Dimension('image_ny_block', FLAGS.image_ny_block)
image_sx_dim = mtf.Dimension('image_sx_block',
FLAGS.ct_resolution // FLAGS.image_nx_block)
image_sy_dim = mtf.Dimension('image_sy_block',
FLAGS.ct_resolution // FLAGS.image_ny_block)
batch_spatial_dims = [batch_dim,
image_nx_dim, image_sx_dim,
image_ny_dim, image_sy_dim]
if not FLAGS.sampled_2d_slices:
image_sz_dim = mtf.Dimension('image_sz_block', FLAGS.ct_resolution)
batch_spatial_dims += [image_sz_dim]
image_c_dim = mtf.Dimension('image_c', FLAGS.image_c)
mtf_image_shape = mtf.Shape(batch_spatial_dims + [image_c_dim])
label_c_dim = mtf.Dimension('label_c', FLAGS.label_c)
mtf_label_shape = mtf.Shape(batch_spatial_dims + [label_c_dim])
return [mtf_image_shape, mtf_label_shape]
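# For example, with ct_resolution=128, image_nx_block=image_ny_block=8,
# image_c=1 and sampled_2d_slices=False (illustrative values), the image shape
# above is [batch, 8, 16, 8, 16, 128, 1] and the label shape is
# [batch, 8, 16, 8, 16, 128, label_c].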
class PostProcessor(object):
"""Merge and save evaluation results."""
def __init__(self):
self._area_int = []
self._area_sum = []
self._instance_i = 0
def record(self, results, pred_output_dir):
"""Do whatever to the results returned by unet_with_spatial_partition."""
if FLAGS.output_ground_truth:
pred_liver, pred_lesion, label, area_int, area_sum, _, global_step = (
results)
else:
pred_liver, pred_lesion, area_int, area_sum, _, global_step = results
if not tf.gfile.IsDirectory(pred_output_dir):
tf.gfile.MakeDirs(pred_output_dir)
if FLAGS.sampled_2d_slices:
with tf.gfile.Open(os.path.join(
pred_output_dir, 'pred_liver_{}_{}.npy'.format(
global_step, self._instance_i)), 'wb') as f:
np.save(f, pred_liver)
with tf.gfile.Open(os.path.join(
pred_output_dir, 'pred_lesion_{}_{}.npy'.format(
global_step, self._instance_i)), 'wb') as f:
np.save(f, pred_lesion)
if FLAGS.output_ground_truth:
with tf.gfile.Open(os.path.join(
pred_output_dir, 'label_{}_{}.npy'.format(
global_step, self._instance_i)), 'wb') as f:
np.save(f, label)
self._instance_i += 1
else:
pred_liver = self._reshape_to_cubes(pred_liver)
for ins_i, pred_liver_instance in enumerate(pred_liver):
with tf.gfile.Open(os.path.join(
pred_output_dir, 'pred_liver_{}_{}.npy'.format(
global_step, self._instance_i + ins_i)), 'wb') as f:
np.save(f, pred_liver_instance)
pred_lesion = self._reshape_to_cubes(pred_lesion)
for ins_i, pred_lesion_instance in enumerate(pred_lesion):
with tf.gfile.Open(os.path.join(
pred_output_dir, 'pred_lesion_{}_{}.npy'.format(
global_step, self._instance_i + ins_i)), 'wb') as f:
np.save(f, pred_lesion_instance)
if FLAGS.output_ground_truth:
label = self._reshape_to_cubes(label)
for ins_i, label_instance in enumerate(label):
with tf.gfile.Open(os.path.join(
pred_output_dir, 'label_{}_{}.npy'.format(
global_step, self._instance_i + ins_i)), 'wb') as f:
np.save(f, label_instance)
self._instance_i += len(pred_liver)
self._area_int.append(area_int)
self._area_sum.append(area_sum)
def finish(self):
"""Merge the results and compute final dice scores."""
area_int = np.concatenate(self._area_int)
area_sum = np.concatenate(self._area_sum)
if FLAGS.sampled_2d_slices:
# Merge the results on 2d slices.
assert area_int.size % (FLAGS.ct_resolution - FLAGS.image_c + 1) == 0, (
'Wrong number of results: {}'.format(area_int.shape))
area_int = area_int.reshape([-1, FLAGS.ct_resolution - FLAGS.image_c + 1])
area_int = area_int.sum(axis=1)
area_sum = area_sum.reshape([-1, FLAGS.ct_resolution - FLAGS.image_c + 1])
area_sum = area_sum.sum(axis=1)
dice_per_case = (2 * area_int / (area_sum + 0.001)).mean()
dice_global = 2 * area_int.sum() / (area_sum.sum() + 0.001)
# pylint: disable=logging-format-interpolation
tf.logging.info('dice_per_case: {}, dice_global: {}'.format(
dice_per_case, dice_global))
# pylint: enable=logging-format-interpolation
def _reshape_to_cubes(self, data):
reso = FLAGS.ct_resolution // FLAGS.pred_downsample
data = np.transpose(data, (0, 1, 3, 2, 4, 5))
data = np.reshape(data, (data.shape[0], reso, reso, reso))
return data
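# A minimal usage sketch for PostProcessor (mirrors the eval loop in
# model_executor.py; `num_eval_steps`, `eval_fetches` and `output_dir` are
# placeholders, not names defined in this module):
#
#   pprocessor = PostProcessor()
#   for _ in range(num_eval_steps):
#     results = sess.run(eval_fetches)
#     pprocessor.record(results, output_dir)
#   pprocessor.finish()  # logs dice_per_case and dice_global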
def conv_with_spatial_partition(
x, sampled_2d_slices, image_nx_dim, image_ny_dim, n_filters,
keep_p, with_batch_norm, is_training, odim_name, variable_dtype, name):
"""Conv with spatial partition, batch_noram and activation."""
if sampled_2d_slices:
x = mtf.layers.conv2d_with_blocks(
x, mtf.Dimension(odim_name, n_filters),
filter_size=(3, 3), strides=(1, 1), padding='SAME',
h_blocks_dim=image_nx_dim, w_blocks_dim=image_ny_dim,
variable_dtype=variable_dtype,
name=name,
)
else:
x = mtf.layers.conv3d_with_blocks(
x, mtf.Dimension(odim_name, n_filters),
filter_size=(3, 3, 3), strides=(1, 1, 1), padding='SAME',
d_blocks_dim=image_nx_dim, h_blocks_dim=image_ny_dim,
variable_dtype=variable_dtype,
name=name,
)
if with_batch_norm:
x, bn_update_ops = mtf.layers.batch_norm(
x, is_training, momentum=0.90, epsilon=0.000001,
dims_idx_start=0, dims_idx_end=-1, name=name)
else:
bn_update_ops = []
x = mtf.leaky_relu(x, 0.1)
if is_training:
x = mtf.dropout(x, keep_p)
return x, bn_update_ops
def deconv_with_spatial_partition(
x, sampled_2d_slices, image_nx_dim, image_ny_dim, n_filters, keep_p,
odim_name, variable_dtype, name):
"""Deconvolution with spatial partition."""
if sampled_2d_slices:
x = mtf.layers.conv2d_transpose_with_blocks(
x, mtf.Dimension(odim_name, n_filters),
filter_size=(2, 2), strides=(2, 2), padding='SAME',
h_blocks_dim=image_nx_dim, w_blocks_dim=image_ny_dim,
variable_dtype=variable_dtype,
name=name,
)
else:
x = mtf.layers.conv3d_transpose_with_blocks(
x, mtf.Dimension(odim_name, n_filters),
filter_size=(2, 2, 2), strides=(2, 2, 2), padding='SAME',
d_blocks_dim=image_nx_dim, h_blocks_dim=image_ny_dim,
variable_dtype=variable_dtype,
name=name,
)
x = mtf.dropout(x, keep_p)
return x
def unet_with_spatial_partition(mesh, mesh_impl, dataset_str, images, labels):
"""Builds the UNet model graph, train op and eval metrics.
Args:
mesh: a MeshTensorflow.mesh object.
mesh_impl: a mesh implementation, such as SimdMeshImpl and
PlacementMeshImpl.
dataset_str: a string of either train or eval. This is used for batch_norm.
images: a laid out Tensor with shape [batch, x, y, num_channels]
or [batch, x, y, z, num_channels].
labels: a laid out Tensor with shape [batch, x, y, num_classes]
or [batch, x, y, z, num_classes].
Returns:
    A tuple of (predictions, loss, eval metrics, batch-norm update ops).
"""
is_training = (dataset_str == 'train')
if dataset_str == 'train':
batch_dim = mtf.Dimension('batch', FLAGS.batch_size_train)
else:
assert dataset_str == 'eval'
batch_dim = mtf.Dimension('batch', FLAGS.batch_size_eval)
image_nx_dim = mtf.Dimension('image_nx_block', FLAGS.image_nx_block)
image_ny_dim = mtf.Dimension('image_ny_block', FLAGS.image_ny_block)
image_sx_dim = mtf.Dimension('image_sx_block',
FLAGS.ct_resolution // FLAGS.image_nx_block)
image_sy_dim = mtf.Dimension('image_sy_block',
FLAGS.ct_resolution // FLAGS.image_ny_block)
image_sz_dim = mtf.Dimension('image_sz_block', FLAGS.ct_resolution)
image_c_dim = mtf.Dimension('image_c', FLAGS.image_c)
label_c_dim = mtf.Dimension('label_c', FLAGS.label_c)
mtf_images_shape, mtf_labels_shape = get_input_mtf_shapes(dataset_str)
mtf_dtype = tf.as_dtype(FLAGS.mtf_dtype)
variable_dtype = mtf.VariableDType(mtf_dtype, mtf_dtype, mtf_dtype)
# Import input features.
x = mtf.import_laid_out_tensor(
mesh,
mesh_impl.LaidOutTensor(images),
mtf_images_shape)
x = mtf.cast(x, mtf_dtype)
# Import ground truth labels.
t = mtf.import_laid_out_tensor(
mesh,
mesh_impl.LaidOutTensor(labels),
mtf_labels_shape)
t = mtf.cast(t, mtf_dtype)
# Transpose the blocks.
if FLAGS.sampled_2d_slices:
x = mtf.transpose(x, [batch_dim,
image_nx_dim, image_ny_dim,
image_sx_dim, image_sy_dim,
image_c_dim])
t = mtf.transpose(t, [batch_dim,
image_nx_dim, image_ny_dim,
image_sx_dim, image_sy_dim,
label_c_dim])
else:
x = mtf.transpose(x, [batch_dim,
image_nx_dim, image_ny_dim,
image_sx_dim, image_sy_dim,
image_sz_dim, image_c_dim])
t = mtf.transpose(t, [batch_dim,
image_nx_dim, image_ny_dim,
image_sx_dim, image_sy_dim,
image_sz_dim, label_c_dim])
# Network.
levels = []
all_bn_update_ops = []
# add levels with convolution or down-sampling
for depth in range(FLAGS.network_depth):
for n_conv in range(FLAGS.n_conv_per_block):
if depth == 0 and n_conv == 0:
# no dropout in 1st layer.
dropout_keep_p = 1.0
else:
dropout_keep_p = FLAGS.dropout_keep_p
x, bn_update_ops = conv_with_spatial_partition(
x, FLAGS.sampled_2d_slices,
image_nx_dim, image_ny_dim,
FLAGS.n_base_filters * (2**depth),
dropout_keep_p,
FLAGS.with_batch_norm,
is_training,
'conv_{}_{}'.format(depth, n_conv),
variable_dtype,
'conv_down_{}_{}'.format(depth, n_conv))
all_bn_update_ops.extend(bn_update_ops)
levels.append(x)
if depth < FLAGS.network_depth - 1:
if FLAGS.sampled_2d_slices:
x = mtf.layers.max_pool2d(x, ksize=(2, 2))
else:
x = mtf.layers.max_pool3d(x, ksize=(2, 2, 2))
# add levels with up-convolution or up-sampling
for depth in range(FLAGS.network_depth - 1)[::-1]:
x = deconv_with_spatial_partition(
x, FLAGS.sampled_2d_slices, image_nx_dim, image_ny_dim,
FLAGS.n_base_filters * (2**depth),
FLAGS.dropout_keep_p,
'conv_{}_{}'.format(depth, FLAGS.n_conv_per_block - 1),
variable_dtype, 'deconv_{}_0'.format(depth))
x = mtf.concat(
[x, levels[depth]],
concat_dim_name='conv_{}_{}'.format(depth, FLAGS.n_conv_per_block - 1))
for n_conv in range(FLAGS.n_conv_per_block):
x, bn_update_ops = conv_with_spatial_partition(
x, FLAGS.sampled_2d_slices,
image_nx_dim, image_ny_dim,
FLAGS.n_base_filters * (2**depth),
FLAGS.dropout_keep_p,
FLAGS.with_batch_norm,
is_training,
'conv_{}_{}'.format(depth, n_conv),
variable_dtype,
'conv_up_{}_{}'.format(depth, n_conv))
all_bn_update_ops.extend(bn_update_ops)
# no dropout in the final layer.
if FLAGS.sampled_2d_slices:
y = mtf.layers.conv2d_with_blocks(
x, mtf.Dimension('label_c', FLAGS.label_c),
filter_size=(1, 1), strides=(1, 1), padding='SAME',
h_blocks_dim=image_nx_dim, w_blocks_dim=image_ny_dim,
variable_dtype=variable_dtype,
name='final_conv_{}'.format(FLAGS.label_c),
)
else:
y = mtf.layers.conv3d_with_blocks(
x, mtf.Dimension('label_c', FLAGS.label_c),
filter_size=(1, 1, 1), strides=(1, 1, 1), padding='SAME',
d_blocks_dim=image_nx_dim, h_blocks_dim=image_ny_dim,
variable_dtype=variable_dtype,
name='final_conv_{}'.format(FLAGS.label_c),
)
  # Use mtf.constant to make sure there are no CPU-side constants.
def scalar(v, dtype):
return mtf.constant(mesh, v, shape=[], dtype=dtype)
argmax_t = mtf.argmax(t, label_c_dim)
liver_t = mtf.cast(mtf.equal(argmax_t, scalar(1, tf.int32)), mtf_dtype)
lesion_t = mtf.cast(mtf.equal(argmax_t, scalar(2, tf.int32)), mtf_dtype)
argmax_y = mtf.argmax(y, label_c_dim)
lesion_y = mtf.cast(mtf.equal(argmax_y, scalar(2, tf.int32)), mtf_dtype)
# summary of class ratios.
lesion_pred_ratio = mtf.reduce_mean(lesion_y)
lesion_label_ratio = mtf.reduce_mean(lesion_t)
# summary of accuracy.
accuracy = mtf.reduce_mean(mtf.cast(mtf.equal(argmax_y, argmax_t), mtf_dtype))
# Cross-entropy loss. Up-weight the liver region.
pixel_loss = mtf.layers.softmax_cross_entropy_with_logits(y, t, label_c_dim)
pixel_weight = scalar(1, mtf_dtype) + \
liver_t * scalar(FLAGS.xen_liver_weight - 1, mtf_dtype) + \
lesion_t * scalar(FLAGS.xen_lesion_weight - FLAGS.xen_liver_weight,
mtf_dtype)
loss_xen = mtf.reduce_mean(pixel_loss * pixel_weight)
# Dice loss
y_prob = mtf.softmax(y, reduced_dim=label_c_dim)
lesion_prob = mtf.reduce_sum(mtf.slice(y_prob, 2, 1, 'label_c'),
reduced_dim=mtf.Dimension('label_c', 1))
prob_intersect = mtf.reduce_sum(lesion_prob * lesion_t,
output_shape=mtf.Shape([batch_dim]))
prob_area_sum = mtf.reduce_sum(lesion_prob + lesion_t,
output_shape=mtf.Shape([batch_dim]))
loss_dice_per_case = mtf.reduce_mean(
scalar(-2, mtf_dtype) * prob_intersect / (
prob_area_sum + scalar(FLAGS.dice_epsilon, mtf_dtype)))
loss_dice_global = scalar(-2, mtf_dtype) * mtf.reduce_sum(prob_intersect) / (
mtf.reduce_sum(prob_area_sum) + scalar(FLAGS.dice_epsilon, mtf_dtype))
loss_dice = (loss_dice_per_case + loss_dice_global) * scalar(0.5, mtf_dtype)
loss = scalar(FLAGS.dice_loss_weight, mtf_dtype) * loss_dice + scalar(
1 - FLAGS.dice_loss_weight, mtf_dtype) * loss_xen
intersect = mtf.reduce_sum(lesion_y * lesion_t,
output_shape=mtf.Shape([batch_dim]))
area_sum = mtf.reduce_sum(lesion_y + lesion_t,
output_shape=mtf.Shape([batch_dim]))
# summary of dice.
dice_per_case = mtf.reduce_mean(scalar(2, mtf_dtype) * intersect / (
area_sum + scalar(0.000001, mtf_dtype)))
dice_global = scalar(2, mtf_dtype) * mtf.reduce_sum(intersect) / (
mtf.reduce_sum(area_sum) + scalar(0.000001, mtf_dtype))
eval_metrics = {
'lesion_pred_ratio': lesion_pred_ratio,
'lesion_label_ratio': lesion_label_ratio,
'accuracy_of_all_classes': accuracy,
'lesion_dice_per_case': dice_per_case,
'lesion_dice_global': dice_global,
'loss_xen': loss_xen,
'loss_dice': loss_dice,
'loss_dice_per_case': loss_dice_per_case,
'loss_dice_global': loss_dice_global,
}
if FLAGS.sampled_2d_slices:
y_prob_downsampled = mtf.layers.avg_pool2d(
y_prob, ksize=(FLAGS.pred_downsample,) * 2)
if FLAGS.output_ground_truth:
lesion_gt_downsampled = mtf.layers.avg_pool2d(
mtf.slice(t, 2, 1, 'label_c'), ksize=(FLAGS.pred_downsample,) * 2)
else:
y_prob_downsampled = mtf.layers.avg_pool3d(
y_prob, ksize=(FLAGS.pred_downsample,) * 3)
if FLAGS.output_ground_truth:
lesion_gt_downsampled = mtf.layers.avg_pool3d(
mtf.slice(t, 2, 1, 'label_c'), ksize=(FLAGS.pred_downsample,) * 3)
liver_prob_downsampled = mtf.slice(y_prob_downsampled, 1, 1, 'label_c')
lesion_prob_downsampled = mtf.slice(y_prob_downsampled, 2, 1, 'label_c')
preds = [
mtf.reduce_sum(liver_prob_downsampled,
reduced_dim=mtf.Dimension('label_c', 1)),
mtf.reduce_sum(lesion_prob_downsampled,
reduced_dim=mtf.Dimension('label_c', 1))]
if FLAGS.output_ground_truth:
preds.append(mtf.reduce_sum(
lesion_gt_downsampled, reduced_dim=mtf.Dimension('label_c', 1)))
preds.extend([intersect, area_sum])
return preds, loss, eval_metrics, all_bn_update_ops
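# A minimal sketch of how this model function is typically lowered (see
# model_executor.py for the full infeed/outfeed wiring; `graph`, `mesh`,
# `mesh_impl`, `images` and `labels` stand for objects built by the caller):
#
#   preds, loss, eval_metrics, bn_update_ops = unet_with_spatial_partition(
#       mesh, mesh_impl, 'train', images, labels)
#   lowering = mtf.Lowering(graph, {mesh: mesh_impl})
#   tf_loss = lowering.export_to_tf_tensor(loss)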
| mesh-master | mesh_tensorflow/experimental/unet.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A toy model using Mesh TensorFlow.
Using input_reader to handle the input pipeline.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import mesh_tensorflow as mtf
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-direct-third-party-import
from mesh_tensorflow.experimental import input_reader
from mesh_tensorflow.experimental import unet
from tensorflow.contrib import summary as contrib_summary
from tensorflow.contrib import tpu
from tensorflow.contrib.tpu.python.tpu import device_assignment
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import flags
from tensorflow.python.tpu.ops import tpu_ops
FLAGS = flags.FLAGS
flags.DEFINE_boolean('use_tpu', True, 'Use TPU or GPU.')
flags.DEFINE_float('lr', 0.003, 'Learning rate.')
flags.DEFINE_float('lr_drop_steps', 20000,
'Learning rate drops for every `lr_drop_steps` steps.')
flags.DEFINE_float('lr_drop_rate', 0.3,
'Learning rate drops by this amount.')
flags.DEFINE_integer('num_train_iterations_per_loop', 500,
'Number of training iterations per loop.')
flags.DEFINE_integer('num_eval_iterations_per_loop', 2,
'Number of eval iterations per loop.')
flags.DEFINE_integer('num_training_loops', 1000,
'Number of training loops.')
flags.DEFINE_string('mesh_shape', 'rows:4, columns:4, cores:2',
'mesh shape')
flags.DEFINE_string('master', '', 'Can be a headless master.')
flags.DEFINE_string('checkpoint_dir', '', 'Path to saved models.')
flags.DEFINE_integer('save_checkpoints_steps', 500,
'Frequency for saving models.')
flags.DEFINE_boolean('on_gcp', False, 'Set to True if running on Google Cloud.')
flags.DEFINE_boolean('write_summary', True, 'Whether to write summary.')
flags.DEFINE_string('summary_dir', '', 'Path to saved summaries.')
flags.DEFINE_string('pred_output_dir', '', 'Path to saved pred results.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
          'InternalError: Object can be captured only once. Please file a bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
          'Please file a bug.')
return self._object
class _CkptLoaderHook(tf.estimator.SessionRunHook):
"""Load checkpoint right after the session started."""
def after_create_session(self, session, coord):
# pylint: disable=protected-access
saver_collection = tf.get_collection(tf.GraphKeys.SAVERS)
if saver_collection:
saver = saver_collection[0]
check_point = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
if check_point:
saver.restore(session, check_point)
class MeshContext(object):
"""Creates mtf graph, mesh, and mesh implementation."""
def __init__(self, sess, use_tpu, mesh_shape, layout_rules):
super(MeshContext, self).__init__()
self._use_tpu = use_tpu
self._mesh_shape = mtf.convert_to_shape(mesh_shape)
self._layout_rules = layout_rules
self._d_assignment = None
self._num_hosts = None
self._num_cores = None
self._cpu_devices, self._gpu_devices = self._list_cpu_gpu_devices(sess)
if self._use_tpu:
topology = sess.run(tpu.initialize_system())
topo_object = tpu.Topology(serialized=topology)
self._num_cores = int(np.prod(topo_object.mesh_shape))
self._num_hosts = int(topo_object.num_tasks)
num_cores_per_host = int(self._num_cores // self._num_hosts)
assert num_cores_per_host == int(topo_object.num_tpus_per_task)
# Get a device_assignment object for mtf.
self._d_assignment = device_assignment.device_assignment(
topology,
computation_shape=[1,] * mtf.utils.topology_rank(topology),
num_replicas=self._num_cores)
self._mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(
self._mesh_shape, self._layout_rules, None, self._d_assignment)
else:
self._mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
self._mesh_shape, self._layout_rules, self._gpu_devices)
def create_graph_mesh_and_mesh_impl(self):
"""Creates mtf graph, mesh, and mesh impl.
This function can be called inside model_fn, which might be tpu_rewritten.
Returns:
graph, mesh, mesh_impl
"""
if self._use_tpu:
assert self._d_assignment
graph = mtf.Graph()
# Worker 0 caches all the TPU binaries.
replica_cache_size = 300 * 1024 * 1024 # 300M per replica.
worker0_mem = replica_cache_size * 8 * self._num_hosts
devices_memory_usage = [worker0_mem] + [0] * (self._num_hosts - 1)
var_placer = mtf.utils.BalancedVariablePlacer(self._cpu_devices,
devices_memory_usage)
mesh = mtf.Mesh(graph, 'my_mesh', var_placer)
mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(
self._mesh_shape, self._layout_rules, None, self._d_assignment)
return graph, mesh, mesh_impl
else:
graph = mtf.Graph()
mesh = mtf.Mesh(graph, 'my_mesh', None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
self._mesh_shape, self._layout_rules, self._gpu_devices)
return graph, mesh, mesh_impl
@property
def device_assignment(self):
return self._d_assignment
@property
def num_hosts(self):
return self._num_hosts
@property
def num_cores(self):
return self._num_cores
@property
def num_cores_per_host(self):
return self._num_cores // self._num_hosts
@property
def mesh_impl(self):
return self._mesh_impl
def _list_cpu_gpu_devices(self, sess):
"""Return the list of CPU and GPU (if any) devices in legacy name."""
def _convert_to_legacy_name(n):
n = re.sub('device:CPU', 'cpu', n)
n = re.sub('device:GPU', 'gpu', n)
return n
def _sort_device_name(devices):
parsed = []
for d in devices:
m = re.match('/job:(.*)/replica:(.*)/task:(.*)/.*', d)
parsed.append((m.group(1), int(m.group(2)), int(m.group(3)), d))
return [_[3] for _ in sorted(parsed)]
all_devices = sess.list_devices()
cpus = []
for d in all_devices:
if d.device_type == 'CPU':
cpus += [_convert_to_legacy_name(d.name)]
cpus = [n for n in _sort_device_name(cpus) if 'coordinator' not in n]
gpus = []
for d in all_devices:
if d.device_type == 'GPU':
gpus += [_convert_to_legacy_name(d.name)]
gpus = _sort_device_name(gpus)
return cpus, gpus
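# A minimal usage sketch for MeshContext (mirrors train_and_eval below;
# `resolver` and `config` are created there):
#
#   with tf.Session(target=resolver.master(), config=config) as sess:
#     mesh_context = MeshContext(
#         sess, FLAGS.use_tpu, FLAGS.mesh_shape, unet.get_layout())
#   graph, mesh, mesh_impl = mesh_context.create_graph_mesh_and_mesh_impl()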
def _get_model_fn(train_or_eval, mesh_context):
"""Returns _model_fn."""
captured_hooks = _CapturedObject()
captured_output_dtypes_shapes = _CapturedObject()
assert train_or_eval in ['train', 'eval']
def _model_fn(input_fea, input_lab):
"""Creates a model, add summary, modes (train or eval), and hooks."""
    # input_fea and input_lab should be lists of laid-out tensors.
if not isinstance(input_fea, list):
input_fea = [input_fea]
if not isinstance(input_lab, list):
input_lab = [input_lab]
def _add_summary(lowering, train_or_eval, tf_loss, scalars, global_step):
"""Add all summaries."""
for k in scalars.keys():
if not isinstance(scalars[k], tf.Tensor):
scalars[k] = tf.cast(
lowering.export_to_tf_tensor(scalars[k]), tf.float32)
def _host_loss_summary(global_step, tf_loss, **scalars):
"""Add summary.scalar in host side."""
gs = tf.cast(global_step, tf.int64)
sum_loss = contrib_summary.scalar(
'{}_loss'.format(train_or_eval), tf_loss, step=gs)
sum_ops = [sum_loss.op]
for description, tf_metric in six.iteritems(scalars):
sum_metric = contrib_summary.scalar(
'{}_{}'.format(train_or_eval, description), tf_metric, step=gs)
sum_ops.append(sum_metric)
with tf.control_dependencies(sum_ops):
return tf.identity(tf_loss)
if FLAGS.use_tpu:
# Cast the global step to tf.int32, since
# outside_compilation does not support tf.int64.
tf_loss = tpu.outside_compilation(
_host_loss_summary,
tf.cast(global_step, tf.int32),
tf_loss,
**scalars)
else:
tf_loss = _host_loss_summary(
tf.cast(global_step, tf.int32),
tf_loss,
**scalars)
return tf_loss
global_step = tf.train.get_or_create_global_step()
graph, mesh, mesh_impl = mesh_context.create_graph_mesh_and_mesh_impl()
with mtf.utils.outside_all_rewrites():
      # Do not tpu_rewrite this part. Inside this unet, if you use TensorFlow
      # ops instead of Mesh TensorFlow ops, it causes host-to-TPU send/recv.
preds, loss, scalars, bn_update_ops = (
unet.unet_with_spatial_partition(
mesh, mesh_impl, train_or_eval, input_fea, input_lab))
if train_or_eval == 'train':
var_grads = mtf.gradients(
[loss], [v.outputs[0] for v in graph.trainable_variables])
lr = FLAGS.lr * tf.pow(
FLAGS.lr_drop_rate,
tf.floor(tf.cast(global_step, tf.float32) / FLAGS.lr_drop_steps))
scalars['learning_rate'] = lr
optimizer = mtf.optimize.AdafactorOptimizer(learning_rate=lr)
update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables)
# This is where the actual tf graph got built.
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
tf_update_ops.extend(
[lowering.lowered_operation(op) for op in bn_update_ops])
else: # train_or_eval == 'eval':
preds = [mtf.anonymize(pred) for pred in preds]
# This is where the actual tf graph got built.
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
tf_preds = [tf.cast(
lowering.export_to_tf_tensor(pred), tf.float32) for pred in preds]
tf_loss = tf.cast(lowering.export_to_tf_tensor(loss), tf.float32)
if FLAGS.write_summary:
tf_loss = _add_summary(
lowering, train_or_eval, tf_loss, scalars, global_step)
master_to_slice_hook = mtf.MtfRestoreHook(lowering)
if train_or_eval == 'train':
with mtf.utils.outside_all_rewrites():
saver = tf.train.Saver(tf.global_variables(),
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
slice_to_master_hook = tf.train.CheckpointSaverHook(
FLAGS.checkpoint_dir,
save_steps=FLAGS.save_checkpoints_steps,
saver=saver, listeners=[saver_listener])
captured_hooks.capture([master_to_slice_hook, slice_to_master_hook])
return tf.group([tf_loss] + tf_update_ops)
else: # train_or_eval == 'eval':
if FLAGS.use_tpu:
tf_preds.extend([tf_loss, global_step])
tf_preds_dtypes = [tf_pred.dtype for tf_pred in tf_preds]
tf_preds_shapes = [tf_pred.shape for tf_pred in tf_preds]
captured_hooks.capture([master_to_slice_hook, None])
captured_output_dtypes_shapes.capture(
[tf_preds_dtypes, tf_preds_shapes])
return tpu_ops.outfeed_enqueue_tuple(tf_preds)
else:
tf_preds.extend([tf_loss, global_step])
captured_hooks.capture([master_to_slice_hook, None])
return tf_preds
return _model_fn, captured_hooks, captured_output_dtypes_shapes
def _get_scaffold(additional_initializers):
return tf.train.Scaffold(
init_op=control_flow_ops.group(
tf.global_variables_initializer(),
*additional_initializers),
local_init_op=tf.group(
tf.local_variables_initializer(),
tf.train.Scaffold.default_local_init_op(),
*additional_initializers))
def _print_variable_values(sess):
"""May give `Protocol buffer too large` error."""
np.set_printoptions(precision=4, linewidth=1000)
tf.logging.info('Printing variables.')
tf.logging.info('===================')
values = sess.run(tf.trainable_variables())
for variable, value in zip(tf.trainable_variables(), values):
tf.logging.info('{}, {}'.format(variable.name, value.shape))
tf.logging.info('{}'.format(np.array(value).flatten()))
def _train_phase(mesh_context, config, master):
"""Handles input pipeline and trains the network."""
if FLAGS.num_train_iterations_per_loop <= 0:
return
def _run_train_phase():
"""The real function that runs the training phase."""
# Setup input pipeline.
ds_creator = unet.get_dataset_creator('train')
mtf_shapes = unet.get_input_mtf_shapes('train')
model_train_fn, train_hooks, _ = _get_model_fn('train', mesh_context)
if FLAGS.use_tpu:
assert mesh_context.device_assignment
assert mesh_context.num_cores
simd_input_reader = input_reader.SimdMeshImplInputReader(
mesh_context.mesh_impl, ds_creator, mtf_shapes,
external_worker=(not FLAGS.on_gcp), is_eval_mode=False)
train_computation = tpu.replicate(
computation=model_train_fn,
inputs=[[]] * mesh_context.num_cores,
infeed_queue=simd_input_reader.infeed_queue,
device_assignment=mesh_context.device_assignment)
else:
placement_input_reader = input_reader.PlacementMeshImplInputReader(
mesh_context.mesh_impl, ds_creator, mtf_shapes, is_eval_mode=False)
train_computation = placement_input_reader.gpu_placement(model_train_fn)
###########################################################
# Training.
master_to_slice_hook, slice_to_master_hook = train_hooks.get()
ckpt_loader_hook = _CkptLoaderHook()
step_counter_hook = tf.train.StepCounterHook(every_n_steps=10)
all_hooks = [ckpt_loader_hook, master_to_slice_hook,
slice_to_master_hook, step_counter_hook]
if FLAGS.write_summary:
flush_summary = contrib_summary.flush()
with tf.train.MonitoredTrainingSession(
master=master,
scaffold=_get_scaffold(additional_initializers=[]),
hooks=all_hooks,
config=config) as sess:
if FLAGS.write_summary:
contrib_summary.initialize(session=sess)
if FLAGS.use_tpu:
simd_input_reader.start_infeed_thread(
sess, FLAGS.num_train_iterations_per_loop)
else:
placement_input_reader.initialize(sess)
for step in range(FLAGS.num_train_iterations_per_loop):
sess.run(train_computation)
if FLAGS.write_summary:
sess.run(flush_summary)
tf.logging.info('train steps: {}'.format(step))
with tf.Graph().as_default():
if FLAGS.write_summary:
summary_writer = contrib_summary.create_file_writer(FLAGS.summary_dir)
with summary_writer.as_default(), (
contrib_summary.always_record_summaries()):
_run_train_phase()
else:
_run_train_phase()
def _eval_phase(mesh_context, config, master):
"""Handles input pipeline and evaluates the network."""
if FLAGS.num_eval_iterations_per_loop <= 0:
return
def _run_eval_phase():
"""The real function that runs the evaluation phase."""
# Setup input pipeline.
ds_creator = unet.get_dataset_creator('eval')
mtf_shapes = unet.get_input_mtf_shapes('eval')
model_eval_fn, eval_hooks, output_dtypes_shapes = _get_model_fn(
'eval', mesh_context)
if FLAGS.use_tpu:
assert mesh_context.device_assignment
assert mesh_context.num_cores
simd_input_reader = input_reader.SimdMeshImplInputReader(
mesh_context.mesh_impl, ds_creator, mtf_shapes,
external_worker=(not FLAGS.on_gcp), is_eval_mode=True)
eval_computation = tpu.replicate(
computation=model_eval_fn,
inputs=[[]] * mesh_context.num_cores,
infeed_queue=simd_input_reader.infeed_queue,
device_assignment=mesh_context.device_assignment)
output_dtypes, output_shapes = output_dtypes_shapes.get()
outfeed_dequeue_ops = []
# Create outfeed_dequeue_ops.
for host_id in range(mesh_context.num_hosts):
# pylint: disable=protected-access
with ops.device(input_reader._host_id_to_tf_device(
host_id, external_worker=(not FLAGS.on_gcp))):
for device_ordinal in range(mesh_context.num_cores_per_host):
outfeed_dequeue_op = tpu_ops.outfeed_dequeue_tuple(
dtypes=output_dtypes,
shapes=output_shapes,
device_ordinal=device_ordinal)
# We don't need output other than from core 0.
if outfeed_dequeue_ops:
outfeed_dequeue_ops.append(
[tf.reduce_mean(x) for x in outfeed_dequeue_op])
else:
outfeed_dequeue_ops.append(outfeed_dequeue_op)
else:
placement_input_reader = input_reader.PlacementMeshImplInputReader(
mesh_context.mesh_impl, ds_creator, mtf_shapes, is_eval_mode=False)
eval_computation = placement_input_reader.gpu_placement(model_eval_fn)
###########################################################
# Evaluation.
master_to_slice_hook, _ = eval_hooks.get()
ckpt_loader_hook = _CkptLoaderHook()
all_hooks = [ckpt_loader_hook, master_to_slice_hook]
if FLAGS.write_summary:
flush_summary = contrib_summary.flush()
with tf.train.MonitoredSession(
session_creator=tf.train.ChiefSessionCreator(
master=master,
config=config),
hooks=all_hooks) as sess:
if FLAGS.write_summary:
contrib_summary.initialize(session=sess)
if FLAGS.use_tpu:
simd_input_reader.start_infeed_thread(
sess, FLAGS.num_eval_iterations_per_loop)
else:
placement_input_reader.initialize(sess)
pprocessor = unet.PostProcessor()
for step in range(FLAGS.num_eval_iterations_per_loop):
# Only get results from the 0-th core.
if FLAGS.use_tpu:
sess.run(eval_computation)
results = sess.run(outfeed_dequeue_ops)[0]
else:
results = sess.run(eval_computation)
pprocessor.record(results, FLAGS.pred_output_dir)
if FLAGS.write_summary:
sess.run(flush_summary)
tf.logging.info('eval steps: {}'.format(step))
pprocessor.finish()
with tf.Graph().as_default():
if FLAGS.write_summary:
summary_writer = contrib_summary.create_file_writer(FLAGS.summary_dir)
with summary_writer.as_default(), (
contrib_summary.always_record_summaries()):
_run_eval_phase()
else:
_run_eval_phase()
def train_and_eval():
"""Trains and evaluates MeshTensorflow model without TPUEstimator.
TODO(lehou): Pack everything nicely as a set of APIs.
"""
mesh_context = None
tf.logging.info('FLAGS.master: {}'.format(FLAGS.master))
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(FLAGS.master)
config = tf.ConfigProto()
config.allow_soft_placement = True
cluster_spec = resolver.cluster_spec()
if cluster_spec:
config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
with tf.Session(target=resolver.master(), config=config) as sess:
tf.tpu.experimental.initialize_tpu_system(resolver)
mesh_context = MeshContext(
sess, FLAGS.use_tpu, FLAGS.mesh_shape, unet.get_layout())
for _ in range(FLAGS.num_training_loops):
_train_phase(mesh_context, config, resolver.get_master())
_eval_phase(mesh_context, config, resolver.get_master())
if FLAGS.use_tpu:
with tf.Session(target=resolver.get_master(), config=config) as sess:
sess.run(tpu.shutdown_system())
tf.logging.info('finished.')
def main(_):
train_and_eval()
if __name__ == '__main__':
tf.app.run()
| mesh-master | mesh_tensorflow/experimental/model_executor.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| mesh-master | mesh_tensorflow/experimental/__init__.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline for MeshTensorflow.
If you run MeshTensorflow models on TPUs, please use SimdMeshImplInputReader
as your input pipeline. Otherwise, please use PlacementMeshImplInputReader.
For SimdMeshImplInputReader, a user provides the following, and this set of APIs
will handle the input pipeline for MeshTensorflow.
1. An instance of mtf.simd_mesh_impl.SimdMeshImpl.
2. A function that creates a tf.data.Dataset.
The Dataset returns single examples (no batch dimension).
3. Shape (mtf.Shape) of each tensor given by tf.data.Dataset.
Each of these shapes must begin with the same batch dimension.
Example of usage:
simd_mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(...)
simd_input_reader = SimdMeshImplInputReader(simd_mesh_impl,
ds_creator,
mtf_input_shapes)
infeed_queue = simd_input_reader.infeed_queue
tpu_train_computation = tpu.replicate(
computation=model_fn,
inputs=[[]] * num_cores,
infeed_queue=infeed_queue, ...)
# In model_fn, import the input tensors using mtf.import_laid_out_tensor.
def model_fn(features, labels):
...
laidout_features = mtf.simd_mesh_impl.SimdMeshImpl.LaidOutTensor([features])
x = mtf.import_laid_out_tensor(mesh, laidout_features, mtf_io_shape)
h = mtf.layers.dense(h, ...)
...
# Start the infeed enqueue thread after you created a session:
with tf.Session(...) as sess:
simd_input_reader.start_infeed_thread(sess,
number_steps=num_training_steps)
for _ in range(num_training_steps):
sess.run(tpu_train_computation)
Also check out SimdMeshImplInputReader._gen_infeed_queue().
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import time
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops
from tensorflow.python.tpu import tpu_feed
_NONE_PNUM = None
_NO_DATA = None
def _host_device_to_id(device_str):
assert isinstance(device_str, str)
id_string = device_str.lower().split("/task:")[1].split("/device:")[0]
id_int = int(id_string)
assert str(id_int) == id_string
return id_int
def _host_id_to_tf_device(host_id, external_worker):
assert isinstance(host_id, int)
if external_worker:
return "/job:tpu_worker/task:{}/device:CPU:0".format(host_id)
else:
return "/task:{}/device:CPU:0".format(host_id)
class SubBatchSlicer(object):
"""Reads and distributes a sub-batch on a host."""
def __init__(self, sub_batch_ds_creator, host_id, all_sub_batch_pnums,
simd_mesh_impl, mtf_input_shapes, external_worker, global_batch):
self._host_id = host_id
self._all_sub_batch_pnums = all_sub_batch_pnums
self._simd_mesh_impl = simd_mesh_impl
self._mtf_input_shapes = mtf_input_shapes
self._external_worker = external_worker
self._global_batch = global_batch
self._validate_args()
with ops.device(_host_id_to_tf_device(self._host_id,
self._external_worker)):
self._ds_iterator = sub_batch_ds_creator().make_initializable_iterator()
@property
def initializer(self):
return self._ds_iterator.initializer
def get_slices(self):
"""Yields sliced tensors and which remote pnums they should go to.
Yields:
tf_tensor: The sliced tensor.
      pnum: Which process number the tf_tensor should go to.
input_i: The input ordinal of the tf_tensor.
"""
with ops.device(_host_id_to_tf_device(self._host_id,
self._external_worker)):
all_input_tensors = self._ds_iterator.get_next()
if isinstance(all_input_tensors, tf.Tensor):
all_input_tensors = [all_input_tensors]
assert len(all_input_tensors) == len(self._all_sub_batch_pnums)
for input_i in range(len(all_input_tensors)):
input_tensor = all_input_tensors[input_i]
sub_batch_pnums = self._all_sub_batch_pnums[input_i]
mtf_input_shape = self._mtf_input_shapes[input_i]
# Initialize the cache for each input_i
self._init_slice_cache()
for pnum in sub_batch_pnums:
# TODO(lehou): tf.slice is kinda slow. Use tf.split instead.
input_slice = self._slice_tensor(input_tensor, mtf_input_shape, pnum)
yield input_slice, pnum, input_i
def _validate_args(self):
assert isinstance(self._all_sub_batch_pnums, list)
assert isinstance(self._mtf_input_shapes, list)
assert self._all_sub_batch_pnums
assert self._mtf_input_shapes
assert len(self._all_sub_batch_pnums) == len(self._mtf_input_shapes)
def _init_slice_cache(self):
# Cache for tensor slices
self._slice_dict = collections.defaultdict(list)
def _slice_tensor(self, input_tensor, mtf_input_shape, pnum):
"""Slice input_tensor according to mtf_input_shape and pnum."""
s_begin = self._simd_mesh_impl.slice_begin(mtf_input_shape, pnum)
if not self._global_batch:
# Always slice from 0 in the first dimension (batch dimension), since
      # input_tensor is a sub-batch tensor.
s_begin[0] = 0
if tuple(s_begin) in self._slice_dict:
return self._slice_dict[tuple(s_begin)]
s_shape = self._simd_mesh_impl.slice_shape(mtf_input_shape)
input_slice = tf.slice(input_tensor, s_begin, s_shape)
self._slice_dict[tuple(s_begin)] = input_slice
return input_slice
class ProcessDevices(object):
"""An utility class that maps between pnum to devices."""
def __init__(self, simd_mesh_impl):
"""Init tpu and host devices in logical order."""
self._num_cores = simd_mesh_impl.device_assignment.num_replicas
self._ordered_ordinals = []
self._ordered_tpus = []
self._ordered_hosts = []
self._ordered_host_ids = []
self._host_id_to_its_pnums = collections.defaultdict(list)
d_assignment = simd_mesh_impl.device_assignment
for pnum in range(self.num_cores):
physical_pnum = simd_mesh_impl.l2p(pnum)
# For MTF, there's always 1 core per replica. So logical_core=0.
self._ordered_ordinals.append(
d_assignment.tpu_ordinal(replica=physical_pnum, logical_core=0))
tpu_device = d_assignment.tpu_device(replica=physical_pnum)
host_device = d_assignment.host_device(replica=physical_pnum)
host_id = _host_device_to_id(host_device)
self._ordered_tpus.append(tpu_device)
self._ordered_hosts.append(host_device)
self._ordered_host_ids.append(host_id)
self._host_id_to_its_pnums[host_id].append(pnum)
self._num_hosts = len(set(self._ordered_hosts))
self._num_cores_per_host = self.num_cores // self._num_hosts
assert self.num_cores == self._num_hosts * self._num_cores_per_host
tf.logging.info("Process Devices "
"ordered_ordinals: {}, "
"ordered_tpus: {}, "
"ordered_hosts: {}, "
"host_id_to_its_pnums: {}.".format(
self.ordered_ordinals,
self.ordered_tpus,
self.ordered_hosts,
self.host_id_to_its_pnums))
@property
def ordered_ordinals(self):
return self._ordered_ordinals
@property
def ordered_tpus(self):
return self._ordered_tpus
@property
def ordered_hosts(self):
return self._ordered_hosts
@property
def ordered_host_ids(self):
return self._ordered_host_ids
@property
def host_id_to_its_pnums(self):
return self._host_id_to_its_pnums
@property
def num_cores(self):
return self._num_cores
@property
def num_hosts(self):
return self._num_hosts
@property
def num_cores_per_host(self):
return self._num_cores_per_host
class SimdMeshImplInputReader(object):
"""Handles input pipeline for SimdMeshImpl."""
def __init__(self,
simd_mesh_impl,
ds_creator,
mtf_input_shapes,
ds_prefetch_size=tf.data.experimental.AUTOTUNE,
external_worker=True,
is_eval_mode=False):
"""Input pipeline for the SIMD implementation of MeshTensorflow.
Args:
simd_mesh_impl: A mtf.simd_mesh_impl.SimdMeshImpl object.
ds_creator: A function that creates a dataset.
      mtf_input_shapes: A list of mtf.Shape. Its length must be equal
to the number of elements generated by the ds_creator. NOTE, we assume:
1. The 0-th dimension is the batch dimension.
2. The batch dimension is consistent across all input shapes in
mtf_input_shapes.
ds_prefetch_size: The buffer size for prefetching
(default tf.data.experimental.AUTOTUNE).
external_worker: Whether you have an external tpu_worker or not. Set it to
False if you run the program locally, for example, during local unit
test.
is_eval_mode: In evaluation mode, only one dataset object will be created,
as opposed to one dataset for each sub-batch. Default is False. Set it
to True during evaluation, to ensure that one evaluation instance will
be used once and only once.
Note:
1. The efficiency is optimized according to the shape of the 0-th tensor:
         mtf_input_shapes[0]. We recommend putting the largest tensor as the
0-th input.
2. You need to call start_infeed_thread() before your train ops.
Example:
simd_mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(...)
# ds_creator is function that creates a tf.data.Dataset.
# This Dataset must return single examples (no batch dimension).
def ds_creator():
return tf.data.Dataset.from_tensors(x)
# mtf_input_shapes is a list of Shapes of all input tensors given by the
# dataset. All shapes must begin with the same batch dimension.
simd_input_reader = SimdMeshImplInputReader(simd_mesh_impl,
ds_creator,
mtf_input_shapes)
batch_dim = mtf.Dimension('batch', FLAGS.batch_size)
io_dim = mtf.Dimension('io', FLAGS.io_size)
mtf_input_shapes = [mtf.Shape([batch_dim, io_dim])]
infeed_queue = simd_input_reader.infeed_queue
tpu_train_computation = tpu.replicate(
computation=model_fn,
inputs=[[]] * num_cores,
infeed_queue=infeed_queue, ...)
with tf.Session() as sess:
simd_input_reader.start_infeed_thread(sess,
number_steps=num_training_steps)
for _ in range(num_training_steps):
sess.run(tpu_train_computation)
"""
super(SimdMeshImplInputReader, self).__init__()
assert mtf_input_shapes
assert isinstance(mtf_input_shapes, list)
# TODO(lehou): Support nested structures for ds_creator, mtf_input_shapes.
self._simd_mesh_impl = simd_mesh_impl
self._p_dev = ProcessDevices(simd_mesh_impl)
self._ds_creator = ds_creator
self._mtf_input_shapes = mtf_input_shapes
self._ds_prefetch_size = ds_prefetch_size
self._external_worker = external_worker
self._is_eval_mode = is_eval_mode
self._gen_infeed_queue()
@property
def infeed_queue(self):
return self._infeed_queue
def start_infeed_thread(self, sess, number_steps=-1, initial_wait_sec=0.5):
"""Start running enqueue ops in a thread.
Args:
sess: A tf.Session.
number_steps: Number of times to call sess.run(enqueue_ops).
default is -1 (forever).
initial_wait_sec: Number of seconds to wait before starting the enqueue
loop. Default is 0.5.
"""
def _thread_fn():
time.sleep(initial_wait_sec)
if number_steps > 0:
for _ in range(number_steps):
sess.run(self._enqueue_ops)
else:
while True:
sess.run(self._enqueue_ops)
sess.run(self._input_initializers)
self._infeed_thread = threading.Thread(target=_thread_fn)
self._infeed_thread.start()
def _gen_infeed_queue(self):
"""Generates _infeed_queue, _enqueue_ops, _input_initializers."""
pnum_maps = []
batch_size = self._mtf_input_shapes[0].to_integer_list[0]
for mtf_shape in self._mtf_input_shapes:
# Make sure that the batch size is the same across all input tensors.
assert batch_size == mtf_shape.to_integer_list[0]
pnum_maps.append(self._get_pnum_map(mtf_shape))
# For each sub-batch, we need to know which host should read it.
if self._is_eval_mode:
# There should be just one dataset-holding host. Make the last host do it.
hosts_to_hold_ds = [self._p_dev.num_hosts - 1]
else:
hosts_to_hold_ds = self._get_hosts_to_hold_ds(pnum_maps[0])
sub_batch_size = batch_size // len(hosts_to_hold_ds)
tf.logging.info("MTF sub_batch_size: {}".format(sub_batch_size))
assert sub_batch_size * len(hosts_to_hold_ds) == batch_size
def sub_batch_ds_creator():
return self._ds_creator().batch(
sub_batch_size, drop_remainder=True).prefetch(
self._ds_prefetch_size)
sub_batch_slicer_list = []
# For each sub-batch, create a SubBatchSlicer object.
for sub_batch_i, host_id in enumerate(hosts_to_hold_ds):
# Get the list of pnums for each input.
if self._is_eval_mode:
all_sub_batch_pnums = [
pnum_map.flatten().tolist() for pnum_map in pnum_maps]
sub_batch_slicer_list.append(SubBatchSlicer(sub_batch_ds_creator,
host_id,
all_sub_batch_pnums,
self._simd_mesh_impl,
self._mtf_input_shapes,
self._external_worker,
global_batch=True))
else:
all_sub_batch_pnums = []
for pnum_map in pnum_maps:
sub_batch_pnums = pnum_map[sub_batch_i, ...].flatten().tolist()
all_sub_batch_pnums.append(sub_batch_pnums)
sub_batch_slicer_list.append(SubBatchSlicer(sub_batch_ds_creator,
host_id,
all_sub_batch_pnums,
self._simd_mesh_impl,
self._mtf_input_shapes,
self._external_worker,
global_batch=False))
# Slots for all laidout tensors.
all_laidout_tensors = [[_NO_DATA] * len(self._mtf_input_shapes) \
for _ in range(self._p_dev.num_cores)]
# Read tf_tensors, put them in slots.
for sub_batch_slicer in sub_batch_slicer_list:
for tf_tensor, pnum, input_i in sub_batch_slicer.get_slices():
all_laidout_tensors[pnum][input_i] = tf_tensor
# Make sure that there are no Nones in all_laidout_tensors.
for laidout_tensors in all_laidout_tensors:
assert _NO_DATA not in laidout_tensors
with ops.device(_host_id_to_tf_device(hosts_to_hold_ds[0],
self._external_worker)):
self._infeed_queue, self._enqueue_ops = self._enqueue_laidout_tensors(
all_laidout_tensors)
self._input_initializers = [s.initializer for s in sub_batch_slicer_list]
def _get_pnum_map(self, mtf_shape):
"""Returns the pnum_map according to mtf_shape.
Args:
mtf_shape: A mtf.Shape object.
Returns:
A numpy array pnum_map. For the i-th sub-batch, pnum_map[i] is a numpy
array containing all pnums that tensor slices of the i-th sub-batch
      will be sent to.
"""
s_shape = self._simd_mesh_impl.slice_shape(mtf_shape)
shape_list = [dim_size // s_dim_size for dim_size, s_dim_size in zip(
mtf_shape.to_integer_list, s_shape)]
pnum_map_shape = shape_list + [
self._p_dev.num_cores // np.prod(shape_list)]
assert np.prod(pnum_map_shape) == self._p_dev.num_cores
# Initialize the pnum_map to _NONE_PNUM.
pnum_map = np.empty(pnum_map_shape, dtype=object)
pnum_map[:] = _NONE_PNUM
for pnum in range(self._p_dev.num_cores):
s_begin = self._simd_mesh_impl.slice_begin(mtf_shape, pnum)
coord = [dim_size // s_dim_size for dim_size, s_dim_size in zip(
s_begin, s_shape)]
# put pnum in pnum_map[coord]
pnum_array_ref = pnum_map[tuple(coord)]
for idx, value in enumerate(pnum_array_ref):
if value is _NONE_PNUM:
pnum_array_ref[idx] = pnum
break
tf.logging.info("MTF pnum_map: {}".format(pnum_map))
assert _NONE_PNUM not in pnum_map
return pnum_map
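  # Illustrative example (hypothetical numbers): with 4 cores and only the
  # batch dimension split 2 ways, shape_list is [2, 1, ...], so pnum_map has
  # shape [2, 1, ..., 2] -- two sub-batches, each listing the two pnums that
  # consume its slices.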
def _get_hosts_to_hold_ds(self, pnum_map):
"""Finds which host should read which sub-batch."""
assert _NONE_PNUM not in pnum_map
# This records how many datasets (ds) are already stored on each host.
num_dss_per_host = [0] * self._p_dev.num_hosts
# A list of host_ids that holds datasets (ds).
hosts_to_hold_ds = []
def _get_num_pnums_per_host(sub_batch_pnum_map):
num_pnums_per_host = [0] * self._p_dev.num_hosts
for pnum in sub_batch_pnum_map.flatten():
num_pnums_per_host[self._p_dev.ordered_host_ids[pnum]] += 1
return num_pnums_per_host
def _find_host_id_with_most_pnums_and_least_ds(num_pnums_per_host,
num_dss_per_host):
      host_metrics = [(
host_id, num_pnums_per_host[host_id],
num_dss_per_host[host_id]) \
for host_id in range(self._p_dev.num_hosts)]
# Major max key: num_pnums
# Minor max key: -num_dss. We need to find a relatively spare host.
      host_id, _, _ = max(host_metrics, key=lambda keys: (keys[1], -keys[2]))
return host_id
for sub_batch_pnum_map in pnum_map:
num_pnums_per_host = _get_num_pnums_per_host(sub_batch_pnum_map)
host_id = _find_host_id_with_most_pnums_and_least_ds(num_pnums_per_host,
num_dss_per_host)
num_dss_per_host[host_id] += 1
hosts_to_hold_ds.append(host_id)
return hosts_to_hold_ds
def _enqueue_laidout_tensors(self, all_laidout_tensors):
"""Generate enqueue ops to enqueue all_laidout_tensors."""
def _tpu_ordinal_function_impl(pnum):
return self._p_dev.ordered_ordinals[pnum]
def _placement_function_impl(pnum):
return self._p_dev.ordered_hosts[pnum]
laidout_tensors0 = all_laidout_tensors[0]
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(laidout_tensors0),
tuple_types=[x.dtype for x in laidout_tensors0],
tuple_shapes=[x.shape for x in laidout_tensors0])
enqueue_ops = infeed_queue.generate_enqueue_ops(
all_laidout_tensors,
tpu_ordinal_function=_tpu_ordinal_function_impl,
placement_function=_placement_function_impl)
return infeed_queue, enqueue_ops
class PlacementMeshImplInputReader(object):
"""Handles input pipeline for PlacementMeshImpl."""
def __init__(self,
placement_mesh_impl,
ds_creator,
mtf_input_shapes,
ds_prefetch_size=tf.data.experimental.AUTOTUNE,
is_eval_mode=False):
self._placement_mesh_impl = placement_mesh_impl
self._mtf_input_shapes = mtf_input_shapes
batch_size = mtf_input_shapes[0].dims[0].size
if is_eval_mode:
ds = ds_creator().batch(
batch_size, drop_remainder=False).prefetch(ds_prefetch_size)
else:
ds = ds_creator().batch(
batch_size, drop_remainder=True).prefetch(ds_prefetch_size)
self._ds_iterator = ds.make_initializable_iterator()
self._input_initializers = [self._ds_iterator.initializer]
def initialize(self, sess):
sess.run(self._input_initializers)
def gpu_placement(self, model_fn):
image, label = self._ds_iterator.get_next()
image_laid_out = self._placement_mesh_impl.make_slices(
image, self._mtf_input_shapes[0])
label_laid_out = self._placement_mesh_impl.make_slices(
label, self._mtf_input_shapes[1])
computation = model_fn(image_laid_out, label_laid_out)
return computation
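# A minimal usage sketch for PlacementMeshImplInputReader (mirrors the GPU/CPU
# path in model_executor.py; `placement_mesh_impl`, `ds_creator`,
# `mtf_input_shapes` and `model_fn` are supplied by the caller):
#
#   reader = PlacementMeshImplInputReader(
#       placement_mesh_impl, ds_creator, mtf_input_shapes)
#   computation = reader.gpu_placement(model_fn)
#   with tf.Session() as sess:
#     reader.initialize(sess)
#     sess.run(computation)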
| mesh-master | mesh_tensorflow/experimental/input_reader.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for third_party.py.mesh_tensorflow.experimental.data_aug_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow.experimental.data_aug_lib as data_aug_lib
import tensorflow.compat.v1 as tf
class MtfUnetDataAugTest(tf.test.TestCase):
def constant_3d_image(self):
return tf.constant(
[[[-100, 2], [2, 3]], [[4, 35], [-1024, 7]]], dtype=tf.float32)
def constant_3d_label(self):
return tf.constant(
[[[0, 0], [0, 1]], [[1, 1], [2, 2]]], dtype=tf.float32)
def test_flip(self):
with tf.Session() as sess:
image_3d = self.constant_3d_image()
image_3d_np = sess.run(image_3d)
for flip_axis in [0, 1, 2]:
image_3d_flip, _ = data_aug_lib.maybe_flip(
image_3d, tf.zeros_like(image_3d), flip_axis, 0.0)
image_3d_flip_np = sess.run(image_3d_flip)
self.assertAllClose(image_3d_flip_np, image_3d_np)
image_3d_flip = image_3d
for flip_axis in [0, 1, 2]:
if flip_axis == 0:
image_3d_np = image_3d_np[::-1, ...]
elif flip_axis == 1:
image_3d_np = image_3d_np[:, ::-1, :]
else:
image_3d_np = image_3d_np[..., ::-1]
image_3d_flip, _ = data_aug_lib.maybe_flip(
image_3d_flip, tf.zeros_like(image_3d_flip), flip_axis, 1.0)
image_3d_flip_np = sess.run(image_3d_flip)
self.assertAllClose(image_3d_flip_np, image_3d_np)
def test_rot180(self):
with tf.Session() as sess:
image_3d = self.constant_3d_image()
image_3d_np = sess.run(image_3d)
for constant_axis in [0, 1, 2]:
image_3d_rot360, _ = data_aug_lib.maybe_rot180(
image_3d, tf.zeros_like(image_3d), constant_axis, 2)
image_3d_rot360, _ = data_aug_lib.maybe_rot180(
image_3d_rot360, tf.zeros_like(image_3d_rot360), constant_axis, 2)
image_3d_rot360_np = sess.run(image_3d_rot360)
self.assertAllClose(image_3d_rot360_np, image_3d_np)
def test_gen_fake_data(self):
with tf.Session() as sess:
image_3d = self.constant_3d_image()
label_3d = self.constant_3d_label()
image_3d_np = sess.run(image_3d)
label_3d_np = sess.run(label_3d)
image_3d_aug, label_3d_aug = \
data_aug_lib.maybe_gen_fake_data_based_on_real_data(
image_3d, label_3d, reso=2,
min_fake_lesion_ratio=0.0, gen_fake_probability=0.0)
image_3d_aug_np = sess.run(image_3d_aug)
label_3d_aug_np = sess.run(label_3d_aug)
self.assertAllClose(image_3d_aug_np, image_3d_np)
self.assertAllClose(label_3d_aug_np, label_3d_np)
if __name__ == "__main__":
tf.test.main()
| mesh-master | mesh_tensorflow/experimental/data_aug_lib_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for third_party.py.mesh_tensorflow.experimental.input_reader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import mesh_tensorflow as mtf
import mesh_tensorflow.experimental.input_reader as input_reader
import numpy as np
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.protobuf.tpu import topology_pb2
from tensorflow.python.tpu import device_assignment
from tensorflow.python.tpu import tpu
class MtfInputReaderTest(parameterized.TestCase, tf.test.TestCase):
def initialize_system(self, sess):
"""Run tpu.initialize_system and return the number of TPU devices."""
topology_object = topology_pb2.TopologyProto()
topology = sess.run(tf.tpu.initialize_system())
topology_object.ParseFromString(topology)
num_cores = topology_object.num_tasks * (
topology_object.num_tpu_devices_per_task)
return topology, num_cores
@parameterized.parameters((True,), (False,))
def test_get_laidout_tensors(self, is_eval_mode):
mesh_shape = "mesh_x:2, mesh_y:1"
layout = "batch:mesh_x, io:mesh_y"
batch_io_dim = 4
with tf.Session() as sess:
topology, num_cores = self.initialize_system(sess)
# Get a device_assignment object for mtf.
d_assignment = device_assignment.device_assignment(
topology,
computation_shape=[1,] * mtf.utils.topology_rank(topology),
num_replicas=num_cores)
# Hacked dataset creator: creates different datasets for the first and
# second call, in order to test SimdMeshImplInputReader.
self.sub_batch_created_times = 0
def stateful_ds_creator():
whole_batch = tf.eye(batch_io_dim, dtype=tf.float32)
sub_batch = tf.slice(whole_batch,
[self.sub_batch_created_times * 2, 0],
[2, 4])
self.sub_batch_created_times += 1
return tf.data.Dataset.from_tensors(sub_batch).repeat().unbatch()
batch_dim = mtf.Dimension("batch", batch_io_dim)
io_dim = mtf.Dimension("io", batch_io_dim)
mtf_input_shapes = [mtf.Shape([batch_dim, io_dim])]
# Get mesh_impl.
mesh_shape = mtf.convert_to_shape(mesh_shape)
layout_rules = mtf.convert_to_layout_rules(layout)
mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(
mesh_shape, layout_rules, None, d_assignment)
simd_input_reader = input_reader.SimdMeshImplInputReader(
mesh_impl, stateful_ds_creator, mtf_input_shapes,
external_worker=False,
is_eval_mode=is_eval_mode)
def model_fn(features):
return features
replicated_computation = tpu.replicate(
computation=model_fn,
inputs=[[]] * num_cores,
infeed_queue=simd_input_reader.infeed_queue,
device_assignment=d_assignment)
simd_input_reader.start_infeed_thread(sess, 1)
results = sess.run(replicated_computation)
print("results: {}".format(results))
core_0_data = results[0][0]
core_1_data = results[1][0]
print("core_0_data: {}".format(core_0_data))
print("core_1_data: {}".format(core_1_data))
if is_eval_mode:
# If there is only one dataset object, then the stateful_ds_creator()
# should be called only once.
self.assertAllClose(
np.array([[1, 0, 0, 0], [0, 1, 0, 0]], dtype=np.float32),
core_0_data)
self.assertAllClose(
np.array([[1, 0, 0, 0], [0, 1, 0, 0]], dtype=np.float32),
core_1_data)
else:
# If there are two dataset objects, then the stateful_ds_creator()
# should be called twice.
self.assertAllClose(
np.array([[1, 0, 0, 0], [0, 1, 0, 0]], dtype=np.float32),
core_0_data)
self.assertAllClose(
np.array([[0, 0, 1, 0], [0, 0, 0, 1]], dtype=np.float32),
core_1_data)
sess.run(tf.tpu.shutdown_system())
if __name__ == "__main__":
tf.test.main()
| mesh-master | mesh_tensorflow/experimental/input_reader_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Apply data augmentation on the Liver Tumor Segmentation (LiTS) dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import app
from absl import flags
from six.moves import range
import tensorflow.compat.v1 as tf # tf
# pylint: disable=g-direct-tensorflow-import,g-direct-third-party-import
from mesh_tensorflow.experimental import data_aug_lib
FLAGS = flags.FLAGS
flags.DEFINE_string('input_file_pattern', '', 'Path to input CT scans.')
flags.DEFINE_string('output_folder', '', 'Path to output folder.')
flags.DEFINE_string('output_file_prefix',
'augmented', 'Filename prefix.')
flags.DEFINE_integer('ct_resolution', 128,
'Resolution of CT images along depth, height and '
'width dimensions.')
flags.DEFINE_integer('num_data_aug', 1000,
                     'The number of augmented examples to generate.')
flags.DEFINE_integer('process_no', None, 'Which process number I am.')
flags.DEFINE_float('gen_fake_probability', 0.50,
                   'Probability of generating synthetic data from a scan.')
flags.DEFINE_float('min_fake_lesion_ratio', 0.05,
                   'Minimum amount of synthetic lesion in liver.')
def _dataset_creator():
"""Returns an unbatched dataset."""
def _parser_fn(serialized_example):
"""Parses a single tf.Example into image and label tensors."""
features = {}
features['image/ct_image'] = tf.FixedLenFeature([], tf.string)
features['image/label'] = tf.FixedLenFeature([], tf.string)
parsed = tf.parse_single_example(serialized_example, features=features)
image = tf.decode_raw(parsed['image/ct_image'], tf.float32)
label = tf.decode_raw(parsed['image/label'], tf.float32)
    # Preprocess intensities: scale and clip to the range [0, 1].
image = tf.clip_by_value(image / 1024.0 + 0.5, 0, 1)
spatial_dims = [FLAGS.ct_resolution] * 3
image = tf.reshape(image, spatial_dims)
label = tf.reshape(label, spatial_dims)
image, label = data_aug_lib.maybe_gen_fake_data_based_on_real_data(
image, label, FLAGS.ct_resolution,
FLAGS.min_fake_lesion_ratio, FLAGS.gen_fake_probability)
return image, label
dataset = tf.data.Dataset.list_files(
FLAGS.input_file_pattern, shuffle=True).repeat()
dataset = dataset.apply(functools.partial(
tf.data.TFRecordDataset, compression_type='GZIP'))
dataset = dataset.shuffle(2).map(_parser_fn, num_parallel_calls=2)
return dataset
def save_to_tfrecord(image, label, process_no, idx,
output_path, output_file_prefix):
"""Save to TFRecord."""
d_feature = {}
d_feature['image/ct_image'] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[image.reshape([-1]).tobytes()]))
d_feature['image/label'] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[label.reshape([-1]).tobytes()]))
example = tf.train.Example(features=tf.train.Features(feature=d_feature))
serialized = example.SerializeToString()
result_file = os.path.join(
output_path,
'{}-{}-{}.tfrecords'.format(output_file_prefix, process_no, idx))
options = tf.python_io.TFRecordOptions(
tf.python_io.TFRecordCompressionType.GZIP)
with tf.python_io.TFRecordWriter(result_file, options=options) as w:
w.write(serialized)
def apply_data_aug():
"""Apply data augmentation and save augmented results."""
if not tf.gfile.IsDirectory(FLAGS.output_folder):
tf.gfile.MakeDirs(FLAGS.output_folder)
dataset = _dataset_creator()
ds_iterator = dataset.make_initializable_iterator()
image, label = ds_iterator.get_next()
with tf.Session() as sess:
sess.run(ds_iterator.initializer)
for idx in range(FLAGS.num_data_aug):
image_np, label_np = sess.run([image, label])
save_to_tfrecord(
image_np, label_np, FLAGS.process_no, idx,
FLAGS.output_folder, FLAGS.output_file_prefix)
return
def main(argv):
del argv
apply_data_aug()
if __name__ == '__main__':
app.run(main)
| mesh-master | mesh_tensorflow/experimental/offline_data_aug.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for mesh_tensorflow.transformer.memory_layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import memory_layers
import mock
import numpy as np
import tensorflow.compat.v1 as tf
class FlatKeyValueMemoryTest(tf.test.TestCase):
def setUp(self):
super(FlatKeyValueMemoryTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, "mtf_mesh")
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, "random_normal_initializer").start()
random_normal_initializer_mock.return_value = self.initializer_mock
def _export_to_tf_tensor(self, mtf_tensor):
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
return lowering, lowering.export_to_tf_tensor(mtf_tensor)
def test_call_shape(self):
key_size = 5
value_size = 10
n_keys = 6
n_heads = 2
knn = 3
seq_len = 4
batch = 5
model_dim = mtf.Dimension("model", value_size)
seq_dim = mtf.Dimension("length", seq_len)
batch_dim = mtf.Dimension("batch", batch)
def initialize(shape, dtype):
return tf.reshape(1 + tf.range(np.prod(shape), dtype=dtype), shape)
self.initializer_mock.side_effect = initialize
kv_memory = memory_layers.ProductKeyValueMemory(key_size, n_keys,
n_heads, knn)
mtf_x = mtf.ones(self.mesh, mtf.Shape([batch_dim, seq_dim, model_dim]))
context = mock.MagicMock()
context.mesh = self.mesh
context.variable_dtype = tf.float32
out_tensor = kv_memory.call(context, mtf_x)
# Dimensions should be untouched
self.assertEqual(mtf_x.shape, out_tensor.shape)
def test_get_indices(self):
key_size = 2
n_keys = 3
product_size = 2
head_size = 2
batch = 2
seq_len = 2
knn = 2
n_key_dim = mtf.Dimension("n_keys", n_keys)
key_dim = mtf.Dimension("key", key_size // 2)
seq_dim = mtf.Dimension("length", seq_len)
batch_dim = mtf.Dimension("batch", batch)
head_dim = mtf.Dimension("n_heads", head_size)
product_dim = mtf.Dimension("product_key", product_size)
knn_dim = mtf.Dimension("knn", knn)
query_shape = mtf.Shape([batch_dim, seq_dim, head_dim,
product_dim, key_dim])
keys_shape = mtf.Shape([head_dim, product_dim, n_key_dim, key_dim])
query = mtf.ones(self.mesh, query_shape)
keys_vals = [
[
[[4], [1], [2]],
[[2], [-1], [2]],
],
[
[[1], [2], [5]],
[[6], [1], [4]],
],
]
# h1:
# First scores:
# [4, 2]
# [2, 2]
# Cartesian added scores:
# [6, 6]
# Indices:
# [0, 2] [0*n_k + 0, 0*n_k + 2]
# h2:
# First scores:
# [5, 2]
# [6, 4]
# Cartesian added scores:
# [11, 9]
# Indices:
# [6, 8] [2*n_k+0, 2*n_k+2]
expected_scores = np.broadcast_to(np.array([[6, 6], [11, 9]]),
[batch, seq_len, head_size, knn])
expected_indices = np.broadcast_to(np.array([[0, 2], [6, 8]]),
[batch, seq_len, head_size, knn])
keys = mtf.constant(self.mesh, keys_vals, keys_shape)
pkm = memory_layers.ProductKeyValueMemory(key_size, n_keys, head_size, knn)
mtf_scores, mtf_indices = pkm.get_indices(keys, query)
# Shapes.
expected_shape = mtf.Shape([batch_dim, seq_dim, head_dim, knn_dim])
self.assertEqual(expected_shape, mtf_scores.shape)
self.assertEqual(expected_shape, mtf_indices.shape)
# Values
lowering_s, scores = self._export_to_tf_tensor(mtf_scores)
lowering_i, indices = self._export_to_tf_tensor(mtf_indices)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering_s.copy_masters_to_slices())
self.evaluate(lowering_i.copy_masters_to_slices())
scores, indices = self.evaluate([scores, indices])
self.assertAllEqual(expected_scores, scores)
self.assertAllEqual(expected_indices, indices)
if __name__ == "__main__":
tf.test.main()
| mesh-master | mesh_tensorflow/transformer/memory_layers_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of various types of attention."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf
def attention(q,
k,
v,
memory_length_dim,
key_dim,
value_dim,
bias=None,
dropout_rate=0.0,
dropout_broadcast_dims=None,
extra_logit=None,
context=None,
float32_logits=True):
"""Dot-product attention - doesn't use positional dimensions.
key_dim is a Dimension representing the channels in the queries and keys
value_dim is a Dimension representing the channels in values
memory_length_dim is a Dimension representing the different key/value pairs.
Dimensions of q: other_query_dims + {key_dim}
Dimensions of k: other_memory_dims + {memory_length_dim, key_dim}
Dimensions of v: other_memory_dims + {memory_length_dim, value_dim}
other_memory_dims is a subset of other_query_dims
Typically, other_query_dims={batch, heads, length}
Typically, other_memory_dims={batch, heads}
Args:
q: a Tensor
k: a Tensor
v: a Tensor
memory_length_dim: a Dimension
key_dim: a Dimension
value_dim: a Dimension
bias: a Tensor to be added into the attention logits.
dropout_rate: a float.
dropout_broadcast_dims: an optional list of mtf.Dimension
extra_logit: an optional scalar or tensor
context: an optional Transformer.Context
float32_logits: a boolean - if True, then compute logits in float32 to avoid
numerical issues with bfloat16
Returns:
Tensor with shape q.shape - key_dim + value_dim
"""
orig_q_shape = q.shape
q, k, v, bias = maybe_reshape_attention_input_for_2d_sharding(
context, q, k, v, bias, [key_dim, value_dim])
if float32_logits:
k = mtf.cast(k, tf.float32)
q = mtf.cast(q, tf.float32)
logits = mtf.layers.us_einsum([q, k], reduced_dims=[key_dim])
if bias is not None:
logits += mtf.cast(bias, logits.dtype)
weights = mtf.softmax(logits, memory_length_dim, extra_logit=extra_logit)
weights = mtf.cast(weights, v.dtype)
if dropout_rate != 0.0:
weights = mtf.dropout(
weights, 1.0 - dropout_rate,
noise_shape=weights.shape - dropout_broadcast_dims)
outputs_shape = q.shape - key_dim + value_dim
outputs = mtf.einsum([weights, v], outputs_shape)
outputs = mtf.reshape(outputs, orig_q_shape - key_dim + value_dim)
return outputs
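# A minimal usage sketch for attention() on toy Tensors. The mesh and the
# dimension names/sizes below ("batch", "heads", "length", "memory_length",
# "d_k", "d_v") are illustrative assumptions, not part of this module; they
# just follow the dimension conventions described in the docstring above.
def _attention_usage_sketch():
  """Builds toy q, k, v Tensors and applies dot-product attention to them."""
  graph = mtf.Graph()
  mesh = mtf.Mesh(graph, "example_mesh")
  batch = mtf.Dimension("batch", 2)
  heads = mtf.Dimension("heads", 4)
  length = mtf.Dimension("length", 8)
  memory_length = mtf.Dimension("memory_length", 8)
  d_k = mtf.Dimension("d_k", 16)
  d_v = mtf.Dimension("d_v", 16)
  q = mtf.ones(mesh, mtf.Shape([batch, heads, length, d_k]))
  k = mtf.ones(mesh, mtf.Shape([batch, heads, memory_length, d_k]))
  v = mtf.ones(mesh, mtf.Shape([batch, heads, memory_length, d_v]))
  # The result has shape q.shape - d_k + d_v, i.e. [batch, heads, length, d_v].
  return attention(q, k, v, memory_length, d_k, d_v)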
def hybrid_attention(q,
k,
v,
context,
memory_length_dim,
key_dim,
value_dim,
bias=None,
dropout_rate=0.0,
dropout_broadcast_dims=None,
extra_logit=None):
"""Dot-product attention - doesn't use positional dimensions.
key_dim is a Dimension representing the channels in the queries and keys
value_dim is a Dimension representing the channels in values
memory_length_dim is a Dimension representing the different key/value pairs.
Dimensions of q: other_query_dims + {key_dim}
Dimensions of k: other_memory_dims + {memory_length_dim, key_dim}
Dimensions of v: other_memory_dims + {memory_length_dim, value_dim}
other_memory_dims is a subset of other_query_dims
Typically, other_query_dims={batch, heads, length}
Typically, other_memory_dims={batch, heads}
Args:
q: a Tensor
k: a Tensor
v: a Tensor
context: context of the attention layer.
memory_length_dim: a Dimension
key_dim: a Dimension
value_dim: a Dimension
bias: a Tensor to be added into the attention logits.
dropout_rate: a float.
dropout_broadcast_dims: an optional list of mtf.Dimension
extra_logit: an optional scalar or tensor
Returns:
Tensor with shape q.shape - key_dim + value_dim
"""
logits = mtf.layers.us_einsum([q, k], reduced_dims=[key_dim])
if bias is not None:
logits += bias
query_length_dim = mtf.Dimension("length", memory_length_dim.size)
doubly_coeff = mtf.get_variable(
context.mesh, "doubly_coeff", [],
initializer=tf.constant_initializer(0.5),
dtype=context.variable_dtype)
doubly_coeff = mtf.maximum(mtf.minimum(doubly_coeff, 1.), 0.)
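  # upper_weights is the usual softmax over memory positions. doubly_weights
  # re-normalizes over memory positions after a log-softmax over query
  # positions, approximating a doubly-stochastic attention matrix. The
  # learned doubly_coeff (clipped to [0, 1]) mixes the two.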
upper_weights = mtf.softmax(
logits, memory_length_dim, extra_logit=extra_logit)
lower_log_weights = mtf.log_softmax(
logits, query_length_dim, extra_logit=extra_logit)
doubly_weights = mtf.softmax(
lower_log_weights, memory_length_dim, extra_logit=extra_logit)
weights = doubly_coeff * doubly_weights + (1. - doubly_coeff) * upper_weights
if dropout_rate != 0.0:
weights = mtf.dropout(
weights, 1.0 - dropout_rate,
noise_shape=weights.shape - dropout_broadcast_dims)
outputs_shape = q.shape - key_dim + value_dim
outputs = mtf.einsum([weights, v], outputs_shape)
return outputs
def synthetic_attention(q,
k,
v,
memory_length_dim,
key_dim,
value_dim,
bias=None,
dropout_rate=0.0,
dropout_broadcast_dims=None,
extra_logit=None,
synthesize=True,
synthesize_mode="random_plus_alpha",
factorized_dim=16,
max_length=512,
context=None):
"""Synthetic Attention from Synthesizers (https://arxiv.org/abs/2005.00743).
key_dim is a Dimension representing the channels in the queries and keys
value_dim is a Dimension representing the channels in values
memory_length_dim is a Dimension representing the different key/value pairs.
Dimensions of q: other_query_dims + {key_dim}
Dimensions of k: other_memory_dims + {memory_length_dim, key_dim}
Dimensions of v: other_memory_dims + {memory_length_dim, value_dim}
other_memory_dims is a subset of other_query_dims
Typically, other_query_dims={batch, heads, length}
Typically, other_memory_dims={batch, heads}
Args:
q: a Tensor
k: a Tensor
v: a Tensor
memory_length_dim: a Dimension
key_dim: a Dimension
value_dim: a Dimension
bias: a Tensor to be added into the attention logits.
dropout_rate: a float.
dropout_broadcast_dims: an optional list of mtf.Dimension
extra_logit: an optional scalar or tensor
synthesize: flag to use synthetic attention or not
synthesize_mode: which variant of synthesizer to use
factorized_dim: factorized dim for synthesizers
max_length: max length of input sequence
context: context since we need context mode
Returns:
Tensor with shape q.shape - key_dim + value_dim
"""
if synthesize:
num_heads = v.shape.get_dim_by_name("heads")
tf.logging.info("Using synthesizer")
if synthesize_mode == "random":
tf.logging.info("Using Random Synthesizers")
r_shape = mtf.Shape([mtf.Dimension("length", max_length),
mtf.Dimension("heads", num_heads.size),
mtf.Dimension("memory_length", max_length)])
r = mtf.get_variable(context.mesh, "R", r_shape,
initializer=None,
dtype=context.variable_dtype)
r = mtf.slice(r, 0, memory_length_dim.size, memory_length_dim.name)
if context.mode == "incremental":
r = mtf.gather(r, context.position, r.shape.get_dim_by_name("length"))
else:
length_dim = q.shape.get_dim_by_name("length")
r = mtf.slice(r, 0, length_dim.size, "length")
logits = r
r_shape = logits.shape
elif synthesize_mode == "factorized":
tf.logging.info("Using Factorized Random Synthesizers")
k = factorized_dim
r1_shape = mtf.Shape([mtf.Dimension("tmp", k),
mtf.Dimension("heads", num_heads.size),
mtf.Dimension("memory_length", 512)])
r2_shape = mtf.Shape([mtf.Dimension("tmp", k),
mtf.Dimension("heads", num_heads.size),
mtf.Dimension("memory_length", 512)])
r_shape = mtf.Shape([mtf.Dimension("length", 512),
mtf.Dimension("heads", num_heads.size),
mtf.Dimension("memory_length", 512)])
r1 = mtf.get_variable(context.mesh, "R1", r1_shape,
initializer=None,
dtype=context.variable_dtype)
r2 = mtf.get_variable(context.mesh, "R2", r2_shape,
initializer=None,
dtype=context.variable_dtype)
r = mtf.einsum([r1, r2], r_shape)
r = mtf.slice(r, 0, memory_length_dim.size, memory_length_dim.name)
if context.mode == "incremental":
r = mtf.gather(r, context.position, r.shape.get_dim_by_name("length"))
else:
length_dim = q.shape.get_dim_by_name("length")
r = mtf.slice(r, 0, length_dim.size, "length")
logits = r
elif synthesize_mode == "dense_minus":
# Dense Synthesizer Model
tmp_dim = mtf.Dimension("memory_length", max_length)
logits = mtf.layers.dense(mtf.relu(q), [tmp_dim],
use_bias=False,
name="pi",
reduced_dims=[key_dim],
variable_dtype=None)
logits = mtf.slice(logits, 0, memory_length_dim.size,
memory_length_dim.name)
if context.mode == "incremental":
pass
else:
length_dim = q.shape.get_dim_by_name("length")
logits = mtf.slice(logits, 0, length_dim.size, "length")
elif synthesize_mode == "random_plus_alpha" or \
synthesize_mode == "random_plus":
# Mixture Random Synthesizer with learnable Alpha
tf.logging.info("Using Random Plus Alpha")
logits = mtf.einsum([q, k], reduced_dims=[key_dim])
num_heads = logits.shape.get_dim_by_name("heads")
r_shape = mtf.Shape([mtf.Dimension("length", 512),
mtf.Dimension("heads", num_heads.size),
mtf.Dimension("memory_length", 512)])
r = mtf.get_variable(context.mesh, "R", r_shape,
initializer=None,
dtype=context.variable_dtype)
r = mtf.slice(r, 0, memory_length_dim.size, memory_length_dim.name)
if context.mode == "incremental":
r = mtf.gather(r, context.position, r.shape.get_dim_by_name("length"))
else:
length_dim = q.shape.get_dim_by_name("length")
r = mtf.slice(r, 0, length_dim.size, length_dim.name)
if "alpha" in synthesize_mode:
alpha = mtf.get_variable(context.mesh,
"alpha",
mtf.Shape([mtf.Dimension("alpha", 1)]),
initializer=tf.zeros_initializer(),
dtype=context.variable_dtype)
alpha = mtf.sigmoid(alpha)
logits = ((1-alpha) * logits) + (alpha * r)
else:
logits = logits + r
elif synthesize_mode == "dense_plus_alpha" or \
synthesize_mode == "dense_plus":
# Mixture Dense Synthesizer with learnable alpha
tf.logging.info("Using Dense Plus Alpha Scaling")
logits = mtf.einsum([q, k], reduced_dims=[key_dim])
tmp_dim = mtf.Dimension("memory_length", 512)
r = mtf.layers.dense(mtf.relu(q), [tmp_dim],
use_bias=False,
name="pi",
reduced_dims=[key_dim],
variable_dtype=None)
r = mtf.slice(r, 0, memory_length_dim.size, memory_length_dim.name)
if context.mode == "incremental":
pass
else:
length_dim = q.shape.get_dim_by_name("length")
r = mtf.slice(r, 0, length_dim.size, "length")
if "alpha" in synthesize_mode:
alpha = mtf.get_variable(context.mesh,
"alpha",
mtf.Shape([mtf.Dimension("alpha", 1)]),
initializer=tf.zeros_initializer(),
dtype=context.variable_dtype)
alpha = mtf.sigmoid(alpha)
logits = ((1-alpha) * logits) + (alpha * r)
else:
logits = logits + r
if bias is not None:
logits += bias
weights = mtf.softmax(logits, memory_length_dim, extra_logit=extra_logit)
if dropout_rate != 0.0:
weights = mtf.dropout(
weights, 1.0 - dropout_rate,
noise_shape=weights.shape - dropout_broadcast_dims)
if synthesize and "plus" not in synthesize_mode:
if synthesize_mode == "dense_minus":
outputs_shape = mtf.Shape(q.shape.dims[:-1] + [value_dim])
else:
outputs_shape = mtf.Shape(q.shape.dims[:-1] + [num_heads, value_dim])
else:
outputs_shape = q.shape - [key_dim] + value_dim
outputs = mtf.einsum([weights, v], outputs_shape)
return outputs
class AttentionParams(object):
"""A set of parameters used for (multihead) attention."""
def __init__(self,
mesh,
query_input_dim,
memory_input_dim,
output_dim,
key_dim,
value_dim,
query_heads_dims,
memory_heads_dims,
variable_dtype,
shared_kv=False,
no_query=False,
combine_dims=True,
ensemble_dim=None,
keep_query_heads_dims=False,
fold_scaling_into_initializer=True,
make_attention_vars=True):
"""Create attention parameters.
combine_dims is a hack for faster execution. The heads and key/value
dimensions are combined in the variables and the computation. The hack
would not be necessary if XLA optimized einsum properly.
Args:
mesh: a Mesh
query_input_dim: a Dimension
memory_input_dim: a Dimension
output_dim: a Dimension
key_dim: a Dimension
value_dim: a Dimension
query_heads_dims: a list of Dimension
memory_heads_dims: a list of Dimension
variable_dtype: a mtf.VariableDType
shared_kv: a boolean
no_query: a boolean
combine_dims: a boolean
ensemble_dim: an optional Dimension
keep_query_heads_dims: a boolean, if true keep the query_heads_dims in the
output.
fold_scaling_into_initializer: a boolean
make_attention_vars: a boolean, whether to make the attention variables.
This is typically True. Only set to False for ExpertsAttention which
creates variables inside the moe.MoE1D-call.
"""
if shared_kv and key_dim != value_dim:
raise ValueError("shared_kv requires key_dim == value_dim")
self.mesh = mesh
self.query_input_dim = query_input_dim
self.memory_input_dim = memory_input_dim
self.output_dim = output_dim
self.key_dim = key_dim
self.value_dim = value_dim
self.query_heads_dims = query_heads_dims or []
self.memory_heads_dims = memory_heads_dims or []
self.variable_dtype = variable_dtype
self.shared_kv = shared_kv
self.no_query = no_query
self.combine_dims = combine_dims
self.keep_query_heads_dims = keep_query_heads_dims
self.fold_scaling_into_initializer = fold_scaling_into_initializer
self.make_attention_vars = make_attention_vars
if combine_dims:
self.q_shape = [query_input_dim, _combined_dim(self.q_dims)]
self.k_shape = [memory_input_dim, _combined_dim(self.k_dims)]
self.v_shape = [memory_input_dim, _combined_dim(self.v_dims)]
self.o_shape = [_combined_dim(self.o_dims), output_dim]
else:
self.q_shape = [query_input_dim] + self.q_dims
self.k_shape = [memory_input_dim] + self.k_dims
self.v_shape = [memory_input_dim] + self.v_dims
self.o_shape = self.o_dims + [output_dim]
if ensemble_dim:
self.q_shape = [ensemble_dim] + self.q_shape
self.k_shape = [ensemble_dim] + self.k_shape
self.v_shape = [ensemble_dim] + self.v_shape
self.o_shape = [ensemble_dim] + self.o_shape
self.init_weights()
def init_weights(self):
"""Initialize attention projection matrices."""
if mtf.layers.unit_scaling_convention():
init = tf.random_normal_initializer(stddev=1.0)
q_init = init
kv_init = init
o_init = init
else:
stddev = self.query_input_dim.size ** -0.5
if self.fold_scaling_into_initializer:
stddev *= self.key_dim.size ** -0.5
q_init = tf.random_normal_initializer(stddev=stddev)
kv_init = tf.random_normal_initializer(
stddev=self.memory_input_dim.size ** -0.5)
o_init = tf.random_normal_initializer(
stddev=mtf.Shape(self.query_heads_dims + [self.value_dim]).size**-0.5)
# Toggle producing wq, wv, wk which are not needed for the ExpertsAttention
if self.make_attention_vars:
if not self.no_query:
self.wq = mtf.get_variable(
self.mesh,
"q",
self.q_shape,
initializer=q_init,
dtype=self.variable_dtype)
if self.shared_kv:
self.wkv = mtf.get_variable(
self.mesh,
"kv",
self.k_shape,
initializer=kv_init,
dtype=self.variable_dtype)
else:
self.wk = mtf.get_variable(
self.mesh,
"k",
self.k_shape,
initializer=kv_init,
dtype=self.variable_dtype)
self.wv = mtf.get_variable(
self.mesh,
"v",
self.v_shape,
initializer=kv_init,
dtype=self.variable_dtype)
self.wo = mtf.get_variable(
self.mesh,
"o",
self.o_shape,
initializer=o_init,
dtype=self.variable_dtype)
def compute_q(self, query_antecedent):
"""Compute query Tensor q.
Args:
query_antecedent: a Tensor with dimensions
{query_input_dim} + other_dims
Returns:
a Tensor with dimensions
query_heads_dims + {key_dim} + other_dims
"""
ret = mtf.layers.us_einsum(
[query_antecedent, self.wq], reduced_dims=[self.query_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.q_dims)
if not self.fold_scaling_into_initializer:
ret *= self.key_dim.size ** -0.5
return ret
def compute_kv(self, memory_antecedent):
"""Compute key/value Tensor kv.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {key_dim} + other_dims
"""
if not self.shared_kv:
raise ValueError("compute_kv can only be called with shared_kv")
ret = mtf.layers.us_einsum(
[memory_antecedent, self.wkv], reduced_dims=[self.memory_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.k_dims)
return ret
def compute_k(self, memory_antecedent):
"""Compute key Tensor k.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {key_dim} + other_dims
"""
if self.shared_kv:
raise ValueError("compute_k cannot be called with shared_kv")
ret = mtf.layers.us_einsum(
[memory_antecedent, self.wk], reduced_dims=[self.memory_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.k_dims)
return ret
def compute_v(self, memory_antecedent):
"""Compute value Tensor v.
Args:
memory_antecedent: a Tensor with dimensions
{memory_input_dim} + other_dims
Returns:
a Tensor with dimensions
memory_heads_dims + {value_dim} + other_dims
"""
if self.shared_kv:
raise ValueError("compute_v cannot be called with shared_kv")
ret = mtf.layers.us_einsum(
[memory_antecedent, self.wv], reduced_dims=[self.memory_input_dim])
if self.combine_dims:
ret = mtf.replace_dimensions(ret, ret.shape.dims[-1], self.v_dims)
return ret
def compute_output(self, o, output_shape=None):
"""Compute output of multihead attention.
Args:
o: a Tensor with dimensions
query_heads_dims + {value_dim} + other_dims
output_shape: an optional Shape
Returns:
a Tensor with shape:
{output_dim} + other_dims
"""
if self.combine_dims:
o = mtf.transpose(o, o.shape - self.o_dims + self.o_dims)
o = mtf.replace_dimensions(o, self.o_dims, self.wo.shape.dims[-2])
reduced_dims = [self.wo.shape.dims[-2]]
else:
reduced_dims = self.o_dims
if self.keep_query_heads_dims:
reduced_dims = [self.value_dim]
return mtf.layers.us_einsum(
[o, self.wo], output_shape=output_shape, reduced_dims=reduced_dims)
@property
def q_dims(self):
return self.query_heads_dims + [self.key_dim]
@property
def k_dims(self):
return self.memory_heads_dims + [self.key_dim]
@property
def v_dims(self):
return self.memory_heads_dims + [self.value_dim]
@property
def o_dims(self):
return self.query_heads_dims + [self.value_dim]
class ExpertsAttentionParams(AttentionParams):
"""Create attention parameters using experts-layer."""
def __init__(self,
mesh,
query_input_dim,
memory_input_dim,
output_dim,
key_dim,
value_dim,
query_heads_dims,
memory_heads_dims,
variable_dtype,
shared_kv=False,
no_query=False,
combine_dims=True,
ensemble_dim=None,
keep_query_heads_dims=False,
fold_scaling_into_initializer=True,
context=None,
experts_hparams=None):
super(ExpertsAttentionParams, self).__init__(
mesh=mesh,
query_input_dim=query_input_dim,
memory_input_dim=memory_input_dim,
output_dim=output_dim,
key_dim=key_dim,
value_dim=value_dim,
query_heads_dims=query_heads_dims,
memory_heads_dims=memory_heads_dims,
variable_dtype=variable_dtype,
shared_kv=shared_kv,
no_query=no_query,
combine_dims=combine_dims,
ensemble_dim=ensemble_dim,
keep_query_heads_dims=keep_query_heads_dims,
fold_scaling_into_initializer=fold_scaling_into_initializer,
make_attention_vars=False)
self.context = context
    # ExpertsAttention, for simplicity, asserts that combine_dims is True, and
# for efficiency, that shared_kv is True.
if not self.combine_dims:
raise ValueError("self.combine_dims must be True for ExpertsAttention")
if not self.shared_kv:
raise ValueError("self.shared_kv must be True for ExpertsAttention")
if mtf.layers.unit_scaling_convention():
raise NotImplementedError
moe_output_dims = self.q_shape[-1]
tf.logging.info("ExpertsAttention moe_hidden_size: {}".format(
experts_hparams.hidden_size))
tf.logging.info("moe_output_dims: {}".format(moe_output_dims))
self.moe_layer = mtf.transformer.moe.MoE1D(
moe_gating=experts_hparams.moe_gating,
num_experts=experts_hparams.num_experts,
loss_coef=experts_hparams.loss_coef,
group_size=experts_hparams.group_size,
min_expert_capacity=experts_hparams.min_expert_capacity,
capacity_factor_train=experts_hparams.capacity_factor_train,
capacity_factor_eval=experts_hparams.capacity_factor_eval,
switch_policy_train=experts_hparams.switch_policy_train,
switch_policy_eval=experts_hparams.switch_policy_eval,
switch_dropout=experts_hparams.switch_dropout,
switch_temperature=experts_hparams.switch_temperature,
switch_jitter=experts_hparams.switch_jitter,
switch_top_k=experts_hparams.switch_top_k,
hidden_size=experts_hparams.hidden_size,
output_dim=moe_output_dims,
use_experts_attention=experts_hparams.use_experts_attention)
def _compute_merge_qkv(self, antecedent):
"""Computes qkv all in one call using MoE layer."""
    # NOTE: This assumes the query and memory antecedents are the same.
qk = self.moe_layer.call(self.context, antecedent)
# Split qk here since they went through experts-layers
q, k = qk
# Scale query
q *= self.key_dim.size ** -0.5
self._q = mtf.replace_dimensions(q, q.shape.dims[-1], self.q_dims)
self._k = mtf.replace_dimensions(k, k.shape.dims[-1], self.k_dims)
def compute_q(self, query_antecedent):
self._compute_merge_qkv(query_antecedent)
return self._q
def compute_k(self, memory_antecedent):
del memory_antecedent
return self._k
def compute_kv(self, memory_antecedent):
del memory_antecedent
return self._k
def compute_v(self, memory_antecedent):
del memory_antecedent
raise NotImplementedError("ExpertsAttention uses shared_kv = True.")
def _combined_dim(dims):
return mtf.Dimension(dims[0].name, mtf.Shape(dims).size)
def attention_params_simple(
mesh, io_dim, kv_dim, heads_dim, variable_dtype):
"""Common case attention parameters.
Args:
mesh: a Mesh
io_dim: a Dimension (channels dimension of inputs and outputs)
kv_dim: a Dimension (channels in keys and values)
heads_dim: a Dimension (number of attention "heads")
variable_dtype: a mtf.VariableDType
Returns:
an AttentionParams
"""
return AttentionParams(
mesh,
query_input_dim=io_dim,
memory_input_dim=io_dim,
output_dim=io_dim,
key_dim=kv_dim,
value_dim=kv_dim,
query_heads_dims=[heads_dim],
memory_heads_dims=[heads_dim],
variable_dtype=variable_dtype)
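# A minimal sketch of how AttentionParams (built here via
# attention_params_simple) combines with attention() for multihead
# self-attention. The mesh and the dimension names/sizes are illustrative
# assumptions; renaming "length" to "memory_length" for the memory antecedent
# mirrors what the transformer layers in this package do for self-attention.
def _attention_params_usage_sketch():
  """Projects toy inputs to q, k, v, attends, and projects the result back."""
  graph = mtf.Graph()
  mesh = mtf.Mesh(graph, "example_mesh")
  io_dim = mtf.Dimension("d_model", 32)
  kv_dim = mtf.Dimension("d_kv", 16)
  heads_dim = mtf.Dimension("heads", 4)
  batch = mtf.Dimension("batch", 2)
  length = mtf.Dimension("length", 8)
  memory_length = mtf.Dimension("memory_length", 8)
  variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
  params = attention_params_simple(mesh, io_dim, kv_dim, heads_dim,
                                   variable_dtype)
  x = mtf.ones(mesh, mtf.Shape([batch, length, io_dim]))
  # Self-attention: the memory is the input under the memory_length name.
  m = mtf.rename_dimension(x, "length", "memory_length")
  q = params.compute_q(x)
  k = params.compute_k(m)
  v = params.compute_v(m)
  o = attention(q, k, v, memory_length, params.key_dim, params.value_dim)
  return params.compute_output(o, output_shape=x.shape)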
def local_attention_1d(q,
k,
v,
length_dim,
key_dim,
value_dim,
fully_autoregressive=True,
length_dim_num_splits=1,
radius=128,
sequence_id=1,
write_priority=None,
read_priority=None,
attention_kwargs=None):
"""Attention to the a neighborood around the source.
If fully_autoregressive, then query position p can only see memory positions
in the range (p - radius, p].
If not fully_autoregressive, then query position p can only see memory
  positions in the range (p - radius, p + radius].
In addition, if write_priority and read_priority are provided, then attention
is limited to position pairs where
read_priority[query position] >= write_priority[memory position]
Args:
q: a Tensor containing length_dim
k: a Tensor containing length_dim
    v: an optional Tensor containing length_dim. If None, then uses v=k.
length_dim: a Dimension
key_dim: a Dimension (the channels dimension of q and k)
value_dim: a Dimension (the channels dimension of v)
fully_autoregressive: a boolean
length_dim_num_splits: an optional integer indicating how many ways the
length dimension is split
radius: an integer
sequence_id: a Tensor or an integer
write_priority: an optional Tensor containing length_dim
read_priority: an optional Tensor containing length_dim
attention_kwargs: optional keyword arguments for attention()
Returns:
a Tensor with the shape x.shape - key_dim + value_dim
Raises:
ValueError: if channels or depth don't match.
"""
# Choose a suitable block size.
# We choose the greatest divisor of length_per_split less than or equal
  # to max(radius, 128)
length_per_split = length_dim.size // length_dim_num_splits
block_length = max(radius, 128)
while length_per_split % block_length != 0:
block_length -= 1
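  # For example, with length_dim.size=8192, length_dim_num_splits=8 and
  # radius=128, length_per_split is 1024 and block_length stays at 128
  # (1024 % 128 == 0); with length_per_split=1000 it would count down to 125,
  # the largest divisor of 1000 that is <= 128.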
query_block_length = mtf.Dimension("query_block_length", block_length)
memory_block_length = mtf.Dimension("memory_block_length", block_length)
# The num_blocks dimension gets the same name as the length dimension,
# so it will be split in the same way.
num_blocks = mtf.Dimension(length_dim.name, length_dim.size // block_length)
def _reshape_query(x):
return mtf.replace_dimensions(
x, length_dim, [num_blocks, query_block_length])
def _reshape_memory(x):
x = mtf.replace_dimensions(
x, length_dim, [num_blocks, memory_block_length])
return (mtf.left_halo_exchange if fully_autoregressive
else mtf.halo_exchange)(
x, num_blocks, memory_block_length, radius)
q = _reshape_query(q)
k = _reshape_memory(k)
if v:
v = _reshape_memory(v)
else:
v = k
if sequence_id is None:
sequence_id = 1
if (not isinstance(sequence_id, mtf.Tensor) or
length_dim not in sequence_id.shape.dims):
sequence_id += mtf.zeros(q.mesh, [length_dim], tf.int32)
q_sequence_id = _reshape_query(sequence_id)
m_sequence_id = _reshape_memory(sequence_id)
pos = mtf.range(q.mesh, length_dim, dtype=tf.int32)
q_pos = _reshape_query(pos)
m_pos = _reshape_memory(pos)
padded_memory_block_length = mtf.Dimension(
"memory_block_length",
(1 if fully_autoregressive else 2) * radius + block_length)
relative_position = m_pos - q_pos
visible = mtf.equal(q_sequence_id, m_sequence_id)
visible = mtf.logical_and(visible, mtf.greater(relative_position, -radius))
visible = mtf.logical_and(visible, mtf.less_equal(
relative_position, 0 if fully_autoregressive else radius))
if read_priority is not None:
write_priority = _reshape_memory(write_priority)
read_priority = _reshape_query(read_priority)
visible = mtf.logical_and(
visible, mtf.greater_equal(read_priority, write_priority))
bias = visibility_mask_to_attention_bias(visible, q.dtype)
o = attention(q, k, v, padded_memory_block_length,
key_dim, value_dim, bias, **attention_kwargs)
return mtf.replace_dimensions(o, [num_blocks, query_block_length], length_dim)
def visibility_mask_to_attention_bias(visible, dtype):
"""Convert a boolean visibility mask to an attention bias.
The returned Tensor has large negative values in positions where
visible=False.
Args:
visible: a boolean Tensor
dtype: a dtype
Returns:
a Tensor with the given dtype and the same shape as "visible"
"""
return mtf.cast(mtf.logical_not(visible), dtype) * -1e9
def maybe_reshape_attention_input_for_2d_sharding(
context, q, k, v, bias, unsplittable_dims):
"""Reshape the inputs to attention to split over an unused mesh dimension.
In the case where the attention computation is unnecessarily replicated,
this function reshapes the attention inputs to remove the unnecessary
replication.
  This becomes relevant when doing 2-dimensional model parallelism.
d_model is sharded over one mesh dimension and [vocab, num_heads, d_ff] are
sharded over the other mesh dimension. This fully distributes all of the
einsum operations, except for the internals of the attention computation.
To distribute that computation, this function creates a new tensor-dimension
from the low bits of either the batch dimension or the num_heads dimension,
and then splits that dimension over the unused mesh dimension.
Args:
context: a transformer.Context
q: a Tensor
k: a Tensor
v: a Tensor
bias: a Tensor
unsplittable_dims: a list of tensor-dimensions not to split. The key/value
dimensions should be passed here.
Returns:
reshaped_q: a Tensor
reshaped_k: a Tensor
reshaped_v: a Tensor
reshaped_bias: a Tensor
"""
original_inputs = q, k, v, bias
# we need to know the layout and mesh-shape to figure out what to do.
if not context or not context.model.layout or not context.model.mesh_shape:
return original_inputs
mesh_shape = mtf.convert_to_shape(context.model.mesh_shape)
layout_rules = mtf.convert_to_layout_rules(context.model.layout)
# find a mesh dim that is unused (no tensor-dimension is split across it)
mesh_axis_used = [False] * mesh_shape.ndims
for x in original_inputs:
for mesh_axis in layout_rules.tensor_layout(
x.shape, mesh_shape).tensor_axis_to_mesh_axis:
if mesh_axis is not None:
mesh_axis_used[mesh_axis] = True
if False not in mesh_axis_used:
return original_inputs
mesh_dim = mesh_shape.dims[mesh_axis_used.index(False)]
# Choose an appropriate name for the new tensor-dimension so that the layout
# will know to split it across the unused mesh dimension.
tensor_dim_name = None
tensor_dim_name = layout_rules.mesh_dimension_name_to_tensor_dimension_names(
mesh_dim.name)
if tensor_dim_name:
tensor_dim_name = tensor_dim_name[0]
else:
return original_inputs
# Find a tensor-dimension that we can further split, by breaking off the
# lower bits into our new tensor-dimension.
  # This resplittable tensor-dimension must be present in all of q, k, v
# and must be large enough to be further split.
resplittable_dim = None
for d in q.shape.dims:
if d in k.shape.dims and d in v.shape.dims and d not in unsplittable_dims:
num_splits = mtf.tensor_dim_to_mesh_dim_size(
context.model.layout, context.model.mesh_shape, d)
if d.size % (num_splits * mesh_dim.size) == 0:
resplittable_dim = d
break
if not resplittable_dim:
return original_inputs
new_dim_high = mtf.Dimension(resplittable_dim.name, num_splits)
new_dim_low = mtf.Dimension(tensor_dim_name,
resplittable_dim.size // num_splits)
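  # For example, if a "batch" dimension of size 64 is already split 8 ways and
  # the unused mesh dimension has size 2, batch is reshaped into
  # [("batch", 8), (tensor_dim_name, 8)]: the first factor keeps the existing
  # split and the second factor can be split over the previously unused mesh
  # dimension.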
def _my_reshape(x):
if x and resplittable_dim in x.shape.dims:
return mtf.replace_dimensions(
x, resplittable_dim, [new_dim_high, new_dim_low])
else:
return x
return _my_reshape(q), _my_reshape(k), _my_reshape(v), _my_reshape(bias)
| mesh-master | mesh_tensorflow/transformer/attention.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import utils
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
def mock_vocabulary(encode_dict, vocab_size=None):
vocab = absltest.mock.MagicMock()
vocab.vocab_size = vocab_size
idx_to_str = {v: k for k, v in encode_dict.items()}
vocab.decode = absltest.mock.MagicMock(
side_effect=lambda ids: [idx_to_str[id] for id in ids])
return vocab
class UtilsTest(parameterized.TestCase, tf.test.TestCase):
def testDynamicText2self_packed(self):
batch = 2
length = 5
input_tensors = {
"inputs": [[3, 1, 4, 1, 0], [1, 4, 3, 2, 1]],
"inputs_segmentation": [[1, 1, 2, 2, 0], [1, 2, 2, 2, 2]],
"inputs_position": [[0, 1, 0, 1, 0], [0, 0, 1, 2, 3]],
"targets": [[1, 1, 0, 0, 0], [9, 8, 1, 2, 1]],
"targets_segmentation": [[1, 2, 0, 0, 0], [1, 1, 1, 2, 2]],
"targets_position": [[0, 0, 0, 0, 0], [0, 1, 2, 0, 1]]
}
expected_output_tensors = {
"targets": [[3, 1, 1, 4, 1, 1, 0, 0, 0, 0],
[1, 9, 8, 1, 4, 3, 2, 1, 2, 1]],
"targets_segmentation": [[1, 1, 1, 2, 2, 2, 0, 0, 0, 0],
[1, 1, 1, 1, 2, 2, 2, 2, 2, 2]],
"targets_position": [[0, 1, 2, 0, 1, 2, 0, 0, 0, 0],
[0, 1, 2, 3, 0, 1, 2, 3, 4, 5]]
}
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
length_dim = mtf.Dimension("length", length)
input_shape = mtf.Shape([batch_dim, length_dim])
mtf_features = {
k: mtf.import_tf_tensor(mesh, v, input_shape)
for k, v in input_tensors.items()
}
mtf_outputs = utils._dynamic_text2self(mtf_features)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
for k, v in expected_output_tensors.items():
out = lowering.export_to_tf_tensor(mtf_outputs[k])
actual = self.evaluate(out)
self.assertAllEqual(actual, v)
def testDynamicText2self_unpacked(self):
batch = 2
length = 5
input_tensors = {
"inputs": [[3, 1, 4, 1, 0], [1, 4, 3, 2, 1]],
"targets": [[1, 1, 0, 0, 0], [9, 8, 1, 2, 1]],
}
expected_output_tensors = {
"targets": [[3, 1, 4, 1, 1, 1, 0, 0, 0, 0],
[1, 4, 3, 2, 1, 9, 8, 1, 2, 1]],
}
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh")
batch_dim = mtf.Dimension("batch", batch)
length_dim = mtf.Dimension("length", length)
input_shape = mtf.Shape([batch_dim, length_dim])
mtf_features = {
k: mtf.import_tf_tensor(mesh, v, input_shape)
for k, v in input_tensors.items()
}
mtf_outputs = utils._dynamic_text2self(mtf_features)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[""])
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
for k, v in expected_output_tensors.items():
out = lowering.export_to_tf_tensor(mtf_outputs[k])
actual = self.evaluate(out)
self.assertAllEqual(actual, v)
def testCleanDecodes(self):
cleaned_decodes = utils.clean_decodes([[2, 0, 2, 1, 3, 2, 0],
[1, 2, 2, 2, 2, 2, 2],
[2, 2, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2]])
with self.test_session() as sess:
self.assertAllEqual(
sess.run(cleaned_decodes),
[[2, 0, 2, 1, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0], [2, 2, 1, 0, 0, 0, 0],
[2, 2, 2, 2, 2, 2, 2]])
@parameterized.named_parameters(
("int16", np.int16),
("int32", np.int32),
("int64", np.int64),
)
def test_maybe_add_pretokenized_features_with_int_inputs(self, dtype):
vocabulary = mock_vocabulary({"a": 1, "b": 2, "c": 3, "d": 4,},
vocab_size=1000)
examples = [{"targets": np.array([1, 2, 3, 4], dtype=dtype),
"inputs": np.array([1, 2, 3, 4], dtype=dtype)},
]
result = utils._maybe_add_pretokenized_features(examples, vocabulary)
expected = ["a", "b", "c", "d"]
self.assertAllEqual(result[0]["targets_pretokenized"], expected)
self.assertAllEqual(result[0]["inputs_pretokenized"], expected)
self.assertLen(result, 1)
def test_maybe_add_pretokenized_features_nonstandard_feature(self):
vocabulary = mock_vocabulary({"a": 1, "b": 2, "c": 3, "d": 4,},
vocab_size=1000)
examples = [{"notafeature": np.array([1, 2, 3, 4], dtype=np.int32),
"inputs": np.array([1, 2, 3, 4], dtype=np.int32)}
]
result = utils._maybe_add_pretokenized_features(examples, vocabulary)
self.assertSameElements(result[0].keys(),
("notafeature", "inputs", "inputs_pretokenized"))
self.assertAllEqual(result[0]["notafeature"], [1, 2, 3, 4])
def test_maybe_add_pretokenized_features_pretokenized_exists(self):
vocabulary = mock_vocabulary({"a": 1, "b": 2, "c": 3, "d": 4,},
vocab_size=1000)
examples = [{"inputs_pretokenized": "Hello world!",
"inputs": np.array([1, 2, 3, 4], dtype=np.int32)}
]
result = utils._maybe_add_pretokenized_features(examples, vocabulary)
self.assertEqual(result[0]["inputs_pretokenized"], "Hello world!")
self.assertSameElements(result[0].keys(), ("inputs", "inputs_pretokenized"))
self.assertLen(result, 1)
if __name__ == "__main__":
tf.test.main()
| mesh-master | mesh_tensorflow/transformer/utils_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Implementation of adaptive softmax.
See the papers https://arxiv.org/abs/1609.04309 and
https://arxiv.org/abs/1809.10853 for more details.
"""
import math
from typing import Dict, Sequence, Union
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import transformer
from mesh_tensorflow.transformer import vocab_embeddings
import tensorflow.compat.v1 as tf
class _Cluster(object):
"""Helper class for adaptive embeddings specifying a cluster of tokens.
Essentially a wrapper around a vocab embedding for the cluster with additional
metadata so that we can apply the embedding to the actual ids and hidden
states.
"""
def __init__(self, embedding, start_token_id, end_token_id,
length_projection_factor, vocab_dim):
"""Cluster constructor.
Args:
embedding: a FactorizedVocabEmbedding or transformer.VocabEmbedding, the
vocab embedding to use for the cluster.
start_token_id: an integer, the inclusive id of the first token in the
cluster.
end_token_id: an integer, the exclusive id of the last token in the
cluster.
length_projection_factor: a float between 0 and 1, the sequence length
dimension will be projected down to this number times the sequence
length dimension to contain the elements in this cluster. If the input
contains too many tokens in the cluster, tokens later in the input will
be ignored.
vocab_dim: an mtf.Dimension, the dimension the embedding uses as its
vocab.
"""
self._embedding = embedding
self._start_token_id = start_token_id
self._end_token_id = end_token_id
self._length_projection_factor = length_projection_factor
self._vocab_dim = vocab_dim
@property
def end_token_id(self):
return self._end_token_id
@property
def length_projection_factor(self):
return self._length_projection_factor
def ids_to_embedding(self, ids, context):
"""Ids to embeddings with ids not in cluster mapped to the zero vector."""
ids -= self._start_token_id
# The mtf.gather in the embedding's ids_to_embedding implementation will
# cause the one hot representations of tokens greater than cluster vocab
# dimension size to be the zero vector. Thus the embeddings for those tokens
# will be the zero vector.
ids = mtf.where(mtf.greater_equal(ids, 0), ids, self._vocab_dim.size)
# Handle the case of the head cluster where we will have entries at the end
# corresponding to the tail clusters.
ids = mtf.where(
mtf.less(ids, self._end_token_id - self._start_token_id),
ids,
self._vocab_dim.size,
)
return self._embedding.ids_to_embedding(ids, context)
def get_cluster_mask(self, targets):
"""Computes mask over the targets masking out tokens not in the cluster."""
return mtf.logical_and(
mtf.greater_equal(targets, self._start_token_id),
mtf.less(targets, self._end_token_id))
def get_cluster_length_dim(self, length_dim):
"""Returns dimension used instead of sequence length for the cluster."""
cluster_length = math.ceil(self._length_projection_factor * length_dim.size)
return mtf.Dimension(length_dim.name, int(cluster_length))
def get_project_to_cluster_length(self, cluster_mask, dtype):
"""Returns projection from length dim to the shorter cluster length dim."""
seq_length_dim = cluster_mask.shape.get_dim_by_name("length")
cluster_length_dim = self.get_cluster_length_dim(seq_length_dim)
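    # cumsum over the boolean mask, minus one, assigns each in-cluster
    # position a consecutive index (0, 1, 2, ...); one_hot of that index
    # builds a {length x cluster_length} 0/1 matrix that gathers in-cluster
    # positions into the shorter cluster-length dimension. Indices at or
    # beyond cluster_length_dim.size fall outside the one_hot range and are
    # dropped.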
return mtf.cast(cluster_mask, dtype) * mtf.one_hot(
mtf.cumsum(mtf.cast(cluster_mask, tf.int32), seq_length_dim) - 1,
output_dim=cluster_length_dim,
dtype=dtype)
def compute_loss(self, decoder, hidden, targets, context):
"""Computes the loss during training."""
logits = self._embedding.hidden_to_logits(hidden, context=context)
soft_targets = mtf.one_hot(
targets - self._start_token_id,
self._vocab_dim,
dtype=context.activation_dtype)
loss = mtf.layers.softmax_cross_entropy_with_logits(
logits, soft_targets, self._vocab_dim, z_loss=decoder.z_loss)
padding_mask = mtf.layers.weights_nonzero(
targets, dtype=context.activation_dtype)
return (mtf.reduce_sum(loss * padding_mask) /
decoder.loss_denominator(targets, context.num_microbatches))
def compute_log_softmax(self, hidden, context):
"""Returns the log softmax of logits computed from the hidden state."""
logits = self._embedding.hidden_to_logits(hidden, context=context)
return mtf.log_softmax(logits, reduced_dim=self._vocab_dim)
def get_log_softmax_prefix(self, log_softmax, end_index):
"""Returns first end_index entries in log_softmax along the vocab dim."""
prefix_dim = mtf.Dimension(self._vocab_dim.name, end_index)
indices = mtf.mtf_range(
log_softmax.mesh, dim=self._vocab_dim, dtype=tf.int32)
prefix_indices = mtf.where(mtf.less(indices, end_index), indices, -1)
projection = mtf.one_hot(
prefix_indices, prefix_dim, dtype=log_softmax.dtype)
return mtf.einsum([log_softmax, projection], reduced_dims=[self._vocab_dim])
def get_log_softmax_value(self, log_softmax, index):
"""Returns the entry at index of the log_softmax along the vocab dim."""
return mtf.gather(log_softmax, index, dim=self._vocab_dim)
@gin.configurable
class AdaptiveSoftmaxVocabEmbedding(object):
"""Vocab embedding implementing the adaptive softmax.
The adaptive softmax was first introduced in this paper
(https://arxiv.org/abs/1609.04309). Note that this implementation is actually
most similar to the adaptive vocab embeddings in
https://arxiv.org/abs/1809.10853 as it supports having different embedding
sizes for different clusters.
The adaptive softmax works by factorizing the traditional softmax over
multiple clusters:
p(v|h) = p(v|c,h) p(c|h),
where both probability distributions take the form of a softmax.
  A further speed-up is achieved by putting the most frequently occurring
  tokens in the "head" cluster. Essentially, those tokens are included as
  "classes" in the p(c|h) softmax, so computing their probabilities requires
  only a single softmax evaluation.
This implementation differs from vocab_embeddings.AdaptiveVocabEmbedding. That
  implementation only supports variable embedding sizes across clusters. This
implementation also supports the adaptive softmax.
A few conditions must be met in order to use this vocab:
- Unitransformer.shared_embedding_and_softmax_weights = True.
- If training, then
    Unitransformer.loss_fn = adaptive_softmax.adaptive_softmax_loss_fn.
- Label smoothing is not supported and will be ignored silently.
- loss_on_targets_only is not supported and will be ignored silently.
"""
def __init__(self,
mesh: mtf.Mesh,
vocab_dim: mtf.Dimension,
output_dim: mtf.Dimension,
variable_dtype: mtf.VariableDType,
name: str,
ensemble_dim: mtf.Dimension,
clusters: Sequence[Dict[str, Union[int, float]]] = gin.REQUIRED):
"""Configurable embedding for the vocabulary.
Most of the arguments get passed to `mtf.layers.embedding_weights`.
The clustering parameters are specified by the `clusters` argument. It is a
list of dicts with keys:
- token_count: The number of tokens in the cluster.
- embedding_size: (optional) The hidden dimension size of the cluster's
embedding. Defaults to the model dimension size.
- length_projection_factor: (optional) Since MTF can't handle variable
length dimensions, we project from the sequence length dimension to a
dimension of size length_projection_factor * sequence_length during
training. This can save compute time and resources if the cluster has
many tokens that appear infrequently. If all of the tokens belonging to
the cluster cannot fit within this reduced dimension, some will be
      discarded and ignored for the purposes of computing loss. Defaults to 1.
      Ignored for the head (first) cluster and when not training.
The first cluster will become the head cluster.
For example, let's say we have a vocab size of 500k and pass as clusters:
[
{"token_count": 50000, "embedding_size": 1024},
{"token_count": 100000, "embedding_size": 256},
{"token_count": 350000, "embedding_size": 64},
]
Then tokens with ids 0 (inclusive) to 50k (exclusive) will be in the first
cluster with embedding size of 1024, tokens with ids 50k to 150k will be in
the second cluster with embedding size of 256, and tokens with ids 150k to
500k will be in the third cluster with embedding size of 64.
Args:
mesh: a mtf.Mesh, the mesh used to layout the tensors.
vocab_dim: a mtf.Dimension, the dimension corresponding to vocabulary.
output_dim: a mtf.Dimension, the dimension corresponding to the model
hidden states.
variable_dtype: a mtf.VariableDType, the datatype information for the
variables used in the embedding tensors.
name: a string, a name to base variable names off of.
ensemble_dim: a mtf.Dimension, the dimension used for ensembling.
Absolutely no guarantees that this code will work with ensembling.
clusters: a list(dict), specification of the clusters. See above for more
information.
Raises:
ValueError: The sum of the token counts across the clusters does not equal
the vocabulary size or a length_projection_factor is not in the range
(0, 1].
"""
self._mesh = mesh
self._variable_dtype = variable_dtype
self._name = name
self._ensemble_dim = ensemble_dim
self._vocab_dim = vocab_dim
self._output_dim = output_dim
self._num_clusters = len(clusters)
token_counts = [cluster["token_count"] for cluster in clusters]
if sum(token_counts) != vocab_dim.size:
raise ValueError(
"The cluster token counts {} do not sum to the vocab size {}.".format(
token_counts, vocab_dim.size))
self._tail_clusters = []
start_token_id = 0
for i, cluster_spec in enumerate(clusters):
cluster = self._create_cluster(cluster_spec, i, start_token_id)
if i == 0:
self._head_cluster = cluster
else:
self._tail_clusters.append(cluster)
start_token_id += cluster_spec["token_count"]
def _create_cluster(self, cluster_spec, index, start_token_id):
"""Creates a cluster given its spec."""
token_count = cluster_spec["token_count"]
embedding_size = cluster_spec.get("embedding_size", self._output_dim.size)
length_projection_factor = cluster_spec.get("length_projection_factor", 1)
if length_projection_factor <= 0 or length_projection_factor > 1:
raise ValueError(
"Invalid length_projection_factor of {}. Must be in range (0, 1]"
.format(length_projection_factor))
if index == 0:
# Include the entries for the tail clusters in the head cluster "vocab".
cluster_vocab_dim = mtf.Dimension(self._vocab_dim.name,
token_count + self._num_clusters - 1)
else:
cluster_vocab_dim = mtf.Dimension(self._vocab_dim.name, token_count)
if embedding_size == self._output_dim.size:
# In this case we don't need to up project from the embedding space to
# the model state space.
cluster_embedding = transformer.VocabEmbedding(
mesh=self._mesh,
vocab_dim=cluster_vocab_dim,
output_dim=self._output_dim,
variable_dtype=self._variable_dtype,
name="{}_{}".format(self._name, index),
ensemble_dim=self._ensemble_dim)
else:
cluster_embedding = vocab_embeddings.FactorizedVocabEmbedding(
mesh=self._mesh,
vocab_dim=cluster_vocab_dim,
output_dim=self._output_dim,
variable_dtype=self._variable_dtype,
name="{}_{}".format(self._name, index),
ensemble_dim=self._ensemble_dim,
inner_dimension_size=embedding_size)
return _Cluster(
embedding=cluster_embedding,
start_token_id=start_token_id,
end_token_id=start_token_id + token_count,
length_projection_factor=length_projection_factor,
vocab_dim=cluster_vocab_dim)
def ids_to_embedding(self, ids: mtf.Tensor, context) -> mtf.Tensor:
all_clusters = self._tail_clusters + [self._head_cluster]
# Ids not in each cluster will be mapped to the zero vector. Since clusters
# are disjoint, this sum is correct.
return sum(
cluster.ids_to_embedding(ids, context) for cluster in all_clusters)
def hidden_to_logits(self, hidden: mtf.Tensor,
context: transformer.Context) -> mtf.Tensor:
"""Function called by mtf transformer to get the logits.
The benefit from the adaptive softmax comes from not having to compute the
logits over all of the vocab during training. Thus, we use the somewhat
hacky solution of returning the hidden states during training and then using
them to compute the loss in a custom loss function.
    When not training, this method will be true to its name and return the
logits corresponding to the hidden state.
Args:
hidden: an mtf.Tensor, hidden model states of the final decoder layer.
context: a transformer.Context, the context used for the call to the
transformer.
Returns:
an mtf.Tensor
"""
if context.mode == tf.estimator.ModeKeys.TRAIN:
return hidden
else:
return self._hidden_to_logits(hidden, context)
def _hidden_to_logits(self, hidden, context):
"""Actually compute the logits over the entire vocab."""
head_size = self._head_cluster.end_token_id
# Note that computing the log softmax is equivalent to computing the logits.
head_log_softmax = self._head_cluster.compute_log_softmax(hidden, context)
logits = [
self._head_cluster.get_log_softmax_prefix(head_log_softmax, head_size)
]
for i, cluster in enumerate(self._tail_clusters):
tail_log_softmax = cluster.compute_log_softmax(hidden, context)
cluster_softmax = self._head_cluster.get_log_softmax_value(
head_log_softmax, head_size + i)
logits.append(cluster_softmax + tail_log_softmax)
return mtf.concat(logits, concat_dim_name=self._vocab_dim.name)
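  # The concat above realizes the adaptive-softmax factorization: for a token
  # t in tail cluster i with start id s_i,
  #   log p(t | hidden) = head_log_softmax[head_size + i]
  #                       + tail_log_softmax[t - s_i],
  # i.e. the log-probability of routing to cluster i plus the within-cluster
  # log-probability of t.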
def compute_loss(self, decoder: transformer.Unitransformer,
hidden: mtf.Tensor, targets: mtf.Tensor,
context: transformer.Context) -> mtf.Tensor:
"""Returns the loss without computing a softmax over the entire vocab."""
loss = 0
tail_cluster_masks = []
for cluster in self._tail_clusters:
cluster_mask = cluster.get_cluster_mask(targets)
tail_cluster_masks.append(cluster_mask)
if cluster.length_projection_factor == 1:
targets_in_cluster = mtf.where(cluster_mask, targets, 0)
hidden_in_cluster = mtf.where(cluster_mask, hidden, 0)
else:
# TODO(mmatena): Unfold the batch dim to get a super long sequence dim
# to reduce the risk of overflowing the projection.
proj_to_cluster_len = cluster.get_project_to_cluster_length(
cluster_mask, dtype=targets.dtype)
targets_in_cluster = mtf.einsum(
[proj_to_cluster_len, targets],
reduced_dims=[targets.shape.get_dim_by_name("length")])
hidden_in_cluster = mtf.einsum(
[mtf.cast(proj_to_cluster_len, hidden.dtype), hidden],
reduced_dims=[hidden.shape.get_dim_by_name("length")])
loss += cluster.compute_loss(decoder, hidden_in_cluster,
targets_in_cluster, context)
tail_clusters_dim = mtf.Dimension("tail_clusters", len(tail_cluster_masks))
tail_node_targets = mtf.reduce_sum(
mtf.stack([(self._head_cluster.end_token_id + i) *
mtf.cast(mask, targets.dtype)
for i, mask in enumerate(tail_cluster_masks)],
tail_clusters_dim.name),
reduced_dim=tail_clusters_dim)
head_targets = mtf.where(
mtf.cast(tail_node_targets, tf.bool), tail_node_targets, targets)
loss += self._head_cluster.compute_loss(decoder, hidden, head_targets,
context)
return loss
@gin.configurable
def adaptive_softmax_loss_fn(decoder: transformer.Unitransformer,
context: transformer.Context, logits: mtf.Tensor,
targets: mtf.Tensor,
output_vocab_dim: mtf.Dimension) -> mtf.Tensor:
"""Custom loss to use when training with an adaptive softmax.
Embedding and softmax weights must be shared in order for this function to
  work. Note that label smoothing and loss_on_targets_only are not supported
  and will be silently ignored.
Args:
decoder: a transformer.Unitransformer
context: a transformer.Context
logits: an mtf.Tensor, note that this will actually be the hidden state of
the final decoder layer
targets: an mtf.Tensor
output_vocab_dim: an mtf.Dimension
Returns:
the loss
"""
del output_vocab_dim
hidden = logits
vocab_embedding = context.shared_params["embedding"]
return vocab_embedding.compute_loss(
decoder, hidden=hidden, targets=targets, context=context)
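# Usage sketch (illustrative; the cluster sizes and wiring below are
# assumptions, not taken from this file). The same embedding object serves as
# both the token embedding and the softmax, and the loss function finds it via
# context.shared_params["embedding"]:
#
#   embedding = AdaptiveSoftmaxVocabEmbedding(
#       mesh, vocab_dim, output_dim=model_dim,
#       variable_dtype=variable_dtype, name="embedding", ensemble_dim=None,
#       clusters=[{"token_count": 50000, "embedding_size": 1024},
#                 {"token_count": 450000, "embedding_size": 64}])
#
# During training, hidden_to_logits returns the hidden states unchanged and
# adaptive_softmax_loss_fn computes the loss directly from them; at eval and
# predict time, hidden_to_logits produces logits over the full vocabulary.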
| mesh-master | mesh_tensorflow/transformer/adaptive_softmax.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers used for Fixup initialization.
See: https://arxiv.org/abs/1901.09321 for the paper.
"""
import math
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import attention
from mesh_tensorflow.transformer import transformer
from mesh_tensorflow.transformer import transformer_layers
import tensorflow.compat.v1 as tf
def get_single_scalar_bias(x, name: str):
"""Simple helper method to return a scalar bias.
  This is used as the `shift` in Fixup initialization and should be applied
  before every projection or convolution.
Args:
x: A mtf variable, used to know which mesh and dtype to use.
name: The name of the bias.
Returns:
A (trainable) mtf Scalar.
"""
single_dimension = mtf.Dimension("single_bias", 1)
return mtf.get_variable(
x.mesh,
name,
mtf.Shape([single_dimension]),
initializer=tf.zeros_initializer(),
dtype=x.dtype)
def dense_product_fixup(x,
reduced_dims,
new_dims,
kernel_initializer,
activation_functions=None,
name="dense_product",
**kwargs):
"""Wrapper around dense_product that is explicit about kernel initialization.
Args:
x: a Tensor
reduced_dims: a list of Dimensions.
new_dims: a list of Dimensions.
kernel_initializer: The kernel initializer to use for the dense product. For
fixup, this is the initializer scaled according to the number of encoder
and decoder layers.
activation_functions: a list of activation functions (or a singleton)
      Each can be either: a callable function from Tensor to Tensor, a string
      function name from the mtf namespace, or None (or "linear"), meaning no
      activation function.
name: an optional string
**kwargs: additional kwargs for mtf.layers.dense()
Returns:
Component wise product of dense layers with fixup init.
"""
return mtf.layers.dense_product(
x,
reduced_dims,
new_dims,
activation_functions,
name,
kernel_initializer=kernel_initializer,
**kwargs)
class AttentionParamsFixup(attention.AttentionParams):
"""Create attention parameters with Fixup initialization.
See class docstring for DenseReluDenseFixup for details.
For SelfAttention layer, m = 4, i.e., 4 weight matrix multiplications. See
https://github.com/hongyi-zhang/Fixup/issues/8#issuecomment-505750941.
So the scaling factor for SelfAttention layer is num_blocks**(-1/6).
Attributes:
mesh: a Mesh
query_input_dim: a Dimension
memory_input_dim: a Dimension
output_dim: a Dimension
key_dim: a Dimension
value_dim: a Dimension
query_heads_dims: a list of Dimension
memory_heads_dims: a list of Dimension
variable_dtype: a mtf.VariableDType
shared_kv: a boolean
fold_scaling_into_initializer: a boolean
num_blocks: an integer specifying the number of TransformerLayer objects.
For a vanilla Transformer model with 12 encoder layers and 12 decoder
layers, the number of blocks is 2 * 12 + 3 * 12 = 60 where each encoder
layer has 2 blocks, SelfAttention and Feedforward block and decoder
additionally has the encoder-decoder attention block.
o_init_fixup: a tf.initializer for the self.wo.
init_fixup: a tf.initializer for the self.wq, self.wk, self.wv and self.wkv.
"""
def __init__(
self,
mesh,
query_input_dim,
memory_input_dim,
output_dim,
key_dim,
value_dim,
query_heads_dims,
memory_heads_dims,
variable_dtype,
shared_kv=False,
fold_scaling_into_initializer=False,
num_blocks=None,
default_init="he",
init_distribution="uniform",
**kwargs):
self.num_blocks = num_blocks
self.default_init = default_init
self.init_distribution = init_distribution
if mtf.layers.unit_scaling_convention():
raise ValueError(
"Fixup initialization is not compatible with unit scaling convention."
)
if fold_scaling_into_initializer:
raise ValueError("Fixup initialization is not compatible with "
"`fold_scaling_into_initializer.")
super(AttentionParamsFixup, self).__init__(
mesh,
query_input_dim,
memory_input_dim,
output_dim,
key_dim,
value_dim,
query_heads_dims,
memory_heads_dims,
variable_dtype,
shared_kv=shared_kv,
fold_scaling_into_initializer=fold_scaling_into_initializer,
**kwargs)
def init_weights(self):
o_init_fixup = tf.initializers.zeros()
    # Since tf.initializers.variance_scaling scales weights by sqrt(scale / n)
    # (i.e., scale appears inside a sqrt), we need to square the desired Fixup
    # factor. Hence the exponent is -1/3 instead of -1/6 as described in the
    # class docstring.
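    # Worked example (illustrative numbers): with num_blocks = 60 and m = 4,
    # the desired Fixup factor is 60**(-1/6) ~= 0.505. Multiplying the standard
    # scale argument by 60**(-1/3) ~= 0.255, as done below, scales the sampling
    # limit by sqrt(0.255) ~= 0.505, i.e. exactly 60**(-1/6).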
if self.default_init == "glorot":
init_fixup = tf.initializers.variance_scaling(
mode="fan_avg",
distribution=self.init_distribution,
scale=math.pow(self.num_blocks, -1. / 3))
elif self.default_init == "he":
init_fixup = tf.initializers.variance_scaling(
mode="fan_in",
distribution=self.init_distribution,
scale=2 * math.pow(self.num_blocks, -1. / 3))
else:
raise ValueError(
("Unsupported default initialization. Only 'glorot' and 'he'"
" initializations are supported."))
if not self.no_query:
self.wq = mtf.get_variable(
self.mesh,
"q_fixup",
self.q_shape,
initializer=init_fixup,
dtype=self.variable_dtype)
if self.shared_kv:
self.wkv = mtf.get_variable(
self.mesh,
"kv_fixup",
self.k_shape,
initializer=init_fixup,
dtype=self.variable_dtype)
else:
self.wk = mtf.get_variable(
self.mesh,
"k_fixup",
self.k_shape,
initializer=init_fixup,
dtype=self.variable_dtype)
self.wv = mtf.get_variable(
self.mesh,
"v_fixup",
self.v_shape,
initializer=init_fixup,
dtype=self.variable_dtype)
self.wo = mtf.get_variable(
self.mesh,
"o_fixup",
self.o_shape,
initializer=o_init_fixup,
dtype=self.variable_dtype)
@gin.configurable
class DenseReluDenseFixup(transformer.TransformerLayer):
"""Two dense layers with ReLU or other activation on hidden layer.
Implements weights initialization in https://arxiv.org/abs/1901.09321.
  With a uniform distribution, tf.initializers.variance_scaling samples from
  Uniform(-limit, limit) where limit = sqrt(3 * scale / fan).
  Using scale = 2 and fan = fan_in makes it the He initializer from
  https://arxiv.org/abs/1502.01852
  Using scale = 1 and fan = fan_avg makes it the Glorot initializer from
  http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
  Fixup initialization multiplies these standard initializers by an extra
  scaling factor. In general, this factor is num_blocks^(-1/(2m-2)), where
  num_blocks is the number of TransformerLayer objects and m is the number of
  matrix multiplications per block; num_blocks corresponds to `L` in
  https://arxiv.org/abs/1901.09321.
  For the DenseReluDense layer, m = 2 (up-projection and down-projection), so
  the extra factor is 1/sqrt(num_blocks). In order to use
tf.initializers.variance_scaling for Fixup initialization, we need to set the
scale and mode arguments properly.
For He initializer, we want to sample from Uniform(-limit_fixup, limit_fixup)
where limit_fixup = sqrt(3 * 2 / fan_in) * 1/sqrt(num_blocks) = sqrt(3 * (2 /
num_blocks) / fan_in). In other words, the scale = 2 / num_blocks with mode =
fan_in.
For Glorot initializer, we want to sample from Uniform(-limit_fixup,
limit_fixup) where limit_fixup = sqrt(3 / fan_avg) * 1/sqrt(num_blocks) =
sqrt(3 * (1 / num_blocks) / fan_avg). In other words, the scale = 1 /
num_blocks with mode = fan_avg.
Note that these settings apply equally for both truncated normal and uniform
distributions from which we sample the weights.
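  For example (illustrative numbers): with num_blocks = 60 and He
  initialization, scale = 2 / 60, so limit = sqrt(3 * (2 / 60) / fan_in) =
  sqrt(6 / fan_in) / sqrt(60), i.e. the standard He-uniform limit multiplied by
  the Fixup factor 1 / sqrt(num_blocks).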
Attributes:
hidden_size: an integer - size of the hidden layer
dropout_rate: a floating-point number
activation: an activation function or a list of activation functions. see
documentation for mtf.layers.dense_product()
use_bias: a boolean, whether to use bias in the dense layers.
num_blocks: an integer specifying the number of TransformerLayer objects.
For a vanilla Transformer model with 12 encoder layers and 12 decoder
layers, the number of blocks is 2 * 12 + 3 * 12 = 60 where each encoder
layer has 2 blocks, SelfAttention and Feedforward block and decoder
additionally has the encoder-decoder attention block.
downproject_initializer: a tf.initializer for d_model to d_ff projection.
upproject_initializer: a tf.initializer for d_ff to d_model.
"""
def __init__(
self,
hidden_size=4096,
dropout_rate=0.0,
activation="relu",
use_bias=False,
default_init="he",
init_distribution="uniform",
num_blocks=gin.REQUIRED):
self.hidden_size = hidden_size
self.dropout_rate = dropout_rate
self.activation = activation
self.use_bias = use_bias
self.downproject_initializer = tf.initializers.zeros()
if default_init == "glorot":
self.upproject_initializer = tf.initializers.variance_scaling(
mode="fan_avg",
distribution=init_distribution,
scale=1.0 / num_blocks)
elif default_init == "he":
self.upproject_initializer = tf.initializers.variance_scaling(
mode="fan_in", distribution=init_distribution, scale=2.0 / num_blocks)
else:
raise ValueError(
"Unsupported default initialization. Only 'glorot' and 'he'"
" initializations are supported.")
def call(self, context, x, losses=None):
"""Call the layer."""
io_channels = x.shape.dims[-1]
hidden_channels = mtf.Dimension("d_ff", self.hidden_size)
h = dense_product_fixup(
x,
reduced_dims=x.shape.dims[-1:],
new_dims=hidden_channels,
activation_functions=self.activation,
use_bias=self.use_bias,
variable_dtype=context.variable_dtype,
name="wi",
kernel_initializer=self.upproject_initializer,
expert_dims=context.model.ensemble_dims)
if context.train and self.dropout_rate != 0.0:
h = mtf.dropout(
h, 1.0 - self.dropout_rate, noise_shape=h.shape - context.length_dim)
shift = get_single_scalar_bias(x, "shift")
h_res = mtf.add(h, shift)
h = mtf.reshape(h_res, h.shape)
return mtf.layers.dense(
h,
io_channels,
use_bias=self.use_bias,
activation=None,
variable_dtype=context.variable_dtype,
reduced_dims=h.shape.dims[-1:],
name="wo",
expert_dims=context.model.ensemble_dims,
kernel_initializer=self.downproject_initializer)
@gin.configurable
class SelfAttentionFixup(transformer_layers.SelfAttention):
"""Multi-head self-attention layer with the Fixup initialization."""
def __init__(self,
num_blocks=gin.REQUIRED,
default_init="glorot",
init_distribution="uniform",
**kwargs):
# Any arg in `kwargs` should be defined in SelfAttention constructor.
super(SelfAttentionFixup, self).__init__(**kwargs)
self.num_blocks = num_blocks
self.default_init = default_init
self.init_distribution = init_distribution
def make_params(self, context):
if self.num_heads == 1:
query_heads_dims = None
memory_heads_dims = None
elif self.num_memory_heads == 0:
query_heads_dims = [mtf.Dimension("heads", self.num_heads)]
memory_heads_dims = query_heads_dims
elif self.num_memory_heads == 1:
query_heads_dims = [mtf.Dimension("heads", self.num_heads)]
memory_heads_dims = None
else:
if self.num_heads % self.num_memory_heads != 0:
raise ValueError("num_memory_heads must divide num_heads")
memory_heads_dims = [mtf.Dimension("heads", self.num_memory_heads)]
query_heads_dims = memory_heads_dims + [
mtf.Dimension("query_heads", self.num_heads // self.num_memory_heads)
]
return AttentionParamsFixup(
context.mesh,
query_input_dim=context.model.model_dim,
memory_input_dim=context.model.model_dim,
output_dim=context.model.model_dim,
key_dim=self.kv_dim,
value_dim=self.kv_dim,
query_heads_dims=query_heads_dims,
memory_heads_dims=memory_heads_dims,
variable_dtype=context.variable_dtype,
shared_kv=self.shared_kv,
ensemble_dim=context.model.ensemble_dim,
combine_dims=self.combine_dims,
keep_query_heads_dims=self.keep_query_heads_dims,
fold_scaling_into_initializer=self.fold_scaling_into_initializer,
num_blocks=self.num_blocks,
default_init=self.default_init,
init_distribution=self.init_distribution)
@gin.configurable
class EncDecAttentionFixup(transformer_layers.EncDecAttention):
"""Multi-head attention over encoder output with Fixup initialization."""
def __init__(self, relative_attention_type=None, **kwargs):
super(EncDecAttentionFixup, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
return transformer_layers.enc_dec_attention(
self, self._get_memory_antecedent(context), context, x, losses)
@gin.configurable
def sublayer_fixup_scale(x, layer_stack, context):
"""Multiply by single one-initialized scalar."""
del layer_stack
dim = mtf.Dimension("single_scale", 1)
fixup_weight = mtf.get_variable(
x.mesh, "fixup_scale_weight", shape=mtf.Shape([dim]),
dtype=context.variable_dtype,
initializer=tf.constant_initializer(1.))
return mtf.reshape(x * fixup_weight, x.shape)
@gin.configurable
def sublayer_fixup_shift(x, layer_stack, context):
"""Shift by single zero-initialized scalar."""
del layer_stack
dim = mtf.Dimension("single_bias", 1)
fixup_bias = mtf.get_variable(
x.mesh, "fixup_bias", shape=mtf.Shape([dim]),
dtype=context.variable_dtype,
initializer=tf.zeros_initializer())
res = mtf.add(x, fixup_bias)
res = mtf.reshape(res, x.shape)
return res
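# Usage note (an assumption about intended wiring, not stated in this file):
# sublayer_fixup_scale and sublayer_fixup_shift follow the standard sublayer
# signature (x, layer_stack, context), so they can be placed among a layer
# stack's per-layer sublayer functions, where Fixup uses a learned scalar scale
# and shift in place of layer norm around Fixup-initialized layers such as
# DenseReluDenseFixup and SelfAttentionFixup.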
| mesh-master | mesh_tensorflow/transformer/fixup_layers.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for mesh_tensorflow.transformer.adaptive_softmax."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import adaptive_softmax
import mock
import numpy as np
import scipy.special
import tensorflow.compat.v1 as tf
def initialize_by_shape(shape_to_value):
"""Create an initializer with values specified by tensor shape."""
def initialize(shape, dtype):
shape = tuple(shape)
if shape not in shape_to_value:
raise ValueError(
'Shape {} not found in shape to value map.'.format(shape))
return tf.reshape(
tf.constant(shape_to_value[tuple(shape)], dtype=dtype), shape)
return initialize
def _log_softmax(logits):
log_z = scipy.special.logsumexp(logits)
return logits - log_z
def _softmax_cross_entropy_with_logits(logits, target):
soft_target = np.zeros(len(logits))
soft_target[target] = 1
return -np.sum(_log_softmax(logits) * soft_target)
class AdaptiveSoftmaxTest(tf.test.TestCase):
def setUp(self):
super(AdaptiveSoftmaxTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def _export_to_tf_tensor(self, mtf_tensor):
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
return lowering, lowering.export_to_tf_tensor(mtf_tensor)
def test_adaptive_softmax_loss_fn_tailClustersAllProject_correctlyComputesTheLoss(
self):
# Arrange.
seq_len = 16
vocab_size = 8
model_size = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
decoder = mock.MagicMock()
decoder.z_loss = 0.0
decoder.loss_denominator = mock.MagicMock()
decoder.loss_denominator.return_value = 1.0
# 7 tokens in head cluster
# 5 tokens in tail cluster 1
# 4 tokens in tail cluster 2
targets_array = [2, 4, 4, 6, 2, 5, 7, 5, 2, 1, 6, 7, 0, 0, 3, 2]
targets = tf.constant(targets_array, dtype=tf.int32)
hidden = tf.constant(
[[0, -10], [1, -11], [2, -12], [3, -13], [4, -14], [5, -15], [6, -16],
[7, -17], [8, -18], [9, -19], [10, -20], [11, -21], [12, -22],
[13, -23], [14, -24], [15, -25]],
dtype=tf.float32)
mtf_targets = mtf.import_tf_tensor(
self.mesh, targets, shape=mtf.Shape([length_dim]))
mtf_hidden = mtf.import_tf_tensor(
self.mesh, hidden, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
(5, 2): [[1, 6], [2, 7], [3, 8], [4, 9], [5, 10]],
(3, 2): [[11, 14], [12, 15], [13, 16]],
(2, 1): [[17, 18]],
(1, 2): [[19], [20]],
})
vocab_embedding = adaptive_softmax.AdaptiveSoftmaxVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 3,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 2,
'length_projection_factor': 0.5,
}, {
'token_count': 2,
'embedding_size': 1,
'length_projection_factor': 0.125,
}])
context = mock.MagicMock()
context.activation_dtype = tf.float32
context.shared_params = {'embedding': vocab_embedding}
# Act.
mtf_loss = adaptive_softmax.adaptive_softmax_loss_fn(
decoder, context, mtf_hidden, mtf_targets, output_vocab_dim=None)
lowering, loss = self._export_to_tf_tensor(mtf_loss)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual_loss, = self.evaluate([loss])
# Assert.
def expected_head_loss(position, label):
factor = model_dim.size**-0.5
logits = [
factor * (1 * position - 6 * (10 + position)),
factor * (2 * position - 7 * (10 + position)),
factor * (3 * position - 8 * (10 + position)),
factor * (4 * position - 9 * (10 + position)),
factor * (5 * position - 10 * (10 + position)),
]
return _softmax_cross_entropy_with_logits(logits, label)
expected_head_labels = [2, 3, 3, 4, 2, 3, 4, 3, 2, 1, 4, 4, 0, 0, 3, 2]
expected_head_loss = sum(
expected_head_loss(position, expected_label)
for position, expected_label in enumerate(expected_head_labels)
if expected_label)
def expected_tail_cluster_1_loss(position):
factor = model_dim.size**-0.5
logits = [
factor * (11 * position - 14 * (10 + position)),
factor * (12 * position - 15 * (10 + position)),
factor * (13 * position - 16 * (10 + position)),
]
first_token_in_cluster_id = 3
return _softmax_cross_entropy_with_logits(
logits, targets_array[position] - first_token_in_cluster_id)
expected_tail_cluster_1_loss = sum([
expected_tail_cluster_1_loss(position=1),
expected_tail_cluster_1_loss(position=2),
expected_tail_cluster_1_loss(position=5),
expected_tail_cluster_1_loss(position=7),
expected_tail_cluster_1_loss(position=14),
])
def expected_tail_cluster_2_loss(position):
factor = model_dim.size**-0.5
logits = [
factor * (17 * 19 * position - 17 * 20 * (10 + position)),
factor * (18 * 19 * position - 18 * 20 * (10 + position)),
]
first_token_in_cluster_id = 6
return _softmax_cross_entropy_with_logits(
logits, targets_array[position] - first_token_in_cluster_id)
# Due to the length_projection_factor of 1/8, only 2 tokens will be counted
# despite there being 4 tokens in this cluster.
expected_tail_cluster_2_loss = sum([
expected_tail_cluster_2_loss(position=3),
expected_tail_cluster_2_loss(position=6),
])
expected_loss = (
expected_head_loss + expected_tail_cluster_1_loss +
expected_tail_cluster_2_loss)
self.assertAllClose(actual_loss, expected_loss)
def test_adaptive_softmax_loss_fn_tailClusterDoesNotProject_correctlyComputesTheLoss(
self):
# Arrange.
seq_len = 16
vocab_size = 8
model_size = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
decoder = mock.MagicMock()
decoder.z_loss = 0.0
decoder.loss_denominator = mock.MagicMock()
decoder.loss_denominator.return_value = 1.0
# 7 tokens in head cluster
# 5 tokens in tail cluster 1
# 4 tokens in tail cluster 2
targets_array = [2, 4, 4, 6, 2, 5, 7, 5, 2, 1, 6, 7, 0, 0, 3, 2]
targets = tf.constant(targets_array, dtype=tf.int32)
hidden = tf.constant(
[[0, -10], [1, -11], [2, -12], [3, -13], [4, -14], [5, -15], [6, -16],
[7, -17], [8, -18], [9, -19], [10, -20], [11, -21], [12, -22],
[13, -23], [14, -24], [15, -25]],
dtype=tf.float32)
mtf_targets = mtf.import_tf_tensor(
self.mesh, targets, shape=mtf.Shape([length_dim]))
mtf_hidden = mtf.import_tf_tensor(
self.mesh, hidden, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
(5, 2): [[1, 6], [2, 7], [3, 8], [4, 9], [5, 10]],
(3, 2): [[11, 14], [12, 15], [13, 16]],
(2, 1): [[17, 18]],
(1, 2): [[19], [20]],
})
vocab_embedding = adaptive_softmax.AdaptiveSoftmaxVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 3,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 2,
'length_projection_factor': 0.5,
}, {
'token_count': 2,
'embedding_size': 1,
'length_projection_factor': 1,
}])
context = mock.MagicMock()
context.activation_dtype = tf.float32
context.shared_params = {'embedding': vocab_embedding}
# Act.
mtf_loss = adaptive_softmax.adaptive_softmax_loss_fn(
decoder, context, mtf_hidden, mtf_targets, output_vocab_dim=None)
lowering, loss = self._export_to_tf_tensor(mtf_loss)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual_loss, = self.evaluate([loss])
# Assert.
def expected_head_loss(position, label):
factor = model_dim.size**-0.5
logits = [
factor * (1 * position - 6 * (10 + position)),
factor * (2 * position - 7 * (10 + position)),
factor * (3 * position - 8 * (10 + position)),
factor * (4 * position - 9 * (10 + position)),
factor * (5 * position - 10 * (10 + position)),
]
return _softmax_cross_entropy_with_logits(logits, label)
expected_head_labels = [2, 3, 3, 4, 2, 3, 4, 3, 2, 1, 4, 4, 0, 0, 3, 2]
expected_head_loss = sum(
expected_head_loss(position, expected_label)
for position, expected_label in enumerate(expected_head_labels)
if expected_label)
def expected_tail_cluster_1_loss(position):
factor = model_dim.size**-0.5
logits = [
factor * (11 * position - 14 * (10 + position)),
factor * (12 * position - 15 * (10 + position)),
factor * (13 * position - 16 * (10 + position)),
]
first_token_in_cluster_id = 3
return _softmax_cross_entropy_with_logits(
logits, targets_array[position] - first_token_in_cluster_id)
expected_tail_cluster_1_loss = sum([
expected_tail_cluster_1_loss(position=1),
expected_tail_cluster_1_loss(position=2),
expected_tail_cluster_1_loss(position=5),
expected_tail_cluster_1_loss(position=7),
expected_tail_cluster_1_loss(position=14),
])
def expected_tail_cluster_2_loss(position):
factor = model_dim.size**-0.5
logits = [
factor * (17 * 19 * position - 17 * 20 * (10 + position)),
factor * (18 * 19 * position - 18 * 20 * (10 + position)),
]
first_token_in_cluster_id = 6
return _softmax_cross_entropy_with_logits(
logits, targets_array[position] - first_token_in_cluster_id)
expected_tail_cluster_2_loss = sum([
expected_tail_cluster_2_loss(position=3),
expected_tail_cluster_2_loss(position=6),
expected_tail_cluster_2_loss(position=10),
expected_tail_cluster_2_loss(position=11),
])
expected_loss = (
expected_head_loss + expected_tail_cluster_1_loss +
expected_tail_cluster_2_loss)
self.assertAllClose(actual_loss, expected_loss)
def test_hidden_to_logits_returnsHiddenDuringTraining(self):
# Arrange.
seq_len = 2
vocab_size = 3
model_size = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
context = mock.MagicMock()
context.activation_dtype = tf.float32
context.mode = tf.estimator.ModeKeys.TRAIN
embeddings = tf.constant([[1, 0], [0, 2]], dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
(3, 2): [[1, 6], [2, 7], [3, 8]],
})
vocab_embedding = adaptive_softmax.AdaptiveSoftmaxVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 3,
'embedding_size': 2
}])
mtf_logits = vocab_embedding.hidden_to_logits(
mtf_embeddings, context=context)
self.assertEqual(mtf_logits, mtf_embeddings)
def test_hidden_to_logits_returnsCorrectLogitsDuringEval(self):
# Arrange.
seq_len = 2
vocab_size = 8
model_size = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
context = mock.MagicMock()
context.activation_dtype = tf.float32
context.mode = tf.estimator.ModeKeys.EVAL
embeddings = tf.constant([[1, 0], [0, 2]], dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
(5, 2): [[1, 6], [2, 7], [3, 8], [4, 9], [5, 10]],
(3, 2): [[11, 14], [12, 15], [13, 16]],
(2, 1): [[17, 18]],
(1, 2): [[19], [20]],
})
vocab_embedding = adaptive_softmax.AdaptiveSoftmaxVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 3,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 2,
'length_projection_factor': 0.5,
}, {
'token_count': 2,
'embedding_size': 1,
'length_projection_factor': 0.125,
}])
# Act.
mtf_logits = vocab_embedding.hidden_to_logits(
mtf_embeddings, context=context)
lowering, logits = self._export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual_logits, = self.evaluate([logits])
# Assert.
def scaled_log_softmax(a):
a = np.array(a, dtype=float) * model_dim.size**-0.5
return _log_softmax(a)
head_log_softmax1 = scaled_log_softmax([1, 2, 3, 4, 5])
head_log_softmax2 = scaled_log_softmax([2 * 6, 2 * 7, 2 * 8, 2 * 9, 2 * 10])
expected_logits = [
np.concatenate([
head_log_softmax1[:3],
head_log_softmax1[3] + scaled_log_softmax([11, 12, 13]),
head_log_softmax1[4] + scaled_log_softmax([17 * 19, 18 * 19]),
]),
np.concatenate([
head_log_softmax2[:3],
head_log_softmax2[3] + scaled_log_softmax([2 * 14, 2 * 15, 2 * 16]),
head_log_softmax2[4] +
scaled_log_softmax([2 * 17 * 20, 2 * 18 * 20]),
]),
]
self.assertAllClose(actual_logits, expected_logits, atol=5e-5)
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 6
vocab_size = 5
model_size = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
ids = tf.constant([0, 1, 2, 3, 4, 0], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
(3, 2): [[0, 1], [2, 0], [-1000, -4000]],
(3, 1): [[1], [2], [3]],
(1, 2): [[1], [2]],
})
vocab_embedding = adaptive_softmax.AdaptiveSoftmaxVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 2,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 1
}])
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual, = self.evaluate([actual_embedding])
self.assertAllClose(actual,
[[0, 1], [2, 0], [1, 2], [2, 4], [3, 6], [0, 1]])
def test_constructor_tokenCountsDontSumToVocabSize_raisesValueError(self):
vocab_dim = mtf.Dimension('vocab', 5)
model_dim = mtf.Dimension('model', 2)
with self.assertRaises(ValueError):
adaptive_softmax.AdaptiveSoftmaxVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 3,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 1
}])
def test_constructor_projectFactorNotWithinZeroAndOne_raisesValueError(self):
vocab_dim = mtf.Dimension('vocab', 3)
model_dim = mtf.Dimension('model', 2)
with self.assertRaises(ValueError):
adaptive_softmax.AdaptiveSoftmaxVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 3,
'embedding_size': 2,
'length_projection_factor': 1.1,
}])
if __name__ == '__main__':
tf.test.main()
| mesh-master | mesh_tensorflow/transformer/adaptive_softmax_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The implementation of a Funnel Transformer in Mesh TensorFlow."""
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import transformer
import tensorflow.compat.v1 as tf
@gin.configurable
class FunnelTransformerLayerStack(transformer.TransformerLayer):
"""A stack of layers for FunnelTransformer."""
def __init__(self,
layers,
n_blocks=gin.REQUIRED,
block_param_size=gin.REQUIRED,
block_repeat_size=gin.REQUIRED,
pooling_size=2,
sublayers_initial=None,
sublayers_per_layer=None,
sublayers_final=None,
pooling_type="mean",
n_submodules=2):
"""Create a LayerStack specialized for FunnelTransformer.
The design of this class follows the transformer.LayerStack. See the
docstring of that class for how the layer stack is built. Here we only
discuss the features unique to the Funnel Transformer.
This implementation has subtle difference from the Funnel Transformer
introduced in https://arxiv.org/abs/2006.03236.
1. Application to encoder-decoder model.
The original Funnel Transformer was proposed for the encoder-only
architectures such as BERT. In A.3 section of the paper, they discuss
potential extension of the core idea to other model architectures. For
encoder-decoder models, the authors suggest that the Funnel Transformer idea
can be used to modify the encoder such that "the key difference compared to
conventional models is the source side compression Funnel-Transformer
provides".
Therefore, we don't modify the decoder, i.e., we use the standard
transformer.LayerStack and this class is only applicable to the encoder.
2. Relative attention
    We use the simplified relative attention scalar from T5, whereas the
Funnel Transformer paper uses the relative attention from the Transformer-XL
(https://arxiv.org/abs/1901.02860).
3. The order of pooling operation
In the Funnel Transformer paper, only the query is pooled while key and
value are kept intact. The resulting attention output has the same length as
the query, enabling the residual connection.
In our implementation, we apply the regular SelfAttention and then apply the
pooling to the output. Since each sequence position in query is
independently computed, we expect the difference between these
    implementations to be negligible.
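    For example (an illustrative configuration): with n_blocks=3,
    block_param_size=[2, 2, 2], block_repeat_size=[1, 1, 1] and n_submodules=2,
    `layers` must contain sum(block_param_size) * n_submodules = 12
    TransformerLayer objects, and the sequence length is pooled by pooling_size
    at the start of blocks 1 and 2.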
Args:
layers: a list of TransformerLayer
n_blocks: an integer specifying the number of Funnel Transformer blocks.
block_param_size: a list of integers specifying the number of layers in
each block.
block_repeat_size: a list of integers specifying the number of repeated
layers in each block. The repeated layers share the parameters.
pooling_size: an integer specifying the pool size
sublayers_initial: an optional list of sublayer functions
sublayers_per_layer: an optional list of sublayer functions
sublayers_final: an optional list of sublayer functions
pooling_type: a string specifying the pooling type. One of "mean", "max",
or "min".
      n_submodules: an integer specifying the number of submodules (e.g.,
        SelfAttention and DenseReluDense) for each layer of a block.
"""
if len(block_param_size) != n_blocks:
raise ValueError(
"Number of blocks should match the length of block_param_size.")
if len(block_repeat_size) != n_blocks:
raise ValueError(
"Number of blocks should match the length of block_repeat_size.")
if len(layers) != sum(block_param_size) * n_submodules:
raise ValueError(
"Total number of submodules should match the number of layers.")
self._layers = layers
self.n_blocks = n_blocks
self.block_param_size = block_param_size
self.block_repeat_size = block_repeat_size
self.pooling_size = pooling_size
self._sublayers_initial = sublayers_initial
self._sublayers_per_layer = sublayers_per_layer
self._sublayers_final = sublayers_final
if pooling_type == "mean":
self.pool_fn = mtf.reduce_mean
elif pooling_type == "max":
self.pool_fn = mtf.reduce_max
elif pooling_type == "min":
self.pool_fn = mtf.reduce_min
else:
raise ValueError(
"Unknown pooling type. Choose among 'mean', 'max' or 'min'")
self.n_submodules = n_submodules
def update_context(self, context, x, pool_dim_name):
"""Update the length dimension, sequence_id and position information."""
pooled_seq_length = x.shape.get_dim_by_name(pool_dim_name).size
# For position, we slice the first `pooled_seq_length` indices instead of
# striding. This ensures that the 3rd position before the pooling becomes
# 2nd position after pooling instead of remembering its position before
# pooling.
new_context_position = mtf.slice(
context.position,
begin=0,
size=pooled_seq_length,
slice_dim_name=pool_dim_name)
context.position = new_context_position
pooled_seq_length = x.shape.get_dim_by_name(pool_dim_name).size
new_length_dim = mtf.Dimension(
name=pool_dim_name, size=pooled_seq_length)
new_sequence_id = mtf.stride_tensor_1d(
context.sequence_id,
pool_dim=context.length_dim,
pool_size=self.pooling_size)
context.length_dim = new_length_dim
context.sequence_id = new_sequence_id
def call(self, context, x):
"""Call the layer stack."""
x = self._call_sublayers(self._sublayers_initial, x, context)
context.layer_outputs.append(x)
assert context.layer_index == 0
for block_idx in range(self.n_blocks):
for param_idx in range(self.block_param_size[block_idx]):
# Number of layers to (locally) share parameters.
cur_repeat_size = self.block_repeat_size[block_idx]
for repeat_idx in range(cur_repeat_size):
# context.do_pooling = block_idx > 0 and sub_idx == 0
# Submodules are transformer.TransformerLayer objects such as
# SelfAttention and DenseReluDense.
for submodule_idx in range(self.n_submodules):
layer = self._layers[context.layer_index]
name = (f"funnel_block_{block_idx:03d}/"
f"param_idx_{param_idx:03d}/"
f"submodule_{submodule_idx:03d}")
# Override the layer name given in transformer.make_layer_stack.
layer.set_name(name)
with tf.variable_scope(layer.name or ""):
x = self._layer_fn(x, layer, context)
# Do pooling if the current layer
# 1) does not belong to the first block
# 2) is the first layer within the current block
# 3) is the first submodule (typically SelfAttention).
sub_idx = (param_idx * cur_repeat_size + repeat_idx)
if block_idx > 0 and sub_idx == 0 and submodule_idx == 0:
x = mtf.pool_tensor_1d(
x,
pool_dim=context.length_dim,
reduce_fn=self.pool_fn,
pool_size=self.pooling_size)
self.update_context(context, x, pool_dim_name="length")
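            # Illustrative shape evolution (assuming pooling_size=2 and
            # n_blocks=3): an encoder length of 512 becomes 256 entering block
            # 1 and 128 entering block 2; update_context keeps
            # context.length_dim, context.position and context.sequence_id
            # consistent with the pooled activations.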
if context.layer_index != len(self._layers) - 1:
context.layer_outputs.append(x)
context.layer_index += 1
x = self._call_sublayers(self._sublayers_final, x, context)
x = transformer.sublayer_mask_padding(x, self, context)
context.layer_outputs.append(x)
self.set_context(context)
return x
def _call_sublayers(self, sublayers, x, context):
for s in sublayers:
x = s(x, self, context)
return x
def _layer_fn(self, x, layer, context):
"""Call the layer and its associated sublayers.
Args:
x: a Tensor
layer: a Layer
context: a Context
Returns:
a Tensor
"""
context.current_layer = layer
context.current_layer_input = x
y = self._call_sublayers(self._sublayers_per_layer, x, context)
# When pooling is done, context.current_layer_input will be updated inside
# SelfAttentionPoolQ.call method, i.e., x != context.current_layer_input. So
# we use context.current_layer_input to check the shape consistency.
if y.shape != context.current_layer_input.shape:
raise ValueError(
"Layer %s returned misshaped output x=%s y=%s"
% (layer.__class__.__name__, x, y))
return y
@property
def layers(self):
return self._layers
def set_context(self, context):
self._context = context
@property
def context(self):
return self._context
@gin.configurable
class BitransformerFunnel(transformer.Bitransformer):
"""Bitransformer with the compressed sequence length in the encoder.
See base class for details.
This class updates the encoder's information passed to the decoder in order to
account for the reduced sequence length. This update is done in `call_simple`
and `decode` methods.
"""
def call_simple(self,
inputs,
targets,
compute_loss,
mode=tf.estimator.ModeKeys.TRAIN,
variable_dtype=mtf.VariableDType(tf.float32),
encoder_sequence_id=None,
decoder_sequence_id=None,
decoder_subsequence_id=None,
encoder_position=None,
decoder_position=None,
num_microbatches=1):
"""Compute logits based on inputs (all positions in parallel).
This is called during training and evaluation.
    This class inherits from transformer.Bitransformer with one difference: the
    encoder is a Funnel Transformer, so the encoder length dimension is
    reduced. The decoder needs to use the updated `encoder_sequence_id`.
Args:
inputs: an int32 Tensor with shape [<batch_dims>, length_dim]
targets: an optional int32 Tensor with shape [<batch_dims>, length_dim]
compute_loss: a boolean
mode: a tf.estimator.ModeKeys
variable_dtype: a mtf.VariableDType
encoder_sequence_id: an optional Tensor
decoder_sequence_id: an optional Tensor
decoder_subsequence_id: an optional Tensor
encoder_position: an optional Tensor
decoder_position: an optional Tensor
num_microbatches: integer - greater than one if the step has been
serialized into multiple microbatches to save memory.
Returns:
logits: a Tensor with shape [<batch_dims>, output_vocab_dim]
loss: an optional Scalar (if compute_loss=True)
"""
    # encoder_sequence_id and decoder_sequence_id are used to delineate packed
# examples but are also necessary to indicate padding where sequence_id==0.
# If they are absent, then we assume that padding is indicated by zeros in
# the inputs/targets, and we make up sequence_id tensors to indicate this.
if encoder_sequence_id is None:
encoder_sequence_id = mtf.minimum(inputs, 1)
if decoder_sequence_id is None:
decoder_sequence_id = mtf.minimum(targets, 1)
encoder_layer_outputs = []
shared_params = self._shared_params(inputs.mesh, variable_dtype)
encoder_output, encoder_loss = self.encoder.call_simple(
inputs,
None,
compute_loss,
mode=mode,
variable_dtype=variable_dtype,
sequence_id=encoder_sequence_id,
position=encoder_position,
shared_params=shared_params,
layer_outputs=encoder_layer_outputs,
num_microbatches=num_microbatches)
encoder_output = mtf.layers.rename_length_to_memory_length(encoder_output)
# The sequence_id is updated inside the layer_stack due to pooling. So we
# need to use the updated sequence_id stored in the context.
encoder_sequence_id = self.encoder.layer_stack.context.sequence_id
encoder_sequence_id = mtf.layers.rename_length_to_memory_length(
encoder_sequence_id)
logits, loss = self.decoder.call_simple(
transformer.autoregressive_inputs(
targets, sequence_id=decoder_sequence_id),
targets,
compute_loss,
mode=mode,
variable_dtype=variable_dtype,
sequence_id=decoder_sequence_id,
subsequence_id=decoder_subsequence_id,
encoder_output=encoder_output,
encoder_sequence_id=encoder_sequence_id,
encoder_inputs=mtf.layers.rename_length_to_memory_length(inputs),
position=decoder_position,
shared_params=shared_params,
encoder_layer_outputs=encoder_layer_outputs,
num_microbatches=num_microbatches)
if loss is not None and encoder_loss is not None:
loss += encoder_loss
return logits, loss
@gin.configurable(module="BitransformerFunnel")
def decode(self,
inputs,
variable_dtype=mtf.VariableDType(tf.float32),
beam_size=1,
alpha=0.6,
temperature=0.0,
sampling_keep_top_k=-1,
decode_length_multiplier=1.5,
decode_length_constant=10,
max_decode_length=None):
"""Sampling or beam search for Funnel Transformer.
Args:
inputs: a Tensor with shape [<batch_dims>, beam_dim, length_dim]
variable_dtype: a mtf.VariableDType
beam_size: an integer >= 1
alpha: a floating point value (length bonus for beam search)
temperature: a value between 0 and 1 (must be 0 if beam_size > 1)
0.0 means argmax, 1.0 means sample according to predicted distribution.
sampling_keep_top_k: a value between 1 and vocab_size used to sample from
only the k most likely logits. Set to -1 to sample from all logits.
decode_length_multiplier: a float
decode_length_constant: a float
max_decode_length: an optional integer
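    For example (illustrative numbers): with the defaults
    decode_length_multiplier=1.5 and decode_length_constant=10, a batch whose
    longest input contains 100 non-padding tokens is beam-searched with
    decode_length = 1.5 * 100 + 10 = 160.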
Returns:
a Tensor with shape [<batch_dims>, beam_dim, length_dim]
"""
encoder_layer_outputs = []
shared_params = self._shared_params(inputs.mesh, variable_dtype)
encoder_sequence_id = mtf.minimum(inputs, 1)
encoder_output, encoder_loss = self.encoder.call_simple(
inputs=inputs,
targets=None,
compute_loss=False,
mode=tf.estimator.ModeKeys.PREDICT,
variable_dtype=variable_dtype,
sequence_id=encoder_sequence_id,
shared_params=shared_params,
layer_outputs=encoder_layer_outputs)
del encoder_loss
encoder_output = mtf.layers.rename_length_to_memory_length(encoder_output)
# The sequence_id is updated inside the layer_stack due to pooling. So we
# need to use the updated sequence_id stored in the context.
encoder_sequence_id = self.encoder.layer_stack.context.sequence_id
encoder_sequence_id = mtf.layers.rename_length_to_memory_length(
encoder_sequence_id)
batch_dims = inputs.shape[:-1]
length_dim = inputs.shape[-1]
if max_decode_length is None:
decode_length_dim = length_dim
else:
decode_length_dim = mtf.Dimension("length", max_decode_length)
if beam_size == 1:
ids_shape = mtf.Shape(batch_dims + [decode_length_dim])
partial_sequences = mtf.zeros(inputs.mesh, ids_shape, dtype=tf.int32)
return self.decoder.sample_autoregressive(
partial_sequences,
temperature=temperature,
sampling_keep_top_k=sampling_keep_top_k,
variable_dtype=variable_dtype,
encoder_output=encoder_output,
encoder_sequence_id=encoder_sequence_id,
encoder_inputs=mtf.layers.rename_length_to_memory_length(inputs),
shared_params=shared_params,
has_partial_sequences=False,
encoder_layer_outputs=encoder_layer_outputs)
else:
if temperature != 0:
raise ValueError(
"don't know how to beam search with nonzero temperature")
if sampling_keep_top_k != -1:
raise ValueError(
"don't know how to beam search with top-k value other than -1.")
# beam search
beam_dim = mtf.Dimension("beam", beam_size)
ids_shape = mtf.Shape(batch_dims + [beam_dim, decode_length_dim])
partial_sequences = mtf.zeros(inputs.mesh, ids_shape, dtype=tf.int32)
input_length = mtf.reduce_sum(
mtf.to_float(mtf.cast(inputs, tf.bool)),
reduced_dim=length_dim)
max_input_length = mtf.reduce_max(input_length)
decode_length = mtf.cast(
max_input_length * decode_length_multiplier
+ decode_length_constant, tf.int32)
return self.decoder.beam_search(
partial_sequences,
decode_length,
variable_dtype=variable_dtype,
encoder_output=encoder_output,
encoder_sequence_id=encoder_sequence_id,
encoder_inputs=inputs,
alpha=alpha,
shared_params=shared_params,
encoder_layer_outputs=encoder_layer_outputs)
| mesh-master | mesh_tensorflow/transformer/funnel_transformer.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mesh Tensorflow Transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow.transformer.attention
import mesh_tensorflow.transformer.dataset
import mesh_tensorflow.transformer.learning_rate_schedules
import mesh_tensorflow.transformer.moe
import mesh_tensorflow.transformer.t2t_vocabulary
import mesh_tensorflow.transformer.transformer
import mesh_tensorflow.transformer.transformer_layers
import mesh_tensorflow.transformer.utils
import mesh_tensorflow.transformer.vocabulary
| mesh-master | mesh_tensorflow/transformer/__init__.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extension to implement universal transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import transformer
import tensorflow.compat.v1 as tf
@gin.configurable
class UTLayerStack(transformer.TransformerLayer):
"""A stack of layers for Universal Transformer.
This implementation is largely adapted from t2t universal transformer
implementation. Reference:
third_party/py/tensor2tensor/models/research
"""
def __init__(
self,
layers,
dropout_rate=0.0,
norm_epsilon=1e-6,
num_vanilla_transformer_layers=2,
couple_carry_transform_gates=True,
act_type=gin.REQUIRED,
recurrence_type=gin.REQUIRED,
act_max_steps=gin.REQUIRED,
act_epsilon=gin.REQUIRED,
num_rec_steps=gin.REQUIRED,
num_inrecurrence_layers=gin.REQUIRED,
position_start_index=gin.REQUIRED,
add_or_concat_timing_signal=gin.REQUIRED,
step_timing_signal_type=gin.REQUIRED,
add_position_timing_signal=gin.REQUIRED,
add_step_timing_signal=gin.REQUIRED,
mix_with_transformer_before_ut=gin.REQUIRED,
mix_with_transformer_after_ut=gin.REQUIRED,
gates_inputs=gin.REQUIRED,
gate_ffn_layer=gin.REQUIRED,
use_gated_transformer=gin.REQUIRED,
gating_type=gin.REQUIRED,
):
"""Create a LayerStack for Universal Transformer.
Args:
layers: a list of TransformerLayer
dropout_rate: a floating-point number
norm_epsilon: a floating-point number
num_vanilla_transformer_layers: number of vanilla transformer layers
before the ACT layer.
couple_carry_transform_gates: whether to couple carry and transform gates.
act_type: act type
recurrence_type: recurrence type (allowable values: "act").
act_max_steps: maximum number of act steps
act_epsilon: halting threshold
num_rec_steps: maximum number of recurrent steps
num_inrecurrence_layers: number of inrecurrence layers
position_start_index: start index in embedding
add_or_concat_timing_signal: bool,
whether to add or concat the timing signal
step_timing_signal_type: step timing signal type
add_position_timing_signal: bool, whether to add position timing signal
add_step_timing_signal: bool, whether to add step timing signal
mix_with_transformer_before_ut: whether to mix transformer layers before
ut.
mix_with_transformer_after_ut: whether to mix transformer layers after ut.
      gates_inputs: controlling the carry/transform gate.
gate_ffn_layer: gate ff layer type
use_gated_transformer: whether to use gated transformer.
gating_type: gating type.
"""
self._layers = layers
self._dropout_rate = dropout_rate
self._norm_epsilon = norm_epsilon
self.num_vanilla_transformer_layers = num_vanilla_transformer_layers
self.act_type = act_type
self.recurrence_type = recurrence_type
self.act_max_steps = act_max_steps
self.act_epsilon = act_epsilon
self.num_rec_steps = num_rec_steps
self.num_inrecurrence_layers = num_inrecurrence_layers
self.position_start_index = position_start_index
self.add_or_concat_timing_signal = add_or_concat_timing_signal
self.step_timing_signal_type = step_timing_signal_type
self.add_position_timing_signal = add_position_timing_signal
self.add_step_timing_signal = add_step_timing_signal
self.mix_with_transformer_before_ut = mix_with_transformer_before_ut
self.mix_with_transformer_after_ut = mix_with_transformer_after_ut
self.gates_inputs = gates_inputs
self.gate_ffn_layer = gate_ffn_layer
self.couple_carry_transform_gates = couple_carry_transform_gates
self.use_gated_transformer = use_gated_transformer
self.gating_type = gating_type
def get_timing_signal_1d(self,
context,
length,
channels,
min_timescale=1.0,
max_timescale=1.0e4,
start_index=0):
"""Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can
be expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
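    For example (an illustrative case): with channels = 4, min_timescale = 1.0
    and max_timescale = 1.0e4, the two inverse timescales are [1, 1e-4], so
    position p produces the signal [sin(p), sin(1e-4 * p), cos(p),
    cos(1e-4 * p)].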
Args:
context: mtf context.
length: a mtf.Dimension, length of timing signal sequence.
channels: a mtf.Dimension, size of timing embeddings to create.
The number of different timescales is equal to channels / 2.
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor of timing signals [1, length, channels]
"""
position = context.get_position() + start_index
num_timescales = mtf.constant(context.mesh, channels.size // 2)
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
mtf.maximum(num_timescales - 1, 1))
channel_dim_name = channels.name
inv_timescales = (
min_timescale * mtf.exp(
mtf.mtf_range(context.mesh,
mtf.Dimension(channel_dim_name, channels.size // 2),
context.activation_dtype) * -log_timescale_increment))
scaled_time = position * inv_timescales
# Please note that this slightly differs from the published paper.
# See a discussion here:
# https://github.com/tensorflow/tensor2tensor/pull/177
# concat_dim_name = scaled_time.shape.dimension_names[1]
concat_dim_name = channels.name
signal = mtf.concat(
[mtf.sin(scaled_time), mtf.cos(scaled_time)],
concat_dim_name=concat_dim_name)
if channels.size % 2 != 0:
raise NotImplementedError("Odd channel size not implemented.")
new_dims = [mtf.Dimension("expanded", 1)
] + length.shape.dims + channels.shape.dim
signal = mtf.reshape(signal, mtf.Shape(new_dims))
return signal
def add_position_timing_signal_func(self, context, x, step):
"""Add n-dimensional embedding as the position (horizontal) timing signal.
Args:
context: mtf context
x: a tensor with shape [batch, length, depth]
step: step
Returns:
a Tensor with the same shape as x.
"""
if not self.position_start_index:
index = 0
elif self.position_start_index == "random":
# Shift all positions randomly
# TODO(dehghani): What would be reasonable for max number of shift?
index = mtf.random_uniform(
context.mesh, [], maxval=x.shape.dims[1].size, dtype=tf.int32)
elif self.position_start_index == "step":
# Shift positions based on the step
if self.recurrence_type == "act":
num_steps = self.act_max_steps
else:
num_steps = self.num_rec_steps
index = mtf.cast(x.shape.dims[1].size * step / num_steps, dtype=tf.int32)
length = context.length_dim
channels = context.model.model_dim
signal = self.get_timing_signal_1d(
context, length, channels, start_index=index)
if self.add_or_concat_timing_signal == "add":
x_with_timing = x + mtf.cast(signal, x.dtype)
# Unimplemented
if self.add_or_concat_timing_signal == "concat":
batch_dim = x.shape.dims[0]
out_shape = mtf.Shape([batch_dim] + signal.shape.dims[1:])
signal_tiled = mtf.broadcast(signal, out_shape)
x_with_timing = mtf.concat(
(x, signal_tiled), concat_dim_name=signal_tiled.dimension_names[-1])
return x_with_timing
def get_layer_timing_signal_learned_1d(self, context, channels, layer,
num_layers):
"""get n-dimensional embedding as the layer (vertical) timing signal.
Adds embeddings to represent the position of the layer in the tower.
Args:
context: mtf context
channels: dimension of the timing signal
layer: layer num
num_layers: total number of layers
Returns:
a Tensor of timing signals [channels].
"""
layer_dim = mtf.Dimension("layer", num_layers)
shape = mtf.Shape([layer_dim, channels])
layer_embedding = (
mtf.get_variable(
context.mesh,
"layer_embedding",
shape,
dtype=context.variable_dtype,
initializer=tf.random_normal_initializer(0, channels.size**-0.5)) *
(channels.size**0.5))
return mtf.gather(layer_embedding, layer, layer_dim)
def add_step_timing_signal_func(self, context, x, step):
"""Add n-dimensional embedding as the step (vertical) timing signal.
Args:
context: mtf context
x: a tensor with shape [batch, length, depth]
step: step
Returns:
a Tensor with the same shape as x.
"""
if self.recurrence_type == "act":
num_steps = self.act_max_steps
else:
num_steps = self.num_rec_steps
channels = x.shape.dims[-1]
if self.step_timing_signal_type == "learned":
signal = self.get_layer_timing_signal_learned_1d(context, channels, step,
num_steps)
elif self.step_timing_signal_type == "sinusoid":
signal = self.get_layer_timing_signal_sinusoid_1d(context, channels, step,
num_steps)
if self.add_or_concat_timing_signal == "add":
x_with_timing = x + mtf.cast(signal, x.dtype)
elif self.add_or_concat_timing_signal == "concat":
batch_dim = x.shape.dims[0]
out_shape = mtf.Shape([batch_dim] + x.shape.dims[1:])
signal_tiled = mtf.broadcast(signal, out_shape)
x_with_timing = mtf.concat(
(x, signal_tiled), concat_dim_name=signal_tiled.dimension_names[-1])
return x_with_timing
def step_preprocess(self, context, x, step):
"""Preprocess the input at the beginning of each step.
Args:
context: mtf context
x: input tensor
step: step
Returns:
preprocessed input.
"""
original_channel_size = x.shape.dims[-1]
if self.add_step_timing_signal:
x = self.add_step_timing_signal_func(context, x, step)
    if ((self.add_position_timing_signal or self.add_step_timing_signal) and
self.add_or_concat_timing_signal == "concat"):
# linear projection to the original dimension of x
new_dims = x.shape.dims[:-1] + [original_channel_size]
x = mtf.layers.dense(
x, variable_dtype=context.variable_dtype,
new_dims=new_dims, activation=None, use_bias=False)
# TODO(yanqiz): implement sru in a separate CL
return x
def vanilla_transformer_layer(self, context, x, mask):
"""Build a vanilla transformer layer."""
for lnum, layer in enumerate(self._layers):
scope_name = layer.name
with tf.variable_scope(scope_name or ""):
norm_x = self._layer_norm(context, (x * mask) if mask else x)
with tf.variable_scope(layer.__class__.__name__):
y = layer.call(context, norm_x)
if y.shape != x.shape:
raise ValueError("Layer %s returned misshaped output x=%s y=%s" %
(layer.__class__.__name__, x, y))
if self.use_gated_transformer:
y = self.gating(context, x, y, mask)
x += self._dropout(context, y)
if lnum != len(self._layers) - 1:
context.layer_outputs.append(x)
context.layer_index += 1
return x
def gating(self, context, x, transformed_x, mask):
"""Implementation of various gating layers."""
gate_ffn_layer = self.gate_ffn_layer
if self.gating_type == "highway":
gate_inputs = [x]
transform_gate = self.ffn_layer_multi_inputs(
context,
mask,
gate_inputs,
ffn_layer_type=gate_ffn_layer,
activation=mtf.sigmoid,
preprocess=True)
carry_gate = self.ffn_layer_multi_inputs(
context,
mask,
gate_inputs,
ffn_layer_type=gate_ffn_layer,
activation=mtf.sigmoid,
preprocess=True)
new_state = x * carry_gate + transformed_x * transform_gate
return new_state
elif self.gating_type == "gru":
gate_inputs = [x, transformed_x]
transition_function_update_gate = self.ffn_layer_multi_inputs(
context,
mask,
gate_inputs,
ffn_layer_type=gate_ffn_layer,
activation=mtf.sigmoid,
preprocess=True)
transition_function_reset_gate = self.ffn_layer_multi_inputs(
context,
mask,
gate_inputs,
ffn_layer_type=gate_ffn_layer,
activation=mtf.sigmoid,
preprocess=True)
reset_state = transition_function_reset_gate * x
gate_inputs = [reset_state, transformed_x]
transition_function_candidate = self.ffn_layer_multi_inputs(
context,
mask,
gate_inputs,
ffn_layer_type=gate_ffn_layer,
activation=mtf.sigmoid,
preprocess=True)
transition_function_output = (
(1 - transition_function_update_gate) * transformed_x +
transition_function_update_gate * transition_function_candidate)
return transition_function_output
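  # Illustrative sketch (not part of the original file): ignoring the mtf
  # plumbing, the two gating variants above compute, for input x and its
  # transformed version t:
  #
  #   highway:  g = sigmoid(ffn(x));  c = sigmoid(ffn(x))
  #             y = x * c + t * g
  #   gru:      u = sigmoid(ffn([x, t]));  r = sigmoid(ffn([x, t]))
  #             candidate = ffn([r * x, t])   (also sigmoid-activated here,
  #                                            where a textbook GRU uses tanh)
  #             y = (1 - u) * t + u * candidate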
def ut_basic(self, context, x, mask):
def ut_function(x, step):
new_state = self.step_preprocess(context, x, step)
for _ in range(self.num_inrecurrence_layers):
new_state = self.vanilla_transformer_layer(context, new_state, mask)
return new_state
for i in range(self.num_rec_steps):
x = ut_function(x, i)
return x
def act_layer(self, context, x, mask):
"""Build a Universal Transformer ACT layer."""
state = x
act_max_steps = self.act_max_steps
threshold = 1.0 - self.act_epsilon
state_shape_static = state.shape.dims
state_slice = slice(0, 3)
if self.act_type == "global":
state_slice = slice(0, 2)
# Dynamic shape for update tensors below
update_shape = state_shape_static[state_slice]
# Halting probabilities (p_t^n in the paper)
halting_probability = mtf.zeros(
context.mesh, update_shape, dtype=context.activation_dtype)
# Remainders (R(t) in the paper)
remainders = mtf.zeros(
context.mesh, update_shape, dtype=context.activation_dtype)
# Number of updates performed (N(t) in the paper)
n_updates = mtf.zeros(
context.mesh, update_shape, dtype=context.activation_dtype)
# Previous cell states (s_t in the paper)
previous_state = mtf.zeros_like(state)
step = mtf.constant(context.mesh, 0, dtype=tf.int32)
def ut_function(state, step, halting_probability, remainders, n_updates,
previous_state):
"""implements act (position-wise halting).
Args:
state: 3-D Tensor: [batch_size, length, channel]
step: indicates number of steps taken so far
halting_probability: halting probability
remainders: act remainders
n_updates: act n_updates
previous_state: previous state
Returns:
transformed_state: transformed state
step: step+1
halting_probability: halting probability
remainders: act remainders
n_updates: act n_updates
new_state: new state
"""
state = self.step_preprocess(context, state, step)
if self.act_type == "random":
# random as halting probability
p = mtf.random_uniform(
context.mesh,
shape=halting_probability.shape.dims,
dtype=context.variable_dtype)
else:
last_dim_name = state.shape.dimension_names[-1]
new_dims = [mtf.Dimension(last_dim_name, 1)]
with tf.variable_scope(
"sigmoid_activation_for_pondering", reuse=tf.AUTO_REUSE):
p = mtf.layers.dense(
state,
variable_dtype=context.variable_dtype,
reduced_dims=[state.shape.dims[-1]],
new_dims=new_dims,
activation=mtf.sigmoid,
use_bias=True)
if self.act_type == "global":
# average over all positions (as a global halting prob)
p = mtf.reduce_mean(p, reduced_dim=p.shape.dims[1])
p = mtf.squeeze(p)
else:
# maintain position-wise probabilities
new_shape = p.shape.dims[:-1]
p = mtf.reshape(p, new_shape)
# Mask for inputs which have not halted yet
still_running = mtf.cast(
mtf.less(halting_probability, 1.0), context.activation_dtype)
# Mask of inputs which halted at this step
new_halted = mtf.cast(
mtf.greater(halting_probability + p * still_running, threshold),
context.activation_dtype) * still_running
# Mask of inputs which haven't halted, and didn't halt this step
still_running = mtf.cast(
mtf.less_equal(halting_probability + p * still_running, threshold),
context.activation_dtype) * still_running
# Add the halting probability for this step to the halting
# probabilities for those input which haven't halted yet
halting_probability += p * still_running
# Compute remainders for the inputs which halted at this step
remainders += new_halted * (1 - halting_probability)
# Add the remainders to those inputs which halted at this step
halting_probability += new_halted * remainders
# Increment n_updates for all inputs which are still running
n_updates += still_running + new_halted
# Compute the weight to be applied to the new state and output
# 0 when the input has already halted
# p when the input hasn't halted yet
# the remainders when it halted this step
input_tensor = p * still_running + new_halted * remainders
update_weights = input_tensor
# apply transformation on the state
transformed_state = state
for _ in range(self.num_inrecurrence_layers):
transformed_state = self.vanilla_transformer_layer(
context, transformed_state, mask)
# update running part in the weighted state and keep the rest
new_state = ((transformed_state * update_weights) +
(previous_state * (1 - update_weights)))
if self.act_type == "accumulated":
# Add in the weighted state
new_state = (transformed_state * update_weights) + previous_state
step += 1
return (transformed_state, step, halting_probability, remainders,
n_updates, new_state)
for _ in range(act_max_steps + 1):
(state, step, halting_probability, remainders, n_updates,
previous_state) = ut_function(state, step, halting_probability,
remainders, n_updates, previous_state)
ponder_times = n_updates
mtf.scalar_summary("ponder_times", mtf.reduce_mean(ponder_times))
return previous_state
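  # Illustrative sketch (not part of the original file): per position, the ACT
  # bookkeeping inside ut_function behaves like the following NumPy-style
  # update, where p is the pondering probability emitted at this step:
  #
  #   still_running = (halting_probability < 1.0)
  #   new_halted = ((halting_probability + p * still_running > threshold)
  #                 * still_running)
  #   still_running = ((halting_probability + p * still_running <= threshold)
  #                    * still_running)
  #   halting_probability += p * still_running
  #   remainders += new_halted * (1 - halting_probability)
  #   halting_probability += new_halted * remainders
  #   n_updates += still_running + new_halted
  #   update_weights = p * still_running + new_halted * remainders
  #   new_state = transformed * update_weights + previous * (1 - update_weights)
  #
  # so a position that halts at this step contributes its remainder as the
  # final mixing weight, and positions that halted earlier keep `previous`.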
def ffn_layer_multi_inputs(self,
context,
mask,
inputs_list,
ffn_layer_type="dense",
kernel_initializer=None,
activation=None,
preprocess=False,
postprocess=False):
"""Implements a Feed-forward layer with multiple inputs, pad-removing, etc.
Args:
context: mtf context
mask: mask
inputs_list: list of input tensors
ffn_layer_type: dense / dense_dropconnect/ dense_relu_dense
kernel_initializer: kernel initializer
activation: activation function
      preprocess: whether to preprocess the inputs (by applying layer norm)
      postprocess: whether to postprocess the output (by applying layer norm)
Returns:
a tensor
Raises:
ValueError: Unknown ffn_layer type.
"""
    # need at least one input
num_inputs = len(inputs_list)
assert num_inputs > 0
if preprocess:
# In case of having more than one input to the ffn,
# we just apply layer norm on them independently as preprocessing
for i, inputs in enumerate(inputs_list):
inputs_list[i] = self._layer_norm(
context, (inputs * mask) if mask else inputs)
# the output size is the hidden size of the main inputs
ffn_inputs = inputs_list[0]
if len(inputs_list) != 1:
ffn_inputs = mtf.concat(inputs_list, context.model.model_dim.name)
if ffn_layer_type == "dense":
# last_dims = [
# mtf.Dimension(ffn_inputs.shape.dims[-1].name, hidden_size)
# ]
output = mtf.layers.dense(
ffn_inputs,
reduced_dims=[ffn_inputs.shape.dims[-1]],
new_dims=[context.model.model_dim],
activation=activation,
use_bias=True,
variable_dtype=context.variable_dtype,
expert_dims=context.model.ensemble_dims,
kernel_initializer=kernel_initializer)
elif ffn_layer_type == "dense_relu_dense":
output = mtf.layers.dense_relu_dense(
ffn_inputs,
hidden_channels=context.model.model_dim,
dropout=self.relu_dropout
)
else:
raise ValueError("Unknown ffn_layer type: %s" % ffn_layer_type)
if postprocess:
output = self._layer_norm(context, (output * mask) if mask else output)
return output
def ut_highway(self, context, layer_inputs, mask):
"""A highway network layer."""
def ut_function(x, step):
"""highway layer implementation."""
state, inputs, memory = x
new_state = self.step_preprocess(context, state, step)
for _ in range(self.num_inrecurrence_layers):
new_state = self.vanilla_transformer_layer(context, new_state, mask)
transformed_state = new_state
gate_inputs = []
if "s" in self.gates_inputs:
gate_inputs.append(state)
if "t" in self.gates_inputs:
gate_inputs.append(transformed_state)
if "i" in self.gates_inputs:
gate_inputs.append(inputs)
gate_ffn_layer = self.gate_ffn_layer
transform_gate = self.ffn_layer_multi_inputs(
context,
mask,
gate_inputs,
ffn_layer_type=gate_ffn_layer,
activation=mtf.sigmoid,
preprocess=True)
if self.couple_carry_transform_gates:
carry_gate = mtf.sub(1.0, transform_gate, name="carry")
else:
carry_gate = self.ffn_layer_multi_inputs(
context,
mask,
gate_inputs,
ffn_layer_type=gate_ffn_layer,
activation=mtf.sigmoid,
preprocess=True)
new_state = state * carry_gate + transformed_state * transform_gate
mtf.scalar_summary("highway_transform_gate_layer",
mtf.reduce_mean(transform_gate))
mtf.scalar_summary("highway_carry_gate_layer",
mtf.reduce_mean(carry_gate))
return new_state, inputs, memory
for i in range(self.num_rec_steps):
layer_inputs = ut_function(layer_inputs, i)
output, _, _ = layer_inputs
return output
def call(self, context, x):
"""Call the layer stack."""
if isinstance(context.sequence_id, mtf.Tensor):
# We use this mask to zero out the padding regions at each layer.
# This "fixes" a bug where extreme values leak from the padding into the
# non-padding regions.
# TODO(noam): understand this better and make a more principled fix.
mask = mtf.cast(
mtf.not_equal(context.sequence_id, 0), context.activation_dtype)
else:
mask = None
x = self._dropout(context, x)
context.layer_outputs.append(x)
if self.mix_with_transformer_before_ut:
for _ in range(self.num_vanilla_transformer_layers):
x = self.vanilla_transformer_layer(context, x, mask)
# Call a ACT layer
if self.recurrence_type == "act":
x = self.act_layer(context, x, mask)
elif self.recurrence_type == "basic":
x = self.ut_basic(context, x, mask)
elif self.recurrence_type == "highway":
layer_inputs = (x, x, x)
x = self.ut_highway(context, layer_inputs, mask)
if self.mix_with_transformer_after_ut:
for _ in range(self.num_vanilla_transformer_layers):
x = self.vanilla_transformer_layer(context, x, mask)
x = self._layer_norm(context, x, name="final_layer_norm")
x = self._dropout(context, x)
if mask:
x *= mask
context.layer_outputs.append(x)
return x
def _dropout(self, context, x):
if context.train and self._dropout_rate > 0:
return mtf.dropout(
x,
rate=self._dropout_rate,
noise_shape=mtf.Shape(context.batch_dims + [context.model.model_dim]))
else:
return x
def _layer_norm(self, context, x, name=None):
"""Layer normalization.
Deprecated - can we remove this?
Args:
context: a Context
x: a Tensor
name: an optional string
Returns:
a Tensor
"""
return mtf.layers.layer_norm(x, context.model.model_dim, self._norm_epsilon)
@property
def num_layers(self):
return len(self.layers)
@property
def layers(self):
return self._layers
| mesh-master | mesh_tensorflow/transformer/universal_transformer.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Wrapper around vocabulary from the Tensor2Tensor library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import gin
from mesh_tensorflow.transformer import vocabulary
class T2tVocabulary(vocabulary.Vocabulary):
"""Wrapper around tensor2tensor SubwordTextEncoder.
1 is already reserved for EOS - no need to shift.
"""
def __init__(self, filepath):
"""Create a T2tVocabulary.
Args:
filepath: a string
"""
# Only import tensor2tensor if necessary.
from tensor2tensor.data_generators import text_encoder # pylint: disable=g-import-not-at-top
from tensor2tensor.data_generators.ops import subword_text_encoder_ops # pylint: disable=g-import-not-at-top
self._filepath = filepath
self._subword_text_encoder = text_encoder.SubwordTextEncoder(filepath)
self._subword_text_encoder_encode = (
subword_text_encoder_ops.subword_text_encoder_encode)
@property
def vocab_size(self):
"""Number of ids (including 0=PAD and 1=EOS).
Returns:
an integer
"""
return self._subword_text_encoder.vocab_size
def encode(self, s):
"""Encode a python string as a list of integers.
Args:
s: a string
Returns:
a list of integers (not terminated by EOS)
"""
return self._subword_text_encoder.encode(s)
def decode(self, ids):
"""Decode a list of integers to a python string.
Args:
ids: a list of integers (not terminated by EOS)
Returns:
a string
"""
return self._subword_text_encoder.decode(ids)
def encode_tf(self, s):
"""Encode a tf.Scalar string to a tf.Tensor.
This will be necessary for on-the-fly tokenization.
Args:
s: a tf.Scalar with dtype tf.string
Returns:
a 1d tf.Tensor with dtype tf.int32
"""
ids = self._subword_text_encoder_encode(s, self._filepath)
    # the c++ op appends 1=EOS - drop it.
return ids[:-1]
@gin.configurable
def get_t2t_vocabulary(data_dir=gin.REQUIRED,
vocabulary_filename=gin.REQUIRED):
return T2tVocabulary(os.path.join(data_dir, vocabulary_filename))
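# Illustrative usage sketch (not part of the original module): the data
# directory and vocabulary filename below are hypothetical; gin would normally
# supply them.
def _example_t2t_vocabulary_usage():
  vocab = get_t2t_vocabulary(data_dir="/path/to/data",
                             vocabulary_filename="vocab.subwords")
  ids = vocab.encode("hello world")  # a list of ints, not terminated by EOS
  return vocab.decode(ids)           # decodes back to a python string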
| mesh-master | mesh_tensorflow/transformer/t2t_vocabulary.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for mesh_tensorflow.transformer.dataset."""
from absl.testing import absltest
from absl.testing import parameterized
from mesh_tensorflow.transformer import dataset
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
tf.disable_v2_behavior()
tf.enable_eager_execution()
class DatasetTest(parameterized.TestCase):
_PACK_PARAMETERS = ({"use_custom_ops": False},)
def assert_dataset(self, ds, expected_ds, expected_dtypes):
actual_ds = list(tfds.as_numpy(ds))
self.assertLen(actual_ds, len(expected_ds))
for actual, expected in zip(actual_ds, expected_ds):
self.assertCountEqual(list(actual.keys()), list(expected.keys()))
for k, v in actual.items():
np.testing.assert_array_equal(v, expected[k])
if k in expected_dtypes:
self.assertEqual(v.dtype.type, expected_dtypes[k])
@parameterized.parameters(*_PACK_PARAMETERS)
def test_pack_dataset(self, use_custom_ops):
x = [{"inputs": [7, 8, 5, 1], "targets": [3, 9, 1], "idx": [0]},
{"inputs": [8, 4, 9, 3, 1], "targets": [4, 1], "idx": [1]}]
ds = create_default_dataset(x, feature_names=("inputs", "targets", "idx"))
packed_ds = dataset.pack_dataset(
ds,
length={"inputs": 10, "targets": 7},
keys=("inputs", "targets"),
use_custom_ops=use_custom_ops)
expected = [{
"inputs": [7, 8, 5, 1, 8, 4, 9, 3, 1, 0],
"inputs_segmentation": [1, 1, 1, 1, 2, 2, 2, 2, 2, 0],
"inputs_position": [0, 1, 2, 3, 0, 1, 2, 3, 4, 0],
"targets": [3, 9, 1, 4, 1, 0, 0],
"targets_position": [0, 1, 2, 0, 1, 0, 0],
"targets_segmentation": [1, 1, 1, 2, 2, 0, 0],
}]
self.assert_dataset(
packed_ds, expected, {"inputs": tf.int32, "targets": tf.int32})
@parameterized.parameters(*_PACK_PARAMETERS)
def test_pack_dataset_no_eos(self, use_custom_ops):
x = [{"inputs": [7, 8, 5], "targets": [3, 9]},
{"inputs": [8, 4, 9, 3], "targets": [4]}]
ds = create_default_dataset(x)
packed_ds = dataset.pack_dataset(
ds,
length={"inputs": 8, "targets": 5},
use_custom_ops=use_custom_ops)
# Packing still works without the eos.
expected = [{
"inputs": [7, 8, 5, 8, 4, 9, 3, 0],
"inputs_segmentation": [1, 1, 1, 2, 2, 2, 2, 0],
"inputs_position": [0, 1, 2, 0, 1, 2, 3, 0],
"targets": [3, 9, 4, 0, 0],
"targets_position": [0, 1, 0, 0, 0],
"targets_segmentation": [1, 1, 2, 0, 0],
}]
self.assert_dataset(
packed_ds, expected, {"inputs": tf.int32, "targets": tf.int32})
@parameterized.parameters(*_PACK_PARAMETERS)
def test_pack_dataset_long_seq(self, use_custom_ops):
x = [{"inputs": [7, 8, 5, 6, 9, 4, 1], "targets": [3, 9, 1]},
{"inputs": [8, 4, 9, 3, 5, 7, 9, 1], "targets": [4, 1]}]
ds = create_default_dataset(x)
packed_ds = dataset.pack_dataset(
ds,
length={"inputs": 7, "targets": 3},
use_custom_ops=use_custom_ops)
expected = [{
"inputs": [7, 8, 5, 6, 9, 4, 1],
"inputs_segmentation": [1, 1, 1, 1, 1, 1, 1],
"inputs_position": [0, 1, 2, 3, 4, 5, 6],
"targets": [3, 9, 1],
"targets_position": [0, 1, 2],
"targets_segmentation": [1, 1, 1],
}, {
# EOS is trimmed
"inputs": [8, 4, 9, 3, 5, 7, 9],
"inputs_segmentation": [1, 1, 1, 1, 1, 1, 1],
"inputs_position": [0, 1, 2, 3, 4, 5, 6],
"targets": [4, 1, 0],
"targets_position": [0, 1, 0],
"targets_segmentation": [1, 1, 0],
}]
self.assert_dataset(
packed_ds, expected, {"inputs": tf.int32, "targets": tf.int32})
def create_default_dataset(x, feature_names=("inputs", "targets")):
output_types = {feature_name: tf.int32 for feature_name in feature_names}
output_shapes = {feature_name: [None] for feature_name in feature_names}
ds = tf.data.Dataset.from_generator(
lambda: x, output_types=output_types, output_shapes=output_shapes)
return ds
if __name__ == "__main__":
absltest.main()
| mesh-master | mesh_tensorflow/transformer/dataset_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Custom layers for the evolved transformer.
See https://arxiv.org/abs/1901.11117 for more details.
"""
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import transformer
from mesh_tensorflow.transformer import transformer_layers
import tensorflow.compat.v1 as tf
@gin.configurable
class GatedLinearUnitLayer(transformer.TransformerLayer):
"""Layer performing a Gated Linear Unit transformation on its input.
See https://arxiv.org/pdf/1612.08083.pdf.
"""
def call(self, context, x, losses=None):
"""Call the layer."""
return mtf.layers.dense_product(
x,
reduced_dims=x.shape.dims[-1:],
new_dims=x.shape.dims[-1:],
activation_functions=["linear", "sigmoid"],
variable_dtype=context.variable_dtype,
name="glu",
expert_dims=context.model.ensemble_dims)
@gin.configurable
class EncoderConvolutionalLayer(transformer.TransformerLayer):
"""The convolutional layers custom to the evolved transformer encoder.
The input is projected to 4 times the model dimension size followed by a ReLU
on the left branch while it goes through a 3x1 convolution on the right
branch. The outputs of the branches are summed and then passed through a layer
norm. The output of the layer norm then goes through separable 9x1
convolution.
"""
def __init__(self,
d_model,
dropout_rate,
initializer_scale=1.0,
norm_epsilon=1e-6):
"""Create an EncoderConvolutionalLayer.
Args:
d_model: a positive integer, the dimension of the model dim.
dropout_rate: a float between 0 and 1.
initializer_scale: a positive float, the scale for the initializers of the
separable convolutional filters.
norm_epsilon: a small positive float, the epsilon for the layer norm.
"""
self._dropout_rate = dropout_rate
self._norm_epsilon = norm_epsilon
self._conv3x1 = transformer_layers.Conv1DLayer(
filter_size=3, output_size=int(d_model / 2), activation="relu")
self._sep_conv9x1 = transformer_layers.SeparableConv1DLayer(
min_relative_pos=-4,
max_relative_pos=4,
output_size=int(d_model / 2),
depthwise_filter_initializer_scale=initializer_scale,
pointwise_filter_initializer_scale=initializer_scale)
def call(self, context, x, losses=None):
"""Call the layer."""
model_dim = context.model.model_dim
# Note that the left output dim can also be thought of the hidden dimension
# in the feedforward network.
with tf.variable_scope("conv1x1"):
hidden_dim = mtf.Dimension(model_dim.name, 4 * model_dim.size)
left_state = mtf.layers.dense(
x,
reduced_dims=x.shape.dims[-1:],
new_dims=[hidden_dim],
activation=mtf.relu,
use_bias=False,
variable_dtype=context.variable_dtype)
left_state = _dropout(left_state, context, self._dropout_rate)
with tf.variable_scope("conv3x1"):
right_state = self._conv3x1.call(context, x, losses)
right_state = _dropout(right_state, context, self._dropout_rate)
right_state = _pad_channels_dim(right_state, hidden_dim.size)
hidden_state = left_state + right_state
hidden_state = mtf.layers.layer_norm(
hidden_state,
epsilon=self._norm_epsilon,
dim=hidden_state.shape.dims[-1])
with tf.variable_scope("sep_conv9x1"):
output = self._sep_conv9x1.call(context, hidden_state, losses)
output = _dropout(output, context, self._dropout_rate)
return _pad_channels_dim(output, model_dim.size)
@gin.configurable
class DecoderAttentionLayer(transformer.TransformerLayer):
"""The attention layers custom to the evolved transformer decoder.
This layer consists of applying both a self attention and enc-dec attention to
the input and then summing their outputs.
"""
def __init__(self, base_num_heads):
"""Create an DecoderAttentionLayer.
Args:
base_num_heads: a positive integer, the base number of heads the attention
layers are using.
"""
self._self_attention = transformer_layers.SelfAttention(num_heads=2 *
base_num_heads)
self._enc_dec_attention = transformer_layers.EncDecAttention(
num_heads=base_num_heads)
def call(self, context, x, losses=None):
"""Call the layer."""
with tf.variable_scope("self_attention"):
left_state = self._self_attention.call(context, x, losses)
with tf.variable_scope("enc_dec_attention"):
right_state = self._enc_dec_attention.call(context, x, losses)
return left_state + right_state
@gin.configurable
class DecoderConvolutionalLayer(transformer.TransformerLayer):
"""The convolutional layers custom to the evolved transformer decoder.
The input is passed through a 11x1 separable convolution followed by a ReLU on
the left branch while it goes through a 7x1 separable convolution on the right
branch. The outputs of the branches are summed and then passed through a layer
norm. The output of the layer norm then goes through separable 7x1
convolution.
"""
def __init__(self,
d_model,
dropout_rate,
initializer_scale=1.0,
norm_epsilon=1e-6):
"""Create an DecoderConvolutionalLayer.
Args:
d_model: a positive integer, the dimension of the model dim.
dropout_rate: a float between 0 and 1.
initializer_scale: a positive float, the scale for the initializers of the
separable convolutional filters.
norm_epsilon: a small positive float, the epsilon for the layer norm.
"""
self._d_model = d_model
self._dropout_rate = dropout_rate
self._norm_epsilon = norm_epsilon
self._sep_conv11x1 = transformer_layers.SeparableConv1DLayer(
min_relative_pos=-10,
max_relative_pos=0,
output_size=int(2 * d_model),
depthwise_filter_initializer_scale=initializer_scale,
pointwise_filter_initializer_scale=initializer_scale,
activation="relu")
self._sep_conv7x1_1 = transformer_layers.SeparableConv1DLayer(
min_relative_pos=-6,
max_relative_pos=0,
output_size=int(d_model / 2),
depthwise_filter_initializer_scale=initializer_scale,
pointwise_filter_initializer_scale=initializer_scale)
self._sep_conv7x1_2 = transformer_layers.SeparableConv1DLayer(
min_relative_pos=-6,
max_relative_pos=0,
output_size=d_model,
depthwise_filter_initializer_scale=initializer_scale,
pointwise_filter_initializer_scale=initializer_scale)
def call(self, context, x, losses=None):
"""Call the layer."""
with tf.variable_scope("sep_conv11x1"):
left_state = self._sep_conv11x1.call(context, x, losses)
left_state = _dropout(left_state, context, self._dropout_rate)
with tf.variable_scope("sep_conv7x1_1"):
right_state = self._sep_conv7x1_1.call(context, x, losses)
right_state = _dropout(right_state, context, self._dropout_rate)
right_state = _pad_channels_dim(right_state,
left_state.shape.dims[-1].size)
hidden_state = left_state + right_state
hidden_state = mtf.layers.layer_norm(
hidden_state,
epsilon=self._norm_epsilon,
dim=hidden_state.shape.dims[-1])
with tf.variable_scope("sep_conv7x1_2"):
output = self._sep_conv7x1_2.call(context, hidden_state, losses)
return _dropout(output, context, self._dropout_rate)
def _pad_channels_dim(tensor, size):
channels_dim = tensor.shape.dims[-1]
if channels_dim.size > size:
raise ValueError("Cannot pad to size of {} when the original size "
"of {} is bigger".format(size, channels_dim.size))
elif channels_dim.size == size:
return tensor
else:
return mtf.pad(tensor, [0, size - channels_dim.size], channels_dim.name)
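# Illustrative sketch (not part of the original module): `_pad_channels_dim`
# zero-pads the trailing (channels) dimension up to a target size so that the
# two branch outputs above can be summed. The NumPy analogue:
def _example_pad_channels_np():
  import numpy as np  # local import; numpy is not otherwise needed here
  x = np.ones((2, 5, 3))                            # [batch, length, channels=3]
  padded = np.pad(x, [(0, 0), (0, 0), (0, 8 - 3)])  # pad channels 3 -> 8
  return padded.shape                               # (2, 5, 8)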
def _dropout(x, context, dropout_rate):
if context.train and dropout_rate > 0:
return mtf.dropout(
x,
rate=dropout_rate,
noise_shape=mtf.Shape(context.batch_dims + x.shape.dims[-1:]))
else:
return x
| mesh-master | mesh_tensorflow/transformer/evolved_transformer.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Dataset utilities for Transformer example.
During training/eval, transformer gets input from a tf.data.Dataset. The
utilities in this file are for loading such tf.data.Datasets from various data
sources.
Format:
The tf.data.Dataset outputs a dictionary of features, each of which is
an integer tensor with fixed shape [batch_size, sequence_length].
The keys of the dictionary (some of which are optional) are:
{
"inputs"
"inputs_segmentation"
"inputs_position"
"targets"
"targets_segmentation"
"targets_position"
}
We follow the convention that ID=0 represents padding and ID=1 represents EOS.
All sequences are terminated by EOS. There is no BOS token included.
"inputs" represents the input sequences in a sequence-to-sequence problem. A
language-modeling problem has no "inputs" feature.
"targets" represents the target sequences of a sequence-to-sequence problem or
the sequences in a language-modeling problem.
A dataset may be "packed", in which case each row in the tensors represents
multiple training examples concatenated together (each terminated by EOS=1).
In this case, the output dictionary will contain additional features:
"inputs_segmentation" (if "inputs" is present)
"targets_segmentation"
"inputs_position" (if "inputs" is present)
"targets_position"
"inputs_segmentation" and "inputs_position" are both aligned with "inputs".
"inputs_segmentation" specifies which of the original examples a particular
token belongs to. "inputs_position" specifies the position of this token in the
original sequence. "targets_segmentation" and "targets_position" are similarly
defined.
Example:
Two original sequence-pairs are packed together to form the first combined
example in the batch:
The original sequence-pairs are:
{"inputs": [8, 7, 1=EOS], "targets": [4, 1=EOS]}
{"inputs": [2, 3, 4, 1=EOS], "targets": [5, 6, 1=EOS]}
The output dictionary looks like this:
{
"inputs": [[8, 7, 1, 2, 3, 4, 1, 0, 0, 0], ...]
"inputs_segmentation": [[1, 1, 1, 2, 2, 2, 2, 0, 0, 0], ...]
"inputs_position": [[0, 1, 2, 0, 1, 2, 3, 0, 0, 0], ...]
"targets": [[4, 1, 5, 6, 1, 0, 0, 0, 0, 0], ...]
"targets_segmentation": [[1, 1, 2, 2, 2, 0, 0, 0, 0, 0], ...]
"targets_position": [[0, 1, 0, 1, 2, 0, 0, 0, 0, 0], ...]
}
The "_segmentation" tensors have 1s and 2s to demacrate these two original
examples, and 0s for padding. The "_position" tensors contain the positions
within the two original examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import gin
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
@gin.configurable
def pack_or_pad(
dataset, length, pack=True, feature_keys=None, ensure_eos=False):
"""Creates a 'packed' version of a dataset or pads examples with zeros.
If pack=True, then multiple examples concatenated to form one combined
example with the given length.
If pack=False, then examples are padded with zeros to 'length'.
Args:
dataset: a tf.data.Dataset
length: an integer or a dict from feature-key to integer
pack: a boolean, whether to pack (True) or pad (False).
feature_keys: (optional) collection of strings, the feature names to limit
packing or padding to. Packing will filter out other features whereas
padding will pass them through unchanged. Defaults to all features.
ensure_eos: a boolean or collection of strings, whether to replace the final
token with EOS=1 if it is not PAD=0. If True, will be applied to all keys
in `feature_keys`. If False, will be applied to none. If a collection of
strings, will only be applied to these features in the collection.
Returns:
a tf.data.Dataset where all features have fixed shape [length].
"""
feature_keys = set(feature_keys or tf.data.get_output_shapes(dataset).keys())
if pack:
dataset = pack_dataset(dataset, length=length, keys=feature_keys)
# Pad/trim length of each example to length.
dataset = trim_and_pad_dataset(
dataset, length=length, feature_keys=feature_keys)
if ensure_eos:
eos_keys = feature_keys if isinstance(ensure_eos, bool) else ensure_eos
dataset = ensure_dataset_eos(dataset, eos_keys)
return dataset
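# Illustrative usage sketch (not part of the original module): how
# `pack_or_pad` might be applied to a toy dataset. The feature values and
# lengths below are arbitrary examples, mirroring the format described in the
# module docstring.
def _example_pack_or_pad_usage():
  toy_examples = [{"inputs": [8, 7, 1], "targets": [4, 1]},
                  {"inputs": [2, 3, 4, 1], "targets": [5, 6, 1]}]
  toy_ds = tf.data.Dataset.from_generator(
      lambda: toy_examples,
      output_types={"inputs": tf.int32, "targets": tf.int32},
      output_shapes={"inputs": [None], "targets": [None]})
  # With pack=True, the two examples are concatenated into a single row per
  # feature and padded out to the requested lengths.
  return pack_or_pad(toy_ds, length={"inputs": 10, "targets": 7}, pack=True)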
def ensure_dataset_eos(dataset, feature_keys=None):
"""Replaces the final token of features with EOS=1 if it is not PAD=0.
Args:
dataset: a tf.data.Dataset
feature_keys: (optional) collection of strings, the feature names to ensure
end with EOS or padding. Defaults to all features.
Returns:
a tf.data.Dataset where all specified features end with PAD=0 or EOS=1.
"""
feature_keys = feature_keys or set(tf.data.get_output_shapes(dataset).keys())
def _ensure_eos(k, v):
if k not in feature_keys:
return v
return tf.concat([v[0:-1], tf.clip_by_value(v[-1:], 0, 1)], axis=0)
return dataset.map(
lambda ex: {k: _ensure_eos(k, v) for k, v in ex.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
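# Illustrative sketch (not part of the original module): the `_ensure_eos`
# logic above clips only the final token into {0, 1}, so a truncated sequence
# ends in EOS=1 while an all-padding row keeps its trailing 0.
def _example_ensure_eos_logic():
  v = tf.constant([5, 6, 9], dtype=tf.int32)
  fixed = tf.concat([v[0:-1], tf.clip_by_value(v[-1:], 0, 1)], axis=0)
  return fixed  # evaluates to [5, 6, 1]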
def encode_dataset(dataset, vocabulary):
"""Encode from strings to token ids.
Args:
dataset: a tf.data.Dataset with string values.
vocabulary: a mesh_tensorflow.transformer.Vocabulary
Returns:
a tf.data.Dataset with integer-vector values ending in EOS=1
"""
def encode(features):
return {k: vocabulary.encode_tf(v) for k, v in features.items()}
return dataset.map(encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
@gin.configurable
def pretokenized_tfds_dataset(dataset_name=gin.REQUIRED,
text2self=gin.REQUIRED,
tfds_data_dir=gin.REQUIRED,
dataset_split=gin.REQUIRED,
batch_size=None,
sequence_length=gin.REQUIRED,
vocabulary=None):
"""Reads a tensorflow_datasets dataset.
Args:
dataset_name: a string
text2self: a boolean
    tfds_data_dir: a string
dataset_split: a string
batch_size: an integer, DEPRECATED
sequence_length: an integer
vocabulary: ignored
Returns:
a tf.data.Dataset of batches
"""
del batch_size
del vocabulary
dataset = tfds.load(
dataset_name,
split=dataset_split,
as_supervised=True,
data_dir=tfds_data_dir,
shuffle_files=dataset_split == "train")
if dataset_split == "train":
dataset = dataset.repeat()
dataset = dataset.shuffle(1000)
def shift_and_append_eos(t):
# tfds encoder does not reserve an EOS token, so we need to shift
# in order to do so. We also append EOS=1.
return tf.concat([t + 1, [1]], 0)
def feature_map(inputs, targets):
if text2self:
return {"targets": shift_and_append_eos(targets)}
else:
return {"inputs": shift_and_append_eos(inputs),
"targets": shift_and_append_eos(targets)}
dataset = dataset.map(feature_map,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return pack_or_pad(dataset, sequence_length)
@gin.configurable
def sample_from_text_line_datasets(glob_weight_list,
shuffle_buffer_size=100000,
prefetch=1000): # pylint: disable=missing-docstring
globs, weights = zip(*glob_weight_list)
datasets = [
tf.data.TextLineDataset(tf.gfile.Glob(g)).repeat().shuffle(
shuffle_buffer_size).prefetch(prefetch) for g in globs
]
return tf.data.experimental.sample_from_datasets(
datasets=datasets, weights=weights)
@gin.configurable
def make_text_line_dataset(glob=gin.REQUIRED):
return sample_from_text_line_datasets([(glob, 1.0)])
@gin.configurable
def simple_text_line_dataset(glob=gin.REQUIRED, shuffle_buffer_size=100000):
return tf.data.TextLineDataset(
tf.gfile.Glob(glob)).shuffle(shuffle_buffer_size)
@gin.configurable
def packed_parallel_tsv_dataset(dataset=gin.REQUIRED,
dataset_split=gin.REQUIRED,
batch_size=None,
sequence_length=gin.REQUIRED,
vocabulary=gin.REQUIRED,
append_eos=True,
eos_id=1,
max_encoded_len=0):
"""Reads parallel tab-separated text file. One example per line."""
del batch_size
del dataset_split
def _parse_fn(record): # pylint: disable=missing-docstring
tokens = tf.decode_csv(
record,
record_defaults=[""] * 2,
field_delim="\t",
use_quote_delim=False)
return {"inputs": tokens[0], "targets": tokens[1]}
def _encode_fn(features): # pylint: disable=missing-docstring
inputs_vocabulary = vocabulary[0] if isinstance(vocabulary,
tuple) else vocabulary
targets_vocabulary = vocabulary[1] if isinstance(vocabulary,
tuple) else vocabulary
inputs_enc = inputs_vocabulary.encode_tf(features["inputs"])
targets_enc = targets_vocabulary.encode_tf(features["targets"])
if append_eos:
inputs_enc = tf.concat([tf.cast(inputs_enc, tf.int64), [eos_id]], 0)
targets_enc = tf.concat([tf.cast(targets_enc, tf.int64), [eos_id]], 0)
return {"inputs": inputs_enc, "targets": targets_enc}
dataset = dataset.map(
_parse_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.map(
_encode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def _filter_fn(features): # pylint: disable=missing-docstring
return tf.less_equal(
tf.reduce_max(
tf.stack([tf.size(v) for v in features.values()], axis=0)),
max_encoded_len)
if max_encoded_len:
tf.logging.info("Filtering encoded examples longer than %d" %
max_encoded_len)
dataset = dataset.filter(_filter_fn)
return pack_or_pad(dataset, sequence_length)
@gin.configurable
def untokenized_tfds_dataset(dataset_name=gin.REQUIRED,
text2self=gin.REQUIRED,
tfds_data_dir=gin.REQUIRED,
dataset_split=gin.REQUIRED,
batch_size=None,
sequence_length=gin.REQUIRED,
vocabulary=gin.REQUIRED,
pack=gin.REQUIRED):
"""Reads a tensorflow_datasets dataset.
Returns a tf.data.Dataset containing single tokenized examples where each
feature ends in EOS=1.
Args:
dataset_name: a string
text2self: a boolean, if true, run unsupervised LM-style training. if false,
the dataset must support supervised mode.
    tfds_data_dir: a string
dataset_split: a string
batch_size: an integer
sequence_length: an integer
vocabulary: a vocabulary.Vocabulary
pack: if True, multiple examples emitted by load_internal() are concatenated
to form one combined example.
Returns:
a tf.data.Dataset of batches
"""
del batch_size
dataset = tfds.load(
dataset_name, split=dataset_split,
as_supervised=not text2self, data_dir=tfds_data_dir)
if dataset_split == "train":
dataset = dataset.repeat()
dataset = dataset.shuffle(1000)
if not text2self:
dataset = supervised_to_dict(dataset, text2self)
dataset = encode_all_features(dataset, vocabulary)
return pack_or_pad(dataset, sequence_length, pack)
def supervised_to_dict(dataset, text2self):
"""Turns a supervised dataset into a dataset with a feature dictionary.
if text2self, then the features dictionary contains a "targets" key.
else, the features dictionary contains "inputs" and "targets" keys.
Args:
dataset: a tf.data.Dataset
text2self: a boolean
Returns:
a tf.data.Dataset
"""
def my_fn(inputs, targets):
if text2self:
return {"targets": targets}
else:
return {"inputs": inputs, "targets": targets}
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
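# Illustrative sketch (not part of the original module): a supervised dataset
# yields (inputs, targets) tuples; `supervised_to_dict` rewraps them as the
# feature dictionaries used throughout this module.
def _example_supervised_to_dict():
  ds = tf.data.Dataset.from_tensor_slices((["hello"], ["bonjour"]))
  ds = supervised_to_dict(ds, text2self=False)
  # Each element is now {"inputs": b"hello", "targets": b"bonjour"}.
  return ds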
def encode_all_features(dataset, vocabulary):
"""Encode all features.
Args:
dataset: a tf.data.Dataset
vocabulary: a vocabulary.Vocabulary
Returns:
a tf.data.Dataset
"""
def my_fn(features):
"""Encode all features that are strings and return a dictionary.
Args:
features: a dictionary
Returns:
a dictionary
"""
ret = {}
for k, v in features.items():
if v.dtype == tf.string:
v = vocabulary.encode_tf(v)
v = tf.concat([tf.cast(v, tf.int64), [1]], 0)
ret[k] = v
else:
tf.logging.info(
"encode_all_features: skipping non-string feature %s:%s", k, v)
return ret
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def pretokenized_tfrecord_dataset(filenames,
text2self,
eos_included,
repeat,
batch_size,
sequence_length,
vocab_shift=0):
"""Reads tensor2tensor-style data files.
The dataset is defined by sets of TFRecord files of TFExample protos.
There should be a "targets" feature (a 1d tensor of integers)
If not text2self, there should also be an "inputs" feature.
Other features get ignored.
eos_included specifies whether the inputs and targets were written with an
EOS token, as in tensor2tensor
Args:
filenames: a list of strings
text2self: a boolean
eos_included: a boolean
repeat: a boolean
batch_size: an integer, DEPRECATED
sequence_length: an integer
vocab_shift: an optional integer - add this value to all ids
Returns:
A tf.data.Dataset of batches
"""
del batch_size
dataset = tf.data.TFRecordDataset(filenames, buffer_size=64 * 1024 * 1024)
if repeat:
dataset = dataset.repeat()
keys = ["targets"] if text2self else ["inputs", "targets"]
def decode_example(serialized_example):
"""Return a dict of Tensors from a serialized tensorflow.Example."""
decoded = tf.io.parse_example(
serialized=[serialized_example],
features={k: tf.VarLenFeature(tf.int64) for k in keys})
decoded = {k: v.values for k, v in decoded.items()}
if vocab_shift:
decoded = {k: v + vocab_shift for k, v in decoded.items()}
if not eos_included:
decoded = {k: tf.concat([v, [1]], 0) for k, v in decoded.items()}
return decoded
dataset = dataset.map(decode_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return pack_or_pad(dataset, sequence_length)
@gin.configurable
def pretokenized_t2t_dataset(dataset_name=gin.REQUIRED,
text2self=False,
data_dir=gin.REQUIRED,
dataset_split="train",
batch_size=None,
sequence_length=gin.REQUIRED,
vocabulary=None,
eos_included=True,
vocab_shift=0):
"""Loads the Tensor2tensor dataset specified by dataset_name.
Args:
dataset_name: TensorFlow Datasets dataset name.
text2self: a boolean
data_dir: string, data_dir for TensorFlow Datasets
dataset_split: a string - "train" or "dev"
batch_size: an integer, DEPRECATED
sequence_length: an integer
vocabulary: ignored
eos_included: a boolean
vocab_shift: an optional integer - add this value to all ids read
Returns:
A tf.data.Dataset of batches
"""
del vocabulary
filepattern = os.path.join(
data_dir, dataset_name + "-" + dataset_split + "-*")
filenames = tf.gfile.Glob(filepattern)
tf.logging.info("Found %s files matching %s" % (len(filenames), filepattern))
if not filenames:
raise ValueError("No matching files found")
dataset = pretokenized_tfrecord_dataset(
filenames=filenames,
text2self=text2self,
eos_included=eos_included,
repeat=dataset_split == "train",
batch_size=batch_size,
sequence_length=sequence_length,
vocab_shift=vocab_shift)
if dataset_split == "train":
dataset = dataset.shuffle(1000)
return dataset
@gin.configurable
def pack_dataset(dataset, length, keys=None, use_custom_ops=False):
"""Creates a 'packed' version of a dataset on-the-fly.
Borrowed from the tensor2tensor library.
TODO(noam): make this faster
This is meant to replace the irritation of having to create a separate
"packed" version of a dataset to train efficiently on TPU.
Each example in the output dataset represents several examples in the
input dataset.
For each key in the input dataset, two additional keys are created:
<key>_segmentation: an int32 tensor identifying the parts
representing the original example.
<key>_position: an int32 tensor identifying the position within the original
example.
Example:
Two input examples get combined to form an output example.
The input examples are:
{"inputs": [8, 7, 1, 0], "targets":[4, 1, 0]}
{"inputs": [2, 3, 4, 1], "targets":[5, 6, 1]}
The output example is:
{
"inputs": [8, 7, 1, 2, 3, 4, 1, 0, 0, 0]
"inputs_segmentation": [1, 1, 1, 2, 2, 2, 2, 0, 0, 0]
"inputs_position": [0, 1, 2, 0, 1, 2, 3, 0, 0, 0]
"targets": [4, 1, 5, 6, 1, 0, 0, 0, 0, 0]
"targets_segmentation": [1, 1, 2, 2, 2, 0, 0, 0, 0, 0]
"targets_position": [0, 1, 0, 1, 2, 0, 0, 0, 0, 0]
}
0 represents padding in both the inputs and the outputs.
Sequences in the incoming examples are truncated to length "length", and the
sequences in the output examples all have fixed (padded) length "length".
Args:
dataset: a tf.data.Dataset
length: an integer, or a dict from feature-key to integer
keys: a collection of strings (e.g. ["inputs", "targets"])
use_custom_ops: a boolean - custom ops are faster but require a custom-built
binary, which is not currently possible on cloud-tpu.
Returns:
a tf.data.Dataset
"""
shapes = tf.data.get_output_shapes(dataset)
if keys is None:
keys = list(shapes.keys())
for k in keys:
if k not in shapes:
raise ValueError("Key %s not found in dataset. Available keys are %s"
% (k, shapes.keys()))
if not shapes[k].is_compatible_with(tf.TensorShape([None])):
raise ValueError("Tensors to be packed must be one-dimensional.")
# make sure that the length dictionary contains all keys as well as the
# keys suffixed by "_segmentation" and "_position"
length_dict = {}
for k in keys:
for suffix in ["", "_segmentation", "_position"]:
length_dict[k + suffix] = length if isinstance(length, int) else length[k]
length = length_dict
# trim to length
dataset = dataset.map(lambda x: {k: x[k][:length[k]] for k in keys},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Setting batch_size=length ensures that the concatenated sequences (if they
# have length >=1) are sufficient to fill at least one packed example.
batch_size = max(length.values())
dataset = dataset.padded_batch(
batch_size, padded_shapes={k: [-1] for k in keys})
if use_custom_ops and len(keys) <= 2:
dataset = _pack_with_custom_ops(dataset, keys, length)
else:
dataset = _pack_with_tf_ops(dataset, keys, length)
# Set the Tensor shapes correctly since they get lost in the process.
def my_fn(x):
return {k: tf.reshape(v, [length[k]]) for k, v in x.items()}
return dataset.map(my_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def _pack_with_tf_ops(dataset, keys, length):
"""Helper-function for packing a dataset which has already been batched.
See pack_dataset()
Uses tf.while_loop. Slow.
Args:
dataset: a dataset containing padded batches of examples.
keys: a collection of strings
length: an dict from feature-key to integer
Returns:
a dataset.
"""
empty_example = {}
for k in keys:
empty_example[k] = tf.zeros([0], dtype=tf.int32)
empty_example[k + "_position"] = tf.zeros([0], dtype=tf.int32)
keys_etc = empty_example.keys()
def write_packed_example(partial, outputs):
new_partial = empty_example.copy()
new_outputs = {}
for k in keys_etc:
new_outputs[k] = outputs[k].write(
outputs[k].size(),
tf.pad(partial[k],
[[0, length[k] - tf.size(partial[k])]]))
return new_partial, new_outputs
def map_fn(x):
"""Internal function to flat_map over.
Consumes a batch of input examples and produces a variable number of output
examples.
Args:
x: a single example
Returns:
a tf.data.Dataset
"""
partial = empty_example.copy()
i = tf.zeros([], dtype=tf.int32)
first_key, *_ = keys
dynamic_batch_size = tf.shape(x[first_key])[0]
outputs = {}
for k in keys:
outputs[k] = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, element_shape=[length[k]])
outputs[k + "_position"] = tf.TensorArray(
tf.int32, size=0, dynamic_size=True, element_shape=[length[k]])
def cond_fn(i, partial, outputs):
del partial, outputs
return i < dynamic_batch_size
def body_fn(i, partial, outputs):
"""Body function for while_loop.
Args:
i: integer scalar
partial: dictionary of Tensor (partially-constructed example)
outputs: dictionary of TensorArray
Returns:
A triple containing the new values of the inputs.
"""
can_append = True
one_example = {}
for k in keys:
val = tf.cast(x[k][i], tf.int32)
val = val[:tf.reduce_sum(tf.cast(tf.not_equal(val, 0), tf.int32))]
one_example[k] = val
for k in keys:
can_append = tf.logical_and(
can_append,
tf.less_equal(
tf.size(partial[k]) + tf.size(one_example[k]), length[k]))
def false_fn():
return write_packed_example(partial, outputs)
def true_fn():
return partial, outputs
partial, outputs = tf.cond(can_append, true_fn, false_fn)
new_partial = {}
for k in keys:
new_seq = one_example[k][:length[k]]
new_seq_len = tf.size(new_seq)
new_partial[k] = tf.concat([partial[k], new_seq], 0)
new_partial[k + "_position"] = tf.concat(
[partial[k + "_position"],
tf.range(new_seq_len, dtype=tf.int32)], 0)
partial = new_partial
return i+1, partial, outputs
i, partial, outputs = tf.while_loop(
cond_fn, body_fn, (i, partial, outputs),
back_prop=False,
shape_invariants=(
tf.TensorShape([]),
{k: tf.TensorShape([None]) for k in keys_etc},
{k: tf.TensorShape(None) for k in keys_etc},
))
partial, outputs = write_packed_example(partial, outputs)
packed = {k: outputs[k].stack() for k in keys_etc}
for k in keys:
packed[k + "_segmentation"] = (
tf.cumsum(
tf.cast(tf.equal(packed[k + "_position"], 0), tf.int32), axis=1) *
tf.cast(tf.not_equal(packed[k], 0), tf.int32))
return packed
dataset = dataset.map(map_fn,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset.unbatch()
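# Illustrative sketch (not part of the original module): the "_segmentation"
# tensors computed above are just a cumulative count of how many sequences
# have started at or before each position, zeroed out on padding. In NumPy:
def _example_segmentation_from_positions():
  import numpy as np  # local import; numpy is not otherwise needed here
  packed = np.array([8, 7, 1, 2, 3, 4, 1, 0, 0, 0])
  position = np.array([0, 1, 2, 0, 1, 2, 3, 0, 0, 0])
  segmentation = np.cumsum(position == 0) * (packed != 0)
  return segmentation  # [1, 1, 1, 2, 2, 2, 2, 0, 0, 0]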
def _pack_with_custom_ops(dataset, keys, length):
"""Helper-function for packing a dataset which has already been batched.
See pack_dataset()
Relies on custom ops which require a custom compiled binary.
Faster than _pack_with_tf_ops(), and denser packing.
Args:
dataset: a dataset containing padded batches of examples.
keys: a collection of strings (must have length 1 or 2)
length: a dictionary from key to integer
Returns:
a dataset.
"""
from tensor2tensor.data_generators.ops import pack_sequences_ops # pylint: disable=g-import-not-at-top
# faster and better packing but requires custom-built binary.
if len(keys) == 1:
k1, = keys
k2 = k1
elif len(keys) == 2:
k1, k2 = keys
else:
raise ValueError(f"Packing op requires 1 or 2 keys. Got {len(keys)}")
def custom_pack_batch(x):
"""Map-function."""
(k1_packed, k1_segmentation, k1_position,
k2_packed, k2_segmentation, k2_position) = (
pack_sequences_ops.pack_sequences2(
# cast to int64 for compatibility with custom ops
tf.cast(x[k1], tf.int64),
tf.cast(x[k2], tf.int64),
length[k1],
length[k2]))
packed = {
k1: k1_packed,
k1 + "_segmentation": k1_segmentation,
k1 + "_position": k1_position,
}
if len(keys) == 2:
packed.update({
k2: k2_packed,
k2 + "_segmentation": k2_segmentation,
k2 + "_position": k2_position,
})
# cast back to int32
for k, v in packed.items():
packed[k] = tf.cast(v, tf.int32)
return packed
dataset = dataset.map(
custom_pack_batch, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.unbatch()
return dataset
def trim_and_pad_dataset(dataset, length, feature_keys=None):
"""Trim and pad first dimension of features to size `length`.
Args:
    dataset: tf.data.Dataset, the dataset to trim/pad examples in.
length: int, or a dict from feature-key to int
feature_keys: (optional) collection of strings, the feature names to limit
trimming/padding to. Defaults to all features.
Returns:
Trimmed/padded tf.data.Dataset.
"""
def _trim_and_pad(k, t):
"""Trim/pad to the first axis of `t` to be of size `length`."""
if feature_keys and k not in feature_keys:
return t
length_k = length if isinstance(length, int) else length[k]
t = t[:length_k]
pad_amt = length_k - tf.shape(t)[0]
padded_t = tf.pad(t, [(0, pad_amt)] + [(0, 0)] * (len(t.shape) - 1))
padded_t.set_shape([length_k] + t.shape.as_list()[1:])
return padded_t
return dataset.map(
lambda x: {k: _trim_and_pad(k, t) for k, t in x.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
EvalDataset = collections.namedtuple(
"EvalDataset",
[
"name", # string, the task name
"dataset_fn", # function which returns a tf.data.Dataset
"postprocess_fn", # function which converts decodes to evalable strs
"metric_fns", # list of metric_fn(targets, predictions) returning dicts
]
)
def pad_dataset_with_zeroed_out_examples(ds):
def _zero_out(x):
return {k: tf.zeros_like(v) for k, v in x.items()}
return ds.concatenate(ds.map(_zero_out).repeat())
| mesh-master | mesh_tensorflow/transformer/dataset.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Utilities for running training and inference.
The `run` function for training the Transformer model is defined in this file.
TODO(katherinelee): add details about gin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import random
import re
import gin
import gin.tf
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import dataset as transformer_dataset
from mesh_tensorflow.transformer import learning_rate_schedules
from mesh_tensorflow.transformer import transformer
import numpy as np
import pkg_resources
import six
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from tensorflow.core.protobuf import rewriter_config_pb2 # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import resources # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.tpu import tpu_config # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.tpu import tpu_estimator # pylint: disable=g-direct-tensorflow-import
tf.flags.DEFINE_multi_string("gin_file", None, "Path to a Gin file.")
tf.flags.DEFINE_multi_string("gin_param", None, "Gin parameter binding.")
tf.flags.DEFINE_list("gin_location_prefix", [], "Gin file search path.")
FLAGS = tf.flags.FLAGS
_DEFAULT_CONFIG_FILE = "./gin/defaults.gin"
# List of features used by model.
_MODEL_FEATURES = [
"inputs", "inputs_position", "inputs_segmentation", "targets",
"targets_position", "targets_segmentation", "targets_subsegmentation"
]
def _filter_features(ex):
"""Filters example features, keeping only valid model features."""
return {k: v for k, v in ex.items() if k in _MODEL_FEATURES}
def parse_gin_defaults_and_flags(skip_unknown=False, finalize_config=True):
"""Parses all default gin files and those provided via flags."""
# Register .gin file search paths with gin
for gin_file_path in FLAGS.gin_location_prefix:
gin.add_config_file_search_path(gin_file_path)
# Set up the default values for the configurable parameters. These values will
# be overridden by any user provided gin files/parameters.
gin.parse_config_file(
pkg_resources.resource_filename(__name__, _DEFAULT_CONFIG_FILE),
skip_unknown=skip_unknown)
gin.parse_config_files_and_bindings(
FLAGS.gin_file, FLAGS.gin_param,
skip_unknown=skip_unknown,
finalize_config=finalize_config)
# TODO(noam): maybe add gin-config to mtf.get_variable so we can delete
# this stupid VariableDtype class and stop passing it all over creation.
@gin.configurable
def get_variable_dtype(
master_dtype=tf.bfloat16,
slice_dtype=tf.float32,
activation_dtype=tf.float32):
"""Datatypes to use for the run.
Args:
master_dtype: string, datatype for checkpoints
keep this the same between training and eval/inference
slice_dtype: string, datatype for variables in memory
must be tf.float32 for training
activation_dtype: string, datatype for activations
less memory usage if tf.bfloat16 but possible numerical issues
Returns:
a mtf.VariableDtype
"""
return mtf.VariableDType(
master_dtype=tf.as_dtype(master_dtype),
slice_dtype=tf.as_dtype(slice_dtype),
activation_dtype=tf.as_dtype(activation_dtype))
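# Example gin bindings (illustrative sketch; these particular values are
# assumptions chosen for demonstration, not required settings):
#
#   get_variable_dtype.master_dtype = "bfloat16"
#   get_variable_dtype.slice_dtype = "float32"
#   get_variable_dtype.activation_dtype = "bfloat16"
#
# The resulting mtf.VariableDType is then passed to model calls such as
# transformer_model.call_simple(..., variable_dtype=get_variable_dtype()).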
def inputs_vocabulary(vocabulary):
"""Get the inputs vocabulary.
Args:
vocabulary: Vocabulary or (inputs_vocabulary, targets_vocabulary) tuple.
Returns:
a Vocabulary
"""
if isinstance(vocabulary, tuple):
vocabulary = vocabulary[0]
return vocabulary
def targets_vocabulary(vocabulary):
"""Get the targets vocabulary.
Args:
vocabulary: Vocabulary or (inputs_vocabulary, targets_vocabulary) tuple.
Returns:
a Vocabulary
"""
if isinstance(vocabulary, tuple):
vocabulary = vocabulary[1]
return vocabulary
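# Example (illustrative sketch; `in_vocab` and `out_vocab` stand for hypothetical
# vocabulary.Vocabulary objects):
#
#   vocab = (in_vocab, out_vocab)
#   inputs_vocabulary(vocab)      # -> in_vocab
#   targets_vocabulary(vocab)     # -> out_vocab
#   inputs_vocabulary(in_vocab)   # a single Vocabulary is returned unchanged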
@gin.configurable
def separate_vocabularies(inputs=gin.REQUIRED, targets=gin.REQUIRED):
"""Gin-configurable helper function to generate a tuple of vocabularies."""
return (inputs, targets)
@gin.configurable
def init_checkpoint_variable_mapping(name, mapping_fn=None):
"""Maps from varaible name in graph to variable name in checkpoint."""
if mapping_fn:
return mapping_fn(name)
else:
return name
# TODO(katherinelee): Update layout_rules string when noam updates the
# definition in run
def build_model(model_type="bitransformer",
input_vocab_size=gin.REQUIRED,
output_vocab_size=gin.REQUIRED,
layout_rules=None,
mesh_shape=None):
"""Build a transformer model.
Currently, five types of models are supported:
"bitransformer": The traditional encoder-decoder architecture from
"Attention is All You Need". Requires a non-text2self dataset.
"lm": an autoregressive language model (one layer stack). Effectively the
decoder of the bitransformer. There is no attention over the encoder, since
there is no encoder. Requires a text2self dataset, with targets, but no
inputs.
"delimited_lm": an autoregressive language model trained on a text2text
dataset. Each training example is expressed as
[<input_tokens>, EOS, <target_tokens>, EOS]. Model checkpoints are
compatible with "lm" models. One strategy is to pretrain as "lm"
then fine-tune as "delimited_lm".
"aligned": a non-autoregressive single-stack model (like BERT). Requires
a non-text2self dataset with inputs and targets. The targets and inputs
have the same length and each entry in the inputs is aligned to the
corresponding entry in targets, e.g.:
"inputs": "The X sat on X X."
"targets": "The cat sat on the mat."
(except, inputs are token ID sequences, not strings)
"bi_teacher_student": a teacher-student model where both the student and
teacher are bitransformers. Requires a non-text2self dataset.
A text2self dataset has targets that are offset of the inputs. Non-text2self
datasets have targets that differ from their inputs, like:
input: 'hello'
target: 'bonjour'
Args:
model_type: a string, one of "bitransformer", "lm", "delimited_lm",
"aligned", or "bi_teacher_student"
input_vocab_size: an integer
output_vocab_size: an integer
layout_rules: optional, input to mtf.convert_to_layout_rules
mesh_shape: optional, an input to mtf.convert_to_shape()
Returns:
a Unitransformer or Bitransformer
"""
if model_type == "bitransformer":
return transformer.make_bitransformer(
input_vocab_size=input_vocab_size,
output_vocab_size=output_vocab_size,
mesh_shape=mesh_shape,
layout=layout_rules)
elif model_type == "bi_student_teacher":
return transformer.make_bi_student_teacher(
input_vocab_size=input_vocab_size,
output_vocab_size=output_vocab_size,
mesh_shape=mesh_shape,
layout=layout_rules)
elif model_type in ["lm", "delimited_lm", "aligned"]:
return transformer.Unitransformer(
autoregressive=model_type in ["lm", "delimited_lm"],
layer_stack=transformer.make_layer_stack(),
input_vocab_size=input_vocab_size,
output_vocab_size=output_vocab_size,
mesh_shape=mesh_shape,
layout=layout_rules)
else:
raise ValueError("unknown model_type")
@gin.configurable
def tpu_mesh_shape(tpu_topology=gin.REQUIRED,
model_parallelism=gin.REQUIRED,
ensemble_parallelism=None):
"""Create a mesh_shape for data-parallelism and model-parallelism on TPU.
Example: tpu_mesh_shape("4x4", 8) -> mtf.Shape(("batch", 4), ("model", 8))
Since there are 4x4x2=32 total cores, and we want 8-way model paralleism.
This function is passed through gin to the argument `mesh_shape` inside the
function `run`.
Alternatively, for model_parallelism, pass a mesh_spec (see simd_mesh_impl.py)
TODO(noam): describe
Args:
tpu_topology: a string - e.g. "2x2" or "v3-8"
model_parallelism: an integer - the number of cores per model replica
alternatively a list that can be passed to
simd_mesh_impl.HierarchicalTiling
ensemble_parallelism: an optional integer - if present then create an
"ensemble" mesh-dimension as well, for splitting the models in an
ensemble.
Returns:
a mtf.Shape
"""
if tpu_topology.startswith("v"):
num_cores = int(tpu_topology.split("-")[-1])
else:
tpu_dim = [int(x) for x in tpu_topology.split("x")]
num_cores = functools.reduce(lambda x, y: x*y, tpu_dim) * 2
if isinstance(model_parallelism, list):
# model_parallelism is actually a spec used to
# construct a simd_mesh_impl.HierarchicalTiling object
return mtf.simd_mesh_impl.HierarchicalTiling.spec_to_mesh_shape(
model_parallelism, num_cores)
data_parallelism = num_cores // model_parallelism
if ensemble_parallelism:
data_parallelism //= ensemble_parallelism
dims = []
if ensemble_parallelism and ensemble_parallelism > 1:
dims.append(mtf.Dimension("ensemble", ensemble_parallelism))
if data_parallelism > 1:
dims.append(mtf.Dimension("batch", data_parallelism))
if model_parallelism > 1:
dims.append(mtf.Dimension("model", model_parallelism))
return mtf.Shape(dims)
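# Worked examples (illustrative; the topologies are assumptions):
#
#   tpu_mesh_shape("4x4", 8)
#   # 4*4*2 = 32 cores -> Shape[("batch", 4), ("model", 8)]
#
#   tpu_mesh_shape("v3-8", 1)
#   # 8 cores, no model parallelism -> Shape[("batch", 8)]
#
#   tpu_mesh_shape("4x4", 4, ensemble_parallelism=2)
#   # 32 cores -> Shape[("ensemble", 2), ("batch", 4), ("model", 4)]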
@gin.configurable
def variable_filter_max_size(v, max_size=1e7):
return v.size <= max_size
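# Example gin bindings (illustrative sketch; whether to restrict training to
# small variables is a per-experiment choice, not a default of this module):
#
#   variable_filter_max_size.max_size = 1e6
#   tpu_estimator_model_fn.variable_filter = @variable_filter_max_size
#
# This passes the function itself as `variable_filter`, so only variables with
# at most 1e6 elements are trained.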
@gin.configurable
def tpu_estimator_model_fn(model_type,
transformer_model,
vocabulary,
model_dir,
use_tpu,
mesh_shape,
layout_rules,
batch_size,
sequence_length,
autostack,
keep_checkpoint_max,
save_checkpoints_steps,
learning_rate_schedule=None,
optimizer=None,
outer_batch_size=1,
tpu_summaries=False,
predict_fn=None,
score_in_predict_mode=False,
variable_filter=None,
init_checkpoint=None,
init_variable_filter="",
ensemble_inputs=None,
mesh_devices=None,
model_info_file=None,
hierarchical_tiling_spec=None):
"""Create a TPUEstimator model function.
Args:
model_type: a string. One of "bitransformer", "lm", "delimited_lm",
"aligned", or "bi_teacher_student"
transformer_model: a transformer.Unitransformer or transformer.Bitransformer
vocabulary: a vocabulary.Vocabulary or (inputs_vocabulary,
targets_vocabulary) tuple. Used for decoding in predict mode.
model_dir: a string, directory to save the model to.
use_tpu: a boolean
mesh_shape: a mtf.Shape
layout_rules: a mtf.LayoutRules
batch_size: an integer
sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
autostack: a boolean
keep_checkpoint_max: an integer, maximum number of checkpoints to keep
save_checkpoints_steps: an integer, save a checkpoint every this number of
steps
learning_rate_schedule: a constant or a function from step to learning rate
optimizer: a class extending optimize.Optimizer, required for training
outer_batch_size: outer batch dimension that could be used to enable the mix
of data-parallel and model-parallel training of Mixture of Experts (MoE)
models
tpu_summaries: a boolean, use rewrites to make summaries work on TPU. This
may be slow, since it uses a host call hack.
predict_fn: an optional function, see docs for `run` for more information.
score_in_predict_mode: compute log-likelihood scores instead of predictions
variable_filter: controls which variables are trained.
If None (default), train all trainable variables.
If a string regex, train all variables that match this regex.
If a function (mtf.Variable -> boolean), then train variables for which
the function returns True.
init_checkpoint: a string, if not None then read in variables from this
checkpoint path when initializing variables. Will only initialize
variables that appear both in the current graph and the checkpoint.
init_variable_filter: a string, used only when init_checkpoint is set.
controls which variables are loaded from the checkpoint using regex.
if empty string (default), all variables from the checkpoint are loaded.
ensemble_inputs: an optional integer - pass the size of the ensemble to
train an ensemble where each model gets different inputs.
You also need to configure Unitransformer.ensemble to the right size.
If None, then all models are trained on the same inputs.
mesh_devices: a list of strings, the device names to use for each mesh
slice. Only required for GPU.
model_info_file: an optional string, information about variables and
operations will be logged to this file during the TRAIN mode.
hierarchical_tiling_spec: an optional list that can be passed as the
spec argument to simd_mesh_impl.HierarchicalTiling
Returns:
a function to be passed to TPUEstimator
"""
mesh_devices = mesh_devices or [""] * mesh_shape.size
def my_model_fn(features, labels, mode, params=None, config=None):
"""Estimator model function.
Args:
features: dictionary where keys are strings like "inputs" and "targets"
and the values are the actual values of "inputs". See TPUEstimator's
docs for more information
labels: ignored argument
mode: a tf.estimator.ModeKeys
params: dictionary containing the key "context"
config: ignored argument
Returns:
a TPUEstimatorSpec
"""
del labels, config
if mode == tf.estimator.ModeKeys.PREDICT and score_in_predict_mode:
mode = "score"
global_step = tf.train.get_global_step()
if use_tpu and "context" in params:
ctx = params["context"]
num_hosts = ctx.num_hosts
host_placement_fn = ctx.tpu_host_placement_function
device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)]
# TODO(ylc): Better estimation of replica cache size?
replica_cache_size = 300 * 1000000 # 300M per replica
# Worker 0 caches all the TPU binaries.
worker0_mem = replica_cache_size * ctx.num_replicas
devices_memeory_usage = [worker0_mem] + [0] * (num_hosts - 1)
var_placer = mtf.utils.BalancedVariablePlacer(device_list,
devices_memeory_usage)
physical_shape = [int(i) for i in
params["context"].device_assignment.topology.mesh_shape]
if len(physical_shape) == 4:
physical_shape = (
mtf.simd_mesh_impl.physical_shape_3d_from_topology_proto_4d(
physical_shape))
if hierarchical_tiling_spec is not None:
logical_to_physical = mtf.simd_mesh_impl.HierarchicalTiling(
hierarchical_tiling_spec,
physical_shape).logical_to_physical
else:
logical_to_physical = mtf.simd_mesh_impl.auto_logical_to_physical_tpu(
mesh_shape.to_integer_list, physical_shape)
mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(
mesh_shape, layout_rules, mesh_devices, ctx.device_assignment,
logical_to_physical=logical_to_physical)
else:
var_placer = None
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
mesh_shape, layout_rules, mesh_devices)
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh", var_placer)
if (outer_batch_size and
mode not in [tf.estimator.ModeKeys.PREDICT, "score"]):
outer_batch_dim = mtf.Dimension("outer_batch", outer_batch_size)
batch_dim = mtf.Dimension("batch", batch_size // outer_batch_size)
batch_dims = [outer_batch_dim, batch_dim]
else:
batch_dim = mtf.Dimension("batch", batch_size)
batch_dims = [batch_dim]
ensemble_dims = ([mtf.Dimension("ensemble", ensemble_inputs)]
if ensemble_inputs else [])
predict_batch_size = features.pop("predict_batch_size", None)
mtf_features = {}
for key, x in features.items():
# Some auxiliary features may have been generated in packing.
# The names of these new features are of the form
# "<original_feature_name>_<suffix>", e.g. "inputs_segmentation".
# We look up the lengths based on the original feature name, without
# the "_<suffix>".
feature_length = sequence_length[key.split("_")[0]]
length_dim = mtf.Dimension("length", feature_length)
feature_shape = mtf.Shape(
ensemble_dims + batch_dims + [length_dim])
x = tf.cast(features[key], tf.int32)
x = tf.reshape(x, feature_shape.to_integer_list)
if not use_tpu:
tf.logging.info("feature %s : %s" % (key, x))
mtf_features[key] = mtf.import_fully_replicated(
mesh, x, feature_shape, name=key)
def _verify_feature_exists(feature_name, should_exist):
if should_exist != (feature_name in mtf_features):
message = (
"mode=%s model_type=%s should%s have feature %s" %
(mode, model_type, "" if should_exist else " not", feature_name))
if "lm" in model_type:
message += (
"\nA common mistake is that model_type=\"delimited_lm\" should "
"be used with tasks that produce inputs and targets, while "
"model_type=\"lm\" should be used with tasks that produce "
"targets only.")
raise ValueError(message)
# Verify that the right features exist, and transform them if necessary
if mode == tf.estimator.ModeKeys.PREDICT:
_verify_feature_exists("inputs", True)
# "targets" may or may not exist depending on whether we are doing
# evaluation or open-ended inference.
elif model_type in ("lm", "delimited_lm") and mode == "score":
# in scoring mode the inputs and targets may already be combined.
if "inputs" in mtf_features:
if model_type == "lm":
tf.logging.warning(
"Scoring of lm models will include loss from the 'inputs'.")
mtf_features = _dynamic_text2self(mtf_features)
else:
_verify_feature_exists("targets", True)
_verify_feature_exists("inputs", model_type != "lm")
if model_type == "delimited_lm":
mtf_features = _dynamic_text2self(mtf_features)
# Detokenize in the graph if supported by vocabulary and accelerator.
def _maybe_detokenize(ids, vocab):
if not use_tpu and hasattr(vocab, "decode_tf"):
return vocab.decode_tf(ids)
return ids
if mode == "score":
# compute log-likelihoods per sequence
if predict_fn:
# predict_fn contains a custom scoring function
# this code-path has not been tested
scores = predict_fn(
model=transformer_model,
features=mtf_features,
variable_dtype=get_variable_dtype())
targets = mtf_features["targets"]
if isinstance(transformer_model, transformer.Unitransformer):
length_dim = targets.shape.dims[-1]
inputs = transformer.autoregressive_inputs(
mtf_features["targets"])
elif isinstance(transformer_model,
(transformer.Bitransformer,
transformer.StudentTeacher)):
inputs = mtf_features["inputs"]
else:
raise ValueError("unrecognized class")
logits, _ = transformer_model.call_simple(
inputs=inputs,
targets=targets,
compute_loss=False,
mode=mode,
variable_dtype=get_variable_dtype())
logits = mtf.cast(logits, tf.float32)
_, length_dim, vocab_dim = logits.shape.dims
cross_entropy = mtf.layers.softmax_cross_entropy_with_logits(
logits, mtf_features["targets"], vocab_dim)
# 0=padding and negative targets are a hack to indicate no loss
cross_entropy *= mtf.cast(
mtf.greater(targets, 0), cross_entropy.dtype)
if model_type == "delimited_lm":
cross_entropy *= mtf.cast(mtf.logical_not(
transformer.delimited_lm_inputs_mask(targets)), cross_entropy.dtype)
scores = -mtf.reduce_sum(cross_entropy, reduced_dim=length_dim)
scores = mtf.anonymize(scores)
targets = mtf.anonymize(targets)
lowering = mtf.Lowering(graph, {mesh: mesh_impl}, autostack=autostack)
targets = clean_decodes(lowering.export_to_tf_tensor(targets))
targets = _maybe_detokenize(targets, targets_vocabulary(vocabulary))
predictions = {
"targets": targets,
"scores": lowering.export_to_tf_tensor(scores)
}
elif mode == tf.estimator.ModeKeys.PREDICT:
inputs = mtf_features["inputs"]
if predict_fn:
mtf_samples = predict_fn(
model=transformer_model,
features=mtf_features,
variable_dtype=get_variable_dtype())
elif isinstance(transformer_model, transformer.Unitransformer):
# pad so that there is enough room for the targets
inputs = mtf.pad(
inputs, [0, sequence_length["targets"]], length_dim.name)
mtf_samples = transformer_model.sample_autoregressive(
inputs, variable_dtype=get_variable_dtype(),
remove_partial_sequences=True)
elif isinstance(
transformer_model,
(transformer.Bitransformer, transformer.StudentTeacher)):
mtf_samples = transformer_model.decode(
inputs, variable_dtype=get_variable_dtype())
else:
raise ValueError("unrecognized class")
mtf_samples = mtf.anonymize(mtf_samples)
inputs = mtf.anonymize(inputs)
lowering = mtf.Lowering(graph, {mesh: mesh_impl}, autostack=autostack)
inputs = clean_decodes(lowering.export_to_tf_tensor(inputs))
outputs = clean_decodes(lowering.export_to_tf_tensor(mtf_samples))
inputs = _maybe_detokenize(inputs, inputs_vocabulary(vocabulary))
outputs = _maybe_detokenize(outputs, targets_vocabulary(vocabulary))
if predict_batch_size is not None:
inputs = inputs[:predict_batch_size]
outputs = outputs[:predict_batch_size]
predictions = {
"inputs": inputs,
"outputs": outputs}
if mode in ["score", tf.estimator.ModeKeys.PREDICT]:
# When exporting a model, we need to communicate to TF-Serving that
# master variables need to be copied to their slave slice variables.
# Estimator uses a Scaffold's "local_init_op" for this purpose, so we
# augment the default "local_init_op" here.
#
# The "ready_op" is also constructed here to ensure the variables
# initialized by "local_init_op" are the same ones checked by "ready_op".
#
# WARNING: Any variables created outside of this model_fn()
# (e.g. tpu_estimator/iterations_per_loop) will NOT be initialized nor
# checked by these ops.
def scaffold_fn():
return tf.train.Scaffold(
local_init_op=tf.group(
tf.train.Scaffold.default_local_init_op(),
lowering.copy_masters_to_slices(),
name="mtf_local_init_op"),
ready_op=tf.concat(
[tf.report_uninitialized_variables(),
resources.report_uninitialized_resources()],
axis=0,
name="mtf_ready_op"))
return tpu_estimator.TPUEstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
scaffold_fn=scaffold_fn,
prediction_hooks=[mtf.MtfRestoreHook(lowering)])
assert (mode == tf.estimator.ModeKeys.TRAIN or
mode == tf.estimator.ModeKeys.EVAL)
def logits_and_loss(mtf_features, num_microbatches=1):
"""Compute logits and loss.
Args:
mtf_features: a dictionary
num_microbatches: integer
Returns:
logits: a mtf.Tensor
loss: a mtf.Tensor
"""
if model_type in ["lm", "delimited_lm"]:
inputs = transformer.autoregressive_inputs(
mtf_features["targets"],
sequence_id=mtf_features.get("targets_segmentation", None))
else:
inputs = mtf_features["inputs"]
if isinstance(transformer_model, transformer.Unitransformer):
position_kwargs = dict(
sequence_id=mtf_features.get("targets_segmentation", None),
position=mtf_features.get("targets_position", None),
)
elif isinstance(
transformer_model,
transformer.Bitransformer) or model_type == "bi_student_teacher":
position_kwargs = dict(
encoder_sequence_id=mtf_features.get("inputs_segmentation", None),
decoder_sequence_id=mtf_features.get("targets_segmentation",
None),
decoder_subsequence_id=mtf_features.get("targets_subsegmentation",
None),
encoder_position=mtf_features.get("inputs_position", None),
decoder_position=mtf_features.get("targets_position", None),
)
else:
raise ValueError("unrecognized class")
return transformer_model.call_simple(
inputs=inputs,
targets=mtf_features["targets"],
compute_loss=True,
mode=mode,
variable_dtype=get_variable_dtype(),
num_microbatches=num_microbatches,
**position_kwargs)
if mode == tf.estimator.ModeKeys.TRAIN:
num_microbatches = serialize_num_microbatches(batch_dim,
sequence_length,
mesh_shape,
layout_rules)
if num_microbatches > 1:
def serialized_fn(mtf_features):
return {"loss": logits_and_loss(mtf_features, num_microbatches)[1]}
var_grads, loss_dict = mtf.serialize_training_step(
mtf_features, serialized_fn, batch_dim, num_microbatches)
loss = loss_dict["loss"]
else:
loss = logits_and_loss(mtf_features)[1]
var_grads = mtf.gradients(
[loss], [v.outputs[0] for v in graph.trainable_variables])
if tpu_summaries:
mtf.scalar_summary("loss", loss)
if callable(learning_rate_schedule):
# the following happens on CPU since TPU can't handle summaries.
with mtf.utils.outside_all_rewrites():
learning_rate = learning_rate_schedule(
step=tf.train.get_global_step())
tf.summary.scalar("learning_rate", learning_rate)
else:
learning_rate = learning_rate_schedule
if isinstance(variable_filter, str):
pattern = re.compile(variable_filter)
variable_filter_fn = lambda v: pattern.search(v.name)
elif variable_filter is None:
variable_filter_fn = lambda v: True
elif callable(variable_filter):
variable_filter_fn = variable_filter
else:
raise ValueError(
"variable_filter must be None, a string, or a callable function")
trainable_vars = [
v for v in graph.trainable_variables if variable_filter_fn(v)]
trainable_var_grads = [
g for g, v in zip(var_grads, graph.trainable_variables)
if variable_filter_fn(v)]
if len(trainable_vars) != len(graph.trainable_variables):
tf.logging.info("Variables being trained:")
tf.logging.info([v.name for v in trainable_vars])
tf.logging.info("Variables not being trained:")
tf.logging.info([v.name for v in graph.trainable_variables
if not variable_filter_fn(v)])
update_ops = optimizer(learning_rate=learning_rate).apply_grads(
trainable_var_grads, trainable_vars
)
lowering = mtf.Lowering(
graph, {mesh: mesh_impl},
autostack=autostack,
log_file=model_info_file)
tf_loss = lowering.export_to_tf_tensor(loss)
tf_loss = tf.cast(tf_loss, tf.float32)
if not use_tpu:
tf_loss = tf.Print(tf_loss, [tf_loss, tf.train.get_global_step()],
"step, tf_loss")
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
train_op = tf.group(tf_update_ops)
if hasattr(transformer_model, "initialize"):
with mtf.utils.outside_all_rewrites():
transformer_model.initialize()
if tpu_summaries:
# has to be outside of
# with mtf.utils.outside_all_rewrites()
host_call = mtf.utils.create_host_call(model_dir)
mtf.utils.remove_summaries()
else:
host_call = None
with mtf.utils.outside_all_rewrites():
if init_checkpoint:
ckpt_vars = {v for v, _ in tf.train.list_variables(init_checkpoint)}
if init_variable_filter:
pattern = re.compile(init_variable_filter)
ckpt_vars = {v for v in ckpt_vars if pattern.search(v)}
global_vars = {v.op.name for v in tf.global_variables()}
restore_vars = {
v for v in global_vars if init_checkpoint_variable_mapping(v)
in ckpt_vars}
tf.logging.info("Initializing variables from %s:", init_checkpoint)
tf.logging.debug("\n".join(sorted(restore_vars)))
tf.logging.info("Variables in %s but not in graph:", init_checkpoint)
tf.logging.info("\n".join(sorted(
ckpt_vars -
{init_checkpoint_variable_mapping(v) for v in global_vars})))
tf.logging.info("Variables in graph but not in %s:", init_checkpoint)
tf.logging.info("\n".join(sorted(global_vars - restore_vars)))
tf.train.init_from_checkpoint(
init_checkpoint,
{init_checkpoint_variable_mapping(v): v for v in restore_vars}
)
# Copy master variables to slices. Must be called first.
restore_hook = mtf.MtfRestoreHook(lowering)
saver = tf.train.Saver(
tf.global_variables(),
sharded=True,
max_to_keep=keep_checkpoint_max,
keep_checkpoint_every_n_hours=2,
defer_build=False,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
saver_hook = tf.train.CheckpointSaverHook(
model_dir,
save_steps=save_checkpoints_steps,
saver=saver,
listeners=[saver_listener])
gin_config_saver_hook = gin.tf.GinConfigSaverHook(
model_dir, summarize_config=True, include_step_in_filename=False)
training_hooks = [
restore_hook,
saver_hook,
gin_config_saver_hook,
]
if use_tpu:
return tpu_estimator.TPUEstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=tf_loss,
train_op=train_op,
host_call=host_call,
training_hooks=training_hooks)
else:
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.TRAIN,
loss=tf_loss,
train_op=train_op,
training_chief_hooks=training_hooks)
elif mode == tf.estimator.ModeKeys.EVAL:
# perplexity eval
logits, loss = logits_and_loss(mtf_features)
# compute cross-entropy while still on TPU to avoid having to outfeed the
# logits, which might be big.
logits = mtf.cast(logits, tf.float32)
vocab_dim = logits.shape.dims[-1]
targets = mtf_features["targets"]
cross_entropy = mtf.layers.softmax_cross_entropy_with_logits(
logits, targets, vocab_dim)
anon_cross_entropy = mtf.anonymize(cross_entropy)
predictions = mtf.cast(mtf.argmax(logits, vocab_dim), targets.dtype)
anon_predictions = mtf.anonymize(predictions)
anon_targets = mtf.anonymize(targets)
# 0=padding and negative targets are a hack to indicate no loss
anon_weights = mtf.cast(mtf.greater(anon_targets, 0), tf.float32)
if model_type == "delimited_lm":
anon_weights *= mtf.cast(
mtf.logical_not(transformer.delimited_lm_inputs_mask(anon_targets)),
dtype=tf.float32)
lowering = mtf.Lowering(graph, {mesh: mesh_impl}, autostack=autostack)
tf_loss = tf.cast(lowering.export_to_tf_tensor(loss), tf.float32)
tf_loss = tf.cast(tf_loss, tf.float32)
tf_predictions = lowering.export_to_tf_tensor(anon_predictions)
tf_cross_entropy = lowering.export_to_tf_tensor(anon_cross_entropy)
def simple_metrics(xent, predictions, labels, weights):
"""Simple metrics for teacher-forced eval."""
token_correct = tf.cast(
tf.equal(predictions, labels), tf.float32) * weights
sequence_correct = tf.cast(
tf.equal(tf.reduce_sum(token_correct, -1),
tf.reduce_sum(weights, -1)),
tf.float32)
sequence_weights = tf.cast(
tf.not_equal(tf.reduce_sum(weights, -1), 0),
tf.float32)
# the purpose of "mean_label" is as a checksum to ensure that
# models were evaluated on the same data.
return {"neg_log_perplexity": tf.metrics.mean(-xent, weights),
"token_accuracy": tf.metrics.mean(token_correct, weights),
"sequence_accuracy": tf.metrics.mean(
sequence_correct, sequence_weights),
"mean_label": tf.metrics.mean(
tf.cast(labels, tf.float32), weights),
"num_eval_tokens": metric_sum(weights, name="num_eval_tokens"),
"max_targets_length": metric_max(tf.reduce_sum(
weights, axis=-1), name="max_targets_length"),
}
labels = lowering.export_to_tf_tensor(anon_targets)
weights = lowering.export_to_tf_tensor(anon_weights)
eval_metrics = (simple_metrics, [
tf_cross_entropy, tf_predictions, labels, weights])
with mtf.utils.outside_all_rewrites():
restore_hook = mtf.MtfRestoreHook(lowering)
return tpu_estimator.TPUEstimatorSpec(
tf.estimator.ModeKeys.EVAL,
evaluation_hooks=[restore_hook],
loss=tf_loss,
eval_metrics=eval_metrics)
return my_model_fn
def metric_sum(values, name=None, **kwargs):
del kwargs
with tf.variable_scope(name, "metric_sum", [values]):
accum = tf.get_variable(
"accum", shape=[], dtype=tf.float32, trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
initializer=tf.zeros_initializer())
update_op = tf.assign_add(accum, tf.reduce_sum(tf.cast(values, tf.float32)))
return accum, update_op
def metric_max(values, name=None, **kwargs):
del kwargs
with tf.variable_scope(name, "metric_max", [values]):
accum = tf.get_variable(
"accum", shape=[], dtype=tf.float32, trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
initializer=tf.zeros_initializer())
update_op = tf.assign(
accum, tf.maximum(accum, tf.reduce_max(tf.cast(values, tf.float32))))
return accum, update_op
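# Example usage (illustrative sketch): both helpers return the (value, update_op)
# pair expected by Estimator eval metrics, e.g. inside `simple_metrics` above:
#
#   total_tokens, total_tokens_update = metric_sum(
#       weights, name="num_eval_tokens")
#   longest_target, longest_update = metric_max(
#       tf.reduce_sum(weights, axis=-1), name="max_targets_length")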
def _dynamic_text2self(mtf_features):
"""Convert a packed feature dictionary from text2text into text2self.
This conversion is used when training a "delimited_lm" model.
This allows us to train a text2self model on data that has been tokenized and
packed in text2text format.
Inputs and targets for each example get concatenated into the new targets.
Length doubles.
Args:
mtf_features: a feature dictionary containing
"inputs", "inputs_segmentation", "inputs_position",
"targets", "targets_segmentation", "targets_position"
Returns:
a feature dictionary containing
"targets", "targets_segmentation", "targets_position"
"""
tf.logging.info(
"_dynamic_text2self: Converting text2text problem to text2self")
inputs = mtf_features["inputs"]
targets = mtf_features["targets"]
inputs_length_dim = inputs.shape.dims[-1]
targets_length_dim = targets.shape.dims[-1]
is_packed = "inputs_segmentation" in mtf_features
if is_packed:
inputs_segmentation = mtf_features["inputs_segmentation"]
targets_segmentation = mtf_features["targets_segmentation"]
inputs_position = mtf_features["inputs_position"]
targets_position = mtf_features["targets_position"]
else:
inputs_segmentation = mtf.cast(
mtf.not_equal(inputs, 0), tf.int32)
targets_segmentation = mtf.cast(
mtf.not_equal(targets, 0), tf.int32)
inputs_position = mtf.range(
inputs.mesh, inputs_length_dim, dtype=tf.int32) * inputs_segmentation
targets_position = mtf.range(
targets.mesh, targets_length_dim, dtype=tf.int32) * targets_segmentation
# compute lengths of inputs and targets portions of each segment
# segments_dim must be larger than the maximum number of segments.
segments_dim = mtf.Dimension("segments", targets_length_dim.size)
inputs_segment_length = mtf.reduce_sum(
mtf.one_hot(inputs_segmentation, segments_dim, dtype=tf.int32),
reduced_dim=inputs_length_dim)
targets_segment_length = mtf.reduce_sum(
mtf.one_hot(targets_segmentation, segments_dim, dtype=tf.int32),
reduced_dim=targets_length_dim)
# segment 0 means padding. Zero out the segment lengths for segment 0.
segments_range = mtf.range(targets.mesh, segments_dim, dtype=tf.int32)
nonzero_segment = mtf.to_int32(mtf.not_equal(segments_range, 0))
inputs_segment_length *= nonzero_segment
targets_segment_length *= nonzero_segment
combined_segment_length = inputs_segment_length + targets_segment_length
# for targets, position in sequence increases by inputs_segment_length
targets_position += mtf.gather(
inputs_segment_length, targets_segmentation, segments_dim)
# this is the new length dimension
new_length_dim = mtf.Dimension(
"new_length", inputs_length_dim.size + targets_length_dim.size)
new_length_range = mtf.range(
targets.mesh, new_length_dim, dtype=tf.int32)
# compute permutation tensors mapping from the old length dimension to the
# new length dimension
combined_segment_length_cumulative = mtf.cumsum(
combined_segment_length, segments_dim, exclusive=True)
# segment 0 is padding - this causes it to get mapped out of range.
combined_segment_length_cumulative += new_length_dim.size * mtf.to_int32(
mtf.equal(segments_range, 0))
inputs_destination = inputs_position + mtf.gather(
combined_segment_length_cumulative, inputs_segmentation, segments_dim)
inputs_permutation = mtf.to_int32(mtf.equal(
new_length_range, inputs_destination))
targets_destination = targets_position + mtf.gather(
combined_segment_length_cumulative, targets_segmentation, segments_dim)
targets_permutation = mtf.to_int32(mtf.equal(
new_length_range, targets_destination))
# map from the old length dimension to the new length dimension
def _convert(t, perm):
return mtf.rename_dimension(
mtf.einsum([t, perm],
output_shape=inputs.shape.dims[:-1] + [new_length_dim]),
"new_length", "length")
targets = (
_convert(inputs, inputs_permutation) +
_convert(targets, targets_permutation))
if is_packed:
targets_segmentation = (
_convert(inputs_segmentation, inputs_permutation) +
_convert(targets_segmentation, targets_permutation))
targets_position = (
_convert(inputs_position, inputs_permutation) +
_convert(targets_position, targets_permutation))
return {
"targets": targets,
"targets_segmentation": targets_segmentation,
"targets_position": targets_position,
}
else:
return {"targets": targets}
def get_inputs_from_file(input_filename, ignore_comments=False):
"""Read data from file and strip new lines."""
inputs = [line.rstrip() for line in tf.io.gfile.GFile(input_filename)]
# Strip the last empty line.
if not inputs[-1]:
inputs.pop()
if ignore_comments:
inputs = [l for l in inputs if not l.startswith("#")]
return inputs
def encode_inputs(inputs,
vocabulary,
model_type,
batch_size,
sequence_length,
eos_id=1,
unscored_prefix=None):
"""Encode string inputs for inference/scoring.
Args:
inputs: list of strings
vocabulary: a mtf.transformer.vocabulary.Vocabulary
model_type: a string
batch_size: an integer
sequence_length: an integer (maximum decode length)
eos_id: EOS id
unscored_prefix: an optional list of strings
Returns:
all_input_ids: encoded inputs
"""
n = len(inputs)
all_input_ids = []
for line_num, line in enumerate(inputs):
ids = inputs_vocabulary(vocabulary).encode(line.strip())
if unscored_prefix:
prefix_str = unscored_prefix[line_num].strip()
ids = [-i for i in inputs_vocabulary(vocabulary).encode(prefix_str)] + ids
if model_type != "lm":
# for text2self problems, the inputs represent a partial sequence
# to be continued, and should not be terminated by EOS.
# for sequence-to-sequence problems, the input needs to be EOS-terminated
ids += [eos_id]
if len(ids) > sequence_length:
ids = ids[:sequence_length]
else:
ids.extend([0] * (sequence_length - len(ids)))
all_input_ids.append(ids)
# pad to make an integral number of batches
all_input_ids.extend([all_input_ids[0]] * (-n % batch_size))
all_input_ids = np.array(all_input_ids, dtype=np.int32)
return all_input_ids
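# Example (illustrative sketch; `vocab` is a hypothetical Vocabulary and the
# resulting token ids depend on it):
#
#   ids = encode_inputs(["hello world"], vocab, model_type="bitransformer",
#                       batch_size=4, sequence_length=8)
#   # Each line is encoded, EOS-terminated (for non-"lm" model types), padded
#   # with 0s to length 8, and the batch is padded by repeating the first
#   # example, so ids.shape == (4, 8) here.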
def encode_delimited_lm(inputs,
targets,
vocabulary,
batch_size,
sequence_length,
eos_id=1,
include_final_eos=True):
"""Encode inputs and targets for scoring a delimited langauge model.
Args:
inputs: list of strings
targets: list of strings
vocabulary: a mtf.transformer.vocabulary.Vocabulary
batch_size: an integer
sequence_length: an integer (maximum decode length)
eos_id: EOS id
include_final_eos: a boolean
Returns:
all_ids: encoded inputs
"""
n = len(inputs)
all_ids = []
for inp, tgt in zip(inputs, targets):
input_ids = inputs_vocabulary(vocabulary).encode(inp.strip()) + [eos_id]
target_ids = targets_vocabulary(vocabulary).encode(tgt.strip())
if include_final_eos:
target_ids.append(eos_id)
ids = input_ids + target_ids
if len(ids) > sequence_length:
ids = ids[:sequence_length]
else:
ids.extend([0] * (sequence_length - len(ids)))
all_ids.append(ids)
# pad to make an integral number of batches
all_ids.extend([all_ids[0]] * (-n % batch_size))
all_ids = np.array(all_ids, dtype=np.int32)
return all_ids
@gin.configurable
def decode(estimator,
input_fn,
vocabulary,
checkpoint_path=None):
"""Decode from an input_fn.
Args:
estimator: a TPUEstimator
input_fn: function that returns a tf.Dataset
vocabulary: a vocabulary.Vocabulary or (inputs_vocabulary,
targets_vocabulary) tuple
checkpoint_path: an optional string
Yields:
decoded strings
"""
result_iter = estimator.predict(
input_fn, checkpoint_path=checkpoint_path)
def _maybe_detokenize(value, vocab):
if isinstance(value, six.binary_type):
return value
return vocab.decode([int(x) for x in value])
for i, result in enumerate(result_iter):
input_string = _maybe_detokenize(
result["inputs"], inputs_vocabulary(vocabulary))
output_string = _maybe_detokenize(
result["outputs"], targets_vocabulary(vocabulary))
yield output_string
if i & (i - 1) == 0:
# LOG every power of 2.
tf.logging.info("decoded {}: {}".format(i, input_string))
tf.logging.info(" -> {}".format(output_string))
@gin.configurable
def compute_log_likelihoods(estimator,
input_fn,
checkpoint_path=None):
"""Decode from an input_fn.
Args:
estimator: a TPUEstimator
input_fn: function that returns a tf.Dataset
checkpoint_path: an optional string
Returns:
list of floats
"""
result_iter = estimator.predict(
input_fn, checkpoint_path=checkpoint_path)
return [float(f) for f in result_iter]
def write_lines_to_file(lines, filename):
"""Write each line to a filename, replacing the file if it exists.
Args:
lines: list of str, lines to write out.
filename: str, path to filename.
"""
if tf.io.gfile.exists(filename):
tf.io.gfile.remove(filename)
with tf.io.gfile.GFile(filename, "w") as output_file:
for line in lines:
output_file.write("{}\n".format(str(line).replace("\n", " ")))
def _get_combined_dataset_input_fn(
datasets, batch_size, sequence_length, check_for_metrics=False):
"""Creates input function for estimator for inference, eval, and scoring.
Args:
datasets: A list of mesh_tensorflow.transformer.dataset.EvalDataset tuples.
These will get combined together into a single tf.data.Dataset.
batch_size: an integer
sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
check_for_metrics: If True, then only include datasets which have associated
metric functions.
Returns:
An input function for estimator.
"""
def input_fn(params):
"""Input function for estimator."""
del params
combined_ds = None
for dataset in datasets:
if not check_for_metrics or dataset.metric_fns:
ds = dataset.dataset_fn(sequence_length=sequence_length)
ds = ds.map(
_filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)
combined_ds = ds if not combined_ds else combined_ds.concatenate(ds)
combined_ds = combined_ds.batch(batch_size, drop_remainder=False)
# Pad the final batch.
combined_ds = transformer_dataset.trim_and_pad_dataset(
combined_ds, length=batch_size)
combined_ds = combined_ds.prefetch(tf.data.experimental.AUTOTUNE)
return combined_ds
return input_fn
def get_step_from_checkpoint_path(checkpoint_path):
"""Returns the global step for the checkpoint at `checkpoint_path`.
Assumes `checkpoint_path` corresponds to a file which contains the substring
model.ckpt-{global_step}
Args:
checkpoint_path: str of path to a checkpoint file.
Returns:
int of the global step corresponding to the checkpoint file.
Raises:
ValueError if checkpoint_path does not correspond to a model checkpoint file
which contains the global_step in its filename.
"""
match = re.match(r".*model\.ckpt\-(\d+).*", checkpoint_path)
if match is None:
raise ValueError("Invalid checkpoint path {}".format(checkpoint_path))
return int(match.group(1))
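# Examples (illustrative paths):
#
#   get_step_from_checkpoint_path("/tmp/run/model.ckpt-10000.index")   # -> 10000
#   get_step_from_checkpoint_path("gs://bucket/run/model.ckpt-524288")  # -> 524288
#   get_step_from_checkpoint_path("/tmp/run/checkpoint")  # raises ValueError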
# TODO(noam): include more descriptive definitions
@gin.configurable
def decode_from_file(estimator,
vocabulary,
model_type,
batch_size,
sequence_length,
checkpoint_path=None,
input_filename=gin.REQUIRED,
output_filename=gin.REQUIRED,
eos_id=1,
repeats=1):
"""Decode from a text file and write to output_filename.
Args:
estimator: a TPUEstimator
vocabulary: a mtf.transformer.vocabulary.Vocabulary
model_type: a string
batch_size: an integer
sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
checkpoint_path: an optional string
input_filename: a string
output_filename: a string
eos_id: EOS id
repeats: an integer, the number of times to repeat each input.
"""
inputs = get_inputs_from_file(input_filename)
all_input_ids = encode_inputs(inputs, vocabulary, model_type, batch_size,
sequence_length["inputs"], eos_id=eos_id)
def input_fn(params):
del params
dataset = tf.data.Dataset.from_tensor_slices({"inputs": all_input_ids})
dataset = dataset.flat_map(
lambda x: tf.data.Dataset.from_tensors(x).repeat(repeats))
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
checkpoint_step = get_step_from_checkpoint_path(checkpoint_path)
decodes = list(decode(
estimator, input_fn, vocabulary, checkpoint_path=checkpoint_path))
# Remove any padded examples
dataset_size = len(inputs) * repeats
decodes = decodes[:dataset_size]
output_filename = "{}-{}".format(output_filename, checkpoint_step)
write_lines_to_file(decodes, output_filename)
@gin.configurable
def decode_from_dataset(estimator,
vocabulary,
model_type,
batch_size,
sequence_length,
checkpoint_path=None,
infer_dataset_fn=gin.REQUIRED,
dataset_split="validation",
decode_output_dir=gin.REQUIRED):
"""Decode using inputs from the Task examples and writes results to files.
Args:
estimator: a TPUEstimator
vocabulary: a mtf.transformer.vocabulary.Vocabulary
model_type: a string
batch_size: an integer
sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
checkpoint_path: Checkpoint to use for inference.
infer_dataset_fn: A function returning a list of dataset.EvalDataset tuples.
See `eval_dataset_fn` argument to `eval_model` for details.
dataset_split: str, which dataset split to load.
decode_output_dir: a string, where to write inputs, targets, and decodes.
"""
if model_type != "lm":
raise ValueError("This function currently only supports decoder-only LMs.")
infer_datasets = infer_dataset_fn(
sequence_length=sequence_length,
vocabulary=vocabulary,
dataset_split=dataset_split,)
input_fn = _get_combined_dataset_input_fn(
infer_datasets, batch_size, sequence_length)
checkpoint_step = get_step_from_checkpoint_path(checkpoint_path)
# TODO(dei): Deal with case where decode() does not return the right number
# of outputs. This can happen if the generator in decode() has failures.
decodes = list(decode(
estimator, input_fn, vocabulary, checkpoint_path=checkpoint_path))
tf.logging.info("Caching inference examples.")
with tf.Graph().as_default():
for infer_dataset in infer_datasets:
ds = infer_dataset.dataset_fn()
# Create list of postprocessed text targets
examples_for_ds = list(tfds.as_numpy(ds))
examples_for_ds = _maybe_add_pretokenized_features(
examples_for_ds, vocabulary)
# Extract the portion of decodes corresponding to this dataset
dataset_size = len(examples_for_ds)
predictions = decodes[:dataset_size]
# Remove the used decodes.
del decodes[:dataset_size]
# Write the predictions to file.
predictions_filename = os.path.join(
decode_output_dir,
"{}_{}_predictions".format(infer_dataset.name, checkpoint_step),
)
write_lines_to_file(predictions, predictions_filename)
# Write the ground-truth targets to file.
targets = []
for ex in examples_for_ds:
targets_pretokenized = ex["targets_pretokenized"]
targets.append(infer_dataset.postprocess_fn(
targets_pretokenized, example=ex, is_target=True))
targets_filename = os.path.join(
decode_output_dir, "{}_targets".format(infer_dataset.name))
write_lines_to_file(targets, targets_filename)
# Write the inputs to a file.
inputs = [ex["inputs_pretokenized"] for ex in examples_for_ds]
inputs_filename = os.path.join(
decode_output_dir, "{}_inputs".format(infer_dataset.name))
write_lines_to_file(inputs, inputs_filename)
@gin.configurable
def clean_decodes(ids, eos_id=1, pad_id=0, length_axis=-1):
"""Replaces everything after EOS with PAD (along last axis).
Args:
ids: an int Tensor of ids.
eos_id: int, EOS id.
pad_id: int, PAD id.
length_axis: an integer.
Returns:
a Tensor of type int of ids.
"""
eos_and_after = tf.cumsum(tf.cast(tf.equal(ids, eos_id), tf.int32),
exclusive=True, axis=length_axis)
valid_ids = tf.equal(eos_and_after, 0)
return tf.where_v2(valid_ids, ids, pad_id)
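# Example (illustrative; eos_id=1, pad_id=0):
#
#   clean_decodes(tf.constant([[5, 7, 1, 9, 3, 1]]))
#   # -> [[5, 7, 1, 0, 0, 0]]: the first EOS is kept and everything after it,
#   # including any later EOS tokens, is replaced with PAD.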
@gin.configurable
def save_scores(results, vocabulary,
scores_filename=None, save_example_text=True):
"""Processes results from scoring examples and maybe saves them to disk.
Args:
results: list of dictionaries containing the results for each scored
example.
vocabulary: a vocabulary.Vocabulary or (inputs_vocabulary,
targets_vocabulary) tuple, used to decode the example text.
scores_filename: a string (path of file to write scores to). If None, scores
are returned but not written to disk.
save_example_text: a boolean - If True, then the text for each example is
also saved/returned.
Returns:
List of float scores, one score per example. If save_example_text is True,
the text of the inputs/targets for each example are also returned.
"""
if not results:
raise ValueError("No examples were scored.")
scores = [r["scores"] for r in results]
if scores_filename is not None:
write_lines_to_file(["%f" % f for f in scores], scores_filename+".scores")
if save_example_text:
results = _maybe_add_pretokenized_features(results, vocabulary)
# Targets will always exist.
targets = [r.get("targets_pretokenized", r["targets"]) for r in results]
if scores_filename is not None:
write_lines_to_file(targets, scores_filename+".targets")
# Write sequence lengths
def get_sequence_length(tokens, pad_id=0):
tokens = np.array(tokens)
if not np.isin(pad_id, tokens):
return len(tokens)
# Argmax returns the index of the first occurrence of pad_id.
return np.argmax(tokens == pad_id)
seq_lengths = [get_sequence_length(r["targets"]) for r in results]
if scores_filename is not None:
write_lines_to_file(seq_lengths, scores_filename+".lengths")
# Inputs may only exist for some tasks.
if "inputs" in results[0]:
inputs = [r.get("inputs_pretokenized", r["inputs"]) for r in results]
if scores_filename is not None:
write_lines_to_file(inputs, scores_filename+".inputs")
return scores, inputs, targets
else:
return scores, targets
return scores
def score_with_estimator(estimator, input_fn, eval_checkpoint_step, model_dir,
vocabulary, score_postprocess_fn=save_scores,
num_examples=None):
"""For each example returned by input_fn, compute log likelihood.
Args:
estimator: a TPUEstimator
input_fn: a function that returns a tf.data.Dataset with examples
containing the string field 'targets' and optionally the field 'inputs'
eval_checkpoint_step: int, list of ints, or None, see `eval_model`
docstring.
model_dir: string, estimator model_dir
vocabulary: a vocabulary.Vocabulary or (inputs_vocabulary,
targets_vocabulary) tuple
score_postprocess_fn: a function that takes in model outputs and
post-processes, saves, and returns them.
num_examples: int, the total # of examples being scored, None if unknown
Returns:
a list of floats
"""
checkpoint_path, = get_checkpoint_iterator(eval_checkpoint_step, model_dir)
result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
# TODO(dei): This code is not well-designed for large-scale scoring, where the
# number of examples might exceed available memory.
results = list(result_iter)
if num_examples is None:
targets = [r["targets"] for r in results]
num_padded = next((i for i, x in enumerate(targets[::-1]) if x.any()), None)
num_examples = len(targets) - num_padded
results = results[:num_examples]
return score_postprocess_fn(results, vocabulary)
def _maybe_add_pretokenized_features(examples, vocabulary):
"""Ensures decoded versions of "inputs" and "targets" exist in each example.
Args:
examples: List of example dictionaries containing mappings from feature
name to np.array of integers.
vocabulary: The vocabulary.
Returns:
examples dictionary with decoded plaintext entries for each feature in
features that was present in the original example.
"""
vocabulary = {"inputs": inputs_vocabulary(vocabulary),
"targets": targets_vocabulary(vocabulary)}
# This is just used for logging purposes.
added_pretokenized = {"inputs": False, "targets": False}
for example in examples:
for feature_name in ["inputs", "targets"]:
pretokenized_feature_name = feature_name + "_pretokenized"
if feature_name in example and pretokenized_feature_name not in example:
s = vocabulary[feature_name].decode(example[feature_name].tolist())
example[pretokenized_feature_name] = s
if not added_pretokenized[feature_name]:
added_pretokenized[feature_name] = True
tf.logging.warning(
"Feature '%s' is being approximated by decoding from the"
"tokenized feature '%s.'",
pretokenized_feature_name, feature_name)
return examples
@gin.configurable
def score_from_strings(estimator, vocabulary, model_type, batch_size,
sequence_length, model_dir, eval_checkpoint_step,
inputs=gin.REQUIRED, targets=gin.REQUIRED,
score_postprocess_fn=gin.REQUIRED, eos_id=1,
score_eos=True):
"""Compute log likelihoods per example and write to a text file.
inputs & targets must either be the same length (in lines) or have inputs
evenly divide targets N times, where each input has N decodes sequentially
in targets.
The function returns a list of floats representing the log-likelihood of the
target given the input. If `scores_filename` is present, then these are also
written out as a text file, one per line.
Args:
estimator: a TPUEstimator
vocabulary: a mtf.transformer.vocabulary.Vocabulary
model_type: a string
batch_size: an integer
sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
model_dir: string, estimator model_dir
eval_checkpoint_step: int, list of ints, or None, see `eval_model`
docstring.
inputs: optional - a list of strings (inputs) the same length as targets
alternatively, a string filepath for a text file (one string per line)
targets: a list of strings (targets)
alternatively, a string filepath for a text file (one string per line)
score_postprocess_fn: Function that takes in model outputs and
post-processes and then returns them.
eos_id: EOS id
score_eos: a boolean - whether to score the final eos token of each line
If this is set to false, the scores can be interpreted as prefix
log-likelihoods
Returns:
a list of floats
"""
if isinstance(inputs, str):
inputs = get_inputs_from_file(inputs)
if isinstance(targets, str):
targets = get_inputs_from_file(targets)
has_inputs = inputs is not None
if has_inputs:
if len(inputs) < len(targets):
# We assume that the targets file contains n targets for each input.
# So we repeat each input n times.
if len(targets) % len(inputs):
raise ValueError("len(inputs) must divide len(targets), got %d and %d"
% (len(inputs), len(targets)))
repeats = len(targets) // len(inputs)
inputs = [inputs[i // repeats] for i in range(len(targets))]
elif len(targets) < len(inputs):
# `targets` is a list of one string. Use it as a target for all inputs.
if len(targets) != 1:
raise ValueError("Expected only one target string")
targets = targets * len(inputs)
if has_inputs and model_type == "lm":
has_inputs = False
all_target_ids = encode_inputs(
targets, vocabulary, model_type, batch_size,
sequence_length["targets"], eos_id=eos_id if score_eos else 0,
unscored_prefix=inputs)
else:
if has_inputs:
all_input_ids = encode_inputs(inputs, vocabulary, model_type, batch_size,
sequence_length["inputs"], eos_id=eos_id)
all_target_ids = encode_inputs(
targets, vocabulary, model_type, batch_size,
sequence_length["targets"], eos_id=eos_id if score_eos else 0)
def input_fn(params):
del params
m = ({"inputs": all_input_ids, "targets": all_target_ids} if has_inputs
else {"targets": all_target_ids})
dataset = tf.data.Dataset.from_tensor_slices(m)
dataset = dataset.flat_map(tf.data.Dataset.from_tensors)
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset.prefetch(tf.data.experimental.AUTOTUNE)
return score_with_estimator(
estimator, input_fn, eval_checkpoint_step, model_dir,
vocabulary, score_postprocess_fn, len(targets))
@gin.configurable
def score_from_dataset(estimator, vocabulary, batch_size, sequence_length,
model_dir, eval_checkpoint_step, dataset_split,
score_dataset_fn=None,
score_postprocess_fn=gin.REQUIRED):
"""Compute log likelihoods per example and write to a text file.
The function returns a list of floats representing the log-likelihood of the
target given the input. If `scores_filename` is present, then these are also
written out as a text file, one per line. If multiple datasets are returned,
their scores will be concatenated.
Args:
estimator: a TPUEstimator
vocabulary: a mtf.transformer.vocabulary.Vocabulary
batch_size: an integer
sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
model_dir: string, estimator model_dir
eval_checkpoint_step: int, list of ints, or None, see `eval_model`
docstring.
dataset_split: a string
score_dataset_fn: A function returning a list of dataset.EvalDataset tuples.
See `eval_dataset_fn` argument to `eval_model` for details.
score_postprocess_fn: Function that takes in model outputs and
post-processes and then returns them.
Returns:
scores: a list of floats, the log likelihood scores
targets: a list of strings, scored targets
"""
scoring_datasets = score_dataset_fn(
sequence_length=sequence_length,
vocabulary=vocabulary,
dataset_split=dataset_split)
input_fn = _get_combined_dataset_input_fn(
scoring_datasets, batch_size, sequence_length)
return score_with_estimator(
estimator, input_fn, eval_checkpoint_step, model_dir,
vocabulary, score_postprocess_fn, None)
def get_estimator(model_type, vocabulary, mesh_shape,
layout_rules, model_dir, batch_size, sequence_length,
autostack, learning_rate_schedule, keep_checkpoint_max,
save_checkpoints_steps, optimizer, predict_fn,
variable_filter, ensemble_inputs, use_tpu, tpu_job_name,
iterations_per_loop, cluster, init_checkpoint=None,
mesh_devices=None, score_in_predict_mode=False):
"""Create TPU estimator for the transfomer Mesh-TF model.
Args:
model_type: a string - either "bitransformer", "bi_student_teacher", lm" or
"aligned"
vocabulary: a vocabulary.Vocabulary or (inputs_vocabulary,
targets_vocabulary) tuple
mesh_shape: a function passed in through gin that returns a mtf.Shape
layout_rules: an input to mtf.convert_to_layout_rules()
model_dir: a string, model directory path.
batch_size: an integer, global batch size.
sequence_length: a dict, see `train_model` docstring for details.
autostack: boolean, internally combine variables
learning_rate_schedule: an optional function taking the scalar named argument
`step` and the numeric argument `total_train_steps` and returning the scalar
learning rate
keep_checkpoint_max: an integer, maximum number of checkpoints to keep
save_checkpoints_steps: integer, steps per checkpoint
optimizer: a class extending optimize.Optimizer, required for training
predict_fn: an optional function that can be used to override the default
transformer prediction behavior. Must return a tensor of shape [batch_dim,
length_dim] that will be the prediction for each example. Must accept the
following arguments:
- model: a Unitransformer or Bitransformer
- features: a dict representing an example. Every value will be an
mtf.Tensor with shape [batch_dim, length_dim].
- variable_dtype: an mtf.VariableDType
variable_filter: a string, a variable will only be trained if its name
matches this regex. If None (default), train all trainable variables.
ensemble_inputs: an integer, see `train_model` docstring for details.
use_tpu: string, the Cloud TPU to use for training
tpu_job_name: string, name of TPU worker binary
iterations_per_loop: integer, steps per train loop
cluster: a TPUClusterResolver object
init_checkpoint: a string, if not None then read in variables from this
checkpoint path when initializing variables. Will only initialize
variables that appear both in the current graph and the checkpoint.
mesh_devices: a list of strings, the device names to use for each mesh
slice. Only required for GPU.
score_in_predict_mode: a bool, compute log-likelihood scores instead of
predictions.
Returns:
an Estimator object.
"""
my_tpu_config = tpu_config.TPUConfig(
tpu_job_name=tpu_job_name,
iterations_per_loop=iterations_per_loop,
num_cores_per_replica=1,
per_host_input_for_training=tpu_config.InputPipelineConfig.BROADCAST,
)
session_config = None
if use_tpu:
# meta-optimizer drastically slows down startup time and has little benefit
# when running on TPU.
session_config = tf.ConfigProto(
graph_options=tf.GraphOptions(
rewrite_options=rewriter_config_pb2.RewriterConfig(
disable_meta_optimizer=True)))
run_config = tpu_config.RunConfig(
cluster=cluster,
model_dir=model_dir,
tpu_config=my_tpu_config,
session_config=session_config,
save_checkpoints_steps=save_checkpoints_steps,
save_checkpoints_secs=None)
transformer_model = build_model(
model_type=model_type,
input_vocab_size=inputs_vocabulary(vocabulary).vocab_size,
output_vocab_size=targets_vocabulary(vocabulary).vocab_size,
layout_rules=layout_rules,
mesh_shape=mesh_shape)
model_fn = tpu_estimator_model_fn(
model_type=model_type,
transformer_model=transformer_model,
vocabulary=vocabulary,
model_dir=model_dir,
use_tpu=use_tpu,
mesh_shape=mesh_shape,
layout_rules=layout_rules,
batch_size=batch_size,
sequence_length=sequence_length,
autostack=autostack,
learning_rate_schedule=learning_rate_schedule,
keep_checkpoint_max=keep_checkpoint_max,
save_checkpoints_steps=save_checkpoints_steps,
optimizer=optimizer,
predict_fn=predict_fn,
variable_filter=variable_filter,
ensemble_inputs=ensemble_inputs,
init_checkpoint=init_checkpoint,
mesh_devices=mesh_devices,
score_in_predict_mode=score_in_predict_mode)
estimator = tpu_estimator.TPUEstimator(
model_fn=model_fn,
config=run_config,
train_batch_size=batch_size,
eval_batch_size=batch_size,
predict_batch_size=batch_size,
use_tpu=use_tpu,
export_to_tpu=False,
params={})
return estimator
def train_model(estimator, vocabulary, sequence_length, batch_size,
train_dataset_fn, train_steps, ensemble_inputs,
dataset_split="train", skip_seen_data=False,
seen_data_init_step=0, checkpoint_input_pipeline=False):
"""Train a Mesh-TF model.
Args:
estimator: Estimator object, created with the appropriate model_fn.
vocabulary: a vocabulary.Vocabulary or (inputs_vocabulary,
targets_vocabulary) tuple
sequence_length: a dict from feature-key to integer the (packed)
sequence length, e.g. {"inputs": 512, "targets": 128}
batch_size: an integer, global batch size
train_dataset_fn: A function returning a tf.data.Dataset. Should accept the
following arguments:
- sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
- vocabulary: Vocabulary instance to use for encoding.
- dataset_split: str, which dataset split to load.
train_steps: an integer, number of steps for training.
ensemble_inputs: an optional integer - pass the size of the ensemble to
train an ensemble where each model gets different inputs. You also need to
configure Unitransformer.ensemble to the right size. If None, then all
models are trained on the same inputs.
dataset_split: str, which dataset split to train on.
skip_seen_data: a boolean, is `False` by default. Used when a training run
restarts to skip already seen data. This flag is only consistent when
every setting (such as batch size and random seed) on the model is the
same between the original run and the new run. May require a significant
amount of time to skip a large number of steps.
seen_data_init_step: an integer, when `skip_seen_data` is True, skip seen
steps from this starting point. Useful when finetuning.
checkpoint_input_pipeline: a boolean, whether to checkpoint the input
pipeline in order to restart from the previous run. May require a large
amount of disk space for complicated input pipelines.
"""
if skip_seen_data and checkpoint_input_pipeline:
raise ValueError(
"At most one of `skip_seen_data` and `checkpoint_input_pipeline` may "
"be set.")
def input_fn(params):
del params
dataset = train_dataset_fn(
sequence_length=sequence_length,
vocabulary=vocabulary,
dataset_split=dataset_split)
dataset = dataset.repeat().batch(
batch_size * (ensemble_inputs or 1), drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
# On the first time data is read in after relaunching, skip data that has
# already been seen.
if skip_seen_data and estimator.latest_checkpoint() is not None:
recovered_step = estimator.get_variable_value("global_step")
steps_to_skip = recovered_step - seen_data_init_step
if steps_to_skip > 0:
tf.logging.info("Skipping %d steps of data.", steps_to_skip)
dataset = dataset.skip(steps_to_skip)
return dataset
hooks = []
if checkpoint_input_pipeline:
hooks.append(
tf.data.experimental.CheckpointInputPipelineHook(estimator))
estimator.train(input_fn=input_fn, max_steps=train_steps, hooks=hooks)
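# Example (illustrative sketch, not part of the library): a minimal
# `train_dataset_fn` compatible with `train_model`. The integer features below
# are placeholders; a real pipeline would tokenize text with `vocabulary`.
#
#   def my_train_dataset_fn(sequence_length, vocabulary, dataset_split):
#     del vocabulary, dataset_split
#     def _to_example(i):
#       i = tf.cast(i, tf.int32)
#       return {
#           "inputs": tf.fill([sequence_length["inputs"]], i),
#           "targets": tf.fill([sequence_length["targets"]], i),
#       }
#     return tf.data.Dataset.range(1, 1000).map(_to_example)
#
#   train_model(estimator, vocabulary, {"inputs": 512, "targets": 128},
#               batch_size=32, train_dataset_fn=my_train_dataset_fn,
#               train_steps=1000, ensemble_inputs=None)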
@gin.configurable
def infer_model(estimator,
vocabulary,
sequence_length,
batch_size,
model_type,
model_dir,
eval_checkpoint_step,
checkpoint_paths=None,
decode_fn=decode_from_file):
"""Infer a Mesh-TF model.
Args:
estimator: Estimator object, created with the appropriate model_fn.
vocabulary: a vocabulary.Vocabulary or (inputs_vocabulary,
targets_vocabulary) tuple
sequence_length: a dict from feature-key to integer the (packed)
sequence length, e.g. {"inputs": 512, "targets": 128}
batch_size: an integer, global batch size
    model_type: a string - either "bitransformer", "bi_student_teacher",
      "lm" or "aligned"
model_dir: string, estimator model_dir
eval_checkpoint_step: int, list of ints, or None, see `eval_model`
docstring.
checkpoint_paths: optional list of checkpoints to run inference for
decode_fn: decoding function, defaults to decode_from_file
"""
if checkpoint_paths is None:
checkpoint_paths = get_checkpoint_iterator(eval_checkpoint_step, model_dir)
for checkpoint_path in checkpoint_paths:
decode_fn(
estimator,
vocabulary=vocabulary,
model_type=model_type,
batch_size=batch_size,
sequence_length=sequence_length,
checkpoint_path=checkpoint_path)
def eval_model(estimator,
vocabulary,
sequence_length,
batch_size,
dataset_split,
model_dir,
eval_dataset_fn,
eval_summary_dir,
eval_checkpoint_step,
eval_with_score=False,
output_eval_examples=True):
"""Eval a Mesh-TF model.
Args:
estimator: an Estimator object or a callable that returns one.
vocabulary: a vocabulary.Vocabulary or (inputs_vocabulary,
targets_vocabulary) tuple
sequence_length: a dict from feature-key to integer the (packed)
sequence length, e.g. {"inputs": 512, "targets": 128}. May also be set to
`None` to automatically compute the maximum length of the examples, which
requires `estimator` to be a callable.
batch_size: an integer, global batch size
dataset_split: a string
model_dir: a string, directory with the model.
eval_dataset_fn: A function returning a list of dataset.EvalDataset tuples.
Must be provided for mode="eval". Should accept the following arguments:
- sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
- vocabulary: Vocabulary instance to use for encoding.
- dataset_split: str, which dataset split to load.
dataset.EvalDataset tuples are namedtuples with the following fields:
- name: string, the task name
- dataset_fn: function which returns a tf.data.Dataset of tokenized and
padded examples. Must not require any arguments and must include the
feature keys 'inputs' and 'targets_pretokenized'.
- postprocess_fn: function which converts original targets to values
that can be processed by a `metric_fn`.
      - metric_fns: a list of metric functions with the call signature
`metric_fn(targets, predictions)` which returns a dict mapping
submetric names to scalar values. TensorBoard summaries and other tags
will be written out using the submetric names.
eval_summary_dir: str, path to write TensorBoard events file summaries for
      eval. If None, use model_dir/{split}_eval.
eval_checkpoint_step: int, list of ints, or None. If an int or list of ints,
evaluation or inference will be run on the checkpoint files in `model_dir`
whose global steps are closest to the global steps provided. If None and
mode="eval", run eval continuously waiting for new checkpoints via
`tf.train.checkpoints_iterator`.
eval_with_score: bool, whether to evaluate using log likelihood scores of
targets instead of decoded predictions.
output_eval_examples: bool, whether to dump inputs, targets and predictions
of the eval examples in plaintext to eval_summary_dir.
"""
if eval_dataset_fn is None:
raise ValueError("Must provide eval_dataset_fn through gin for eval.")
if sequence_length is None and not callable(estimator):
raise ValueError(
"A callable must be passed for the estimator when automatically "
"computing the sequence length.")
eval_datasets = eval_dataset_fn(
sequence_length=sequence_length,
vocabulary=vocabulary,
dataset_split=dataset_split,
)
valid_eval_datasets = []
for eval_dataset in eval_datasets:
if not eval_dataset.metric_fns:
tf.logging.info("Skipping %s because metric_fns is empty",
eval_dataset.name)
continue
# Convert to EvalDataset tuple in case eval_dataset_fn returns raw tuples
valid_eval_datasets.append(transformer_dataset.EvalDataset(*eval_dataset))
eval_datasets = valid_eval_datasets
if not eval_datasets:
tf.logging.info(
"All provided EvalDatasets have metric_fns=[]; eval is not possible.")
return
eval_summary_dir = eval_summary_dir or os.path.join(
model_dir, "{}_eval".format(dataset_split))
summary_writer = tf.summary.FileWriter(eval_summary_dir)
# Pre-load in all of the targets once before entering continuous eval loop
cached_targets = {}
cached_examples = {}
# Need to create a separate graph for loading in original targets
# or else TF will complain that we modified the graph
max_sequence_length = {"inputs": 0, "targets": 0}
tf.logging.info("Caching evaluation examples.")
with tf.Graph().as_default():
for eval_dataset in eval_datasets:
if eval_dataset.metric_fns:
ds = eval_dataset.dataset_fn()
# Create list of postprocessed text targets
inputs = []
targets = []
examples = []
for ex in tfds.as_numpy(ds):
max_sequence_length["inputs"] = max(
max_sequence_length["inputs"], len(ex["inputs"]))
max_sequence_length["targets"] = max(
max_sequence_length["targets"], len(ex["targets"]))
examples.append(ex)
if "inputs_pretokenized" in ex:
inputs.append(ex["inputs_pretokenized"])
if "targets_pretokenized" in ex:
targets_pretokenized = ex["targets_pretokenized"]
if isinstance(targets_pretokenized, bytes):
targets_pretokenized = targets_pretokenized.decode("utf-8")
targets.append(
eval_dataset.postprocess_fn(
targets_pretokenized, example=ex, is_target=True)
)
if output_eval_examples:
targets_filename = os.path.join(
eval_summary_dir,
"{}_targets".format(eval_dataset.name),
)
write_lines_to_file(targets, targets_filename)
inputs_filename = os.path.join(eval_summary_dir,
"{}_inputs".format(eval_dataset.name))
write_lines_to_file(inputs, inputs_filename)
cached_targets[eval_dataset.name] = targets
cached_examples[eval_dataset.name] = examples
if sequence_length is None:
tf.logging.info("Setting sequence lengths to %s", max_sequence_length)
sequence_length = max_sequence_length
estimator = functools.partial(estimator, sequence_length=sequence_length)
elif (sequence_length["inputs"] < max_sequence_length["inputs"] or
sequence_length["targets"] < max_sequence_length["targets"]):
tf.logging.warning(
"Given sequence lengths are insufficient for some evaluation inputs or "
"targets. These sequences will be truncated to fit, likely leading to "
"sub-optimal results. Consider passing `None` for sequence_length to "
"have them be automatically computed.\n Got: %s,\n Max Lengths: %s",
sequence_length, max_sequence_length)
elif (sequence_length["inputs"] > max_sequence_length["inputs"] or
sequence_length["targets"] > max_sequence_length["targets"]):
tf.logging.warning(
"Given sequence lengths are longer than necessary for some evaluation "
"inputs or targets, resulting in wasted computation. Consider passing "
"`None` for sequence_length to have them be automatically computed.\n"
" Got: %s,\n Max Lengths: %s",
sequence_length, max_sequence_length)
if callable(estimator):
estimator = estimator()
input_fn = _get_combined_dataset_input_fn(
eval_datasets, batch_size, sequence_length, check_for_metrics=True)
checkpoint_paths = get_checkpoint_iterator(eval_checkpoint_step, model_dir)
for checkpoint_path in checkpoint_paths:
tf.logging.info("Checkpoint path %s" % checkpoint_path)
global_step = int(get_step_from_checkpoint_path(checkpoint_path))
if eval_with_score:
outputs, _ = score_with_estimator(
estimator, input_fn, global_step, model_dir, vocabulary,
num_examples=sum(len(cex) for cex in cached_examples.values()))
else:
outputs = [
d.decode("utf-8") if isinstance(d, bytes) else d
for d in decode(estimator, input_fn, vocabulary, checkpoint_path)
]
for eval_dataset in eval_datasets:
# Extract the portion of decodes corresponding to this dataset
examples = cached_examples[eval_dataset.name]
dataset_size = len(examples)
predictions = [
eval_dataset.postprocess_fn(d, example=ex)
for d, ex in zip(outputs[:dataset_size], examples)
]
# Remove the used decodes.
del outputs[:dataset_size]
global_step = int(get_step_from_checkpoint_path(checkpoint_path))
if output_eval_examples:
predictions_filename = os.path.join(
eval_summary_dir,
"{}_{}_predictions".format(eval_dataset.name, global_step),
)
write_lines_to_file(predictions, predictions_filename)
for metric_fn in eval_dataset.metric_fns:
summary = tf.Summary()
targets = cached_targets[eval_dataset.name]
metric_result = metric_fn(targets, predictions)
for metric_name, metric_value in metric_result.items():
tag = "eval/{}/{}".format(eval_dataset.name, metric_name)
tf.logging.info("%s at step %d: %.3f", tag, global_step, metric_value)
summary.value.add(tag=tag, simple_value=metric_value)
summary_writer.add_summary(summary, global_step)
summary_writer.flush()
# Only padding should remain.
expected_pad = -sum(len(t) for t in cached_targets.values()) % batch_size
if outputs and len(outputs) != expected_pad:
raise ValueError("{} padded outputs, {} expected.".format(
len(outputs), expected_pad))
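# Example (illustrative sketch): a minimal `eval_dataset_fn` for `eval_model`.
# The task name, toy token ids and accuracy metric are hypothetical; a real
# implementation would tokenize with `vocabulary` and pad to `sequence_length`.
#
#   def my_eval_dataset_fn(sequence_length, vocabulary, dataset_split):
#     del sequence_length, vocabulary, dataset_split
#     def dataset_fn():
#       return tf.data.Dataset.from_tensor_slices({
#           "inputs": [[5, 6, 1, 0]],
#           "targets": [[7, 1, 0, 0]],
#           "targets_pretokenized": ["seven"],
#       })
#     def accuracy(targets, predictions):
#       correct = sum(t == p for t, p in zip(targets, predictions))
#       return {"accuracy": correct / max(len(targets), 1)}
#     return [transformer_dataset.EvalDataset(
#         name="my_task",
#         dataset_fn=dataset_fn,
#         postprocess_fn=lambda d, example=None, is_target=False: d,
#         metric_fns=[accuracy])]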
def export_model(estimator, export_dir, vocabulary, sequence_length,
model_type, eval_with_score=False, batch_size=1,
checkpoint_path=None):
"""Export a model in TF SavedModel format to be used for inference on CPUs.
Args:
estimator: Estimator object, estimator created with the appropriate
model_fn.
export_dir: str, a directory in which to create timestamped subdirectories
containing exported SavedModels.
vocabulary: sentencepiece vocab, vocabulary instance to use for encoding.
sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
model_type: a string, see `get_estimator` docstring for details.
eval_with_score: If True, compute log-likelihood scores of targets.
If False, do inference to generate outputs.
batch_size: int, number of sequences per batch. Should match estimator.
checkpoint_path: str, path to checkpoint. If None (default), use the most
recent in the model directory.
Returns:
The string path to the exported directory.
"""
def serving_input_fn():
"""Constructs input portion of Graph in serving.
Input is a batch of strings.
Returns:
a ServingInputReceiver
"""
def str_placeholder(name):
return tf.placeholder(dtype=tf.string, shape=[None], name=name)
if model_type == "lm" or not eval_with_score:
      # In this case, users of the exported model provide only one feature,
      # which is "targets" if scoring or "inputs" if doing prediction.
input_key = "targets" if eval_with_score else "inputs"
vocab_to_use = (targets_vocabulary(vocabulary) if eval_with_score
else inputs_vocabulary(vocabulary))
targets = str_placeholder(input_key)
predict_batch_size = tf.shape(targets)[0]
dataset = tf.data.Dataset.from_tensor_slices({input_key: targets})
dataset = transformer_dataset.encode_all_features(dataset, vocab_to_use)
receiver_tensors = {input_key: targets}
else:
# When scoring for encoder-decoder models, both "inputs" and "targets"
# must be provided.
inputs = str_placeholder("inputs")
targets = str_placeholder("targets")
predict_batch_size = tf.shape(inputs)[0]
inputs_dataset = transformer_dataset.encode_all_features(
tf.data.Dataset.from_tensor_slices({"inputs": inputs}),
inputs_vocabulary(vocabulary))
targets_dataset = transformer_dataset.encode_all_features(
tf.data.Dataset.from_tensor_slices({"targets": targets}),
targets_vocabulary(vocabulary))
dataset = tf.data.Dataset.zip((inputs_dataset, targets_dataset))
dataset = dataset.map(lambda x, y: {**x, **y},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
receiver_tensors = {"inputs": inputs, "targets": targets}
dataset = transformer_dataset.pack_or_pad(
dataset=dataset,
length=sequence_length,
pack=False,
feature_keys=receiver_tensors.keys()
)
# Batch, and pad final batch.
tf.debugging.assert_less_equal(predict_batch_size, batch_size)
dataset = dataset.batch(batch_size, drop_remainder=False)
dataset = transformer_dataset.trim_and_pad_dataset(
dataset, length=batch_size)
features = tf.data.experimental.get_single_element(dataset)
features["predict_batch_size"] = predict_batch_size
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=receiver_tensors)
return estimator.export_saved_model(
export_dir, serving_input_fn, checkpoint_path=checkpoint_path)
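# Example (illustrative): loading the exported SavedModel for CPU inference.
# This sketch assumes a TF2 runtime and an export with eval_with_score=False,
# so the serving signature consumes a batch of "inputs" strings; the output
# keys depend on the estimator's prediction dict.
#
#   import tensorflow as tf2
#   export_path = export_model(estimator, "/tmp/exported", vocabulary,
#                              {"inputs": 64, "targets": 64}, "bitransformer")
#   loaded = tf2.saved_model.load(export_path)
#   serving_fn = loaded.signatures["serving_default"]
#   outputs = serving_fn(inputs=tf2.constant(["translate: hello world"]))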
def compute_batch_size(sequence_length,
mesh_shape,
layout_rules,
method_and_value):
"""Compute the total batch size in sequences.
method_and_value is a (string, int) pair.
The method string is one of the following four options:
"sequences_per_batch"
"tokens_per_batch"
"sequences_per_replica"
"tokens_per_replica"
According to the method string, the value represents either a number of
sequences or a number of tokens, and represents either the size of the total
batch or the fraction of the batch assigned to each model replica.
For example ("tokens_per_replica", 2048) means that the batch size should be
set so that the number of tokens per model replica is 2048. So if the
sequence length is 1024 and there is 16-way data-parallelism, then the number
of sequences per batch would be 2048 * 16 / 1024 = 32.
The "per_batch" versions are useful for ensuring indentical overall batch
sizes across different mesh shapes/layouts. The "per_replica" versions are
useful for scaling up the total batch size relative to the degree of
data-parallelism
Args:
sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
mesh_shape: an input to mtf.convert_to_shape()
layout_rules: an input to mtf.convert_to_layout_rules()
method_and_value: a pair
Returns:
an integer - the number of sequences per batch
"""
def checkdiv(a, b):
if a % b:
raise ValueError("%d is not divisible by %d" % (a, b))
return a // b
num_replicas = (
mtf.tensor_dim_to_mesh_dim_size(
layout_rules, mesh_shape, mtf.Dimension("batch", 0)) *
mtf.tensor_dim_to_mesh_dim_size(
layout_rules, mesh_shape, mtf.Dimension("outer_batch", 0)))
method, value = method_and_value
if method == "sequences_per_batch":
return value
sequence_length = max(sequence_length.values())
if method == "tokens_per_batch":
return checkdiv(value, sequence_length)
elif method == "sequences_per_replica":
return value * num_replicas
elif method == "tokens_per_replica":
return checkdiv(value, sequence_length) * num_replicas
else:
raise ValueError("unknown method %s" % method,)
@gin.configurable
def serialize_num_microbatches(batch_dim,
sequence_length,
mesh_shape,
layout_rules,
tokens_per_microbatch_per_replica=None):
"""Number of microbatches per batch for serialized training.
We want to split each training step into multiple sequential steps
to limit memory usage. Gradients are accumulated locally and reduced once.
This function determines the number of microbatches per batch.
If tokens_per_microbatch_per_replica=None, then the batch is not split.
Args:
batch_dim: a mtf.Dimension
sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
mesh_shape: an input to mtf.convert_to_shape()
layout_rules: an input to mtf.convert_to_layout_rules()
tokens_per_microbatch_per_replica: an optional integer, e.g. 2048
Returns:
an integer
"""
if not tokens_per_microbatch_per_replica:
return 1
batch_per_replica = mtf.tensor_dim_to_size_per_split(
layout_rules, mesh_shape, batch_dim)
# number of sequences per microbatch
microbatch_size = max(
1, tokens_per_microbatch_per_replica // max(sequence_length.values()))
# decrease microbatch_size until it is a divisor of batch_per_replica
# This is guaranteed to stop at microbatch_size=1 if not earlier.
while batch_per_replica % microbatch_size:
microbatch_size -= 1
num_microbatches = batch_per_replica // microbatch_size
tf.logging.info(
"serialize_num_microbatches: "
"tokens_per_microbatch_per_replica=%d "
"batch_dim=%s "
"sequence_length=%s "
"batch_per_replica=%d "
"num_microbatches=%d",
tokens_per_microbatch_per_replica,
batch_dim,
sequence_length,
batch_per_replica,
num_microbatches)
return int(num_microbatches)
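# Worked example (illustrative): with tokens_per_microbatch_per_replica=2048,
# sequence_length={"inputs": 512, "targets": 512} and 16 sequences per
# replica, microbatch_size = 2048 // 512 = 4, which divides 16, so each
# training step is serialized into 16 // 4 = 4 microbatches.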
@gin.configurable
def auto_train_steps(batch_size,
sequence_length,
train_tokens=2 ** 36):
"""Automatically compute number of training steps.
Since the batch size and sequence length can vary across experiments, we
specify the amount of training in terms of (non-unique) input tokens processed
over the course of training the model. The number of steps is computed as
train_steps = train_tokens // (batch_size * sequence_length)
Args:
batch_size: an integer
sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
train_tokens: an integer (train_steps * batch_size * sequence_length)
Returns:
an integer
"""
return train_tokens // (batch_size * max(sequence_length.values()))
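# Worked example (illustrative): with the default train_tokens=2**36, a batch
# of 2048 sequences and sequence_length={"inputs": 512, "targets": 512},
# auto_train_steps returns 2**36 // (2048 * 512) = 65536 training steps.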
@gin.configurable
def get_checkpoint_iterator(checkpoint_step, model_dir, skip_until=0,
stop_after=None, find_closest=True):
"""Get an iterable of checkpoint paths from a provided checkpoint step(s).
Args:
checkpoint_step: If checkpoint_step is an int, return a singleton list with
that checkpoint path in it. If find_closest, the checkpoint with the
      closest global step will be returned. If checkpoint_step is a
list of ints, replace each int with its corresponding path (if
find_closest, the path with the closest global step). If
checkpoint_step == "all", return the path of every checkpoint in
model_dir, starting from the earliest checkpoint. If
checkpoint_step == -1, return the latest checkpoint as specified in
model_dir/checkpoint. If checkpoint_step is None, return
`tf.train.checkpoints_iterator` for `model_dir`.
model_dir: str, directory to look for checkpoints in.
skip_until: an integer - for "all" or "None" behavior, filter out
checkpoint numbers that are <= skip_until.
    stop_after: an optional integer - for the "None" behavior, if specified,
      stop after finding a checkpoint number that is >= stop_after. When a
      checkpoint number == stop_after is found, it is yielded before exiting.
find_closest: If True and a specified checkpoint step does not exist, will
choose the nearest checkpoint to that step. If False, then will
only look for a checkpoint matching the exact specified step.
Returns:
An iterable which yields checkpoint paths.
"""
def _get_closest_checkpoint(target_checkpoint):
"""Returns checkpoint with closest global step to `target_checkpoint`."""
checkpoints = set()
for f in tf.io.gfile.listdir(model_dir):
try:
checkpoints.add(int(get_step_from_checkpoint_path(f)))
except ValueError:
continue
if not checkpoints:
raise ValueError("No checkpoint files found in {}".format(model_dir))
closest = float("inf")
for c in checkpoints:
if abs(target_checkpoint - c) < abs(target_checkpoint - closest):
closest = c
if closest != target_checkpoint:
tf.logging.info(
"Using checkpoint at step %d which is closest to requested step %d",
closest,
target_checkpoint,
)
return closest
def _get_checkpoint_path(step):
return os.path.join(model_dir, "model.ckpt-{}".format(step))
def _get_checkpoint_path_if_exists(step):
path = _get_checkpoint_path(step)
return path if tf.train.checkpoint_exists(path) else None
def _filter_fn(p):
return get_step_from_checkpoint_path(p) > skip_until
if checkpoint_step == "all":
ckpt_paths = tf.gfile.Glob(os.path.join(model_dir, "model.ckpt*"))
# Use set for deduplication; glob will find multiple files for each ckpt
ckpt_steps = {get_step_from_checkpoint_path(p) for p in ckpt_paths}
return filter(_filter_fn,
[_get_checkpoint_path(s) for s in sorted(list(ckpt_steps))])
elif checkpoint_step == -1:
return [tf.train.latest_checkpoint(model_dir)]
elif checkpoint_step is None:
checkpoints_iterator = filter(
_filter_fn, tf.train.checkpoints_iterator(model_dir))
if stop_after is not None:
def _generate_checkpoints():
for p in checkpoints_iterator:
step = get_step_from_checkpoint_path(p)
if step <= stop_after:
yield p
if step >= stop_after:
break
return _generate_checkpoints()
else:
return checkpoints_iterator
elif find_closest:
if isinstance(checkpoint_step, int):
return [_get_checkpoint_path(_get_closest_checkpoint(checkpoint_step))]
else:
closests = np.unique(
[_get_closest_checkpoint(c) for c in checkpoint_step])
return [_get_checkpoint_path(closest) for closest in closests]
else:
if isinstance(checkpoint_step, int):
checkpoint_step = [checkpoint_step]
checkpoints = [_get_checkpoint_path_if_exists(c) for c in checkpoint_step]
checkpoints = [c for c in checkpoints if c]
if not checkpoints:
raise ValueError("You asked for checkpoints '%s' but none were found." %
str(checkpoint_step))
return checkpoints
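# Example usages (illustrative):
#
#   # Single checkpoint closest to step 10000:
#   [ckpt] = get_checkpoint_iterator(10000, model_dir)
#   # Every checkpoint after step 5000, earliest first:
#   for ckpt in get_checkpoint_iterator("all", model_dir, skip_until=5000):
#     ...
#   # Wait for new checkpoints, stopping once step 100000 is reached:
#   for ckpt in get_checkpoint_iterator(None, model_dir, stop_after=100000):
#     ...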
# TODO(noam): provide a more informative string for layout_rules:
# example: "d_ff:model,heads:model,vocab:model"
@gin.configurable
def run(tpu_job_name,
tpu,
gcp_project,
tpu_zone,
model_dir,
model_type="bitransformer",
vocabulary=None,
train_dataset_fn=None,
eval_dataset_fn=None,
dataset_split="train",
autostack=True,
eval_checkpoint_step=None,
export_checkpoint_step=None,
export_path="",
mode="train",
iterations_per_loop=100,
save_checkpoints_steps=5000,
keep_checkpoint_max=None,
eval_summary_dir=None,
batch_size=("tokens_per_replica", 2048),
train_steps=auto_train_steps,
total_run_steps=None,
sequence_length=None,
mesh_shape=gin.REQUIRED,
mesh_devices=None,
layout_rules=gin.REQUIRED,
learning_rate_schedule=None,
optimizer=None,
predict_fn=None,
variable_filter=None,
perplexity_eval_steps=100,
init_checkpoint=None,
ensemble_inputs=None,
train_model_fn=train_model,
skip_seen_data=False,
seen_data_init_step=0,
output_eval_examples=True,
checkpoint_input_pipeline=False):
"""Run training, eval, or inference depending on `mode`.
Args:
tpu_job_name: string, name of TPU worker binary
tpu: string, the Cloud TPU to use for training
gcp_project: string, project name for the Cloud TPU-enabled project
tpu_zone: string, GCE zone where the Cloud TPU is located in
model_dir: string, estimator model_dir
model_type: a string, see `get_estimator` docstring for details.
vocabulary: a vocabulary.Vocabulary or (inputs_vocabulary,
targets_vocabulary) tuple.
train_dataset_fn: A function returning a tf.data.Dataset, see `train_model`
docstring for details.
eval_dataset_fn: A function returning a list of dataset.EvalDataset tuples.
See `eval_model` docstring for details.
dataset_split: a string
autostack: boolean, see `get_estimator` docstring for details.
eval_checkpoint_step: int, list of ints, or None, see `eval_model` doc
string for details.
export_checkpoint_step: int or None, see `export_model` doc string for
details.
export_path: a string, path to export the saved model
mode: string, one of
train - train the model
eval - eval the model by decoding predictions
score_eval - eval the model by computing log likelihood scores of targets
perplexity_eval - eval the model by computing perplexity
infer - decode predictions based on inputs
score_from_dataset - compute scores of targets from a dataset
score_from_strings - compute scores of targets from strings or a file
export_score - export a model that scores provided examples
export_infer - export a model that decodes predictions based on inputs
iterations_per_loop: integer, steps per train loop
save_checkpoints_steps: integer, see `get_estimator` docstring.
keep_checkpoint_max: an integer, see `get_estimator` docstring.
eval_summary_dir: str, see `eval_model` docstring for details.
batch_size: An integer or a (method, value) pair to pass to
compute_batch_size(). Note that this is the global batch size and not the
per-shard batch size.
train_steps: An integer or a function with the same signature as
auto_train_steps(). Total number of training steps in this run.
total_run_steps: An integer, used when training is split over multiple
runs. This value is gin-configurable and used to set the total_run_steps
for the learning_rate_schedule.
sequence_length: an integer or a dict from feature-key to integer
the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}.
May also be set to `None` in eval mode to automatically compute the
maximum length of the examples.
mesh_shape: an input to mtf.convert_to_shape()
mesh_devices: a list of strings, see `get_estimator` docstring.
layout_rules: an input to mtf.convert_to_layout_rules()
learning_rate_schedule: a function which takes the scalar name argument
`step` and the numeric argument `total_train_steps` and returns the scalar
learning rate. Alternatively a float. Alternatively, a list of
      such factors to be multiplied together.
optimizer: a class extending optimize.Optimizer, required for training
predict_fn: an optional function, see `get_estimator` docstring for details.
variable_filter: a string, see `get_estimator` docstring for details.
perplexity_eval_steps: an integer - number of steps for perplexity eval
init_checkpoint: a string, see `get_estimator` docstring for details.
ensemble_inputs: an integer, see `train_model` docstring for details.
train_model_fn: an optional train function, is `train_model` by default.
skip_seen_data: a boolean, is `False` by default. Used when a training run
restarts to skip already seen data. This flag is only consistent when
every setting (such as batch size and random seed) on the model is the
same between the original run and the new run. May require a significant
amount of time to skip a large number of steps.
seen_data_init_step: an integer, when `skip_seen_data` is True, skip seen
steps from this starting point. Useful when finetuning.
    output_eval_examples: a boolean, is `True` by default. Used to decide
      whether to dump inputs, targets, and predictions of the eval examples
      in plaintext to eval_summary_dir.
checkpoint_input_pipeline: a boolean, whether to checkpoint the input
pipeline in order to restart from the previous run. May require a large
amount of disk space for complicated input pipelines.
"""
if isinstance(sequence_length, int):
sequence_length = {"inputs": sequence_length,
"targets": sequence_length}
if not isinstance(batch_size, int):
batch_size = compute_batch_size(
sequence_length, mesh_shape, layout_rules, batch_size)
if not isinstance(train_steps, int):
train_steps = train_steps(batch_size, sequence_length)
if total_run_steps is None:
total_run_steps = train_steps
if isinstance(learning_rate_schedule, list):
learning_rate_schedule = functools.partial(
learning_rate_schedules.product_learning_rate,
total_train_steps=total_run_steps, factors=learning_rate_schedule)
if callable(learning_rate_schedule):
learning_rate_schedule = functools.partial(
learning_rate_schedule, total_train_steps=total_run_steps)
tf.logging.info("model_type=%s" % model_type,)
tf.logging.info("mode=%s" % mode,)
tf.logging.info("sequence_length=%s" % sequence_length,)
tf.logging.info("batch_size=%s" % batch_size,)
tf.logging.info("train_steps=%s" % train_steps,)
if total_run_steps is not None:
tf.logging.info("total_run_steps=%s" % total_run_steps,)
tf.logging.info("mesh_shape=%s" % mesh_shape,)
tf.logging.info("layout_rules=%s" % layout_rules,)
if mode == "train" and dataset_split != "train":
raise ValueError("mode==\"train\" requires dataset_split==\"train\"")
if mode != "train":
ensemble_inputs = None
mesh_shape = mtf.convert_to_shape(mesh_shape)
layout_rules = mtf.convert_to_layout_rules(layout_rules)
cluster = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu, zone=tpu_zone, project=gcp_project) if tpu else None
tf.logging.info(
"Building TPUConfig with tpu_job_name={}".format(tpu_job_name)
)
score_in_predict_mode = "score" in mode
estimator_fn = functools.partial(
get_estimator,
model_type=model_type,
vocabulary=vocabulary,
layout_rules=layout_rules,
mesh_shape=mesh_shape,
model_dir=model_dir,
batch_size=batch_size,
sequence_length=sequence_length,
autostack=autostack,
learning_rate_schedule=learning_rate_schedule,
keep_checkpoint_max=keep_checkpoint_max,
save_checkpoints_steps=save_checkpoints_steps,
optimizer=optimizer,
predict_fn=predict_fn,
score_in_predict_mode=score_in_predict_mode,
variable_filter=variable_filter,
init_checkpoint=init_checkpoint,
ensemble_inputs=ensemble_inputs,
use_tpu=tpu,
tpu_job_name=tpu_job_name,
iterations_per_loop=iterations_per_loop,
cluster=cluster,
mesh_devices=mesh_devices)
if mode not in ("eval", "score_eval"):
if sequence_length is None:
raise ValueError(f"`sequence_length` must be specified in '{mode}' mode.")
estimator = estimator_fn()
if mode == "train":
# train_dataset_fn could be None if train_model_fn is not equal to
# train_model
if train_dataset_fn is None:
raise ValueError("Must provide train_dataset_fn through gin")
train_model_fn(estimator, vocabulary, sequence_length, batch_size,
train_dataset_fn, train_steps, ensemble_inputs,
skip_seen_data=skip_seen_data,
seen_data_init_step=seen_data_init_step,
checkpoint_input_pipeline=checkpoint_input_pipeline)
elif mode == "perplexity_eval":
if eval_dataset_fn is None:
if train_dataset_fn is not None:
tf.logging.warning("Using train_dataset_fn for perplexity eval")
eval_datasets = [transformer_dataset.EvalDataset(
name="eval",
dataset_fn=functools.partial(train_dataset_fn,
sequence_length=sequence_length,
vocabulary=vocabulary,
dataset_split=dataset_split),
postprocess_fn=None,
metric_fns=None)]
else:
raise ValueError(
"for perplexity_eval, "
"must provide one of eval_dataset_fn and train_dataset_fn")
else:
eval_datasets = eval_dataset_fn(
sequence_length=sequence_length,
vocabulary=vocabulary,
dataset_split=dataset_split,
)
def _input_fn(params, eval_dataset):
del params
ds = eval_dataset.dataset_fn().map(
_filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = transformer_dataset.pad_dataset_with_zeroed_out_examples(ds)
ds = (ds.batch(batch_size * (ensemble_inputs or 1), drop_remainder=True)
.prefetch(tf.data.experimental.AUTOTUNE))
return ds
checkpoint_paths = get_checkpoint_iterator(eval_checkpoint_step, model_dir)
for checkpoint_path in checkpoint_paths:
for eval_dataset in eval_datasets:
tf.random.set_random_seed(12345)
random.seed(12345)
num_examples = batch_size * perplexity_eval_steps
# include the number of examples in the evaluation name so as to
# make sure we are comparing apples to apples.
name = "%s_%s_%d" % (eval_dataset.name, dataset_split, num_examples)
_ = estimator.evaluate(
input_fn=functools.partial(_input_fn, eval_dataset=eval_dataset),
steps=perplexity_eval_steps,
checkpoint_path=checkpoint_path,
name=name)
elif mode in ("eval", "score_eval"):
eval_model(
estimator_fn,
vocabulary,
sequence_length,
batch_size,
dataset_split,
model_dir,
eval_dataset_fn,
eval_summary_dir,
eval_checkpoint_step,
eval_with_score=(mode == "score_eval"),
output_eval_examples=output_eval_examples)
elif mode == "infer":
infer_model(estimator, vocabulary, sequence_length, batch_size, model_type,
model_dir, eval_checkpoint_step)
elif mode == "score_from_strings":
score_from_strings(estimator=estimator,
vocabulary=vocabulary,
model_type=model_type,
batch_size=batch_size,
sequence_length=sequence_length,
model_dir=model_dir,
eval_checkpoint_step=eval_checkpoint_step)
elif mode == "score_from_dataset":
score_from_dataset(estimator, vocabulary, batch_size, sequence_length,
model_dir, eval_checkpoint_step, dataset_split)
elif mode in ["export_score", "export_infer", "export"]:
if mode == "export":
tf.logging.warning("Mode 'export' is deprecated. "
"Defaulting to 'export_infer'.")
if export_checkpoint_step:
checkpoint_path = get_checkpoint_iterator(
export_checkpoint_step, model_dir)
if isinstance(checkpoint_path, list):
checkpoint_path = checkpoint_path[0]
else:
checkpoint_path = next(checkpoint_path)
else:
# Use the latest checkpoint in the model directory.
checkpoint_path = None
export_model(estimator, export_path, vocabulary, sequence_length,
model_type, score_in_predict_mode, batch_size, checkpoint_path)
else:
raise ValueError(
"unknown mode %s - must be train/perplexity_eval/eval/infer/export"
% mode)
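# Example (illustrative): a minimal direct invocation of `run` for one-device
# training. In practice these arguments are normally supplied through gin, and
# every value below (vocabulary, dataset function, optimizer class, schedule)
# is a placeholder.
#
#   run(tpu_job_name=None, tpu=None, gcp_project=None, tpu_zone=None,
#       model_dir="/tmp/my_model", model_type="bitransformer",
#       vocabulary=my_vocabulary, train_dataset_fn=my_train_dataset_fn,
#       mode="train", batch_size=("tokens_per_batch", 65536),
#       sequence_length={"inputs": 512, "targets": 128},
#       mesh_shape="batch:1", layout_rules="batch:batch",
#       learning_rate_schedule=0.003, optimizer=my_optimizer_cls)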
| mesh-master | mesh_tensorflow/transformer/utils.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MTF implementation of Transformer sequence/seq2seq model.
This implementation is meant to be extensible, allowing users to define their
own custom layers. It is meant to eventually replace the existing
mesh-tensorflow Transformer implementation in the Tensor2Tensor library.
The interface is for the user to create a Unitransformer or Bitransformer
object and then call its methods (call_simple, sample_autoregressive, etc.)
The Unitransformer or Bitransformer is configured by creating a LayerStack
object containing instances of TransformerLayer. Users can subclass
TransformerLayer to create new types of layers.
Supported so far:
- autoregressive single-stack Transformer (e.g. a simple language model)
- encoder-decoder models (requires two Transformers)
- non-autoregressive single-stack Transformer (e.g. BERT)
- fast autoregressive sampling with temperature
- beam search
- mixture of experts layer
- local attention layer
- shared embedding / shared embedding and softmax weights
Not yet supported: TODO(noam)
- compressed attention layer
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import math
import gin
import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf
class TransformerLayer(object):
"""Abstract base class for transformer layers.
The point of this object hierarchy is to make Transformer extensible. You can
configure a Transformer with your own custom layers without changing the base
library.
Transformer layers should subclass TransformerLayer. In the constructor, the
subclasses simply record their hyperparameters. Subclasses must implement a
call() method, representing a call to that layer. The call method is passed
an input tensor and a Context object. Variables should be created inside of
the call().
Examples of subclasses can be found in transformer_layers.py.
In addition to other global hyperparameters, the Context has a "mode", which
might be tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL, or another
special value. Autoregressive decoding uses the special modes "first_part"
and "incremental".
In "first_part" mode, the known first part of the sequence is passed through
all layers so that they can create any necessary initial states.
In "incremental" mode (which is called from the body of a while loop), the
input consists of only one position. Layers with recurrent states must
generate both output and the new states.
"""
def call(self, context, x):
"""Call the layer.
Args:
context: a Context
x: an input Tensor
Returns:
y: a Tensor
"""
raise NotImplementedError("Not implemented")
def to_json(self):
return json.dumps(self, cls=json.JSONEncoder)
def set_name(self, name):
self._name = name
@property
def name(self):
return getattr(self, "_name", None)
class Context(object):
"""Extra information that layers need at call time.
This structure is created by Unitransformer and is passed to the layers.
It contains information that may be necessary to some layers in some
modes.
In "first_part" and "incremental" modes, some layers modify the context
by producing and consuming "states" and "constant_states". The "states"
are loop variables that change at each decoding step. The "constant_states"
are produced once in "first_part" mode and read in the iterative decoding
step.
"""
def __init__(self,
model,
mesh,
batch_dims,
length_dim,
variable_dtype,
beam_dim=None,
mode=tf.estimator.ModeKeys.TRAIN,
position=None,
position_is_default=False,
sequence_id=None,
subsequence_id=None,
states=None,
new_states=None,
losses=None,
initial_position=None,
layer_outputs=None,
encoder_output=None,
encoder_sequence_id=None,
constant_states=None,
shared_params=None,
encoder_layer_outputs=None,
write_priority=None,
read_priority=None,
inputs=None,
encoder_inputs=None,
num_microbatches=1):
"""Create a context.
Args:
model: a pointer back at the unitransformer object
mesh: a mtf.Mesh
batch_dims: a list of mtf.Dimension
length_dim: a mtf.Dimension
variable_dtype: a mtf.VariableDType
beam_dim: an optional mtf.Dimension (present in beam search)
mode: either a tf.estimator.ModeKeys or one of the following:
"first_part"
"incremental"
position: an optional Tensor - represents position in the sequence.
Passing None means that the position should be considered to be the
index in the Tensor (along length_dim).
position_is_default: a boolean - is the position equal to
mtf.range(mesh, length_dim, tf.int32). This allows a shortcut in
embedding lookup, as we can just slice the embedding variable.
sequence_id: an optional int32 Tensor aligned with position - used to
separate out different sequences which have been concatenated
to form a single training example. Also used to mark padding.
Id 0 is used for padding, and different positive values
are used for the different sequences.
subsequence_id: an optional int32 Tensor - used to represent multiple
targets corresponding to the same input. Should only be provided when
being called as a decoder. If provided, then position should line up
with this rather than sequence_id. The sequence_id will represent the
groups of sub-targets corresponding to each input.
states: an optional list of Tensors representing loop variables
(consumed in "incremental" mode)
new_states: an optional list of Tensors onto which to append the new
values of loop variables.
(produced in "first_part" and "incremental" modes)
losses: an optional list of Tensors onto which to append losses
initial_position: an optional Tensor ("first_part" mode)
layer_outputs: an optional list onto which to append layer outputs
encoder_output: an optional Tensor (output of the encoder stack)
encoder_sequence_id: an optional int32 Tensor (similar to sequence_id)
but aligned with the encoder output.
constant_states: an optional list of structures produced during
"first_part" mode and consumed during "incremental" mode.
shared_params: an optional dictionary which can be populated by
parameters that are shared between Transformers - e.g. between the
encoder and decoder Unitransformers in a Bitransformer.
encoder_layer_outputs: optional - readonly list of tensor activations when
decoding, one per each input layer + the embedding layer
write_priority: an optional Tensor
in self-attention, position a can see position b iff
read_priority[a] >= write_priority[b]
read_priority: an optional Tensor
inputs: an optional int32 Tensor with the input token ids
encoder_inputs: an optional int32 Tensor with the input token ids to the
encoder half of the Bitransformer of which this Unitransformer is the
decoder.
num_microbatches: integer - greater than one if the step has been
serialized into multiple microbatches to save memory.
"""
self.model = model
self.mesh = mesh
self.batch_dims = batch_dims
self.length_dim = length_dim
self.variable_dtype = variable_dtype
self.beam_dim = beam_dim
self.mode = mode
self.position = position
self.position_is_default = position_is_default
self.sequence_id = sequence_id
self.subsequence_id = subsequence_id
self.states = states
self.new_states = new_states
self.losses = losses
self.initial_position = initial_position
self.layer_outputs = layer_outputs
if self.layer_outputs is None:
self.layer_outputs = []
self.encoder_output = encoder_output
self.encoder_sequence_id = encoder_sequence_id
self.constant_states = constant_states
self.next_constant_state = 0
self.shared_params = shared_params or {}
self.layer_index = 0
self.encoder_layer_outputs = encoder_layer_outputs
# put values here to share them between layers
self.cache = {}
self.write_priority = write_priority
self.read_priority = read_priority
self.inputs = inputs
self.encoder_inputs = encoder_inputs
self.num_microbatches = num_microbatches
@property
def train(self):
return self.mode == tf.estimator.ModeKeys.TRAIN
@property
def activation_dtype(self):
return self.variable_dtype.activation_dtype
@property
def encoder_length_dim(self):
ret, = [d for d in self.encoder_output.shape.dims
if d.name == "memory_length"]
return ret
def get_states(self, n):
"""Get the next n recurrent states.
Called by layers in "incremental" mode.
Args:
n: an integer
Returns:
a list of n Tensors
"""
return self.states[len(self.new_states):len(self.new_states) + n]
def record_new_states(self, new_states):
"""Record the new values of recurrent states.
Called by layers in "first_part" or "incremental" mode.
Args:
new_states: a list of Tensors
"""
self.new_states.extend(new_states)
def record_constant_state(self, s):
"""Record state in "first_part" mode to be read in "incremental" mode.
This is to record state that is computed once and does not change
at every decoding step.
Args:
s: a structure
"""
self.constant_states.append(s)
def get_constant_state(self):
"""Read state that was written in "first_part" mode.
Returns:
a structure
"""
ret = self.constant_states[self.next_constant_state]
self.next_constant_state += 1
return ret
@property
def nonpadding(self):
"""Tensor with zeros in padding positions and ones elsewhere."""
if self.sequence_id is None:
return None
if self.sequence_id == 1:
return 1
else:
return mtf.cast(
mtf.not_equal(self.sequence_id, 0), self.activation_dtype)
def get_position(self):
if self.position_is_default:
return mtf.range(self.mesh, self.length_dim, tf.int32)
else:
return self.position
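# Example (illustrative toy): how a layer with recurrent state typically uses
# the Context during autoregressive decoding. In "first_part" mode the layer
# records an initial loop state; in "incremental" mode it reads the previous
# state and records its updated value.
#
#   def call(self, context, x):
#     if context.mode == "incremental":
#       prev_state, = context.get_states(1)
#       new_state = prev_state + x        # toy update rule
#       context.record_new_states([new_state])
#       return new_state
#     if context.mode == "first_part":
#       context.record_new_states([mtf.zeros_like(x)])  # initial loop state
#     return x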
@gin.configurable
class LayerStack(TransformerLayer):
"""A stack of layers with residual connections and layer norms."""
def __init__(self,
layers,
sublayers_initial=None,
sublayers_per_layer=None,
sublayers_final=None,
dropout_rate=None,
norm_epsilon=None,
recompute_grads=False):
"""Create a LayerStack.
`layers` is a list of TransformerLayer objects representing the
building blocks of the transformer model, e.g.
transformer_layers.SelfAttention.
In addition, there are a bunch of other transformations which occur around
the layer body, and at the beginning and the end of the layer stack. We
call these "sublayers". They are configurable with the `sublayers_initial`,
`sublayers_per_layer`, and `sublayers_final` arguments, each of which takes
a list of sublayer functions.
Each of the sublayer functions has signature:
x, layer_stack, context -> y
where x is the input tensor and y is the output tensor.
The default sublayers specified in defaults.gin are:
transformer.LayerStack.sublayers_initial = [
@transformer.sublayer_dropout,
]
transformer.LayerStack.sublayers_per_layer = [
@transformer.sublayer_rms_norm,
@transformer.sublayer_call_layer,
@transformer.sublayer_dropout,
@transformer.sublayer_residual,
]
transformer.LayerStack.sublayers_final = [
@transformer.sublayer_rms_norm,
@transformer.sublayer_dropout,
]
Refer to these as examples of how to write and call your own sublayer
functions.
`dropout_rate` and `norm_epsilon` should only be specified in a legacy mode,
    for compatibility with older checkpoints.
Args:
layers: a list of TransformerLayer
sublayers_initial: an optional list of sublayer functions
sublayers_per_layer: an optional list of sublayer functions
sublayers_final: an optional list of sublayer functions
dropout_rate: DEPRECATED - a floating-point number
norm_epsilon: DEPRECATED - a floating-point number
recompute_grads: a boolean
"""
self._layers = layers
self._recompute_grads = recompute_grads
self._sublayers_initial = sublayers_initial
self._sublayers_per_layer = sublayers_per_layer
self._sublayers_final = sublayers_final
if (dropout_rate is not None) != (norm_epsilon is not None):
raise ValueError(
"LayerStack.dropout_rate and LayerStack.norm_epsilon should either "
"be both not None (legacy mode) or both None (normal mode)")
if dropout_rate is not None:
self._legacy_init(dropout_rate, norm_epsilon)
def _legacy_init(self, dropout_rate, norm_epsilon):
"""Legacy initialization for use with old checkpoints.
dropout_rate and norm_epsilon are specified in LayerStack.
Custom sublayers are not specified.
Args:
dropout_rate: a float
norm_epsilon: a float
"""
self.dropout_rate = dropout_rate
self.norm_epsilon = norm_epsilon
if (self._sublayers_initial is not None or
self._sublayers_per_layer is not None or
self._sublayers_final is not None):
tf.logging.warning("legacy mode - ignoring custom sublayers")
self._sublayers_initial = [sublayer_legacy_dropout]
self._sublayers_per_layer = [sublayer_legacy_rms_norm,
sublayer_call_layer,
sublayer_legacy_dropout,
sublayer_residual]
self._sublayers_final = [sublayer_legacy_final_rms_norm,
sublayer_legacy_dropout]
def call(self, context, x):
"""Call the layer stack."""
x = self._call_sublayers(self._sublayers_initial, x, context)
context.layer_outputs.append(x)
for lnum, layer in enumerate(self._layers):
with tf.variable_scope(layer.name or ""):
if self._recompute_grads:
def fn(x, l=layer, c=context):
return self._layer_fn(x, l, c)
x = mtf.recompute_grad(fn, [x])
else:
x = self._layer_fn(x, layer, context)
if lnum != len(self._layers) - 1:
context.layer_outputs.append(x)
context.layer_index += 1
x = self._call_sublayers(self._sublayers_final, x, context)
x = sublayer_mask_padding(x, self, context)
context.layer_outputs.append(x)
return x
def _call_sublayers(self, sublayers, x, context):
for s in sublayers:
x = s(x, self, context)
return x
def _layer_fn(self, x, layer, context):
"""Call the layer and its associated sublayers.
Args:
x: a Tensor
layer: a Layer
context: a Context
Returns:
a Tensor
"""
context.current_layer = layer
context.current_layer_input = x
y = self._call_sublayers(self._sublayers_per_layer, x, context)
if y.shape != x.shape:
raise ValueError(
"Layer %s returned misshaped output x=%s y=%s"
% (layer.__class__.__name__, x, y))
return y
@property
def num_layers(self):
return len(self.layers)
@property
def layers(self):
return self._layers
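# Example (illustrative): overriding the per-layer sublayers through gin,
# mirroring the defaults quoted in the LayerStack docstring. Here the layer
# output is additionally scaled by a zero-initialized rezero weight before the
# residual connection (see sublayer_rezero below).
#
#   transformer.LayerStack.sublayers_per_layer = [
#       @transformer.sublayer_rms_norm,
#       @transformer.sublayer_call_layer,
#       @transformer.sublayer_dropout,
#       @transformer.sublayer_rezero,
#       @transformer.sublayer_residual,
#   ]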
@gin.configurable
def sublayer_call_layer(x, layer_stack, context):
x = sublayer_mask_padding(x, layer_stack, context)
layer = context.current_layer
with tf.variable_scope(layer.__class__.__name__):
return layer.call(context, x)
@gin.configurable
def sublayer_mask_padding(x, layer_stack, context):
"""Zero out padding regions.
This "fixes" a bug where extreme values leak from the padding into the
non-padding regions.
  TODO(noam): understand this better and make a more principled fix.
Args:
x: a Tensor
layer_stack: ignored
    context: a Context
Returns:
a Tensor
"""
del layer_stack
if isinstance(context.sequence_id, mtf.Tensor):
return x * mtf.cast(
mtf.not_equal(context.sequence_id, 0), context.activation_dtype)
else:
return x
@gin.configurable
def sublayer_rms_norm(x, layer_stack, context, epsilon=1e-6, name="rms_norm"):
"""RMS normalization.
Args:
x: an input mtf.Tensor
layer_stack: a LayerStack
context: a Context
epsilon: a float
name: a string
Returns:
a mtf.Tensor
"""
del layer_stack
model_dim = context.model.model_dim
with tf.variable_scope(name):
scale = mtf.get_variable(
context.mesh,
"scale",
mtf.Shape(context.model.ensemble_dims + [model_dim]),
initializer=tf.ones_initializer(),
dtype=context.variable_dtype)
variance = mtf.reduce_mean(mtf.square(x), reduced_dim=model_dim)
return x * mtf.rsqrt(variance + epsilon) * scale
@gin.configurable
def sublayer_legacy_rms_norm(x, layer_stack, context):
"""Deprecated - keep for checkpoint/operative_config.gin compatibility."""
return sublayer_rms_norm(x, layer_stack, context, name="layer_norm")
@gin.configurable
def sublayer_legacy_final_rms_norm(x, layer_stack, context):
"""Deprecated - keep for checkpoint/operative_config.gin compatibility."""
return sublayer_rms_norm(x, layer_stack, context, name="final_layer_norm")
@gin.configurable
def sublayer_rms_norm_subsampled(x, layer_stack, context, percentage=100.,
epsilon=1e-6):
"""RMS normalization."""
del layer_stack
model_dim = context.model.model_dim
with tf.variable_scope("layer_norm_subsampled"):
scale = mtf.get_variable(
context.mesh,
"scale",
mtf.Shape(context.model.ensemble_dims + [model_dim]),
initializer=tf.ones_initializer(),
dtype=context.variable_dtype)
var_dim = mtf.Dimension(
model_dim.name,
int(math.ceil(model_dim.size * percentage/100)))
var_activations = mtf.slice(x, 0, var_dim.size, var_dim.name)
variance = mtf.reduce_mean(
mtf.square(var_activations), reduced_dim=var_dim)
return x * mtf.rsqrt(variance + epsilon) * scale
@gin.configurable
def sublayer_scale_norm(x, layer_stack, context, epsilon=1e-6,
                        name="scale_norm"):
"""Scale normalization.
Args:
x: an input mtf.Tensor
layer_stack: a LayerStack
context: a Context
epsilon: a float
name: a string
Returns:
a mtf.Tensor
"""
del layer_stack
model_dim = context.model.model_dim
with tf.variable_scope(name):
scale = mtf.get_variable(
context.mesh,
"scale",
context.model.ensemble_dims,
initializer=tf.ones_initializer(),
dtype=context.variable_dtype)
variance = mtf.reduce_mean(mtf.square(x), reduced_dim=model_dim)
return x * mtf.rsqrt(variance + epsilon) * scale
@gin.configurable
def sublayer_residual(x, layer_stack, context):
del layer_stack
return x + context.current_layer_input
@gin.configurable
def sublayer_dropout(x, layer_stack, context, dropout_rate=0.0):
del layer_stack
if context.train and dropout_rate > 0:
return mtf.dropout(
x, rate=dropout_rate,
noise_shape=mtf.Shape(context.batch_dims + [context.model.model_dim]))
else:
return x
@gin.configurable
def sublayer_clip_activation_gradient(x, layer_stack, context, rms_norm=1.0):
"""Clip activation gradient by RMS-norm."""
del layer_stack, context
return mtf.layers.clip_activation_gradient(x, rms_norm)
@gin.configurable
def sublayer_legacy_dropout(x, layer_stack, context):
return sublayer_dropout(x, layer_stack, context,
dropout_rate=layer_stack.dropout_rate)
@gin.configurable
def sublayer_rezero(x, layer_stack, context, initial_value=0.0):
"""Multiply by zero-initialized scalar (residual not included)."""
del layer_stack
rezero_weight = mtf.get_variable(
x.mesh, "rezero_weight", shape=context.model.ensemble_dims,
dtype=context.variable_dtype,
initializer=tf.constant_initializer(initial_value))
return x * rezero_weight
@gin.configurable
class ReversibleLayerStack(LayerStack):
"""A version of LayerStack that uses a revnet.
This should be very memory-efficient if LayerStack.recompute_grads
is set to True.
Also, sublayers_per_layer should be overridden in gin, so as to remove the
residual.
"Reformer" https://arxiv.org/abs/2001.04451 uses something like this.
"""
def call(self, context, x):
"""Call the layer stack."""
x = self._call_sublayers(self._sublayers_initial, x, context)
context.layer_outputs.append(x)
x1, x1_backwards, x2, x2_backwards = x, None, x, None
for lnum, layer in enumerate(self._layers):
with tf.variable_scope(layer.name or ""):
def fn(x, l=layer, c=context):
return self._layer_fn(x, l, c)
x1, x1_backwards, x2, x2_backwards = (
mtf.layers.reversible_half_residual_and_swap(
x1, x1_backwards, x2, x2_backwards, fn,
recompute_grads=self._recompute_grads))
if lnum != len(self._layers) - 1:
context.layer_outputs.append(x)
context.layer_index += 1
x = x1 + x2
x = self._call_sublayers(self._sublayers_final, x, context)
context.layer_outputs.append(x)
return x
@gin.configurable
def sublayer_true_layer_norm(x, layer_stack, context, epsilon=1e-6):
"""True (aka normal) Normalization."""
del layer_stack
model_dim = context.model.model_dim
with tf.variable_scope("true_layer_norm"):
return mtf.layers.layer_norm(x, model_dim, epsilon)
@gin.configurable
class Unitransformer(object):
"""A Transformer model with only one layer stack, e.g. a language model.
This class is also used as part of Bitransformer, which contains two
Unitransformers.
"""
def __init__(self,
layer_stack,
d_model=1024,
input_vocab_size=gin.REQUIRED,
output_vocab_size=gin.REQUIRED,
autoregressive=gin.REQUIRED,
max_length=gin.REQUIRED,
shared_embedding_and_softmax_weights=False,
label_smoothing=0.0,
z_loss=1e-4,
name="transformer",
layout=None,
mesh_shape=None,
vocab_divisor=128,
ensemble=None,
loss_fn=None,
positional_embedding=True,
sinusoid_positional_embedding=False,
input_full_attention=False,
loss_on_targets_only=False,
loss_denominator=None,
token_dropout_rate=0.0):
"""Create a Unitransformer.
Args:
layer_stack: a LayerStack
d_model: an integer
input_vocab_size: an integer
output_vocab_size: an integer
autoregressive: a boolean
max_length: an integer
shared_embedding_and_softmax_weights: a boolean
label_smoothing: a float
z_loss: a float
name: a string
layout: optional - an input to mtf.convert_to_layout_rules
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
mesh_shape: optional - an input to mtf.convert_to_shape
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
vocab_divisor: an integer
ensemble: an optional integer (for creating an ensemble of models)
loss_fn: an optional function to override self._compute_loss
positional_embedding: a boolean
sinusoid_positional_embedding: a boolean, whether to use the sinusoid
positional embedding from the "Attention Is All You Need" paper. If
True, this will override the positional_embedding setting.
input_full_attention: a boolean
This is an option for seq-to-seq as a language model. Each example
consists of [<inputs>, EOS=1, <targets>, EOS=1]. In the self-attention
layers, positions in the inputs portion of the sequence can see the
entire inputs portion, while positions in the targets portion of the
sequence cannot see future positions.
loss_on_targets_only: a boolean
This is an option for seq-to-seq as a language model. Each example
consists of [<inputs>, EOS=1, <targets>, EOS=1]. We zero-out the
loss for the inputs portion of the example.
loss_denominator: an optional float. The default behavior is to
compute the mean loss across all tokens in the batch, making the
        denominator the size of the targets tensor (omitting ensemble
        dimensions).
        Passing a float here provides an alternative denominator.
One use case is that when fine-tuning a model using a much smaller
batch size than the original training batch, one might want to use the
same denominator as was used for the pretraining. This complication
might be avoided by always using loss_denominator = 1.0.
token_dropout_rate: an optional floating point value
"""
self.layer_stack = layer_stack
self.model_dim = mtf.Dimension("d_model", d_model)
self.input_vocab_size_unpadded = input_vocab_size
self.input_vocab_dim = mtf.Dimension(
"vocab", _round_up_to_multiple(input_vocab_size, vocab_divisor))
self.output_vocab_size_unpadded = output_vocab_size
if output_vocab_size:
self.output_vocab_dim = mtf.Dimension(
"vocab", _round_up_to_multiple(output_vocab_size, vocab_divisor))
else:
self.output_vocab_dim = None
if autoregressive:
raise ValueError("autoregressive Transformer needs output vocabulary")
self.autoregressive = autoregressive
if sinusoid_positional_embedding:
positional_embedding = True
if positional_embedding:
self.max_length_dim = mtf.Dimension("max_length", max_length)
else:
self.max_length_dim = None
self.shared_embedding_and_softmax_weights = (
shared_embedding_and_softmax_weights)
self.label_smoothing = label_smoothing
self.z_loss = z_loss
self.name = name
self.layout = layout
self.mesh_shape = mesh_shape
self.ensemble_dim = (
mtf.Dimension("ensemble", ensemble) if ensemble else None)
self._loss_fn = loss_fn
self.positional_embedding = positional_embedding
self.sinusoid_positional_embedding = sinusoid_positional_embedding
self.input_full_attention = input_full_attention
self.loss_on_targets_only = loss_on_targets_only
self._loss_denominator = loss_denominator
if self.input_full_attention and not self.autoregressive:
raise ValueError(
"input_full_attention only makes sense with autoregressive")
self.token_dropout_rate = token_dropout_rate
@property
def fully_autoregressive(self):
return self.autoregressive and not self.input_full_attention
@property
def ensemble_dims(self):
return [self.ensemble_dim] if self.ensemble_dim else []
def _compute_loss(self, context, logits, targets, output_vocab_dim):
"""Regular cross entropy loss.
Args:
context: a Context
logits: a Tensor, the logits from the decoder
targets: a Tensor
output_vocab_dim: a Dimension
Returns:
A 0-dimensional tensor of the loss.
"""
# Use a custom loss function if one is injected.
if self._loss_fn:
return self._loss_fn(self, context, logits, targets, output_vocab_dim)
off_value = self.label_smoothing / output_vocab_dim.size
on_value = 1.0 - self.label_smoothing + off_value
soft_targets = mtf.one_hot(
mtf.maximum(targets, 0),
output_vocab_dim,
dtype=context.activation_dtype,
on_value=on_value,
off_value=off_value)
loss = mtf.layers.softmax_cross_entropy_with_logits(
logits,
soft_targets,
output_vocab_dim,
z_loss=self.z_loss if context.train else 0.0)
weights = mtf.cast(mtf.greater(targets, 0), context.activation_dtype)
if self.loss_on_targets_only:
weights *= mtf.cast(mtf.logical_not(delimited_lm_inputs_mask(targets)),
dtype=context.activation_dtype)
return (mtf.reduce_sum(loss * weights) /
self.loss_denominator(targets, context.num_microbatches))
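# Worked example (illustrative, not part of the library): with
# label_smoothing=0.1 and an output vocabulary of size 100, the smoothed
# targets above use off_value = 0.1 / 100 = 0.001 and
# on_value = 1.0 - 0.1 + 0.001 = 0.901, so the correct token gets
# probability 0.901 and every other token gets 0.001.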
def _call_internal(self, context, inputs, targets=None):
"""Compute logits based on inputs (all positions in parallel).
Also updates context if applicable.
Args:
context: a Context
inputs: a Tensor
targets: an optional Tensor
Returns:
logits: a Tensor with shape [<batch_dims>, length_dim, output_vocab_dim]
"""
mesh = inputs.mesh
if self.ensemble_dim and self.ensemble_dim not in inputs.shape.dims:
# Training an ensemble where all models are trained on the same examples.
inputs = mtf.broadcast(inputs, [self.ensemble_dim] + inputs.shape.dims)
if targets:
targets = mtf.broadcast(
targets, [self.ensemble_dim] + targets.shape.dims)
if "embedding" in context.shared_params:
vocab_embedding = context.shared_params["embedding"]
else:
vocab_embedding = get_vocab_embedding_cls()(
mesh,
self.input_vocab_dim,
self.model_dim,
context.variable_dtype,
name="embedding",
ensemble_dim=self.ensemble_dim)
if context.train:
inputs = mtf.dropout(inputs, rate=self.token_dropout_rate)
x = vocab_embedding.ids_to_embedding(inputs, context)
if self.positional_embedding or self.sinusoid_positional_embedding:
if self.sinusoid_positional_embedding:
pos_emb_var = sinusoid_positional_embedding_weights(
mesh, self.max_length_dim, self.model_dim,
context.variable_dtype.activation_dtype)
elif "positional_embedding" in context.shared_params:
pos_emb_var = context.shared_params["positional_embedding"]
else:
pos_emb_var = mtf.layers.embedding_weights(
mesh, self.max_length_dim, self.model_dim, context.variable_dtype,
"positional_embedding", ensemble_dim=self.ensemble_dim)
if (context.length_dim is not None and
context.length_dim.size > self.max_length_dim.size):
message = (
"Length dimenison exceeds size of positional embedding table. "
"length_dim.size > max_length_dim.size %s vs %s."
% (context.length_dim, self.max_length_dim))
if context.position_is_default:
# Definitely getting overflow in this case.
raise ValueError(message)
else:
tf.logging.warning(
message +
" This may be OK if there are several shorter sequences packed "
"together. Otherwise, the later positions will get zeros.")
if context.position_is_default:
pos_emb = mtf.rename_dimension(
mtf.slice(pos_emb_var, 0, context.length_dim.size,
self.max_length_dim.name),
self.max_length_dim.name, context.length_dim.name)
else:
pos_emb = mtf.gather(
pos_emb_var, context.position, self.max_length_dim,
output_shape=x.shape)
x += pos_emb
x = self.layer_stack.call(context, x)
if self.output_vocab_dim is None:
return x
if self.shared_embedding_and_softmax_weights:
logits = vocab_embedding.hidden_to_logits(x, context)
else:
logits = mtf.layers.dense(
x, self.output_vocab_dim, use_bias=False,
variable_dtype=context.variable_dtype,
reduced_dims=x.shape.dims[-1:],
name="logits")
if targets is not None and context.losses is not None:
context.losses.append(
self._compute_loss(context, logits, targets, self.output_vocab_dim))
if self.ensemble_dim:
logits = reduce_ensemble_logits(
logits, self.ensemble_dim, self.output_vocab_dim)
return logits
def loss_denominator(self, targets, num_microbatches):
"""Denominator applied to losses.
This is usually the size of the targets tensor (omitting ensemble
dimensions). Alternatively, it is an override value passed to the
class constructor.
Args:
targets: a mtf.Tensor
num_microbatches: an integer - greater than one if the step has been
serialized into multiple microbatches to save memory.
Returns:
a float
"""
if self._loss_denominator is not None:
return float(self._loss_denominator)
else:
ret = float(targets.shape.size) * num_microbatches
if self.ensemble_dim:
# The ensembling should not decrease the gradient to each model
ret /= self.ensemble_dim.size
tf.logging.info("loss denominator: %d" % ret)
return float(ret)
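# Illustrative arithmetic for the default path (hypothetical shapes): for
# targets of shape [batch=8, length=512] with num_microbatches=2 and no
# ensemble dimension, the denominator is 8 * 512 * 2 = 8192, i.e. the loss
# is averaged over all target tokens across the serialized microbatches.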
def call_simple(self,
inputs,
targets,
compute_loss,
mode=tf.estimator.ModeKeys.TRAIN,
variable_dtype=mtf.VariableDType(tf.float32),
sequence_id=None,
subsequence_id=None,
position=None,
encoder_output=None,
encoder_sequence_id=None,
encoder_inputs=None,
shared_params=None,
layer_outputs=None,
encoder_layer_outputs=None,
num_microbatches=1):
"""Compute logits based on inputs (all positions in parallel).
This is called during training and evaluation.
Args:
inputs: an int32 Tensor with shape [<batch_dims>, length_dim]. For
training autoregressive models this should be equal to
autoregressive_inputs(targets, sequence_id).
targets: an optional int32 Tensor with shape [<batch_dims>, length_dim]
compute_loss: a boolean
mode: a tf.estimator.ModeKeys
variable_dtype: a mtf.VariableDType
sequence_id: an optional Tensor
subsequence_id: an optional Tensor
position: an optional Tensor
encoder_output: an optional Tensor
encoder_sequence_id: an optional Tensor
encoder_inputs: an optional Tensor
shared_params: an optional dictionary
layer_outputs: an optional list to append Tensor layer activations to
encoder_layer_outputs: optional - readonly list of tensor activations when
decoding, one per each input layer + the embedding layer
num_microbatches: integer - greater than one if the step has been
serialized into multiple microbatches to save memory.
Returns:
logits: a Tensor with shape [<batch_dims>, output_vocab_dim]
loss: an optional Scalar (if compute_loss=True)
"""
batch_dims = inputs.shape.dims[:-1]
length_dim = inputs.shape.dims[-1]
length_range = mtf.range(inputs.mesh, length_dim, dtype=tf.int32)
if not self.positional_embedding:
# To make relative attention faster, we drop the information about the
# position in the subsequence. The relative attention code then
# assumes that the positions are given by index in the tensor,
# which still leads to the correct computation of relative position.
position = None
if position is None:
position_is_default = True
position = length_range
else:
position_is_default = False
if self.input_full_attention:
# The inputs part of each sequence can fully attend within itself.
full_attention_region = delimited_lm_inputs_mask(targets)
# We can include one additional position to the right - the position
# where the final EOS of the inputs is read and the first target token
# is predicted.
full_attention_region = mtf.logical_or(
full_attention_region,
mtf.shift(full_attention_region, offset=1, dim=length_dim, wrap=False)
)
# We set read_priority and write_priority to 0 in the full-attention
# region and equal to the position elsewhere.
read_priority = write_priority = length_range * mtf.cast(
mtf.logical_not(full_attention_region), tf.int32)
elif self.autoregressive:
# Vanilla autoregressive model - each position can see previous positions.
read_priority = write_priority = length_range
else:
read_priority = write_priority = None
context = Context(
model=self,
mesh=inputs.mesh,
batch_dims=batch_dims,
length_dim=length_dim,
variable_dtype=variable_dtype,
mode=mode,
losses=[] if compute_loss else None,
sequence_id=sequence_id,
subsequence_id=subsequence_id,
position=position,
position_is_default=position_is_default,
encoder_output=encoder_output,
encoder_sequence_id=encoder_sequence_id,
shared_params=shared_params,
layer_outputs=layer_outputs,
encoder_layer_outputs=encoder_layer_outputs,
write_priority=write_priority,
read_priority=read_priority,
inputs=inputs,
encoder_inputs=encoder_inputs,
num_microbatches=num_microbatches)
with tf.variable_scope(self.name):
logits = self._call_internal(context, inputs, targets)
if compute_loss:
loss = mtf.add_n(context.losses)
else:
loss = None
return logits, loss
@gin.configurable(module="Unitransformer")
def sample_autoregressive(self,
partial_sequences,
stop_at_token=1,
max_steps=None,
temperature=0.0,
variable_dtype=mtf.VariableDType(tf.float32),
encoder_output=None,
encoder_sequence_id=None,
encoder_inputs=None,
shared_params=None,
has_partial_sequences=True,
encoder_layer_outputs=None,
never_end=False,
remove_partial_sequences=False,
sampling_keep_top_k=-1,
bos_id=0):
"""Sample randomly one token at a time.
The partial_sequences represent partial sequences to be continued. The
first tokens of each sequence are nonzero representing the given partial
sequences and the last tokens of each sequence are zeros, representing what
needs to be filled in.
If there are no partial sequences (you want to sample from the beginning),
then pass partial_sequences=mtf.zeros(mesh, shape, dtype=tf.int32) and
has_partial_sequences=False (so we can skip computation).
Args:
partial_sequences: an int32 Tensor with shape [<batch_dims>, length_dim]
stop_at_token: an optional integer eos id. Stop when we produce it.
max_steps: an optional integer, the max number of steps to decode.
temperature: an optional floating point value between 0.0 and 1.0.
0.0 means argmax, 1.0 means sample according to the predicted distribution.
variable_dtype: a mtf.VariableDType
encoder_output: an optional Tensor
encoder_sequence_id: an optional Tensor
encoder_inputs: an optional Tensor
shared_params: an optional dictionary
has_partial_sequences: a boolean
encoder_layer_outputs: optional - readonly list of tensor activations when
decoding, one per each input layer + the embedding layer
never_end: a boolean - if set, then avoid generating stop_at_token
remove_partial_sequences: a boolean - whether to remove the partial
sequences from the output
sampling_keep_top_k: an integer - if not -1, only sample from the top k
logits.
bos_id: beginning of sequence id
Returns:
a Tensor with shape [<batch_dims>, length_dim]
"""
if not self.autoregressive:
raise ValueError("must be autoregressive")
inputs = partial_sequences
batch_dims = inputs.shape.dims[:-1]
length_dim = inputs.shape.dims[-1]
initial_position = mtf.reduce_sum(
mtf.to_int32(mtf.not_equal(inputs, 0)), reduced_dim=length_dim)
sequence_id = 1 if encoder_sequence_id is not None else None
length_range = mtf.range(inputs.mesh, length_dim, tf.int32)
if self.input_full_attention:
read_priority = write_priority = length_range * mtf.to_int32(
mtf.greater(length_range, initial_position))
else:
read_priority = write_priority = length_range
context_first_part = Context(
model=self,
mesh=inputs.mesh,
batch_dims=batch_dims,
length_dim=length_dim,
variable_dtype=variable_dtype,
mode="first_part",
position=length_range,
position_is_default=True,
new_states=[],
initial_position=initial_position,
sequence_id=sequence_id,
encoder_output=encoder_output,
encoder_sequence_id=encoder_sequence_id,
constant_states=[],
shared_params=shared_params,
encoder_layer_outputs=encoder_layer_outputs,
write_priority=write_priority,
read_priority=read_priority,
inputs=inputs,
encoder_inputs=encoder_inputs)
shifted_inputs = autoregressive_inputs(inputs)
with tf.variable_scope(self.name):
logits = self._call_internal(context_first_part, shifted_inputs)
del logits
constant_states = context_first_part.constant_states
if not has_partial_sequences:
initial_states = [
mtf.zeros_like(t) for t in context_first_part.new_states]
partial_sequences_eos_count = 0
else:
initial_states = context_first_part.new_states
partial_sequences_eos_count = mtf.reduce_sum(
mtf.to_int32(mtf.equal(partial_sequences, stop_at_token)),
reduced_dim=length_dim)
def cond_fn(position, ids, *unused_states):
"""Should we run another loop iteration."""
past_end = mtf.greater_equal(position, length_dim.size)
if max_steps:
past_end = mtf.logical_or(
past_end, mtf.greater_equal(position - initial_position, max_steps))
is_done = past_end
if stop_at_token is not None:
eos_count = mtf.reduce_sum(
mtf.to_int32(mtf.equal(ids, stop_at_token)),
reduced_dim=length_dim)
has_additional_eos = mtf.greater(eos_count, partial_sequences_eos_count)
is_done = mtf.logical_or(is_done, has_additional_eos)
all_done = mtf.reduce_all(is_done)
return mtf.logical_not(all_done)
def body_fn(position, ids, *states):
"""One step in the decode loop."""
inputs_this_step = mtf.gather(ids, position - 1, length_dim)
# Setting proper bos_id for position == 0. No-op otherwise.
if bos_id:
inputs_this_step += bos_id * mtf.ones_like(inputs_this_step) * mtf.cast(
mtf.equal(position, 0), tf.int32)
context_incremental = Context(
model=self,
mesh=inputs.mesh,
batch_dims=batch_dims,
length_dim=length_dim,
variable_dtype=variable_dtype,
mode="incremental",
position=position,
states=states,
new_states=[],
sequence_id=sequence_id,
encoder_output=encoder_output,
encoder_sequence_id=encoder_sequence_id,
constant_states=constant_states,
shared_params=shared_params,
encoder_layer_outputs=encoder_layer_outputs,
write_priority=write_priority,
read_priority=position,
inputs=inputs_this_step,
encoder_inputs=encoder_inputs)
with tf.variable_scope(self.name, reuse=True):
logits = self._call_internal(context_incremental, inputs_this_step)
if never_end:
logits += mtf.one_hot(
mtf.constant(logits.mesh, stop_at_token, dtype=tf.int32),
self.output_vocab_dim, on_value=-1e9, off_value=0.0,
dtype=logits.dtype)
# TBD whether this should be before or after never_end:
# Note for adding top_p sampling in the future, in other code bases, the
# option to apply temperature is done before the top-k truncation. This
# implementation does this in the opposite order. For top-k this doesn't
# matter, but for top_p it will.
if sampling_keep_top_k != -1:
if sampling_keep_top_k <= 0:
raise ValueError("sampling_keep_top_k must either be -1 or positive.")
k_largest = mtf.nth_largest_element(
logits, n=sampling_keep_top_k,
reduced_dim=self.output_vocab_dim)
logits = mtf.where(mtf.less_equal(logits, k_largest),
mtf.ones_like(logits)*-1e6, logits)
ids_this_step = mtf.sample_with_temperature(
logits, self.output_vocab_dim, temperature)
new_position = position + 1
new_ids = ids + ids_this_step * mtf.one_hot(
position, length_dim, dtype=tf.int32)
return [new_position, new_ids] + context_incremental.new_states
while_loop_inputs = [initial_position, inputs] + initial_states
final_position, outputs = mtf.while_loop(
cond_fn, body_fn, while_loop_inputs)[:2]
del final_position
if has_partial_sequences and remove_partial_sequences:
# remove partial sequences from outputs
partial_length = mtf.reduce_sum(
mtf.to_int32(mtf.not_equal(partial_sequences, 0)),
reduced_dim=length_dim)
outputs = mtf.dynamic_shift(
outputs, -partial_length, length_dim, wrap=False)
return outputs
def beam_search(self,
inputs,
decode_length,
variable_dtype=mtf.VariableDType(tf.float32),
encoder_output=None,
encoder_sequence_id=None,
encoder_inputs=None,
alpha=0.6,
shared_params=None,
encoder_layer_outputs=None,
bos_id=0):
"""Beam search.
Args:
inputs: an int32 zero-Tensor with shape [<batch_dims>, beam_dim,
length_dim].
decode_length: an int32 mtf scalar. Maximum decode length.
variable_dtype: a mtf.VariableDType
encoder_output: an optional Tensor
encoder_sequence_id: an optional Tensor
encoder_inputs: an optional Tensor
alpha: a floating point value (length bonus)
shared_params: an optional dictionary
encoder_layer_outputs: optional - readonly list of tensor activations when
decoding, one per each input layer + the embedding layer
bos_id: beginning of sequence id
Returns:
a Tensor with shape [<batch_dims>, beam_dim, length_dim]
"""
if not self.autoregressive:
raise ValueError("must be autoregressive")
batch_dims = inputs.shape.dims[:-2]
if len(batch_dims) != 1:
raise NotImplementedError(
"beam search supports exactly one batch dimension.")
beam_dim = inputs.shape.dims[-2]
length_dim = inputs.shape.dims[-1]
length_range = mtf.range(inputs.mesh, length_dim, tf.int32)
initial_position = mtf.reduce_sum(
mtf.to_int32(mtf.not_equal(inputs, 0)), reduced_dim=length_dim)
sequence_id = 1 if encoder_sequence_id is not None else None
if self.input_full_attention:
# This only makes sense in the case of beam search with given partial
# sequences, which is not yet implemented.
# TODO(noam): implement
raise NotImplementedError(
"Beam search for language models not yet implemented")
else:
read_priority = write_priority = length_range
context_first_part = Context(
model=self,
mesh=inputs.mesh,
batch_dims=batch_dims + [beam_dim],
length_dim=length_dim,
variable_dtype=variable_dtype,
mode="first_part",
position=length_range,
position_is_default=True,
new_states=[],
initial_position=initial_position,
sequence_id=sequence_id,
encoder_output=encoder_output,
encoder_sequence_id=encoder_sequence_id,
constant_states=[],
shared_params=shared_params,
encoder_layer_outputs=encoder_layer_outputs,
write_priority=write_priority,
read_priority=read_priority,
inputs=inputs,
encoder_inputs=encoder_inputs)
shifted_inputs = autoregressive_inputs(inputs)
with tf.variable_scope(self.name):
logits = self._call_internal(context_first_part, shifted_inputs)
del logits
# There are no partial targets.
# Replace initial states by zeros to avoid computing them.
initial_states = [mtf.zeros_like(t) for t in context_first_part.new_states]
constant_states = context_first_part.constant_states
def logits_fn(step_num, ids, states):
"""logits_fn for mtf.beam_search.beam_search()."""
inputs_this_step = mtf.gather(ids, step_num - 1, length_dim)
# Setting proper bos_id for step_num == 0. No-op otherwise.
if bos_id:
inputs_this_step += bos_id * mtf.ones_like(inputs_this_step) * mtf.cast(
mtf.equal(step_num, 0), tf.int32)
context_incremental = Context(
model=self,
mesh=inputs.mesh,
batch_dims=batch_dims + [beam_dim],
length_dim=length_dim,
variable_dtype=variable_dtype,
mode="incremental",
position=step_num,
states=states,
new_states=[],
sequence_id=sequence_id,
encoder_output=encoder_output,
encoder_sequence_id=encoder_sequence_id,
constant_states=constant_states,
shared_params=shared_params,
encoder_layer_outputs=encoder_layer_outputs,
write_priority=write_priority,
read_priority=step_num,
inputs=inputs_this_step,
encoder_inputs=encoder_inputs)
with tf.variable_scope(self.name, reuse=True):
logits = self._call_internal(context_incremental, inputs_this_step)
return mtf.to_float(logits), context_incremental.new_states
beams, unused_scores = mtf.beam_search.beam_search(
logits_fn,
inputs,
alpha,
states=initial_states,
decode_length=decode_length,
use_tpu=True,
dtype=tf.float32,
mesh_shape=self.mesh_shape,
layout=self.layout)
return mtf.gather(
beams, mtf.constant(inputs.mesh, 0, dtype=tf.int32), beam_dim)
@gin.configurable
def shift_targets(targets, bos_id=0, eos_id=1):
"""Transforms decoder labels to decoder inputs.
DEPRECATED - use autoregressive_inputs()
Args:
targets: decoder labels
bos_id: begin of sequence id, defaults to 0
eos_id: end of sequence id, defaults to 1
Returns:
Decoder inputs.
"""
tf.logging.warning("warning: shift_targets is deprecated - "
"use autoregressive_inputs() instead.")
length_dim = targets.shape.dims[-1]
shifted_targets = mtf.shift(targets, offset=1, dim=length_dim, wrap=False)
# We should have a 0 at the beginning of each sequence rather than the
# shifted EOS (e.g. 1) from the previous sequence.
shifted_targets *= mtf.to_int32(mtf.not_equal(shifted_targets, eos_id))
if bos_id:
shifted_targets += mtf.to_int32(
mtf.logical_and(
mtf.equal(shifted_targets, 0),
mtf.not_equal(targets, 0))) * bos_id
return shifted_targets
def autoregressive_inputs(targets, sequence_id=None):
"""Generate inputs for an autoregressive model, by shifting the targets.
For the first element of each sequence, the returned input id is 0.
For a "packed" dataset, also pass the sequence_id tensor, which aligns
with the targets tensor and contains different values for different
concatenated examples.
Args:
targets: a tf.int32 Tensor with shape [..., length_dim]
sequence_id: an optional Tensor with the same shape as targets
Returns:
a Tensor with dtype tf.int32 and the same shape as targets.
"""
length_dim = targets.shape.dims[-1]
inputs = mtf.shift(targets, offset=1, dim=length_dim, wrap=False)
# Negative ids are used to indicate masked loss during training.
# Switch them back to positive numbers.
inputs = mtf.abs(inputs)
# We should have a 0 at the beginning of each sequence rather than the
# shifted EOS (e.g. 1) from the previous sequence.
if sequence_id is not None:
not_first_in_sequence = mtf.equal(
sequence_id,
mtf.shift(sequence_id, offset=1, dim=length_dim, wrap=False))
inputs *= mtf.to_int32(not_first_in_sequence)
return inputs
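# Example (hypothetical token ids, shown only to illustrate the shift):
# targets = [3, 7, 1, 5, 9, 1] for a single sequence gives
# inputs = [0, 3, 7, 1, 5, 9]; with a packed dataset where
# sequence_id = [1, 1, 1, 2, 2, 2], the EOS carried over from the first
# example is zeroed out, giving inputs = [0, 3, 7, 0, 5, 9].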
@gin.configurable
class Bitransformer(object):
"""A Transformer sequence-to-sequence model with two layer stacks."""
def __init__(self, encoder, decoder, shared_embedding=True):
"""Create a Bitransformer.
Args:
encoder: a mtf.unitransformer
decoder: a mtf.unitransformer
shared_embedding: a boolean
"""
self.encoder = encoder
self.decoder = decoder
self.shared_embedding = shared_embedding
@property
def output_vocab_dim(self):
return self.decoder.output_vocab_dim
def loss_denominator(self, targets, num_microbatches):
return self.decoder.loss_denominator(targets, num_microbatches)
@property
def z_loss(self):
return self.decoder.z_loss
def _shared_params(self, mesh, variable_dtype):
"""Create parameters that are shared between encoder and decoder.
Args:
mesh: a Mesh
variable_dtype: a VariableDType
Returns:
a dictionary
"""
shared_params = {}
if self.shared_embedding:
with tf.variable_scope("shared"):
if not (self.encoder.model_dim == self.decoder.model_dim and
self.encoder.input_vocab_dim == self.decoder.input_vocab_dim):
raise ValueError(
"shared_embedding requires encoder and decoder to have identical"
" d_model and vocabulary sizes")
shared_params["embedding"] = get_vocab_embedding_cls()(
mesh,
self.encoder.input_vocab_dim,
self.encoder.model_dim,
variable_dtype,
name="embedding",
ensemble_dim=self.encoder.ensemble_dim)
if (self.encoder.positional_embedding
and self.decoder.positional_embedding
and self.encoder.max_length_dim == self.decoder.max_length_dim):
if (self.encoder.sinusoid_positional_embedding and
self.decoder.sinusoid_positional_embedding):
pos_emb_var = sinusoid_positional_embedding_weights(
mesh, self.encoder.max_length_dim, self.encoder.model_dim,
variable_dtype.activation_dtype)
else:
pos_emb_var = mtf.layers.embedding_weights(
mesh,
self.encoder.max_length_dim,
self.encoder.model_dim,
variable_dtype,
"positional_embedding",
ensemble_dim=self.encoder.ensemble_dim)
shared_params["positional_embedding"] = pos_emb_var
return shared_params
def call_simple(self,
inputs,
targets,
compute_loss,
mode=tf.estimator.ModeKeys.TRAIN,
variable_dtype=mtf.VariableDType(tf.float32),
encoder_sequence_id=None,
decoder_sequence_id=None,
decoder_subsequence_id=None,
encoder_position=None,
decoder_position=None,
num_microbatches=1):
"""Compute logits based on inputs (all positions in parallel).
This is called during training and evaluation.
Args:
inputs: an int32 Tensor with shape [<batch_dims>, length_dim]
targets: an optional int32 Tensor with shape [<batch_dims>, length_dim]
compute_loss: a boolean
mode: a tf.estimator.ModeKeys
variable_dtype: a mtf.VariableDType
encoder_sequence_id: an optional Tensor
decoder_sequence_id: an optional Tensor
decoder_subsequence_id: an optional Tensor
encoder_position: an optional Tensor
decoder_position: an optional Tensor
num_microbatches: integer - greater than one if the step has been
serialized into multiple microbatches to save memory.
Returns:
logits: a Tensor with shape [<batch_dims>, output_vocab_dim]
loss: an optional Scalar (if compute_loss=True)
"""
# encoder_sequence_id and decoder_sequence_id are used to delineate packed
# examples but are also necessary to indicate padding where sequence_id==0.
# If they are absent, then we assume that padding is indicated by zeros in
# the inputs/targets, and we make up sequence_id tensors to indicate this.
if encoder_sequence_id is None:
encoder_sequence_id = mtf.minimum(inputs, 1)
if decoder_sequence_id is None:
decoder_sequence_id = mtf.minimum(targets, 1)
encoder_layer_outputs = []
shared_params = self._shared_params(inputs.mesh, variable_dtype)
encoder_output, encoder_loss = self.encoder.call_simple(
inputs,
None,
compute_loss,
mode=mode,
variable_dtype=variable_dtype,
sequence_id=encoder_sequence_id,
position=encoder_position,
shared_params=shared_params,
layer_outputs=encoder_layer_outputs,
num_microbatches=num_microbatches)
encoder_output = mtf.layers.rename_length_to_memory_length(encoder_output)
if encoder_sequence_id is not None:
encoder_sequence_id = mtf.layers.rename_length_to_memory_length(
encoder_sequence_id)
logits, loss = self.decoder.call_simple(
autoregressive_inputs(targets, sequence_id=decoder_sequence_id),
targets,
compute_loss,
mode=mode,
variable_dtype=variable_dtype,
sequence_id=decoder_sequence_id,
subsequence_id=decoder_subsequence_id,
encoder_output=encoder_output,
encoder_sequence_id=encoder_sequence_id,
encoder_inputs=mtf.layers.rename_length_to_memory_length(inputs),
position=decoder_position,
shared_params=shared_params,
encoder_layer_outputs=encoder_layer_outputs,
num_microbatches=num_microbatches)
if loss is not None and encoder_loss is not None:
loss += encoder_loss
return logits, loss
@gin.configurable(module="Bitransformer")
def decode(self,
inputs,
variable_dtype=mtf.VariableDType(tf.float32),
beam_size=1,
alpha=0.6,
temperature=0.0,
sampling_keep_top_k=-1,
decode_length_multiplier=1.5,
decode_length_constant=10,
max_decode_length=None):
"""Sampling or beam search.
TODO(noam): should we make the output length dimension different from the
input length dimension?
Args:
inputs: a Tensor with shape [<batch_dims>, beam_dim, length_dim]
variable_dtype: a mtf.VariableDType
beam_size: an integer >= 1
alpha: a floating point value (length bonus for beam search)
temperature: a value between 0 and 1 (must be 0 if beam_size > 1).
0.0 means argmax, 1.0 means sample according to the predicted distribution.
sampling_keep_top_k: a value between 1 and vocab_size used to sample from
only the k most likely logits. Set to -1 to sample from all logits.
decode_length_multiplier: a float
decode_length_constant: a float
max_decode_length: an optional integer
Returns:
a Tensor with shape [<batch_dims>, beam_dim, length_dim]
"""
encoder_layer_outputs = []
shared_params = self._shared_params(inputs.mesh, variable_dtype)
encoder_sequence_id = mtf.minimum(inputs, 1)
encoder_output, encoder_loss = self.encoder.call_simple(
inputs=inputs,
targets=None,
compute_loss=False,
mode=tf.estimator.ModeKeys.PREDICT,
variable_dtype=variable_dtype,
sequence_id=encoder_sequence_id,
shared_params=shared_params,
layer_outputs=encoder_layer_outputs)
del encoder_loss
encoder_output = mtf.layers.rename_length_to_memory_length(encoder_output)
encoder_sequence_id = mtf.layers.rename_length_to_memory_length(
encoder_sequence_id)
batch_dims = inputs.shape[:-1]
length_dim = inputs.shape[-1]
if max_decode_length is None:
decode_length_dim = length_dim
else:
decode_length_dim = mtf.Dimension("length", max_decode_length)
if beam_size == 1:
ids_shape = mtf.Shape(batch_dims + [decode_length_dim])
partial_sequences = mtf.zeros(inputs.mesh, ids_shape, dtype=tf.int32)
return self.decoder.sample_autoregressive(
partial_sequences,
temperature=temperature,
sampling_keep_top_k=sampling_keep_top_k,
variable_dtype=variable_dtype,
encoder_output=encoder_output,
encoder_sequence_id=encoder_sequence_id,
encoder_inputs=mtf.layers.rename_length_to_memory_length(inputs),
shared_params=shared_params,
has_partial_sequences=False,
encoder_layer_outputs=encoder_layer_outputs)
else:
if temperature != 0:
raise ValueError(
"don't know how to beam search with nonzero temperature")
if sampling_keep_top_k != -1:
raise ValueError(
"don't know how to beam search with top-k value other than -1.")
# beam search
beam_dim = mtf.Dimension("beam", beam_size)
ids_shape = mtf.Shape(batch_dims + [beam_dim, decode_length_dim])
partial_sequences = mtf.zeros(inputs.mesh, ids_shape, dtype=tf.int32)
input_length = mtf.reduce_sum(
mtf.to_float(mtf.cast(inputs, tf.bool)),
reduced_dim=length_dim)
max_input_length = mtf.reduce_max(input_length)
decode_length = mtf.cast(
max_input_length * decode_length_multiplier
+ decode_length_constant, tf.int32)
return self.decoder.beam_search(
partial_sequences,
decode_length,
variable_dtype=variable_dtype,
encoder_output=encoder_output,
encoder_sequence_id=encoder_sequence_id,
encoder_inputs=inputs,
alpha=alpha,
shared_params=shared_params,
encoder_layer_outputs=encoder_layer_outputs)
@gin.configurable
class StudentTeacher(object):
"""A teacher and a student to be taught via distillation."""
def __init__(self,
student,
teacher,
temperature=None,
fraction_soft=None,
distill_start_step=0,
teacher_checkpoint=None,
initialize_student_weights=False):
"""Create a StudentTeacher.
Args:
student: a Unitransformer or Bitransformer
teacher: a Unitransformer or Bitransformer
temperature: a float, the temperature of the softmax for distilling from
the teacher. Required only when training.
fraction_soft: a float between 0 and 1, the contribution of the soft
target cross entropy to the training loss. The rest of the loss will be
the cross entropy with the one-hot actual label. Required only when
training.
distill_start_step: an int, training steps after which teacher loss is
incorporated in the overall loss.
teacher_checkpoint: a string, the path to the teacher checkpoint that we
wish to use. Required only when training.
initialize_student_weights: a boolean, if true then initialize any
of the student weights whose name matches those in the teacher
checkpoint.
"""
self.student = student
self.teacher = teacher
self.temperature = temperature
self.fraction_soft = fraction_soft
self.distill_start_step = distill_start_step
self.teacher_checkpoint = teacher_checkpoint
self.initialize_student_weights = initialize_student_weights
def call_simple(self,
inputs,
targets,
compute_loss,
variable_dtype=mtf.VariableDType(tf.float32),
num_microbatches=1,
**kargs):
"""Compute logits based on inputs (all positions in parallel).
This is called during training and evaluation.
Args:
inputs: an int32 Tensor with shape [<batch_dims>, length_dim]. For
training autoregressive models this should be equal to
mtf.shift(targets, offset=1, dim=length_dim, wrap=False).
targets: an optional int32 Tensor with shape [<batch_dims>, length_dim]
compute_loss: a boolean
variable_dtype: a mtf.VariableDType
num_microbatches: integer - greater than one if the step has been
serialized into multiple microbatches to save memory.
**kargs: additional arguments to pass to the student.call_simple and
teacher.call_simple
Returns:
logits: a Tensor with shape [<batch_dims>, output_vocab_dim]
loss: an optional Scalar (if compute_loss=True)
"""
with tf.variable_scope("student"):
student_logits, hard_loss = self.student.call_simple(
inputs,
targets,
compute_loss=True,
variable_dtype=variable_dtype,
num_microbatches=num_microbatches,
**kargs)
if not compute_loss:
return student_logits
elif self.fraction_soft == 0.0:
# Do not create the teacher if we do not need it.
return student_logits, hard_loss
assert self.student.output_vocab_dim == self.teacher.output_vocab_dim
assert self.student.z_loss == self.teacher.z_loss
output_vocab_dim = self.student.output_vocab_dim
z_loss = self.student.z_loss
graph = inputs.mesh.graph
with tf.variable_scope("teacher"):
teacher_logits, _ = self.teacher.call_simple(
inputs,
targets,
compute_loss=True,
variable_dtype=variable_dtype,
num_microbatches=num_microbatches,
**kargs)
graph.make_variables_untrainable(
[v for v in graph.trainable_variables if v.name.startswith("teacher/")])
soft_targets = mtf.softmax(teacher_logits / self.temperature,
output_vocab_dim)
soft_loss = mtf.layers.softmax_cross_entropy_with_logits(
student_logits / self.temperature,
mtf.stop_gradient(soft_targets),
output_vocab_dim,
z_loss=z_loss)
# Ignore losses from padding regions.
weights = mtf.cast(mtf.greater(targets, 0), soft_loss.dtype)
soft_loss = (mtf.reduce_sum(soft_loss * weights) /
self.student.loss_denominator(targets, num_microbatches))
global_step = tf.train.get_or_create_global_step()
current_fraction_soft = tf.cast(
tf.cond(
tf.math.greater(global_step, self.distill_start_step),
lambda: self.fraction_soft, lambda: tf.constant(0.0)),
dtype=tf.bfloat16)
loss = (1.0 - current_fraction_soft) * hard_loss \
+ self.temperature**2 * current_fraction_soft * soft_loss
return student_logits, loss
def decode(self, *args, **kargs):
"""Sample from the student.
Args:
*args: arguments to Unitransformer.sample_autoregressive or
Bitransformer.decode
**kargs: arguments to Unitransformer.sample_autoregressive or
Bitransformer.decode
Returns:
a Tensor with the same shape as the output of
Unitransformer.sample_autoregressive or Bitransformer.decode
"""
with tf.variable_scope("student"):
if isinstance(self.student, Unitransformer):
return self.student.sample_autoregressive(*args, **kargs)
elif isinstance(self.student, Bitransformer):
return self.student.decode(*args, **kargs)
else:
raise ValueError("unrecognized class")
def initialize(self):
"""Initialize the teacher and maybe student model from the checkpoint.
This function will be called after the graph has been constructed.
"""
if self.fraction_soft == 0.0:
# Do nothing if we do not need the teacher.
return
vars_to_restore = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope="teacher")
if self.initialize_student_weights:
student_vars_to_restore = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope="student")
# See what variables exist in the checkpoint
ckpt_vars = set([
name for name, _ in tf.train.list_variables(self.teacher_checkpoint)])
student_load_dict = {}
# Loop over all student variables and see if any can be loaded from ckpt
for var in student_vars_to_restore:
var_name = var.name[len("student/"):].split(":")[0]
if var_name in ckpt_vars:
student_load_dict[var_name] = var
else:
tf.logging.info("Student variable not found in ckpt: {}".format(
var_name))
loaded_vars = set(student_load_dict.keys())
tf.logging.info("Variables not restored from ckpt for student: {}".format(
ckpt_vars - loaded_vars))
tf.train.init_from_checkpoint(
self.teacher_checkpoint, student_load_dict)
# Initialize teacher weights
tf.train.init_from_checkpoint(
self.teacher_checkpoint,
{v.name[len("teacher/"):].split(":")[0]: v for v in vars_to_restore})
# gin-configurable constructors
@gin.configurable
def make_layer_stack(layers=gin.REQUIRED,
layer_stack_cls=LayerStack,
num_layers=6,
block_scope=True):
"""Configurable layer stack.
The "layers" argument specifies the layers in each block. It is a list
of specifications. Each specification is either a subclass of
TransformerLayer or a list/tuple containing such a subclass as well as other
optional items. Each optional item is either a string (the layer name), or
a dictionary of kwargs to be passed to the class constructor.
Example:
layers=[
transformer_layers.SelfAttention,
[transformer_layers.DenseReluDense,
"feedforward", {"hidden_size": 2048, "dropout_rate":0.2}],
]
The "num_layers" argument specifies the number of blocks.
Args:
layers: a list (see above)
layer_stack_cls: a class, e.g. LayerStack or ReversibleLayerStack
num_layers: an integer
block_scope: a bool, if True then use scopes of the format
```
block_000/layer_000/...
block_000/layer_001/...
...
block_001/layer_000/...
block_001/layer_001/...
```
If False then use scopes of the format
```
layer_000/...
layer_001/...
layer_002/...
...
```
Returns:
a LayerStack
"""
layer_stack = []
for block in range(num_layers):
for n, cls in enumerate(layers):
# Set name to None if it wasn't provided which simplifies the logic below
name = None
kwargs = {}
if isinstance(cls, (list, tuple)):
for x in cls:
if isinstance(x, str):
name = x
elif isinstance(x, dict):
kwargs = x
else:
cls = x
if block_scope:
name = "block_{:03d}/{}".format(block, name or "layer_{:03d}".format(n))
else:
name = name or "layer_{:03d}".format(len(layer_stack))
layer = cls(**kwargs)
layer.set_name(name)
layer_stack.append(layer)
return layer_stack_cls(layer_stack)
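# Sketch of the resulting scopes (assuming two layers per block, num_layers=2
# blocks, and no explicit layer names): with block_scope=True the layers are
# named block_000/layer_000, block_000/layer_001, block_001/layer_000,
# block_001/layer_001; with block_scope=False they are simply
# layer_000 ... layer_003.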
@gin.configurable
def make_bitransformer(
input_vocab_size=gin.REQUIRED,
output_vocab_size=gin.REQUIRED,
layout=None,
mesh_shape=None,
encoder_name="encoder",
decoder_name="decoder",
bitransformer_cls=Bitransformer):
"""Gin-configurable bitransformer constructor.
In your config file you need to set the encoder and decoder layers like this:
encoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.DenseReluDense,
]
decoder/make_layer_stack.layers = [
@transformer_layers.SelfAttention,
@transformer_layers.EncDecAttention,
@transformer_layers.DenseReluDense,
]
Args:
input_vocab_size: an integer
output_vocab_size: an integer
layout: optional - an input to mtf.convert_to_layout_rules
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
mesh_shape: optional - an input to mtf.convert_to_shape
Some layers (e.g. MoE layers) cheat by looking at layout and mesh_shape
encoder_name: optional - a string giving the Unitransformer encoder name.
decoder_name: optional - a string giving the Unitransformer decoder name.
bitransformer_cls: a class that implements the bitransformer with the
encoder and decoder both of which are Unitransformer instances.
Returns:
a bitransformer_cls instance
"""
with gin.config_scope("encoder"):
encoder = Unitransformer(
layer_stack=make_layer_stack(),
input_vocab_size=input_vocab_size,
output_vocab_size=None,
autoregressive=False,
name=encoder_name,
layout=layout,
mesh_shape=mesh_shape)
with gin.config_scope("decoder"):
decoder = Unitransformer(
layer_stack=make_layer_stack(),
input_vocab_size=output_vocab_size,
output_vocab_size=output_vocab_size,
autoregressive=True,
name=decoder_name,
layout=layout,
mesh_shape=mesh_shape)
return bitransformer_cls(encoder, decoder)
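# Minimal usage sketch (hypothetical vocabulary sizes; the gin config for the
# encoder/decoder layer stacks must already be set as described above):
#
#   model = make_bitransformer(input_vocab_size=32000, output_vocab_size=32000)
#   # model.encoder and model.decoder are Unitransformer instances.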
@gin.configurable
def make_bi_student_teacher(input_vocab_size=gin.REQUIRED,
output_vocab_size=gin.REQUIRED,
layout=None,
mesh_shape=None):
"""Gin-configurable bitransformer student teacher constructor.
In your config file you need to set the encoder and decoder layers like this:
encoder_layers = [
@mesh_tensorflow.transformer.transformer_layers.SelfAttention,
@mesh_tensorflow.transformer.transformer_layers.DenseReluDense,
]
decoder_layers = [
@mesh_tensorflow.transformer.transformer_layers.SelfAttention,
@mesh_tensorflow.transformer.transformer_layers.EncDecAttention,
@mesh_tensorflow.transformer.transformer_layers.DenseReluDense,
]
teacher/encoder/transformer.make_layer_stack.layers = %encoder_layers
teacher/decoder/transformer.make_layer_stack.layers = %decoder_layers
student/encoder/transformer.make_layer_stack.layers = %encoder_layers
student/decoder/transformer.make_layer_stack.layers = %decoder_layers
Args:
input_vocab_size: an integer
output_vocab_size: an integer
layout: optional - an input to mtf.convert_to_layout_rules Some layers (e.g.
MoE layers) cheat by looking at layout and mesh_shape
mesh_shape: optional - an input to mtf.convert_to_shape Some layers (e.g.
MoE layers) cheat by looking at layout and mesh_shape
Returns:
a StudentTeacher
"""
with gin.config_scope("student"):
student = make_bitransformer(
input_vocab_size=input_vocab_size,
output_vocab_size=output_vocab_size,
layout=layout,
mesh_shape=mesh_shape)
with gin.config_scope("teacher"):
teacher = make_bitransformer(
input_vocab_size=input_vocab_size,
output_vocab_size=output_vocab_size,
layout=layout,
mesh_shape=mesh_shape)
return StudentTeacher(student=student, teacher=teacher)
def _round_up_to_multiple(n, divisor):
return n + -n % divisor
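# Example arithmetic: _round_up_to_multiple(32100, 128) returns
# 32100 + (-32100 % 128) = 32100 + 28 = 32128, while a value that is already
# a multiple is unchanged, e.g. _round_up_to_multiple(1024, 128) == 1024.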
def delimited_lm_inputs_mask(ids, eos_id=1):
"""Binary mask indicating which parts of the ids represent the inputs.
Assumes that the ids consist of packed sequences where each example is
represented by two eos-terminated sequences, i.e.
[<inputs0>, EOS, <targets0>, EOS, <inputs1>, EOS, <targets1>, EOS ...]
As such, the inputs are the parts where the number of previous EOS tokens
is even.
Args:
ids: an int32 mtf.Tensor with shape [..., length_dim]
eos_id: an integer
Returns:
a boolean mtf.Tensor with the same shape as ids
"""
length_dim = ids.shape.dims[-1]
return mtf.equal(mtf.mod(mtf.cumsum(mtf.to_int32(mtf.equal(ids, eos_id)),
length_dim, exclusive=True), 2), 0)
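# Example (hypothetical token ids, eos_id=1):
#   ids  = [3, 4, 1, 7, 8, 1, 5, 1]
#   mask = [T, T, T, F, F, F, T, T]
# i.e. <inputs0> and its EOS are marked True, <targets0> and its EOS are
# False, and the next packed example's inputs are True again.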
@gin.configurable
def reduce_ensemble_logits_select(logits, ensemble_dim, vocab_dim, model_id=0):
"""Select logits from the model_id-th element of the ensemble."""
del vocab_dim
return mtf.gather(logits, model_id % ensemble_dim.size, ensemble_dim)
@gin.configurable
def reduce_ensemble_logits_mean_prob(logits, ensemble_dim, vocab_dim):
"""Probabilities equal to arithmetic mean probability across models."""
probs = mtf.softmax(logits, reduced_dim=vocab_dim)
probs = mtf.reduce_mean(probs, reduced_dim=ensemble_dim)
return mtf.log(mtf.maximum(probs, 1e-20))
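# Sketch of the math: for a two-model ensemble assigning probabilities p1 and
# p2 to a token, the returned "logit" is log((p1 + p2) / 2), clipped below at
# log(1e-20) to avoid -inf for tokens that both models rule out.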
@gin.configurable
def reduce_ensemble_logits_mean_logit(logits, ensemble_dim, vocab_dim):
"""Probabilities proportional to geometric mean probability across models."""
del vocab_dim
return mtf.reduce_mean(logits, reduced_dim=ensemble_dim)
@gin.configurable
def reduce_ensemble_logits(logits, ensemble_dim, vocab_dim,
reduce_fn=reduce_ensemble_logits_mean_prob):
"""Configurable reduction function for decoding from an ensemble.
reduce_fn is a function which takes:
a logits tensor containing ensemble_dim (logits from all models)
ensemble_dim
vocab_dim
and returns a logits tensor without ensemble_dim.
Args:
logits: a mtf.Tensor containing ensemble_dim
ensemble_dim: a mtf.Dimension
vocab_dim: a mtf.Dimension
reduce_fn: a function
Returns:
a mtf.Tensor with shape logits.shape - ensemble_dim
"""
return reduce_fn(logits, ensemble_dim, vocab_dim)
@gin.configurable
class VocabEmbedding(object):
"""A class to go from vocab ids to model states and model states to logits."""
def __init__(self, mesh, vocab_dim, output_dim, variable_dtype, name,
ensemble_dim, scale_variable_like_classifier_weights=False):
"""Embedding for the vocabulary.
Most of the arguments get passed to `mtf.layers.embedding_weights`.
Args:
mesh: a mtf.Mesh
vocab_dim: a mtf.Dimension
output_dim: a mtf.Dimension
variable_dtype: a mtf.VariableDType
name: a string
ensemble_dim: a mtf.Dimension
scale_variable_like_classifier_weights: a boolean
"""
self._vocab_dim = vocab_dim
self._output_dim = output_dim
self._scale_variable_like_classifier_weights = (
scale_variable_like_classifier_weights)
if self._scale_variable_like_classifier_weights:
initializer = tf.random_normal_initializer(
stddev=self._output_dim.size ** -0.5)
else:
initializer = None
self._embedding_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=vocab_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name=name,
ensemble_dim=ensemble_dim,
initializer=initializer)
def ids_to_embedding(self, ids, context):
del context
ret = mtf.gather(self._embedding_weights, ids, self._vocab_dim)
if self._scale_variable_like_classifier_weights:
ret *= self._output_dim.size ** 0.5
return ret
def hidden_to_logits(self, hidden, context):
del context
if not self._scale_variable_like_classifier_weights:
hidden *= self._output_dim.size**-0.5
return mtf.einsum([hidden, self._embedding_weights],
reduced_dims=[self._output_dim])
@gin.configurable
def get_vocab_embedding_cls(cls=VocabEmbedding):
"""Configurable function to get the class to use for vocab embeddings.
Args:
cls: a class implementing the interface of mtf.transformer VocabEmbedding
Returns:
the class
"""
return cls
def sinusoid_positional_embedding_weights(mesh,
max_length_dim,
model_dim,
dtype,
min_timescale=1.0,
max_timescale=1.0e4):
"""Gets a bunch of sinusoids of different frequencies.
Mostly copied from tensor2tensor's get_timing_signal_1d.
Args:
mesh: a mtf.Mesh
max_length_dim: a mtf.Dimension
model_dim: a mtf.Dimension
dtype: a tf.DType
min_timescale: a float
max_timescale: a float
Returns:
an mtf.Tensor of timing signals with shape [max_length_dim, model_dim]
Raises:
ValueError: If the model_dim is not divisible by 2.
"""
if model_dim.size % 2:
raise ValueError("model_dim must be divisible by 2")
num_timescales = model_dim.size // 2
timescale_dim = mtf.Dimension(model_dim.name, num_timescales)
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
max(float(num_timescales) - 1, 1))
inv_timescales = min_timescale * mtf.exp(
mtf.mtf_range(mesh, timescale_dim, dtype=tf.float32) *
-log_timescale_increment)
position = mtf.mtf_range(mesh, max_length_dim, dtype=tf.float32)
scaled_time = mtf.einsum([position, inv_timescales])
# Please note that this slightly differs from the published paper.
# See a discussion here: https://github.com/tensorflow/tensor2tensor/pull/177
embeddings = mtf.concat(
[mtf.sin(scaled_time), mtf.cos(scaled_time)], model_dim.name)
return mtf.cast(embeddings, dtype)
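# Worked example of the timescales (illustrative): with model_dim.size = 512,
# min_timescale = 1.0 and max_timescale = 1e4, there are 256 timescales and
# inv_timescales[j] = exp(-j * log(1e4) / 255), so channel j of the embedding
# is sin(position * inv_timescales[j]) for the first 256 channels and
# cos(position * inv_timescales[j]) for the last 256.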
| mesh-master | mesh_tensorflow/transformer/transformer.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mesh_tensorflow.transformer.transformer_layers."""
import collections
import mesh_tensorflow as mtf
from mesh_tensorflow import test_utils
from mesh_tensorflow.transformer import transformer
from mesh_tensorflow.transformer import transformer_layers
import mock
import numpy as np
import tensorflow.compat.v1 as tf
def get_dummy_decoder_context(converter,
batch=2,
d_model=6,
length=4,
mode="incremental",
initial_position=None,
state=None,
inputs=None):
batch_dim = mtf.Dimension("batch", batch)
length_dim = mtf.Dimension("length", length)
# Set up a dummy model
layer_stack = transformer.LayerStack(layers=[])
model = transformer.Unitransformer(
d_model=d_model,
input_vocab_size=10, # dummy values
output_vocab_size=10, # dummy values
autoregressive=True,
max_length=length,
layer_stack=layer_stack)
if state is not None:
state_mtf = converter.convert_np_array_to_mtf_tensor(
state, dtype=tf.float32, dim_names=["batch", "length", "d_model"])
states = [state_mtf]
else:
states = None
if initial_position:
initial_position = mtf.constant(
converter.mesh,
initial_position,
shape=mtf.Shape([batch_dim]),
dtype=tf.int32)
if inputs is not None:
inputs = converter.convert_np_array_to_mtf_tensor(
inputs, dim_names=["batch", "length"])
context = transformer.Context(
model=model,
mode=mode,
states=states,
new_states=[],
mesh=converter.mesh,
batch_dims=[batch_dim],
length_dim=length_dim,
variable_dtype=mtf.VariableDType(tf.float32),
sequence_id=1,
inputs=inputs,
initial_position=initial_position)
return context
class TransformerLayersTest(tf.test.TestCase):
def setUp(self):
super(TransformerLayersTest, self).setUp()
self.converter = test_utils.NumpyConverter()
def test_conv1d_call_same_input_output_dims(self):
batch = 2
d_model = 6
length = 3
inputs = np.random.randint(0, 10, size=[batch, length])
inputs_mtf = self.converter.convert_np_array_to_mtf_tensor(
inputs, dim_names=["batch", "length"])
# Dummy context with necessary information for Conv1DLayer.call
Context = collections.namedtuple("Context",
["inputs", "activation_dtype", "mode"])
context = Context(
inputs=inputs_mtf, activation_dtype=tf.float32, mode="train")
x = np.random.randn(batch, length, d_model)
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dtype=tf.float32, dim_names=["batch", "length", "d_model"])
conv_layer = transformer_layers.Conv1DLayer(
filter_size=3, output_size=d_model)
output_mtf = conv_layer.call(context, x_mtf)
self.assertAllEqual([batch, length, d_model],
output_mtf.shape.to_integer_list)
def test_conv1d_record_states_first_part_mode(self):
batch = 2
d_model = 6
length = 6
filter_size = 3
inputs = np.random.randint(1, 10, size=[batch, length])
context = get_dummy_decoder_context(
self.converter,
batch=batch,
d_model=d_model,
initial_position=2, # indices 0 and 1 correspond to partial sequences.
inputs=inputs,
mode="first_part")
x = np.zeros(shape=(batch, length, d_model))
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dtype=tf.float32, dim_names=["batch", "length", "d_model"])
conv_layer = transformer_layers.Conv1D()
conv_layer.record_states_first_part_mode(context, x_mtf, filter_size)
actual = self.converter.convert_mtf_tensor_to_np_array(
context.new_states[0])
expected = np.zeros(shape=[batch, filter_size, d_model])
self.assertAllClose(actual, expected)
def test_conv1d_record_states_first_part_mode_with_partial_sequence(self):
batch = 2
d_model = 6
length = 6
filter_size = 3
inputs = np.random.randint(1, 10, size=[batch, length])
context = get_dummy_decoder_context(
self.converter,
batch=batch,
d_model=d_model,
initial_position=2, # indices 0 and 1 correspond to partial sequences.
inputs=inputs,
mode="first_part")
x = np.random.randn(batch, length, d_model)
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dtype=tf.float32, dim_names=["batch", "length", "d_model"])
conv_layer = transformer_layers.Conv1D()
conv_layer.record_states_first_part_mode(context, x_mtf, filter_size)
actual = self.converter.convert_mtf_tensor_to_np_array(
context.new_states[0])
expected = np.zeros(shape=[batch, filter_size, d_model])
expected[:, -2, :] = x[:, 0, :]
expected[:, -1, :] = x[:, 1, :]
self.assertAllClose(actual, expected)
def test_conv1d_record_states_incremental_mode(self):
batch = 2
d_model = 6
filter_size = 3
state = np.random.randn(batch, filter_size, d_model)
context = get_dummy_decoder_context(
self.converter,
batch=batch,
d_model=d_model,
state=state)
x = np.random.randn(batch, d_model)
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dtype=tf.float32, dim_names=["batch", "d_model"])
conv_layer = transformer_layers.Conv1D()
_ = conv_layer.record_states_incremental_mode(context, x_mtf,
filter_size)
actual = self.converter.convert_mtf_tensor_to_np_array(
context.new_states[0])
# [batch, 2, d_model], [batch, 1, d_model] -> [batch, 3, d_model]
expected = np.concatenate([state[:, 1:, :], x[:, np.newaxis, :]], axis=1)
self.assertAllClose(actual, expected)
def test_conv1d_update_state(self):
batch = 2
d_model = 6
filter_size = 3
batch_dim = mtf.Dimension("batch", batch)
filter_dim = mtf.Dimension("filter", filter_size)
x = np.random.randn(batch, d_model)
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dtype=tf.float32, dim_names=["batch", "d_model"])
old_state = np.random.randn(batch, filter_size, d_model)
old_state_mtf = self.converter.convert_np_array_to_mtf_tensor(
old_state, dtype=tf.float32, dim_names=["batch", "filter", "d_model"])
position_mtf = mtf.constant(
self.converter.mesh,
filter_size - 1,
shape=mtf.Shape([batch_dim]),
dtype=tf.int32)
conv_layer = transformer_layers.Conv1D()
output_mtf = conv_layer.update_state(
old_state_mtf, x_mtf, position_mtf, filter_dim, dtype=tf.float32)
actual = self.converter.convert_mtf_tensor_to_np_array(output_mtf)
expected = np.empty(shape=old_state.shape)
expected[:, :filter_size - 1, :] = old_state[:, 1:, :]
expected[:, -1, :] = x
self.assertAllClose(actual, expected)
def test_separable_conv1d_call_same_input_output_dims(self):
batch = 2
d_model = 6
length = 3
inputs = np.random.randint(0, 10, size=[batch, length])
inputs_mtf = self.converter.convert_np_array_to_mtf_tensor(
inputs, dim_names=["batch", "length"])
# Dummy context with necessary information for Conv1DLayer.call
Context = collections.namedtuple("Context",
["inputs", "activation_dtype", "mode"])
context = Context(
inputs=inputs_mtf, activation_dtype=tf.float32, mode="train")
x = np.random.randn(batch, length, d_model)
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dtype=tf.float32, dim_names=["batch", "length", "d_model"])
min_relative_pos = -1
max_relative_pos = 2
conv_layer = transformer_layers.SeparableConv1DLayer(
min_relative_pos=min_relative_pos,
max_relative_pos=max_relative_pos,
output_size=d_model)
output_mtf = conv_layer.call(context, x_mtf)
self.assertAllEqual([batch, length, d_model],
output_mtf.shape.to_integer_list)
def test_conv1d_call_incremental_mode(self):
batch = 2
d_model = 6
length = 4
filter_size = 3
output_size = 2
state = np.random.randn(batch, filter_size, d_model)
context = get_dummy_decoder_context(
self.converter,
batch=batch,
d_model=d_model,
length=length,
state=state)
x = np.random.randn(batch, d_model)
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dtype=tf.float32, dim_names=["batch", "d_model"])
conv_filter = np.random.randn(1, filter_size, d_model, output_size)
def mock_initializer():
# pylint: disable=unused-argument
def conv_init(shape, dtype, **unused_kwargs):
return conv_filter
return conv_init
with mock.patch.object(tf, "glorot_uniform_initializer", mock_initializer):
conv_layer = transformer_layers.Conv1DLayer(
filter_size=filter_size, output_size=output_size)
output_mtf = conv_layer.call(context, x_mtf)
actual = self.converter.convert_mtf_tensor_to_np_array(output_mtf)
# [batch, 2, d_model], [batch, 1, d_model] -> [batch, 3, d_model]
padded_x = np.concatenate([state[:, 1:, :], x[:, np.newaxis, :]], axis=1)
# b: batch, h: fake height, l: length (or filter), d: d_model, o: output_size
expected = np.einsum("bld,hldo->bo", padded_x, conv_filter)
self.assertAllClose(actual, expected)
def test_separable_conv1d_layer_incremental_mode(self):
batch = 2
d_model = 6
length = 4
filter_size = 3
output_size = 2
state = np.random.randn(batch, filter_size, d_model)
context = get_dummy_decoder_context(
self.converter,
batch=batch,
d_model=d_model,
length=length,
state=state)
x = np.random.randn(batch, d_model)
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dtype=tf.float32, dim_names=["batch", "d_model"])
max_relative_pos = 0
min_relative_pos = max_relative_pos - filter_size + 1
conv_layer = transformer_layers.SeparableConv1DLayer(
min_relative_pos=min_relative_pos,
max_relative_pos=max_relative_pos,
output_size=output_size)
# Non-standard implementation of depthwise convolution in the
# transformer_layers.py requires somewhat complicated testing.
# A list of weights (length filter_size) each of shape [model_dim], which is
# the depth dimension. So the total number of parameters is filter_size *
# model_dim as expected for depthwise convolution.
all_kernel_wts = [np.random.randn(d_model) for _ in range(filter_size)]
all_kernel_wts_mtf = [
self.converter.convert_np_array_to_mtf_tensor(
w, dtype=tf.float32, dim_names=["d_model"]) for w in all_kernel_wts
]
pointwise_weight = np.random.randn(d_model, output_size)
pointwise_weight_mtf = self.converter.convert_np_array_to_mtf_tensor(
pointwise_weight, dtype=tf.float32, dim_names=["d_model", "d_model"])
with mock.patch.object(mtf.layers,
"get_dense_kernel_weights") as mock_weights:
mock_weights.return_value = pointwise_weight_mtf
output_mtf = conv_layer.call(
context, x_mtf, all_kernel_wts=all_kernel_wts_mtf)
actual = self.converter.convert_mtf_tensor_to_np_array(output_mtf)
# [filter_size, d_model]
conv_filter = np.array(all_kernel_wts)
# [batch, filter_size, d_model]
padded_x = np.concatenate([state[:, 1:, :], x[:, np.newaxis, :]], axis=1)
# b: batch, l: length (or filter), d: d_model
depthwise_convolved = np.einsum("bld,ld->bd", padded_x, conv_filter)
# The pointwise convolution can be implemented with matrix multiplication.
# [batch, d_model] * [d_model, output_size] -> [batch, output_size]
expected = np.dot(depthwise_convolved, pointwise_weight)
self.assertAllClose(actual, expected)
if __name__ == "__main__":
tf.test.main()
| mesh-master | mesh_tensorflow/transformer/transformer_layers_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import dataclasses
from mesh_tensorflow.transformer import learning_rate_schedules
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
@dataclasses.dataclass
class LearningRateSpec(object):
step: tf.Tensor
total_train_steps: int
initial_lr: float
offset: int
def _get_linear_decay_lr(spec):
return learning_rate_schedules.linear_decay_learning_rate(
spec.step, spec.total_train_steps, spec.initial_lr, spec.offset)
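# The assertions in the test below imply the following schedule (this reading
# is inferred from the expected values, not copied from the schedule's
# implementation):
#   lr(step) = initial_lr * (total_train_steps - max(step, offset))
#              / (total_train_steps - offset)
# i.e. the rate stays at initial_lr until `offset`, then decays linearly to 0.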
class UtilsTest(parameterized.TestCase, tf.test.TestCase):
def testLinearDecayLearningRate(self):
with self.test_session() as sess:
# At step 0 (no offset), the learning rate should be initial_lr.
spec = LearningRateSpec(
step=tf.constant(0, tf.int32),
total_train_steps=100,
initial_lr=0.001,
offset=0)
self.assertAlmostEqual(0.001, sess.run(_get_linear_decay_lr(spec)))
# Halfway, the learning rate should be initial_lr / 2.
spec.step = tf.constant(50, tf.int32)
self.assertAlmostEqual(0.0005, sess.run(_get_linear_decay_lr(spec)))
# At the end of training it should be 0.
spec.step = 100
self.assertAlmostEqual(0, sess.run(_get_linear_decay_lr(spec)))
      # If 0 <= step < offset, the lr should stay at initial_lr.
spec.offset = 50
spec.step = 40
self.assertAlmostEqual(0.001, sess.run(_get_linear_decay_lr(spec)))
if __name__ == "__main__":
tf.test.main()
| mesh-master | mesh_tensorflow/transformer/learning_rate_schedules_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mesh_tensorflow.transformer.funnel_transformer."""
from absl.testing import parameterized
import mesh_tensorflow as mtf
from mesh_tensorflow import test_utils
from mesh_tensorflow.transformer import funnel_transformer
from mesh_tensorflow.transformer import transformer
from mesh_tensorflow.transformer import transformer_layers
import numpy as np
import tensorflow.compat.v1 as tf
def create_dummy_model(mesh,
shapes,
n_blocks=2,
block_param_size_str="2_2",
block_repeat_size_str="1_1"):
"""Creates a dummy model and layer stack with 4-dimensional input."""
assert len(shapes) == 4
outer_batch_size, batch_size, length, d_model = shapes
batch_dim = mtf.Dimension("batch", batch_size)
outer_batch_dim = mtf.Dimension("outer_batch", outer_batch_size)
length_dim = mtf.Dimension("length", length)
block_param_size = list(map(int, block_param_size_str.split("_")))
block_repeat_size = list(map(int, block_repeat_size_str.split("_")))
sublayers_initial = [
transformer.sublayer_dropout,
]
sublayers_per_layer = [
transformer.sublayer_rms_norm,
transformer.sublayer_call_layer,
transformer.sublayer_dropout,
transformer.sublayer_residual,
]
sublayers_final = [
transformer.sublayer_rms_norm,
transformer.sublayer_dropout,
]
submodules = [
transformer_layers.SelfAttention(),
transformer_layers.DenseReluDense()
]
n_sublayers = np.array(block_param_size).prod()
layers = submodules * n_sublayers
layer_stack = funnel_transformer.FunnelTransformerLayerStack(
layers=layers,
n_blocks=n_blocks,
block_param_size=block_param_size,
block_repeat_size=block_repeat_size,
sublayers_initial=sublayers_initial,
sublayers_per_layer=sublayers_per_layer,
sublayers_final=sublayers_final)
model = transformer.Unitransformer(
input_vocab_size=10,
output_vocab_size=10,
autoregressive=False,
max_length=8,
d_model=d_model,
layer_stack=layer_stack)
context = transformer.Context(
model=model,
mesh=mesh,
batch_dims=[batch_dim, outer_batch_dim],
length_dim=length_dim,
variable_dtype=mtf.VariableDType(tf.float32),
sequence_id=mtf.ones(mesh, mtf.Shape([length_dim])),
position=mtf.range(mesh, length_dim, dtype=tf.int32)
)
return layer_stack, context
class FunnelTransformerTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(FunnelTransformerTest, self).setUp()
self.converter = test_utils.NumpyConverter()
self.default_dim_names = ["outer_batch", "batch", "length", "d_model"]
def test_layer_stack_call_padding_handling(self):
self.converter = test_utils.NumpyConverter()
x = np.random.randn(2, 3, 4, 5)
layer_stack, context = create_dummy_model(
self.converter.mesh, shapes=x.shape)
# The last two sequence positions are padding.
x[:, :, -2:, :] *= 0
sequence_id = np.ones_like(x, dtype=np.int32)
sequence_id[:, :, -2:, :] *= 0
context.sequence_id = self.converter.convert_np_array_to_mtf_tensor(
sequence_id, dim_names=self.default_dim_names)
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dim_names=self.default_dim_names, dtype=tf.float32)
output_mtf = layer_stack.call(context, x_mtf)
# [2, 3, 4, 5] -> [2, 3, 2, 5]
actual = self.converter.convert_mtf_tensor_to_np_array(output_mtf)
# After pooling, the last sequence position should be padding, i.e., zeros.
last_position = actual[:, :, -1, :]
self.assertAllClose(last_position, np.zeros_like(last_position))
def test_layer_stack_call_pooled_length(self):
converter = test_utils.NumpyConverter()
x = np.random.randn(2, 3, 4, 5)
layer_stack, context = create_dummy_model(
converter.mesh, shapes=x.shape)
x_mtf = converter.convert_np_array_to_mtf_tensor(
x, dim_names=self.default_dim_names, dtype=tf.float32)
output_mtf = layer_stack.call(context, x_mtf)
actual = converter.convert_mtf_tensor_to_np_array(output_mtf)
self.assertAllEqual(actual.shape, (2, 3, 2, 5))
def test_layer_stack_call_num_output_layers(self):
x = np.random.randn(2, 3, 4, 5)
layer_stack, context = create_dummy_model(
self.converter.mesh, shapes=x.shape)
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dim_names=self.default_dim_names, dtype=tf.float32)
_ = layer_stack.call(context, x_mtf)
    # +1 accounts for the sublayers_initial. sublayers_final is merged with the
    # last layer of sublayers_per_layer.
self.assertLen(context.layer_outputs, len(layer_stack.layers) + 1)
def test_layer_stack_call_num_unique_layers(self):
x = np.random.randn(2, 3, 4, 5)
layer_stack, context = create_dummy_model(
self.converter.mesh, shapes=x.shape)
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dim_names=self.default_dim_names, dtype=tf.float32)
output_mtf = layer_stack.call(context, x_mtf)
lowering, _ = self.converter.convert_mtf_tensor_to_tf_tensor(output_mtf)
# Test the number of unique layers.
all_vars = lowering.graph.all_variables
self_attn_vars = [
var.name for var in all_vars if "SelfAttention" in var.name
]
    # We expect a total of `n_layers` SelfAttention and DenseReluDense layers.
n_layers = len(layer_stack.layers)
    # We expect `n_sublayers` SelfAttention layers.
n_sublayers = n_layers // 2
# Each self attn layer has 4 variables: wq, wk, wv, wo.
self.assertEqual(len(self_attn_vars) // 4, n_sublayers)
def test_layer_stack_update_context_sequence_id(self):
x = np.random.randn(2, 3, 4, 5)
layer_stack, context = create_dummy_model(
self.converter.mesh, shapes=x.shape)
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dim_names=self.default_dim_names, dtype=tf.float32)
_ = layer_stack.call(context, x_mtf)
self.assertEqual(2, context.length_dim.size)
def test_layer_stack_update_context_position(self):
x = np.random.randn(2, 3, 4, 5)
layer_stack, context = create_dummy_model(
self.converter.mesh, shapes=x.shape)
x_mtf = self.converter.convert_np_array_to_mtf_tensor(
x, dim_names=self.default_dim_names, dtype=tf.float32)
_ = layer_stack.call(context, x_mtf)
actual = self.converter.convert_mtf_tensor_to_np_array(context.position)
self.assertAllEqual(np.arange(2), actual)
if __name__ == "__main__":
tf.test.main()
| mesh-master | mesh_tensorflow/transformer/funnel_transformer_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture-of-experts code.
Interfaces and algorithms are under development and subject to rapid change
without notice.
TODO(noam): Remove the other copy of this code from tensor2tensor.
TODO(noam): Write a new, simpler, cleaner version of this code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import transformer
import tensorflow.compat.v1 as tf
@gin.configurable
class MoE1D(transformer.TransformerLayer):
"""Mixture of Experts Layer."""
def __init__(self,
num_experts=16,
loss_coef=1e-2,
hidden_size=4096,
group_size=1024,
capacity_factor_train=1.25,
capacity_factor_eval=2.0,
use_second_place_loss=False,
second_policy_train="random",
second_policy_eval="random",
second_threshold_train=0.2,
second_threshold_eval=0.2,
dropout_rate=0.0,
activation="relu",
moe_gating="top_2",
min_expert_capacity=4,
switch_policy_train="input_jitter",
switch_policy_eval="input_jitter",
switch_dropout=0.1,
switch_temperature=1.0,
switch_jitter=1e-2,
ntlb_top_k=4,
output_dim=None,
use_experts_attention=False):
self._hparams = HParams(
moe_gating=moe_gating,
moe_num_experts=num_experts,
moe_loss_coef=loss_coef,
moe_hidden_size=hidden_size,
moe_group_size=group_size,
moe_min_expert_capacity=min_expert_capacity,
moe_capacity_factor_train=capacity_factor_train,
moe_capacity_factor_eval=capacity_factor_eval,
moe_use_second_place_loss=use_second_place_loss,
moe_second_policy_train=second_policy_train,
moe_second_policy_eval=second_policy_eval,
moe_second_threshold_train=second_threshold_train,
moe_second_threshold_eval=second_threshold_eval,
moe_dropout_rate=dropout_rate,
moe_switch_policy_train=switch_policy_train,
moe_switch_policy_eval=switch_policy_eval,
moe_switch_dropout=switch_dropout,
moe_switch_temperature=switch_temperature,
moe_switch_jitter=switch_jitter,
moe_output_dim=output_dim,
moe_ntlb_top_k=ntlb_top_k,
moe_use_experts_attention=use_experts_attention)
self._activation = activation
def call(self, context, x, losses=None):
"""Call the layer."""
if context.model.ensemble_dim:
raise NotImplementedError("MoE not yet implemented with ensembles")
has_length_dim = context.length_dim in x.shape.dims
if not has_length_dim:
x_shape = x.shape
shape_with_length = mtf.Shape(
x_shape.dims[:-1] + [mtf.Dimension("length", 1)]
+ x_shape.dims[-1:])
x = mtf.reshape(x, shape_with_length)
# Extract the MoE output dimension
if self._hparams.moe_output_dim is not None:
output_dim = self._hparams.moe_output_dim
else:
output_dim = context.model.model_dim
y, loss = transformer_moe_layer_v1(
x,
output_dim,
self._hparams,
context.train,
context.variable_dtype,
layout=context.model.layout,
mesh_shape=context.model.mesh_shape,
nonpadding=context.nonpadding,
activation=self._activation,
num_microbatches=context.num_microbatches)
if context.losses is not None:
context.losses.append(loss)
if not has_length_dim:
if self._hparams.moe_use_experts_attention:
y_reshape = [mtf.reshape(y_out, x_shape) for y_out in y]
y = y_reshape
else:
y = mtf.reshape(y, x_shape)
return y
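# Illustrative gin bindings for configuring MoE1D (a sketch only; the
# layer-stack wiring that decides where this layer is used is defined
# elsewhere and is not shown here):
#   MoE1D.num_experts = 32
#   MoE1D.hidden_size = 4096
#   MoE1D.moe_gating = "switch"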
class MoE2D(transformer.TransformerLayer):
"""Mixture of Experts Layer."""
def __init__(self,
expert_x=8,
expert_y=8,
loss_coef=1e-2,
hidden_size=4096,
group_size=1024,
capacity_factor_train=1.25,
capacity_factor_eval=2.0,
capacity_factor_second_level=1.0,
use_second_place_loss=False,
second_policy_train="random",
second_policy_eval="random",
second_threshold_train=0.2,
second_threshold_eval=0.2):
self._hparams = HParams(
moe_gating="top_2",
moe_num_experts=[expert_x, expert_y],
moe_loss_coef=loss_coef,
moe_hidden_size=hidden_size,
moe_group_size=group_size,
moe_capacity_factor_train=capacity_factor_train,
moe_capacity_factor_eval=capacity_factor_eval,
moe_capacity_factor_second_level=capacity_factor_second_level,
moe_use_second_place_loss=use_second_place_loss,
moe_second_policy_train=second_policy_train,
moe_second_policy_eval=second_policy_eval,
moe_second_threshold_train=second_threshold_train,
moe_second_threshold_eval=second_threshold_eval)
def call(self, context, x, losses=None):
"""Call the layer."""
if context.model.ensemble_dim:
raise NotImplementedError("MoE not yet implemented with ensembles")
has_length_dim = context.length_dim in x.shape.dims
if not has_length_dim:
x_shape = x.shape
shape_with_length = mtf.Shape(
x_shape.dims[:-1] + [mtf.Dimension("length", 1)]
+ x_shape.dims[-1:])
x = mtf.reshape(x, shape_with_length)
y, loss = transformer_moe_layer_v2(
x,
context.model.model_dim,
self._hparams,
context.train,
context.variable_dtype,
layout=context.model.layout,
mesh_shape=context.model.mesh_shape,
nonpadding=context.nonpadding,
num_microbatches=context.num_microbatches)
if context.losses is not None:
context.losses.append(loss)
if not has_length_dim:
y = mtf.reshape(y, x_shape)
return y
def transformer_moe_layer_v1(
inputs, output_dim, hparams, train, variable_dtype,
layout=None, mesh_shape=None, nonpadding=None, activation=mtf.relu,
num_microbatches=None):
"""Local mixture of experts that works well on TPU.
Adapted from the paper https://arxiv.org/abs/1701.06538
  Note: until the algorithm and interface solidify, we pass in a hyperparameters
dictionary in order not to complicate the interface in mtf_transformer.py .
Once this code moves out of "research", we should pass the hyperparameters
separately.
Hyperparameters used:
hparams.moe_num_experts: number of experts
hparams.moe_hidden_size: size of hidden layer in each expert
hparams.moe_group_size: size of each "group" for gating purposes
hparams.moe_capacity_factor_train: a float
hparams.moe_capacity_factor_eval: a float
hparams.moe_gating: a string
    + all hyperparameters used by _top_2_gating()
The number of parameters in the gating network is:
    (input_dim.size * hparams.num_experts)
The number of parameters in the experts themselves is:
(hparams.num_experts
* (input_dim.size + output_dim.size)
* hparams.moe_hidden_size)
The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting
of the representations of all positions in a batch of sequences.
Each position of each sequence is sent to 0-2 experts. The expert
choices and the combination weights are determined by a learned gating
function.
This function returns a small auxiliary loss that should be added to the
training loss of the model. This loss helps to balance expert usage.
Without the loss, it is very likely that a few experts will be trained and
the rest will starve.
Several hacks are necessary to get around current TPU limitations:
- To ensure static shapes, we enforce (by truncation/padding)
that each sequence send the same number of elements to each expert.
It would make more sense to enforce this equality over the entire batch,
but due to our hacked-up gather-by-matmul implementation, we need to divide
the batch into "groups". For each group, the same number of elements
are sent to each expert.
TODO(noam): Factor this code better. We want to be able to substitute
different code for the experts themselves.
Dimensions cheat sheet:
B: batch dim(s)
L: original sequence length
M: input depth
N: output depth
G: number of groups
S: group size
E: number of experts
C: expert capacity
Args:
inputs: a mtf.Tensor with shape [batch_dim(s), length_dim, input_dim]
output_dim: a mtf.Dimension (for Transformer, this is input_dim)
hparams: model hyperparameters
train: a boolean
variable_dtype: a mtf.VariableDType
layout: optional - an input to mtf.convert_to_layout_rules
mesh_shape: optional - an input to mtf.convert_to_shape
nonpadding: an optional Tensor with shape [batch_dim(s), length_dim]
and the same dtype as inputs, consisting of ones(nonpadding)
and zeros(padding).
activation: a function.
num_microbatches: number of microbatches.
Returns:
outputs: a Tensor with shape [batch_dim(s), length_dim, output_dim]
loss: a mtf scalar
Raises:
ValueError: on unrecognized hparams.moe_gating
"""
# pylint: disable=line-too-long
#
# O outer_batch dimension can be used for expert replication, e.g.
# outer_batch=4 for placing 128 experts on 512 cores with 4 replicas of each
# expert.
#
# E.g. 16x16 basic example:
# moe_num_experts=512, num_groups=1024, batch=4096, length=256, d_model=1024
# ---
# Below ` indicates common way of splitting along mesh dimension.
#
# orig_inputs OB`LM Tensor
# Shape[outer_batch=1, batch=4096, length=256, d_model=1024]
# v (reshaped)
# inputs OG`SM
# Shape[outer_batch=1, batch=1024, group=1024, d_model=1024]
#
# combine_tensor,
# dispatch_tensor OG`SEC
# Shape[outer_batch=1, batch=1024, group=1024, expert_unsplit=512, expert_capacity=4]
#
# (dispatched inputs)
# expert_inputs OEG`CM
# Shape[outer_batch=1, expert_unsplit=512, batch=1024, expert_capacity=4, d_model=1024]
# v (re-split via ReshapeOperation)
# OE`GCM
# Shape[outer_batch=1, experts=512, batch_unsplit=1024, expert_capacity=4, d_model=1024]
#
# (hidden representation)
# h OE`GCH
# Shape[outer_batch=1, experts=512, batch_unsplit=1024, expert_capacity=4, expert_hidden=8192]
#
# expert_output OE`GCM
# Shape[outer_batch=1, experts=512, batch_unsplit=1024, expert_capacity=4, d_model=1024]
# v (re-split via ReshapeOperation)
# OEG`CM
# Shape[outer_batch=1, expert_unsplit=512, batch=1024, expert_capacity=4, d_model=1024]
#
# (combined expert_output)
# output OG`SM
# Shape[outer_batch=1, batch=1024, group=1024, d_model=1024
# v (reshape)
# OB`LM
# Shape[outer_batch=1, batch=4096, length=256, d_model=1024]
#
# pylint: enable=line-too-long
orig_inputs = inputs
hidden_dim = mtf.Dimension("expert_hidden", hparams.moe_hidden_size)
experts_dim = mtf.Dimension("experts", hparams.moe_num_experts)
# We "cheat" here and look at the mesh shape and layout. This is to ensure
# that the number of groups is a multiple of the mesh dimension
# over which those groups are split.
batch_and_length_dims, input_dim = (orig_inputs.shape.dims[:-1],
orig_inputs.shape.dims[-1])
# Hack: we assume that
# "outer_batch" == replication of experts
# mesh_dim_size can be derived from mesh_shape and orig_batch_dim
#
  # We then require num_groups to be a multiple of mesh_dim_size.
if orig_inputs.shape.dims[0].name == "outer_batch":
outer_batch_dim, orig_batch_dim = orig_inputs.shape.dims[:2]
else:
outer_batch_dim, orig_batch_dim = (mtf.Dimension("outer_batch", 1),
orig_inputs.shape.dims[0])
  # Number of MoE inputs (total number of positions across batch_and_length_dims
  # per replica).
n = 1
for d in batch_and_length_dims:
n *= d.size
n = n // outer_batch_dim.size
mesh_dim_size = mtf.tensor_dim_to_mesh_dim_size(layout, mesh_shape,
orig_batch_dim)
num_groups, group_size = _split_into_groups(n, hparams.moe_group_size,
mesh_dim_size)
group_size_dim = mtf.Dimension("group", group_size)
num_groups_dim = mtf.Dimension(orig_batch_dim.name, num_groups)
moe_input_dims = [outer_batch_dim, num_groups_dim, group_size_dim, input_dim]
# OGSM Tensor
inputs = mtf.reshape(inputs, moe_input_dims)
# Each sequence sends expert_capacity positions to each expert.
if train:
capacity_factor = hparams.moe_capacity_factor_train
else:
capacity_factor = hparams.moe_capacity_factor_eval
expert_capacity = min(
group_size_dim.size,
int((group_size_dim.size * capacity_factor) / experts_dim.size))
expert_capacity = max(expert_capacity, hparams.moe_min_expert_capacity)
tf.logging.info("expert_capacity: %d" % expert_capacity)
expert_capacity_dim = mtf.Dimension("expert_capacity", expert_capacity)
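  # Worked example with the 16x16 numbers sketched above (illustrative only):
  #   group_size=1024, capacity_factor_train=1.25, num_experts=512 gives
  #   int(1024 * 1.25 / 512) = 2, which is then raised to the default
  #   moe_min_expert_capacity=4, matching expert_capacity=4 in the shapes above.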
experts_dim_unsplit = mtf.Dimension("expert_unsplit", experts_dim.size)
batch_dim_unsplit = mtf.Dimension("batch_unsplit", num_groups_dim.size)
if nonpadding is not None:
nonpadding = mtf.zeros(
inputs.mesh, batch_and_length_dims, dtype=inputs.dtype) + nonpadding
nonpadding = mtf.reshape(nonpadding, moe_input_dims[:-1])
if hparams.moe_gating == "top_2":
# combine_tensor,
# dispatch_tensor OG`SEC Tensors
# (G is generally split along mesh dim)
dispatch_tensor, combine_tensor, loss = _top_2_gating(
inputs=inputs,
outer_expert_dims=None,
experts_dim=experts_dim_unsplit,
expert_capacity_dim=expert_capacity_dim,
hparams=hparams,
train=train,
variable_dtype=variable_dtype,
importance=nonpadding,
num_microbatches=num_microbatches)
elif hparams.moe_gating == "switch":
dispatch_tensor, combine_tensor, loss = _switch_gating(
inputs=inputs,
outer_expert_dims=None,
experts_dim=experts_dim_unsplit,
expert_capacity_dim=expert_capacity_dim,
hparams=hparams,
train=train,
variable_dtype=variable_dtype,
importance=nonpadding,
num_microbatches=num_microbatches)
elif hparams.moe_gating == "ntlb":
dispatch_tensor, combine_tensor, loss = _ntlb_gating(
inputs=inputs,
outer_expert_dims=None,
experts_dim=experts_dim_unsplit,
expert_capacity_dim=expert_capacity_dim,
hparams=hparams,
train=train,
variable_dtype=variable_dtype,
importance=nonpadding,
num_microbatches=num_microbatches)
else:
raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
expert_inputs = mtf.einsum([inputs, dispatch_tensor],
mtf.Shape([
outer_batch_dim, experts_dim_unsplit,
num_groups_dim, expert_capacity_dim, input_dim
]))
# Extra reshape reduces communication cost for model-parallel versions.
# For model-parallel versions, this reshape causes an mtf.slice and for non-
# model-parallel versions, this has no effect.
d_model_split_dim = mtf.Dimension("d_model_split", input_dim.size)
expert_inputs = mtf.reshape(
expert_inputs,
mtf.Shape([
outer_batch_dim, experts_dim, batch_dim_unsplit, expert_capacity_dim,
d_model_split_dim
]))
# Split over batch -> split over experts
expert_inputs = mtf.reshape(
expert_inputs,
mtf.Shape([
outer_batch_dim, experts_dim, batch_dim_unsplit, expert_capacity_dim,
input_dim
]))
# Now feed the expert inputs through the experts.
h = mtf.layers.dense_product(
expert_inputs,
reduced_dims=expert_inputs.shape.dims[-1:],
new_dims=[hidden_dim],
expert_dims=[experts_dim],
activation_functions=activation, use_bias=False,
variable_dtype=variable_dtype, name="wi")
if train and hparams.moe_dropout_rate != 0.0:
h = mtf.dropout(h, 1.0 - hparams.moe_dropout_rate)
def _compute_output(hidden, layer_name):
"""Compute the output of the attention layer from the hidden vector."""
expert_output = mtf.layers.dense(
hidden, output_dim, expert_dims=[experts_dim], use_bias=False,
reduced_dims=hidden.shape.dims[-1:], variable_dtype=variable_dtype,
name=layer_name)
# Extra reshape reduces communication cost for model-parallel versions.
# For model-parallel versions, this reshape causes an mtf.slice and for non-
# model-parallel versions, this has no effect.
expert_output = mtf.reshape(
expert_output,
mtf.Shape([
outer_batch_dim, experts_dim_unsplit, num_groups_dim,
expert_capacity_dim, d_model_split_dim
]))
# Split over experts -> split over batch
expert_output = mtf.reshape(
expert_output,
mtf.Shape([
outer_batch_dim,
experts_dim_unsplit,
num_groups_dim,
expert_capacity_dim,
output_dim,
]))
moe_output_dims = moe_input_dims[:-1] + [output_dim]
output = mtf.einsum([expert_output, combine_tensor],
mtf.Shape(moe_output_dims))
output = mtf.reshape(output, batch_and_length_dims + [output_dim])
return output
if hparams.moe_use_experts_attention:
# We share k_h and v_h with no degradation in performance
q_h, k_h = h, h
outputs = []
q = _compute_output(q_h, layer_name="q_wo")
k = _compute_output(k_h, layer_name="k_wo")
outputs.append(q)
outputs.append(k)
return outputs, loss * hparams.moe_loss_coef
else:
output = _compute_output(h, layer_name="wo")
return output, loss * hparams.moe_loss_coef
def transformer_moe_layer_v2(
inputs, output_dim, hparams, train, variable_dtype,
layout=None, mesh_shape=None, nonpadding=None, num_microbatches=None):
"""2-level mixture of experts.
Adapted from the paper https://arxiv.org/abs/1701.06538
  Note: until the algorithm and interface solidify, we pass in a hyperparameters
dictionary in order not to complicate the interface in mtf_transformer.py .
Once this code moves out of "research", we should pass the hyperparameters
separately.
Hyperparameters used:
hparams.moe_num_experts: number of experts
hparams.moe_hidden_size: size of hidden layer in each expert
hparams.moe_group_size: size of each "group" for gating purposes
hparams.moe_capacity_factor_train: a float
hparams.moe_capacity_factor_eval: a float
hparams.moe_capacity_factor_second_level: a float
hparams.moe_gating: a string
    + all hyperparameters used by _top_2_gating()
  One set of params is used for the experts in the first level, and a
  different set of hparams is used per expert in the second level.
The number of parameters in the gating network is:
    (input_dim.size * hparams.num_experts[0]) +
    (input_dim.size * hparams.num_experts[0] * hparams.num_experts[1])
The number of parameters in the experts themselves is:
    (hparams.num_experts[0] * hparams.num_experts[1]
* (input_dim.size + output_dim.size)
* hparams.moe_hidden_size)
The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting
of the representations of all positions in a batch of sequences.
Each position of each sequence is sent to 0-3 experts. The expert
choices and the combination weights are determined by a learned gating
function.
This function returns a small auxiliary loss that should be added to the
training loss of the model. This loss helps to balance expert usage.
Without the loss, it is very likely that a few experts will be trained and
the rest will starve.
Several hacks are necessary to get around current TPU limitations:
- To ensure static shapes, we enforce (by truncation/padding)
that each sequence send the same number of elements to each expert.
It would make more sense to enforce this equality over the entire batch,
but due to our hacked-up gather-by-matmul implementation, we need to divide
the batch into "groups". For each group, the same number of elements
are sent to each expert.
TODO(noam): Factor this code better. We want to be able to substitute
different code for the experts themselves.
Dimensions cheat sheet:
a, b: batch size
l: original sequence length
m: input depth
n: output depth
g, h: number of groups
s, t: group size
x, y: number of experts
c, d: expert capacity
input: [a0, b1, l, m]
input: [a0, g1, s, m]
dispatch_tensor_x: [a0, g1, s, x, c]
expert_input: [a0, g1, x, c, m]
alltoall: [a0, g, x1, c, m]
alltoall: [a0, g, x1, c, m]
transpose: [x1, a0, g, c, m]
reshape: [x1, h0, s, m]
assignment2: [x1, h0, t, y, d]
expert_input2: [x1, h0, y, d, m]
alltoall: [x1, h, y0, d, m]
...
reverse of that
gating params 0: [m, x]
gating params 1: [x1, m, y]
expert params:
[x1, y0, m, hidden]
[x1, y0, hidden, n]
Args:
inputs: a mtf.Tensor with shape [a, b, l, m]
output_dim: a mtf.Dimension (for Transformer, this is input_dim)
hparams: model hyperparameters
train: a boolean
variable_dtype: a mtf.VariableDType
layout: optional - an input to mtf.convert_to_layout_rules
mesh_shape: optional - an input to mtf.convert_to_shape
nonpadding: an optional mtf.Tensor with shape [a, b, l]
and the same dtype as inputs, consisting of ones(nonpadding)
and zeros(padding).
num_microbatches: number of microbatches.
Returns:
outputs: a Tensor with shape [a, b, l, n]
loss: a mtf scalar
Raises:
ValueError: on unrecognized hparams.moe_gating
"""
if nonpadding is not None:
nonpadding = mtf.zeros(inputs.mesh, inputs.shape.dims[:-1],
dtype=inputs.dtype) + nonpadding
insert_outer_batch_dim = (len(inputs.shape.dims) == 3)
if insert_outer_batch_dim:
inputs = mtf.reshape(
inputs, [mtf.Dimension("outer_batch", 1)] + inputs.shape.dims)
assert len(hparams.moe_num_experts) == 2
a0, b1, l, m = inputs.shape.dims
hidden_dim = mtf.Dimension("expert_hidden", hparams.moe_hidden_size)
x1 = mtf.Dimension("expert_x", hparams.moe_num_experts[0])
y0 = mtf.Dimension("expert_y", hparams.moe_num_experts[1])
x = mtf.Dimension("expert_x_unsplit", hparams.moe_num_experts[0])
y = mtf.Dimension("expert_y_unsplit", hparams.moe_num_experts[1])
n = output_dim
# We "cheat" here and look at the mesh shape and layout. This is to ensure
# that the number of groups (g.size) is a multiple of the mesh dimension
# over which those groups are split.
num_groups, group_size = _split_into_groups(
b1.size * l.size, hparams.moe_group_size,
mtf.tensor_dim_to_mesh_dim_size(layout, mesh_shape, b1))
g1 = mtf.Dimension(b1.name, num_groups)
g = mtf.Dimension(b1.name + "_unsplit", g1.size)
s = mtf.Dimension("group_size_x", group_size)
# Each sequence sends (at most?) expert_capacity positions to each expert.
# Static expert_capacity dimension is needed for expert batch sizes
if train:
capacity_factor = hparams.moe_capacity_factor_train
else:
capacity_factor = hparams.moe_capacity_factor_eval
expert_capacity = min(s.size, int((s.size * capacity_factor) / x.size))
expert_capacity = max(expert_capacity, hparams.moe_min_expert_capacity)
c = mtf.Dimension("expert_capacity_x", expert_capacity)
# We "cheat" here and look at the mesh shape and layout. This is to ensure
# that the number of groups (h.size) is a multiple of the mesh dimension
# over which those groups are split.
num_groups, group_size = _split_into_groups(
a0.size * g.size * c.size,
hparams.moe_group_size,
mtf.tensor_dim_to_mesh_dim_size(layout, mesh_shape, a0))
t = mtf.Dimension("group_size_y", group_size)
h0 = mtf.Dimension(a0.name, num_groups)
h = mtf.Dimension(a0.name + "_unsplit", h0.size)
expert_capacity = min(
t.size,
int((t.size * hparams.moe_capacity_factor_second_level) / y.size))
expert_capacity = max(expert_capacity, hparams.moe_min_expert_capacity)
d = mtf.Dimension("expert_capacity_y", expert_capacity)
# First level of expert routing
# Reshape the inner batch size to a multiple of group_dim g1 and
# group_size_dim s.
inputs = mtf.reshape(inputs, [a0, g1, s, m])
if nonpadding is not None:
nonpadding = mtf.reshape(nonpadding, [a0, g1, s])
# Get the assignments for the first level.
# dispatch_tensor_x has shape [a0, g1, s, x, c]
if hparams.moe_gating == "top_2":
dispatch_tensor_x, combine_tensor_x, loss_outer = _top_2_gating(
inputs=inputs,
outer_expert_dims=None,
experts_dim=x,
expert_capacity_dim=c,
hparams=hparams,
train=train,
variable_dtype=variable_dtype,
name="outer_gating",
importance=nonpadding,
num_microbatches=num_microbatches)
else:
raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
# Now create expert_inputs based on the assignments.
# put num_experts dimension first to make split easier in alltoall
expert_inputs_x = mtf.einsum([inputs, dispatch_tensor_x], [x, a0, g1, c, m])
# we construct an "importance" Tensor for the inputs to the second-level
# gating. The importance of an input is 1.0 if it represents the
# first-choice expert-group and 0.5 if it represents the second-choice expert
# group. This is used by the second-level gating.
importance = mtf.reduce_sum(combine_tensor_x, output_shape=[x, a0, g1, c])
importance = 0.5 * (
mtf.to_float(mtf.greater(importance, 0.5)) +
mtf.to_float(mtf.greater(importance, 0.0)))
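  # Illustration: a slot filled by a first-choice token (combine weight, say,
  # 0.7 > 0.5) gets importance 0.5 * (1 + 1) = 1.0; a second-choice token
  # (say, 0.3) gets 0.5 * (0 + 1) = 0.5; an empty slot stays at 0.0.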
# First level, all to all. Here we change the split dimension from g1 to x1.
expert_inputs_x = mtf.reshape(expert_inputs_x, mtf.Shape(
[x1, a0, g, c, m]))
importance = mtf.reshape(importance, [x1, a0, g, c])
# Second level of expert routing
# Reshape the expert_inputs outer batch dim to be a multiple of group_dim h0
# and group_size_dim t.
inputs_y = mtf.reshape(expert_inputs_x, [x1, h0, t, m])
importance = mtf.reshape(importance, [x1, h0, t])
# Get the assignments for the second level.
# dispatch_tensor_y has shape [x1, h0, t, y, d]
if hparams.moe_gating == "top_2":
dispatch_tensor_y, combine_tensor_y, loss_inner = _top_2_gating(
inputs=inputs_y,
outer_expert_dims=[x1],
experts_dim=y,
expert_capacity_dim=d,
hparams=hparams,
train=train,
variable_dtype=variable_dtype,
importance=importance,
name="inner_gating",
num_microbatches=num_microbatches)
else:
raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating)
# Now create expert_inputs based on the assignments.
# put num_experts dimension first to make split easier in alltoall
expert_inputs_y = mtf.einsum([inputs_y, dispatch_tensor_y], [y, x1, h0, d, m])
# Second level, all to all. Here we change the split dimension from h0 to y0.
expert_inputs_y = mtf.reshape(expert_inputs_y, mtf.Shape(
[y0, x1, h, d, m]))
hidden_output = mtf.layers.dense(
expert_inputs_y, hidden_dim, expert_dims=[y0, x1],
reduced_dims=expert_inputs_y.shape.dims[-1:],
activation=mtf.relu, use_bias=False, variable_dtype=variable_dtype,
name="wi")
expert_output = mtf.layers.dense(
hidden_output, output_dim, expert_dims=[y0, x1],
reduced_dims=hidden_output.shape.dims[-1:],
use_bias=False, variable_dtype=variable_dtype,
name="wo")
# NOW COMBINE EXPERT OUTPUTS (reversing everything we have done)
# expert_output has shape [y0, x1, h, d, n]
# alltoall
expert_output = mtf.reshape(expert_output, mtf.Shape(
[y, x1, h0, d, n]))
# combine results from inner level
output_y = mtf.einsum([expert_output, combine_tensor_y], [x1, h0, t, n])
# Reshape the combined tensor from inner level to now contain outer_batch_dim
# a0 and group_dim g
output = mtf.reshape(output_y, [x1, a0, g, c, n])
# alltoall from expert_dim x to group_dim g1
expert_output_x = mtf.reshape(output, mtf.Shape([x, a0, g1, c, n]))
# combine results from outer level
output_x = mtf.einsum([expert_output_x, combine_tensor_x], [a0, g1, s, n])
# Reshape the combined tensor to now contain inner_batch_dim
# b1 and the original sequence length
output = mtf.reshape(output_x, [a0, b1, l, n])
if insert_outer_batch_dim:
output = mtf.reshape(output, [b1, l, n])
return output, (loss_outer + loss_inner) * hparams.moe_loss_coef
def _ntlb_gating(inputs,
outer_expert_dims,
experts_dim,
expert_capacity_dim,
hparams,
train,
variable_dtype,
importance=None,
name="ntlb_gating",
num_microbatches=None):
"""Compute Switch gating with no-token-left behind (NTLB) behavior."""
# SELECT EXPERT
if train:
policy = hparams.moe_switch_policy_train
else:
policy = hparams.moe_switch_policy_eval
# Input perturbations
if train and policy == "input_jitter":
inputs = mtf.layers.multiplicative_jitter(inputs, hparams.moe_switch_jitter)
gate_logits = mtf.layers.dense(
inputs,
experts_dim,
use_bias=False,
expert_dims=outer_expert_dims,
variable_dtype=variable_dtype,
name=name)
raw_gates = mtf.softmax(gate_logits, reduced_dim=experts_dim)
# The internals of this function run in float32.
# bfloat16 seems to reduce quality.
raw_gates = mtf.to_float(raw_gates)
# Top-k operation
k_dim = mtf.Dimension("k", hparams.moe_ntlb_top_k)
expert_gate, expert_index = mtf.top_k(
raw_gates, reduced_dim=experts_dim, k_dim=k_dim)
expert_mask = mtf.one_hot(expert_index, experts_dim)
# LOAD BALANCING LOSS
outer_batch_dim = inputs.shape[0]
batch_dim = inputs.shape[1]
group_size_dim = inputs.shape[-2]
density_1 = mtf.reduce_mean(expert_mask, reduced_dim=group_size_dim)
density_1_proxy = mtf.reduce_mean(raw_gates, reduced_dim=group_size_dim)
if importance is not None:
expert_mask *= mtf.cast(mtf.equal(importance, 1.0), dtype=raw_gates.dtype)
expert_gate *= mtf.cast(mtf.equal(importance, 1.0), dtype=raw_gates.dtype)
density_1_proxy *= mtf.cast(
mtf.equal(importance, 1.0), dtype=raw_gates.dtype)
loss = (
mtf.reduce_mean(density_1_proxy * density_1) *
float(experts_dim.size * experts_dim.size))
if num_microbatches and num_microbatches > 1:
tf.logging.info("Dividing load-balance loss by num_microbatches={}".format(
num_microbatches))
loss /= num_microbatches
# Logging
if train:
entropy = mtf.reduce_sum(
-raw_gates * mtf.log(raw_gates + 1e-9), reduced_dim=experts_dim)
batch_entropy = mtf.reduce_mean(entropy)
mtf.scalar_summary(name + "/entropy", batch_entropy)
mask_count_experts = mtf.reduce_sum(expert_mask, output_shape=[experts_dim])
total_routed = mtf.reduce_sum(mask_count_experts)
expert_fraction = mtf.to_float(mask_count_experts / total_routed)
split_fractions = mtf.split(
expert_fraction,
split_dim=experts_dim,
num_or_size_splits=experts_dim.size)
for fraction in split_fractions:
mtf.scalar_summary("experts/" + fraction.name.replace(":", "/"),
mtf.reduce_mean(fraction))
mtf.scalar_summary("aux_loss", mtf.reduce_mean(loss))
# COMPUTE ASSIGNMENT TO EXPERT
# Iteratively route tokens (no-token-left-behind). The idea is to route as
# many tokens as possible to top-i before then trying top-(i+1).
top_k_masks = mtf.split(
expert_mask, split_dim=k_dim, num_or_size_splits=k_dim.size)
top_k_gates = mtf.split(
expert_gate, split_dim=k_dim, num_or_size_splits=k_dim.size)
top_k_indices = mtf.split(
expert_index, split_dim=k_dim, num_or_size_splits=k_dim.size)
# Tensors cumulative values over the iterative process.
combine_tensor = mtf.constant(
inputs.mesh,
value=0,
shape=[outer_batch_dim, batch_dim, experts_dim, expert_capacity_dim])
cum_tokens = mtf.constant(
inputs.mesh, value=0, shape=[outer_batch_dim, batch_dim, experts_dim])
tokens_left_to_route = mtf.constant(
inputs.mesh, value=1., shape=[outer_batch_dim, batch_dim, group_size_dim])
expert_capacity_float = float(expert_capacity_dim.size)
for (top_i_mask, top_i_gate, top_i_index) in zip(top_k_masks, top_k_gates,
top_k_indices):
top_i_mask = mtf.reshape(
top_i_mask,
new_shape=[outer_batch_dim, batch_dim, group_size_dim, experts_dim])
# Operate only on the unrouted tokens.
top_i_mask *= tokens_left_to_route
# Record cumulative number of tokens to each expert across iterations.
cumulative_tokens_in_expert = cum_tokens + mtf.cumsum(
top_i_mask, group_size_dim)
expert_overflow = mtf.to_float(
mtf.less_equal(cumulative_tokens_in_expert, expert_capacity_float))
output_i_tokens = top_i_mask * expert_overflow
# Update the cumulative tokens routed to each expert.
cum_tokens += mtf.reduce_sum(output_i_tokens, reduced_dim=group_size_dim)
tokens_left_to_route -= (
mtf.reduce_sum(output_i_tokens, reduced_dim=experts_dim))
# Combine-tensor for this iteration
output_i_tokens_flat = mtf.reduce_sum(
output_i_tokens, reduced_dim=experts_dim)
position_in_expert = cumulative_tokens_in_expert - 1
top_i_combine_tensor = (
top_i_gate * output_i_tokens_flat *
mtf.one_hot(top_i_index, experts_dim) *
mtf.one_hot(mtf.to_int32(position_in_expert), expert_capacity_dim))
combine_tensor += top_i_combine_tensor
# Match the inputs dtype.
combine_tensor = mtf.cast(combine_tensor, inputs.dtype)
loss = mtf.cast(loss, inputs.dtype)
dispatch_tensor = mtf.cast(
mtf.cast(combine_tensor, tf.bool), combine_tensor.dtype)
return dispatch_tensor, combine_tensor, loss
def _switch_gating(
inputs, outer_expert_dims, experts_dim, expert_capacity_dim,
hparams, train, variable_dtype, importance=None, name="switch_gating",
num_microbatches=None):
"""Compute Switch gating."""
# SELECT EXPERT
if train:
policy = hparams.moe_switch_policy_train
else:
policy = hparams.moe_switch_policy_eval
# The internals of this function run in float32.
# bfloat16 seems to reduce quality.
gate_inputs = mtf.to_float(inputs)
# Input perturbations
if train and policy == "input_dropout":
gate_inputs = mtf.dropout(gate_inputs, 1.0 - hparams.moe_switch_dropout)
elif train and policy == "input_jitter":
gate_inputs = mtf.layers.multiplicative_jitter(gate_inputs,
hparams.moe_switch_jitter)
gate_logits = mtf.layers.dense(
gate_inputs,
experts_dim,
use_bias=False,
expert_dims=outer_expert_dims,
variable_dtype=variable_dtype,
name=name)
raw_gates = mtf.softmax(gate_logits, reduced_dim=experts_dim)
if policy == "argmax" or policy == "input_dropout" or policy == "input_jitter":
expert_gate, expert_index = mtf.top_1(raw_gates, reduced_dim=experts_dim)
if train:
mtf.scalar_summary("expert_gate", mtf.reduce_mean(expert_gate))
elif policy == "sample":
expert_index = mtf.sample_with_temperature(
gate_logits, experts_dim, temperature=hparams.moe_switch_temperature)
expert_gate = mtf.gather(raw_gates, expert_index, dim=experts_dim)
else:
raise ValueError("Unknown Switch gating policy %s" % policy)
expert_mask = mtf.one_hot(expert_index, experts_dim, dtype=raw_gates.dtype)
# LOAD BALANCING LOSS
group_size_dim = inputs.shape[-2]
density_1 = mtf.reduce_mean(expert_mask, reduced_dim=group_size_dim)
density_1_proxy = mtf.reduce_mean(raw_gates, reduced_dim=group_size_dim)
if importance is not None:
expert_mask *= mtf.cast(mtf.equal(importance, 1.0), dtype=raw_gates.dtype)
expert_gate *= mtf.cast(mtf.equal(importance, 1.0), dtype=raw_gates.dtype)
density_1_proxy *= mtf.cast(
mtf.equal(importance, 1.0), dtype=raw_gates.dtype)
loss = (
mtf.reduce_mean(density_1_proxy * density_1) *
float(experts_dim.size * experts_dim.size))
if num_microbatches and num_microbatches > 1:
tf.logging.info("Dividing load-balance loss by num_microbatches={}".format(
num_microbatches))
loss /= num_microbatches
# Logging
if train:
entropy = mtf.reduce_sum(-raw_gates * mtf.log(raw_gates + 1e-9),
reduced_dim=experts_dim)
batch_entropy = mtf.reduce_mean(entropy)
mtf.scalar_summary(name + "/entropy", batch_entropy)
mask_count_experts = mtf.reduce_sum(expert_mask, output_shape=[experts_dim])
total_routed = mtf.reduce_sum(mask_count_experts)
expert_fraction = mtf.to_float(mask_count_experts / total_routed)
split_fractions = mtf.split(
expert_fraction,
split_dim=experts_dim,
num_or_size_splits=experts_dim.size)
for fraction in split_fractions:
mtf.scalar_summary("experts/" + fraction.name.replace(":", "/"),
mtf.reduce_mean(fraction))
mtf.scalar_summary("aux_loss", mtf.reduce_mean(loss))
# COMPUTE ASSIGNMENT TO EXPERT
# Experts have a limited capacity, ensure we do not exceed it. Construct
# the batch indices, to each expert, with position_in_expert
position_in_expert = mtf.cumsum(
expert_mask, group_size_dim, exclusive=True) * expert_mask
position_in_expert = mtf.cast(position_in_expert, dtype=raw_gates.dtype)
# Keep only tokens that fit within expert_capacity.
expert_capacity_float = float(expert_capacity_dim.size)
expert_mask *= mtf.cast(
mtf.less(position_in_expert, expert_capacity_float),
dtype=raw_gates.dtype)
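  # Illustration: if several tokens in a group all pick the same expert, their
  # exclusive-cumsum positions are 0, 1, 2, ...; with expert_capacity=2 only
  # the first two keep their slots and the remaining tokens are dropped.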
expert_mask_flat = mtf.reduce_sum(expert_mask, reduced_dim=experts_dim)
if train:
total_routed = mtf.reduce_sum(expert_mask_flat)
importance = mtf.cast(importance, dtype=total_routed.dtype)
mtf.scalar_summary("fraction_routed",
total_routed / mtf.reduce_sum(importance))
# Mask out the experts that have overflowed expert capacity. Sparsify the
# expert_gate.
expert_gate *= expert_mask_flat
combine_tensor = (
expert_gate * expert_mask_flat *
mtf.one_hot(expert_index, experts_dim, dtype=raw_gates.dtype) *
mtf.one_hot(
mtf.to_int32(position_in_expert),
expert_capacity_dim,
dtype=raw_gates.dtype))
# Match the inputs dtype.
combine_tensor = mtf.cast(combine_tensor, inputs.dtype)
loss = mtf.cast(loss, inputs.dtype)
dispatch_tensor = mtf.cast(
mtf.cast(combine_tensor, tf.bool), combine_tensor.dtype)
return dispatch_tensor, combine_tensor, loss
def _top_2_gating(
inputs, outer_expert_dims, experts_dim, expert_capacity_dim,
hparams, train, variable_dtype, importance=None, name="top_2_gating",
num_microbatches=None):
"""Compute gating for mixture-of-experts in TensorFlow.
  Note: until the algorithm and interface solidify, we pass in a hyperparameters
dictionary in order not to complicate the interface in mtf_transformer.py .
Once this code moves out of "research", we should pass the hyperparameters
separately.
Hyperparameters used:
hparams.moe_use_second_place_loss: a boolean
hparams.moe_second_policy_train: a string
hparams.moe_second_policy_eval: a string
hparams.moe_second_threshold: a float
The returned forward assignment is a tensor used to map (via einsum) from the
inputs to the expert_inputs. Likewise, the returned combine_tensor is
used to map (via einsum) from the expert outputs to the outputs. Both the
forward and backward assignments are mostly zeros. The shapes of the tensors
are as follows.
inputs: [<batch_dims>, group_size_dim, input_dim]
importance: [<batch_dims>, group_size_dim]
dispatch_tensor:
[<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]
expert_inputs:
[<batch_dims>, experts_dim, expert_capacity_dim, input_dim]
expert_outputs: [<batch_dims>, experts_dim, expert_capacity_dim, output_dim]
combine_tensor:
[<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]
outputs: [<batch_dims>, group_size_dim, output_dim]
"importance" is an optional tensor with one floating-point value for each
input vector. If the importance of an input is 1.0, then we send it to
up to 2 experts. If 0.0 < importance < 1.0, then we send it to at most
one expert. If importance == 0.0, then we send it to no experts.
We use "importance" at the second-level gating function of a hierarchical
mixture of experts. Inputs to the first-choice expert-group get importance
1.0. Inputs to the second-choice expert group get importance 0.5.
Inputs that represent padding get importance 0.0.
Args:
inputs: a mtf.Tensor with shape [<batch_dims>, group_size_dim, input_dim]
outer_expert_dims: an optional list of dimensions. This is for the case
where we are at an inner level of a hierarchical MoE.
experts_dim: a Dimension (the number of experts)
expert_capacity_dim: a Dimension (number of examples per group per expert)
hparams: model hyperparameters.
train: a boolean
variable_dtype: a mtf.VariableDType
importance: an optional tensor with shape [<batch_dims>, group_size_dim]
name: an optional string
num_microbatches: number of microbatches.
Returns:
dispatch_tensor: a Tensor with shape
[<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]
combine_tensor: a Tensor with shape
[<batch_dims>, group_size_dim, experts_dim, expert_capacity_dim]
loss: a mtf scalar
Raises:
ValueError: on illegal hyperparameters
"""
group_size_dim, unused_input_dim = inputs.shape.dims[-2:]
# The internals of this function run in float32.
# bfloat16 seems to reduce quality.
gate_inputs = mtf.to_float(inputs)
raw_gates = mtf.layers.dense(
gate_inputs, experts_dim, use_bias=False,
expert_dims=outer_expert_dims,
variable_dtype=variable_dtype,
name=name)
raw_gates = mtf.softmax(raw_gates, experts_dim)
expert_capacity_f = float(expert_capacity_dim.size)
  # FIND TOP 2 EXPERTS PER POSITION
# Find the top expert for each position. shape=[batch, group]
gate_1, index_1 = mtf.top_1(raw_gates, experts_dim)
# [batch, group, experts]
mask_1 = mtf.one_hot(index_1, experts_dim, dtype=raw_gates.dtype)
density_1_proxy = raw_gates
if importance is not None:
mask_1 *= mtf.to_float(mtf.equal(importance, 1.0))
gate_1 *= mtf.to_float(mtf.equal(importance, 1.0))
density_1_proxy *= mtf.to_float(mtf.equal(importance, 1.0))
gates_without_top_1 = raw_gates * (1.0 - mask_1)
# [batch, group]
gate_2, index_2 = mtf.top_1(gates_without_top_1, experts_dim)
# [batch, group, experts]
mask_2 = mtf.one_hot(index_2, experts_dim, dtype=raw_gates.dtype)
if importance is not None:
mask_2 *= mtf.to_float(mtf.greater(importance, 0.0))
denom = gate_1 + gate_2 + 1e-9
gate_1 /= denom
gate_2 /= denom
# BALANCING LOSSES
# shape = [batch, experts]
# We want to equalize the fraction of the batch assigned to each expert
density_1 = mtf.reduce_mean(mask_1, reduced_dim=group_size_dim)
# Something continuous that is correlated with what we want to equalize.
density_1_proxy = mtf.reduce_mean(density_1_proxy, reduced_dim=group_size_dim)
loss = (mtf.reduce_mean(density_1_proxy * density_1)
* float(experts_dim.size * experts_dim.size))
if hparams.moe_use_second_place_loss:
# Also add a loss to encourage all experts to be used equally also as the
# second-place expert. Experimentally, this seems to be a wash.
# We want to equalize the fraction of the batch assigned to each expert:
density_2 = mtf.reduce_mean(mask_2, reduced_dim=group_size_dim)
# As a proxy for density_2, we renormalize the raw gates after the top one
# has been removed.
normalized = gates_without_top_1 / (
mtf.reduce_sum(gates_without_top_1, reduced_dim=experts_dim) + 1e-9)
density_2_proxy = mtf.reduce_mean(normalized, reduced_dim=group_size_dim)
loss_2 = (mtf.reduce_mean(density_2_proxy * density_2)
* float(experts_dim.size * experts_dim.size))
loss += loss_2 * 0.5
if num_microbatches and num_microbatches > 1:
tf.logging.info("Dividing load-balance loss by num_microbatches={}".format(
num_microbatches))
loss /= num_microbatches
# Depending on the policy in the hparams, we may drop out some of the
# second-place experts.
if train:
policy = hparams.moe_second_policy_train
threshold = hparams.moe_second_threshold_train
else:
policy = hparams.moe_second_policy_eval
threshold = hparams.moe_second_threshold_eval
if policy == "all":
# Use second-place experts for all examples.
pass
elif policy == "none":
    # Never use second-place experts.
mask_2 = mtf.zeros_like(mask_2)
elif policy == "threshold":
# Use second-place experts if gate_2 > threshold.
mask_2 *= mtf.to_float(mtf.greater(gate_2, threshold))
elif policy == "random":
    # Use second-place experts with probability min(1.0, gate_2 / threshold).
mask_2 *= mtf.to_float(
mtf.less(mtf.random_uniform(gate_2.mesh, gate_2.shape),
gate_2 / max(threshold, 1e-9)))
else:
raise ValueError("Unknown policy %s" % policy)
# COMPUTE ASSIGNMENT TO EXPERTS
# [batch, group, experts]
# This is the position within the expert's mini-batch for this sequence
position_in_expert_1 = mtf.cumsum(
mask_1, group_size_dim, exclusive=True) * mask_1
# Remove the elements that don't fit. [batch, group, experts]
mask_1 *= mtf.to_float(mtf.less(position_in_expert_1, expert_capacity_f))
# [batch, experts]
# How many examples in this sequence go to this expert
mask_1_count = mtf.reduce_sum(mask_1, reduced_dim=group_size_dim)
# [batch, group] - mostly ones, but zeros where something didn't fit
mask_1_flat = mtf.reduce_sum(mask_1, reduced_dim=experts_dim)
# [batch, group]
position_in_expert_1 = mtf.reduce_sum(
position_in_expert_1, reduced_dim=experts_dim)
# Weight assigned to first expert. [batch, group]
gate_1 *= mask_1_flat
# [batch, group, experts]
position_in_expert_2 = (
mtf.cumsum(mask_2, group_size_dim, exclusive=True) + mask_1_count)
position_in_expert_2 *= mask_2
mask_2 *= mtf.to_float(mtf.less(position_in_expert_2, expert_capacity_f))
# mask_2_count = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)
mask_2_flat = mtf.reduce_sum(mask_2, reduced_dim=experts_dim)
gate_2 *= mask_2_flat
position_in_expert_2 = mtf.reduce_sum(
position_in_expert_2, reduced_dim=experts_dim)
# [batch, group, experts, expert_capacity]
combine_tensor = (
gate_1 * mask_1_flat
* mtf.one_hot(index_1, experts_dim)
* mtf.one_hot(mtf.to_int32(position_in_expert_1), expert_capacity_dim) +
gate_2 * mask_2_flat
* mtf.one_hot(index_2, experts_dim)
* mtf.one_hot(mtf.to_int32(position_in_expert_2), expert_capacity_dim))
combine_tensor = mtf.cast(combine_tensor, inputs.dtype)
loss = mtf.cast(loss, inputs.dtype)
dispatch_tensor = mtf.cast(
mtf.cast(combine_tensor, tf.bool), combine_tensor.dtype)
return dispatch_tensor, combine_tensor, loss
def set_default_moe_hparams(hparams):
"""Add necessary hyperparameters for mixture-of-experts."""
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-2
hparams.add_hparam("moe_gating", "top_2")
# Experts have fixed capacity per batch. We need some extra capacity
# in case gating is not perfectly balanced.
# moe_capacity_factor_* should be set to a value >=1.
hparams.add_hparam("moe_capacity_factor_train", 1.25)
hparams.add_hparam("moe_capacity_factor_eval", 2.0)
hparams.add_hparam("moe_capacity_factor_second_level", 1.0)
# Each expert has a hidden layer with this size.
hparams.add_hparam("moe_hidden_size", 4096)
# For gating, divide inputs into groups of this size before gating.
# Each group sends the same number of inputs to each expert.
# Ideally, the group size would be the whole batch, but this is expensive
# due to our use of matrix multiplication for reordering.
hparams.add_hparam("moe_group_size", 1024)
# For top_2 gating, whether to impose an additional loss in order to make
# the experts equally used as the second-place expert.
hparams.add_hparam("moe_use_second_place_loss", 0)
# In top_2 gating, policy for whether to use a second-place expert.
# Legal values are:
# "all": always
# "none": never
# "threshold": if gate value > the given threshold
# "random": if gate value > threshold*random_uniform(0,1)
hparams.add_hparam("moe_second_policy_train", "random")
hparams.add_hparam("moe_second_policy_eval", "random")
hparams.add_hparam("moe_second_threshold_train", 0.2)
hparams.add_hparam("moe_second_threshold_eval", 0.2)
def _split_into_groups(n, max_group_size, mesh_dim_size):
"""Helper function for figuring out how to split a dimension into groups.
We have a dimension with size n and we want to split it into
two dimensions: n = num_groups * group_size
group_size should be the largest possible value meeting the constraints:
group_size <= max_group_size
(num_groups = n/group_size) is a multiple of mesh_dim_size
Args:
n: an integer
max_group_size: an integer
mesh_dim_size: an integer
Returns:
num_groups: an integer
group_size: an integer
Raises:
ValueError: if n is not a multiple of mesh_dim_size
"""
if n % mesh_dim_size != 0:
raise ValueError(
"n=%d is not a multiple of mesh_dim_size=%d" % (n, mesh_dim_size))
num_groups = max(1, n // max_group_size)
while (num_groups % mesh_dim_size != 0 or n % num_groups != 0):
num_groups += 1
group_size = n // num_groups
tf.logging.info(
"_split_into_groups(n=%d, max_group_size=%d, mesh_dim_size=%d)"
" = (num_groups=%d group_size=%d)" %
(n, max_group_size, mesh_dim_size, num_groups, group_size))
return num_groups, group_size
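# Worked example (illustrative): _split_into_groups(n=4096, max_group_size=1024,
# mesh_dim_size=8) starts with num_groups = 4096 // 1024 = 4, which is not a
# multiple of 8, so it walks upward until num_groups=8 and returns
# (num_groups=8, group_size=512).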
class HParams(object):
"""Replacement for tf.contrib.training.HParams.
TODO(noam): remove this class and rewrite the methods in this file.
"""
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def add_hparam(self, k, v):
setattr(self, k, v)
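# Example usage of this lightweight stand-in (illustrative only):
#   hparams = HParams(moe_num_experts=16, moe_loss_coef=1e-2)
#   hparams.add_hparam("moe_gating", "top_2")
#   assert hparams.moe_gating == "top_2"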
| mesh-master | mesh_tensorflow/transformer/moe.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Transformer using Mesh-TensorFlow.
Training/Eval/Inference of a transformer machine-translation model.
Data comes from TensorFlow Datasets.
The core transformer model code is in the mesh_tensorflow/transformer/
directory of this repository.
Instructions for running this on cloud TPU are in the README.
TODO(noam): instructions are obsolete and need updating.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import os
import sys
from mesh_tensorflow.transformer import utils
import tensorflow.compat.v1 as tf
tf.flags.DEFINE_string(
"tpu_job_name", None,
"Name of TPU worker binary. Only necessary if job name is changed from"
" default tpu_worker.")
tf.flags.DEFINE_string(
"model_dir", "/tmp/transformer_standalone", "Estimator model_dir")
tf.flags.DEFINE_string(
"tpu",
default=None,
help="The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.")
tf.flags.DEFINE_string(
"gcp_project",
default=None,
help="Project name for the Cloud TPU-enabled project. If not specified, we "
"will attempt to automatically detect the GCE project from metadata.")
tf.flags.DEFINE_string(
"tpu_zone",
default=None,
help="GCE zone where the Cloud TPU is located in. If not specified, we "
"will attempt to automatically detect the GCE project from metadata.")
# TFDS Module Import
tf.flags.DEFINE_multi_string(
"module_import", None,
"Modules to import. Use this when your DatasetBuilder is defined outside "
"of tensorflow_datasets so that it is registered.")
FLAGS = tf.flags.FLAGS
def main(_):
if FLAGS.module_import:
for module in FLAGS.module_import:
importlib.import_module(module)
tf.io.gfile.makedirs(FLAGS.model_dir)
suffix = 0
command_filename = os.path.join(FLAGS.model_dir, "command")
while tf.io.gfile.exists(command_filename):
suffix += 1
command_filename = os.path.join(
FLAGS.model_dir, "command.{}".format(suffix))
with tf.io.gfile.GFile(command_filename, "w") as f:
f.write(" ".join(sys.argv))
utils.parse_gin_defaults_and_flags()
utils.run(
tpu_job_name=FLAGS.tpu_job_name,
tpu=FLAGS.tpu,
gcp_project=FLAGS.gcp_project,
tpu_zone=FLAGS.tpu_zone,
model_dir=FLAGS.model_dir)
if __name__ == "__main__":
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| mesh-master | mesh_tensorflow/transformer/main.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for mesh_tensorflow.transformer.vocab_embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import vocab_embeddings
import mock
import numpy as np
import scipy.special
import tensorflow.compat.v1 as tf
def initialize_by_shape(shape_to_value):
"""Create an initializer with values specified by tensor shape."""
def initialize(shape, dtype):
shape = tuple(shape)
if shape not in shape_to_value:
raise ValueError(
'Shape {} not found in shape to value map.'.format(shape))
return tf.reshape(
tf.constant(shape_to_value[tuple(shape)], dtype=dtype), shape)
return initialize
class FactorizedVocabEmbeddingTest(tf.test.TestCase):
def setUp(self):
super(FactorizedVocabEmbeddingTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 4
vocab_size = 3
model_size = 2
inner_dimension_size = 1
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
ids = tf.constant([0, 1, 2, 1], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
def initialize(shape, dtype):
return tf.reshape(1 + tf.range(np.prod(shape), dtype=dtype), shape)
self.initializer_mock.side_effect = initialize
vocab_embedding = vocab_embeddings.FactorizedVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
inner_dimension_size=inner_dimension_size)
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_embedding])[0]
self.assertAllClose(actual, [[1, 2], [2, 4], [3, 6], [2, 4]])
def test_hidden_to_logits_computesLogitsCorrectly(self):
seq_len = 4
vocab_size = 3
model_size = 2
inner_dimension_size = 1
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
embeddings = tf.constant([[1, 0], [0, 1], [1, 1], [2, 1]], dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
def initialize(shape, dtype):
return tf.reshape(1 + tf.range(np.prod(shape), dtype=dtype), shape)
self.initializer_mock.side_effect = initialize
vocab_embedding = vocab_embeddings.FactorizedVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
inner_dimension_size=inner_dimension_size)
mtf_logits = vocab_embedding.hidden_to_logits(mtf_embeddings, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_logits = lowering.export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_logits])[0]
self.assertAllClose(
actual, model_size**-0.5 *
np.array([[1, 2, 3], [2, 4, 6], [3, 6, 9], [4, 8, 12]]))
class AdaptiveVocabEmbeddingTest(tf.test.TestCase):
def setUp(self):
super(AdaptiveVocabEmbeddingTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def test_constructor_tokenCountsDontSumToVocabSize_raisesValueError(self):
vocab_dim = mtf.Dimension('vocab', 5)
model_dim = mtf.Dimension('model', 2)
with self.assertRaises(ValueError):
vocab_embeddings.AdaptiveVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 3,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 1
}])
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 6
vocab_size = 5
model_size = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
ids = tf.constant([0, 1, 2, 3, 4, 0], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
(2, 2): [[0, 1], [2, 0]],
(3, 1): [[1], [2], [3]],
(1, 2): [[1], [2]],
})
vocab_embedding = vocab_embeddings.AdaptiveVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 2,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 1
}])
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_embedding])[0]
self.assertAllClose(actual,
[[0, 1], [2, 0], [1, 2], [2, 4], [3, 6], [0, 1]])
def test_hidden_to_logits_computesLogitsCorrectly(self):
seq_len = 4
vocab_size = 5
model_size = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
embeddings = tf.constant([[1, 0], [0, 1], [1, 1], [2, 1]], dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
(2, 2): [[0, 1], [2, 0]],
(3, 1): [[1], [2], [3]],
(1, 2): [[1], [2]],
})
vocab_embedding = vocab_embeddings.AdaptiveVocabEmbedding(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
clusters=[{
'token_count': 2,
'embedding_size': 2
}, {
'token_count': 3,
'embedding_size': 1
}])
mtf_logits = vocab_embedding.hidden_to_logits(mtf_embeddings, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_logits = lowering.export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_logits])[0]
self.assertAllClose(
actual,
model_size**-0.5 * np.array([[0, 2, 1, 2, 3], [1, 0, 2, 4, 6],
[1, 2, 3, 6, 9], [1, 4, 4, 8, 12]]))
class MixtureOfSoftmaxesTest(tf.test.TestCase):
def setUp(self):
super(MixtureOfSoftmaxesTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 4
vocab_size = 4
model_size = 3
num_softmaxes = 1
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
ids = tf.constant([0, 1, 2, 3], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
# Embedding weights.
(4, 3): [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 2]],
# Mixture weights.
(1, 3): [[1, 0, 0]],
# Context weights
(1, 3, 3): [[[1, 0, 0], [0, 1, 0], [0, 0, 1]],],
})
vocab_embedding = vocab_embeddings.MixtureOfSoftmaxes(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
num_softmaxes=num_softmaxes)
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_embedding])[0]
self.assertAllClose(actual, [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 2]])
def test_hidden_to_logits_computesLogitsCorrectly(self):
seq_len = 1
vocab_size = 4
model_size = 3
num_softmaxes = 2
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
embeddings = tf.constant(
np.array([[1.0, 1.0, 2.0]]) / model_size**-0.5, dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
# Embedding weights.
(4, 3): [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 1]],
# Mixture weights.
(2, 3): [[1, 0, 0], [0, 1, 1]],
# Context weights
(2, 3, 3): [
[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
[[0, 0, 1], [0, 1, 0], [1, 0, 0]],
],
})
vocab_embedding = vocab_embeddings.MixtureOfSoftmaxes(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
num_softmaxes=num_softmaxes)
mtf_logits = vocab_embedding.hidden_to_logits(mtf_embeddings, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_logits = lowering.export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual, = self.evaluate([actual_logits])
expected_priors = scipy.special.softmax([1, 3])
expected_probs_1 = scipy.special.softmax(np.tanh([1, 1, 2, 2]))
expected_probs_2 = scipy.special.softmax(np.tanh([2, 1, 1, 1]))
expected_probs = (
expected_priors[0] * expected_probs_1 +
expected_priors[1] * expected_probs_2)
expected_logits = np.log(expected_probs)
self.assertAllClose(actual, [expected_logits])
class MixtapeTest(tf.test.TestCase):
def setUp(self):
super(MixtapeTest, self).setUp()
self.graph = mtf.Graph()
self.mesh = mtf.Mesh(self.graph, 'mtf_mesh')
self.variable_dtype = mtf.VariableDType(activation_dtype=tf.float32)
self.addCleanup(mock.patch.stopall)
self.initializer_mock = mock.MagicMock()
random_normal_initializer_mock = mock.patch.object(
tf, 'random_normal_initializer').start()
random_normal_initializer_mock.return_value = self.initializer_mock
def test_ids_to_embedding_correctlyEmbeds(self):
seq_len = 5
vocab_size = 5
model_size = 2
gate_embedding_size = 1
frequent_token_fraction = 0.4
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
context = mock.MagicMock()
context.train = False
ids = tf.constant([0, 1, 2, 3, 4], dtype=tf.int32)
mtf_ids = mtf.import_tf_tensor(
self.mesh, ids, shape=mtf.Shape([length_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
# Embedding weights.
(5, 2): list(range(10)),
# Context weights.
(4, 2, 2): list(range(16)),
# Prior weights.
(3, 1, 2): list(range(6)),
# Prior vocab vector.
(2, 1): list(range(2)),
# Prior gates vector.
(3, 2): list(range(6)),
# Prior bias.
(2, 3): list(range(6)),
})
vocab_embedding = vocab_embeddings.Mixtape(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
gate_embedding_size=gate_embedding_size,
frequent_token_fraction=frequent_token_fraction)
mtf_embedding = vocab_embedding.ids_to_embedding(mtf_ids, context=None)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_embedding = lowering.export_to_tf_tensor(mtf_embedding)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual = self.evaluate([actual_embedding])[0]
self.assertAllClose(actual, np.reshape(list(range(10)), (5, 2)))
def test_hidden_to_logits_computesLogitsCorrectly(self):
seq_len = 1
vocab_size = 5
model_size = 2
gate_embedding_size = 1
frequent_token_fraction = 0.4
vocab_dim = mtf.Dimension('vocab', vocab_size)
model_dim = mtf.Dimension('model', model_size)
length_dim = mtf.Dimension('length', seq_len)
context = mock.MagicMock()
context.train = False
embeddings = tf.constant(
np.array([[1.0, 2.0]]) / model_size**-0.5, dtype=tf.float32)
mtf_embeddings = mtf.import_tf_tensor(
self.mesh, embeddings, shape=mtf.Shape([length_dim, model_dim]))
self.initializer_mock.side_effect = initialize_by_shape({
# Embedding weights.
(5, 2): list(range(10)),
# Context weights.
(4, 2, 2): [
[[1, 0], [0, 1]],
[[0, 1], [1, 0]],
[[1, 0], [0, 0]],
[[0, 0], [0, 1]],
],
# Prior weights.
(3, 1, 2): [
[[1, 0]],
[[0, 1]],
[[1, 1]],
],
# Prior vocab vector.
(2, 1): [[1], [1]],
# Prior gates vector.
(3, 2): [
[1, 0],
[0, 1],
[1, 1],
],
# Prior bias.
(2, 3): [[1, 2, 3], [3, 4, 5]],
})
vocab_embedding = vocab_embeddings.Mixtape(
self.mesh,
vocab_dim,
output_dim=model_dim,
variable_dtype=self.variable_dtype,
name='embedding',
ensemble_dim=None,
gate_embedding_size=gate_embedding_size,
frequent_token_fraction=frequent_token_fraction,
noise_std_dev=0.0)
mtf_logits = vocab_embedding.hidden_to_logits(
mtf_embeddings, context=context)
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
shape=[], layout={}, devices=[''])
lowering = mtf.Lowering(self.graph, {self.mesh: mesh_impl})
actual_logits = lowering.export_to_tf_tensor(mtf_logits)
self.evaluate(tf.global_variables_initializer())
self.evaluate(lowering.copy_masters_to_slices())
actual, = self.evaluate([actual_logits])
self.assertAllClose(actual,
[[0.905462, 4.390559, 6.575162, 9.513036, 12.450909]])
if __name__ == '__main__':
tf.test.main()
| mesh-master | mesh_tensorflow/transformer/vocab_embeddings_test.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Different ways to go from token ids to hidden states and states to logits."""
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import transformer
import tensorflow.compat.v1 as tf
@gin.configurable
class FactorizedVocabEmbedding(object):
"""Factorizes the embedding matrix with projection to a small inner dimension.
  Like ALBERT (https://arxiv.org/abs/1909.11942).
Interface matches mesh_tensorflow.transformer VocabEmbedding object.
"""
def __init__(self,
mesh,
vocab_dim,
output_dim,
variable_dtype,
name,
ensemble_dim,
inner_dimension_size=gin.REQUIRED):
"""Configurable embedding for the vocabulary.
Most of the arguments get passed to `mtf.layers.embedding_weights` with an
option to factorize the embedding matrix.
Args:
mesh: a mtf.Mesh
vocab_dim: a mtf.Dimension
output_dim: a mtf.Dimension
variable_dtype: a mtf.VariableDType
name: a string
ensemble_dim: a mtf.Dimension
inner_dimension_size: a positive integer, the size of the inner dimension
of the embedding matrix
"""
self._vocab_dim = vocab_dim
self._output_dim = output_dim
self._inner_dim = mtf.Dimension("inner_vocab", inner_dimension_size)
self._factor1 = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=vocab_dim,
output_dim=self._inner_dim,
variable_dtype=variable_dtype,
name="{}1".format(name),
ensemble_dim=ensemble_dim,
initializer=tf.random_normal_initializer(
stddev=inner_dimension_size**-0.25))
self._factor2 = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=self._inner_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}2".format(name),
ensemble_dim=ensemble_dim,
initializer=tf.random_normal_initializer(
stddev=inner_dimension_size**-0.25))
def ids_to_embedding(self, ids, context):
del context
tmp = mtf.gather(self._factor1, ids, self._vocab_dim)
return mtf.einsum([tmp, self._factor2], reduced_dims=[self._inner_dim])
def hidden_to_logits(self, hidden, context):
del context
hidden *= self._output_dim.size**-0.5
tmp = mtf.einsum([hidden, self._factor2], reduced_dims=[self._output_dim])
return mtf.einsum([tmp, self._factor1], reduced_dims=[self._inner_dim])
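# Rough sizing sketch (illustrative, assumed sizes, not from the original
# file): with vocab_size=32000, d_model=1024 and inner_dimension_size=128,
# the two factors hold about 32000*128 + 128*1024 ~= 4.2M parameters versus
# ~32.8M for an unfactorized 32000x1024 embedding. ids_to_embedding gathers
# rows of factor1 and projects them up through factor2; hidden_to_logits
# applies the same factors in the reverse order.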
class _Cluster(object):
"""Helper class for adaptive embeddings specifying a cluster of tokens.
Essentially a wrapper around a vocab embedding for the cluster with additional
metadata so that we can apply the embedding to the actual ids and hidden
states.
"""
def __init__(self, embedding, start_token_id, end_token_id):
"""Cluster constructor.
Args:
embedding: a FactorizedVocabEmbedding or transformer.VocabEmbedding, the
vocab embedding to use for the cluster
start_token_id: an integer, the inclusive id of the first token in the
cluster
end_token_id: an integer, the exclusive id of the last token in the
cluster
"""
self._embedding = embedding
self._start_token_id = start_token_id
self._end_token_id = end_token_id
def ids_to_embedding(self, ids, context):
"""Ids to embeddings with ids not in cluster mapped to the zero vector."""
ids -= self._start_token_id
# The mtf.gather in the embedding's ids_to_embedding implementation will
# cause the one hot representations of tokens greater than cluster vocab
# dimension size to be the zero vector. Thus the embeddings for those tokens
# will be the zero vector.
ids = mtf.where(mtf.greater_equal(ids, 0), ids, self._end_token_id)
return self._embedding.ids_to_embedding(ids, context)
def hidden_to_logits(self, hidden, context):
"""Returns the logits for tokens within the cluster."""
return self._embedding.hidden_to_logits(hidden, context)
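# Illustrative example (assumed values, not from the original file): for a
# cluster covering token ids [100, 150), ids_to_embedding shifts id 120 to
# 20 and looks it up in the cluster's 50-token embedding, while an id
# outside the cluster (say 10) becomes negative and is remapped to
# end_token_id=150; since 150 lies outside the cluster vocab dimension, the
# gather yields the zero vector, so summing the per-cluster embeddings
# reconstructs the correct overall embedding.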
@gin.configurable
class AdaptiveVocabEmbedding(object):
"""A vocab embedding assigning variable capacity to clusters of tokens.
Similar to the adaptive input representations in this paper
(https://arxiv.org/abs/1809.10853). However, they use an adaptive softmax to
compute logits while this embedding uses a regular softmax.
The idea is to create clusters of tokens and assign different capacity to
different clusters by factoring their embedding matrices to different inner
dimensions.
The clustering can be done by word frequency with more frequent tokens getting
higher capacity. In this implementation, token ids of clusters must be
contiguous in the vocabulary.
Interface matches mesh_tensorflow.transformer VocabEmbedding object.
"""
def __init__(self,
mesh,
vocab_dim,
output_dim,
variable_dtype,
name,
ensemble_dim,
clusters=gin.REQUIRED):
"""Configurable embedding for the vocabulary.
Most of the arguments get passed to `mtf.layers.embedding_weights`.
The clustering parameters are specified by the `clusters` argument. It is a
list of dicts with keys "token_count" and "embedding_size". Token count
specifies the number of tokens in the cluster, and embedding size specifies
the hidden dimension size of its embedding.
For example, let's say we have a vocab size of 500k and pass as clusters:
[
{"token_count": 50000, "embedding_size": 1024},
{"token_count": 100000, "embedding_size": 256},
{"token_count": 350000, "embedding_size": 64},
]
Then tokens with ids 0 (inclusive) to 50k (exclusive) will be in the first
cluster with embedding size of 1024, tokens with ids 50k to 150k will be in
the second cluster with embedding size of 256, and tokens with ids 150k to
500k will be in the third cluster with embedding size of 64.
Args:
mesh: a mtf.Mesh
vocab_dim: a mtf.Dimension
output_dim: a mtf.Dimension
variable_dtype: a mtf.VariableDType
name: a string
ensemble_dim: a mtf.Dimension
clusters: a list(dict), specification of the clusters
Raises:
ValueError: The sum of the token counts across the clusters does not equal
the vocabulary size.
"""
self._vocab_dim = vocab_dim
self._output_dim = output_dim
token_counts = [cluster["token_count"] for cluster in clusters]
if sum(token_counts) != vocab_dim.size:
raise ValueError(
"The cluster token counts {} do not sum to the vocab size {}.".format(
token_counts, vocab_dim.size))
self._clusters = []
start_token_id = 0
for i, cluster in enumerate(clusters):
token_count = cluster["token_count"]
embedding_size = cluster["embedding_size"]
cluster_vocab_dim = mtf.Dimension(vocab_dim.name, token_count)
if embedding_size == self._output_dim.size:
# In this case we don't need to up project from the embedding space to
# the model state space.
cluster_embedding = transformer.VocabEmbedding(
mesh=mesh,
vocab_dim=cluster_vocab_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_{}".format(name, i),
ensemble_dim=ensemble_dim)
else:
cluster_embedding = FactorizedVocabEmbedding(
mesh=mesh,
vocab_dim=cluster_vocab_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_{}".format(name, i),
ensemble_dim=ensemble_dim,
inner_dimension_size=embedding_size)
self._clusters.append(
_Cluster(
embedding=cluster_embedding,
start_token_id=start_token_id,
end_token_id=start_token_id + token_count))
start_token_id += token_count
def ids_to_embedding(self, ids, context):
# Ids not in each cluster will be mapped to the zero vector. Since clusters
# are disjoint, this sum is correct.
return sum(
cluster.ids_to_embedding(ids, context) for cluster in self._clusters)
def hidden_to_logits(self, hidden, context):
# Each cluster returns the logits for only the tokens with itself, so their
# concatenation is the full logits.
return mtf.concat(
[
cluster.hidden_to_logits(hidden, context=context)
for cluster in self._clusters
],
concat_dim_name=self._vocab_dim.name,
)
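# A minimal gin sketch (assumed values, not from the original file),
# assuming d_model=512 and a 32128-token vocab sorted by decreasing
# frequency:
#
#   AdaptiveVocabEmbedding.clusters = [
#       {"token_count": 8128, "embedding_size": 512},   # full-capacity head
#       {"token_count": 24000, "embedding_size": 64},   # factorized tail
#   ]
#
# The token counts must sum to the vocab size, otherwise the constructor
# raises ValueError.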
@gin.configurable
class MixtureOfSoftmaxes(object):
"""Embedding with the token distributions as a weighted mixture of softmaxes.
Expressing the token distributions in this way improves expressiveness and
enables the matrix of token probabilities given all contexts to be high rank.
The vocab embedding is the same as the default, which is just a simple
embedding.
See https://arxiv.org/pdf/1711.03953.pdf for more details.
"""
def __init__(self,
mesh: mtf.Mesh,
vocab_dim: mtf.Dimension,
output_dim: mtf.Dimension,
variable_dtype: mtf.VariableDType,
name: str,
ensemble_dim: mtf.Dimension,
num_softmaxes: int = gin.REQUIRED):
"""Configurable embedding for the vocabulary.
Most of the arguments get passed to `mtf.layers.embedding_weights`.
Args:
mesh: the mesh used to layout the tensors.
vocab_dim: the dimension corresponding to vocabulary.
output_dim: the dimension corresponding to the model
hidden states.
variable_dtype: the datatype information for the
variables used in the embedding tensors.
name: a name to base variable names off of.
ensemble_dim: the dimension used for ensembling.
Absolutely no guarantees that this code will work with ensembling.
num_softmaxes: a positive int, the number of components to use in the
mixture.
"""
self._vocab_dim = vocab_dim
self._output_dim = output_dim
self._copy_output_dim = mtf.Dimension("_{}_copy".format(output_dim.name),
output_dim.size)
self._components_dim = mtf.Dimension("softmax_components", num_softmaxes)
self._embedding_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=vocab_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_embedding_weights".format(name),
ensemble_dim=ensemble_dim)
self._mixture_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=self._components_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_mixture_weights".format(name),
ensemble_dim=ensemble_dim)
self._context_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=self._copy_output_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_context_weights".format(name),
ensemble_dim=([ensemble_dim] if ensemble_dim else []) +
[self._components_dim])
def ids_to_embedding(self, ids: mtf.Tensor, context) -> mtf.Tensor:
del context
return mtf.gather(self._embedding_weights, ids, self._vocab_dim)
def hidden_to_logits(self, hidden: mtf.Tensor,
context: transformer.Context) -> mtf.Tensor:
"""Function called by mtf transformer to get the logits.
Note that we are taking the log of a mixture of softmaxes. The logits will
then go through a softmax. This could potentially run into numerical
stability issues. If that happens, try setting the activation_dtype to
float32.
Args:
hidden: hidden model states of the final decoder layer.
context: the context used for the call to the
transformer.
Returns:
The logits.
"""
del context
hidden *= self._output_dim.size**-0.5
component_prior_logits = mtf.einsum([hidden, self._mixture_weights],
reduced_dims=[self._output_dim])
component_contexts = mtf.einsum([
mtf.rename_dimension(hidden, self._output_dim.name,
self._copy_output_dim.name),
self._context_weights,
],
reduced_dims=[self._copy_output_dim])
component_contexts = mtf.tanh(component_contexts)
component_logits = mtf.einsum([component_contexts, self._embedding_weights],
reduced_dims=[self._output_dim])
component_prior_logits = mtf.log_softmax(
component_prior_logits, reduced_dim=self._components_dim)
component_logits = mtf.log_softmax(
component_logits, reduced_dim=self._vocab_dim)
logits = component_prior_logits + component_logits
logits = mtf.reduce_logsumexp(logits, reduced_dim=self._components_dim)
return logits
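# In formula form (a sketch of what hidden_to_logits above computes): with
# component priors pi_k = softmax_k(h . m_k) taken over the
# softmax_components dimension and per-component distributions
# p_k = softmax(tanh(h W_k) E^T) taken over the vocab, the returned logits
# are log(sum_k pi_k * p_k), evaluated stably as a logsumexp over components
# of (log pi_k + log p_k).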
@gin.configurable
class Mixtape(object):
"""Embedding that uses Mixtape in computing logits.
Expressing the token distributions in this way improves expressiveness and
enables the matrix of token probabilities given all contexts to be high rank.
Mixtape has the advantage of added efficiency over other methods such as
mixture of softmax.
  The vocab embedding is the same as the default, which is just a simple embedding.
See
https://papers.nips.cc/paper/9723-mixtape-breaking-the-softmax-bottleneck-efficiently.pdf
for more details.
"""
def __init__(self,
mesh: mtf.Mesh,
vocab_dim: mtf.Dimension,
output_dim: mtf.Dimension,
variable_dtype: mtf.VariableDType,
name: str,
ensemble_dim: mtf.Dimension,
extra_ids: int = 0,
dropout_rate: float = 0.0,
gate_embedding_size: int = gin.REQUIRED,
frequent_token_fraction: float = 0.1,
noise_std_dev: float = 0.0):
"""Configurable embedding for the vocabulary.
Most of the arguments get passed to `mtf.layers.embedding_weights`.
Mixtape shares gates for low frequency tokens to improve efficiency. Since
our vocabs are sorted in decreasing order of frequency with sentinels
appended to the end, we need to do a little trick to ensure that the
sentinels are treated as high frequency. If you want to treat the sentinels
as low frequency tokens, then pass in zero for `extra_ids`.
Args:
mesh: the mesh used to layout the tensors.
vocab_dim: the dimension corresponding to vocabulary.
output_dim: the dimension corresponding to the model hidden states.
variable_dtype: the datatype information for the variables used in the
embedding tensors.
name: a name to base variable names off of.
ensemble_dim: the dimension used for ensembling. Absolutely no guarantees
that this code will work with ensembling.
extra_ids: a non-negative integer, the number of sentinels at the end of
the vocab.
dropout_rate: a float between 0 and 1, the rate to use for dropout.
gate_embedding_size: a positive integer, the size to use for embedding for
the gates. It is usually chosen to be much smaller than d_model.
frequent_token_fraction: a float between 0 and 1, what fraction of tokens
to consider as high frequency and not share gates for.
noise_std_dev: a non-negative float, the standard deviation of the
Gaussian noise to add to the pre-activation priors.
"""
self._extra_ids = extra_ids
self._dropout_rate = dropout_rate
self._noise_std_dev = noise_std_dev
self._mesh = mesh
self._vocab_dim = vocab_dim
self._frequent_vocab_dim = mtf.Dimension(
vocab_dim.name, int(frequent_token_fraction * vocab_dim.size))
self._rare_vocab_dim = mtf.Dimension(
vocab_dim.name, vocab_dim.size - self._frequent_vocab_dim.size)
self._output_dim = output_dim
self._copy_output_dim = mtf.Dimension("_{}_copy".format(output_dim.name),
output_dim.size)
self._pre_gates_dim = mtf.Dimension("gates", 3)
self._gates_dim = mtf.Dimension("gates", 4)
self._gate_embedding_dim = mtf.Dimension("gate_embedding",
gate_embedding_size)
self._embedding_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=vocab_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_embedding_weights".format(name),
ensemble_dim=ensemble_dim)
ensemble_dims = [ensemble_dim] if ensemble_dim else []
self._context_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=self._copy_output_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_context_weights".format(name),
ensemble_dim=ensemble_dims + [self._gates_dim])
self._context_weights_bias = mtf.get_variable(
mesh,
name="{}_context_weights_bias".format(name),
shape=mtf.Shape(ensemble_dims + [self._gates_dim, output_dim]),
dtype=variable_dtype,
initializer=tf.zeros_initializer())
self._prior_weights = mtf.layers.embedding_weights(
mesh=mesh,
vocab_dim=self._gate_embedding_dim,
output_dim=output_dim,
variable_dtype=variable_dtype,
name="{}_prior_weights".format(name),
ensemble_dim=ensemble_dims + [self._pre_gates_dim])
self._prior_weights_bias = mtf.get_variable(
mesh,
name="{}_prior_weights_bias".format(name),
shape=mtf.Shape(ensemble_dims +
[self._pre_gates_dim, self._gate_embedding_dim]),
dtype=variable_dtype,
initializer=tf.zeros_initializer())
self._prior_vocab_vector = mtf.get_variable(
mesh,
name="{}_prior_vocab_vector".format(name),
shape=mtf.Shape(ensemble_dims +
[self._frequent_vocab_dim, self._gate_embedding_dim]),
dtype=variable_dtype,
initializer=tf.random_normal_initializer())
self._prior_gates_vector = mtf.get_variable(
mesh,
name="{}_prior_gates_vector".format(name),
shape=mtf.Shape(ensemble_dims + [self._pre_gates_dim, output_dim]),
dtype=variable_dtype,
initializer=tf.random_normal_initializer())
self._prior_bias = mtf.get_variable(
mesh,
name="{}_prior_bias".format(name),
shape=mtf.Shape(ensemble_dims +
[self._frequent_vocab_dim, self._pre_gates_dim]),
dtype=variable_dtype,
initializer=tf.random_normal_initializer())
def ids_to_embedding(self, ids: mtf.Tensor, context) -> mtf.Tensor:
del context
return mtf.gather(self._embedding_weights, ids, self._vocab_dim)
def _sigmoid_tree(self, tensor):
"""Create probability distribution along gates dim using a sigmoid tree."""
gamma = mtf.split(
mtf.sigmoid(tensor), self._pre_gates_dim, self._pre_gates_dim.size)
return mtf.concat([
gamma[0] * gamma[1],
gamma[0] * (1 - gamma[1]),
(1 - gamma[0]) * gamma[2],
(1 - gamma[0]) * (1 - gamma[2]),
], self._gates_dim.name)
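  # Illustrative note (not part of the original file): for pre-activation
  # gate logits [a, b, c], the tree above produces the four gate weights
  #   [s(a)*s(b), s(a)*(1-s(b)), (1-s(a))*s(c), (1-s(a))*(1-s(c))]
  # where s is the sigmoid; these are non-negative and sum to 1, so three
  # logits parameterize a distribution over the four gates.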
def _dropout(self, tensor, context):
if context.train and self._dropout_rate != 0.0:
return mtf.dropout(
tensor,
1.0 - self._dropout_rate,
noise_shape=tensor.shape - context.length_dim)
return tensor
def _rearrange_sentinels(self, logits):
"""Reorder along the vocab dim so the last few tokens don't share gates."""
if not self._extra_ids:
return logits
sentinels, nonsentinels = mtf.split(
logits, self._vocab_dim,
[self._extra_ids, self._vocab_dim.size - self._extra_ids])
return mtf.concat([nonsentinels, sentinels], self._vocab_dim.name)
def hidden_to_logits(self, hidden: mtf.Tensor,
context: transformer.Context) -> mtf.Tensor:
"""Function called by mtf transformer to get the logits.
Args:
hidden: an mtf.Tensor, hidden model states of the final decoder layer.
context: a transformer.Context, the context used for the call to the
transformer.
Returns:
An mtf.Tensor, the logits.
"""
hidden *= self._output_dim.size**-0.5
component_contexts = mtf.einsum([
mtf.rename_dimension(hidden, self._output_dim.name,
self._copy_output_dim.name),
self._context_weights,
],
reduced_dims=[self._copy_output_dim])
component_contexts = mtf.tanh(component_contexts +
self._context_weights_bias)
component_logits = mtf.einsum([component_contexts, self._embedding_weights],
reduced_dims=[self._output_dim])
component_logits = self._dropout(component_logits, context)
prior_tanh = mtf.tanh(
mtf.einsum([self._prior_weights, hidden],
reduced_dims=[self._output_dim]) + self._prior_weights_bias)
prior_tanh = self._dropout(prior_tanh, context)
prior_shared_logits = mtf.einsum([self._prior_gates_vector, hidden],
reduced_dims=[self._output_dim])
prior_frequent_vocab_logits = (
mtf.einsum([self._prior_vocab_vector, prior_tanh]) +
prior_shared_logits + self._prior_bias)
prior_logits = mtf.concat([
prior_frequent_vocab_logits,
mtf.ones(
self._mesh,
mtf.Shape([self._rare_vocab_dim]),
dtype=prior_shared_logits.dtype) * prior_shared_logits
], self._vocab_dim.name)
if context.train and self._noise_std_dev != 0.0:
prior_logits += mtf.random_normal(
self._mesh, prior_logits.shape, stddev=self._noise_std_dev)
prior_proportions = self._sigmoid_tree(prior_logits)
logits = mtf.einsum([component_logits, prior_proportions],
reduced_dims=[self._gates_dim])
return self._rearrange_sentinels(logits)
| mesh-master | mesh_tensorflow/transformer/vocab_embeddings.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Dataset utilities for Transformer example.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow_datasets as tfds
class Vocabulary(object):
"""Abstract class for encoding strings as lists of integers.
We will subclass this and wrap multiple implementations of text encoders.
We follow the convention that ids 0=PAD and 1=EOS are reserved.
"""
@property
def vocab_size(self):
"""Number of ids (including 0=PAD and 1=EOS).
Returns:
an integer
"""
raise NotImplementedError("Not implemented.")
def encode(self, s):
"""Encode a python string as a list of integers.
Args:
s: a string
Returns:
a list of integers (not terminated by EOS)
"""
raise NotImplementedError("Not implemented.")
def decode(self, ids):
"""Decode a list of integers to a python string.
Args:
ids: a list of integers (not terminated by EOS)
Returns:
a string
"""
raise NotImplementedError("Not implemented.")
def encode_tf(self, s):
"""Encode a tf.Scalar string to a tf.Tensor.
This will be necessary for on-the-fly tokenization.
Args:
s: a tf.Scalar with dtype tf.string
Returns:
a 1d tf.Tensor with dtype tf.int32
"""
raise NotImplementedError("Not implemented.")
def decode_tf(self, ids):
"""Decode in TensorFlow.
    I don't know when we will use this, but it seems logical to
    have it if we can.
Args:
ids: a 1d tf.Tensor with dtype tf.int32
Returns:
a tf Scalar with dtype tf.string
"""
raise NotImplementedError("Not implemented.")
class TFDSVocabulary(Vocabulary):
"""Wrapper for tensorflow_datasets encoders.
In the TFDS encoders, ID=0 is reserved for padding.
We want to also reserve ID=1 for EOS, so we shift all IDs up by 1.
"""
def __init__(self, tfds_encoder):
self._tfds_encoder = tfds_encoder
@property
def vocab_size(self):
"""Number of ids (including 0=PAD and 1=EOS).
Returns:
an integer
"""
return self._tfds_encoder.vocab_size + 1
def encode(self, s):
"""Encode a python string as a list of integers.
Args:
s: a string
Returns:
a list of integers (not terminated by EOS)
"""
# shift IDs up by 1 to make room for EOS=1 (see class docstring)
return [i + 1 for i in self._tfds_encoder.encode(s)]
def decode(self, ids):
"""Decode a list of integers to a python string.
Args:
ids: a list of integers (not terminated by EOS)
Returns:
a string
"""
return self._tfds_encoder.decode([i - 1 for i in ids])
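# Illustrative example (assumed token ids, not from the original file): if
# the wrapped TFDS encoder maps "hello" to [10, 42], then encode("hello")
# returns [11, 43] (shifted up by one to reserve id 1 for EOS), and
# decode([11, 43]) shifts back down before delegating to the TFDS decoder.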
@gin.configurable
def get_tfds_vocabulary(dataset_name=gin.REQUIRED):
info = tfds.builder(dataset_name).info
# this assumes that either there are no inputs, or that the
# inputs and targets have the same vocabulary.
return TFDSVocabulary(info.features[info.supervised_keys[1]].encoder)
| mesh-master | mesh_tensorflow/transformer/vocabulary.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learning rate schedules for training the transformer.
All learning rate schedule functions must take the scalar named argument `step`
and the numeric argument `total_train_steps`. They must output a tf.Scalar which
is the learning rate for the step.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import gin
import tensorflow.compat.v1 as tf
@gin.configurable
def product_learning_rate(step,
total_train_steps,
factors=gin.REQUIRED,
offset=0):
"""Learning rate is the product of one or more factors.
Takes a list of factors which are either numbers or learning-rate functions
each taking step and total_train_step arguments.
If `offset` is nonzero, then subtract offset from the step and from
total_train_steps before computing the learning rate.
Args:
step: a tf.Scalar
total_train_steps: a number
factors: a list of numbers and/or functions
offset: an optional float
Returns:
a tf.Scalar, the learning rate for the step.
"""
ret = 1.0
for f in factors:
ret *= f(step - offset, total_train_steps - offset) if callable(f) else f
return ret
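# A minimal gin sketch (assumed values, not from the original file) showing
# how factors compose; the constant, the rsqrt warmup and the final linear
# decay are simply multiplied together at every step:
#
#   product_learning_rate.factors = [
#       0.5,
#       @learning_rate_schedules.truncated_rsqrt,
#       @learning_rate_schedules.linear_decay,
#   ]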
@gin.configurable
def linear_decay(step,
total_train_steps,
steps_or_fraction=0.1):
"""Linearly decay the learning rate to 0.
  If steps_or_fraction > 1, it is the absolute number of final steps
  over which to decay. If it is <= 1, then it is a fraction of the total
  number of training steps.
Args:
step: a tf.scalar representing the step we want the learning rate for.
total_train_steps: a number, the total number of training steps.
steps_or_fraction: a number
Returns:
a tf.Scalar, the learning rate for the step.
"""
decay_steps = steps_or_fraction
if steps_or_fraction <= 1:
decay_steps *= total_train_steps
step = tf.cast(step, tf.float32)
return tf.minimum(1.0, (total_train_steps - step) / decay_steps)
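# Worked example (illustrative): with total_train_steps=100000 and the
# default steps_or_fraction=0.1, decay_steps=10000, so the multiplier stays
# at 1.0 until step 90000 and then falls linearly to 0.0 at step 100000.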
@gin.configurable
def linear_warmup(step,
total_train_steps,
steps_or_fraction=10000):
"""Linearly warm up the learning rate from 0.
  If steps_or_fraction > 1, it is the absolute number of initial steps over
  which to warm up. If it is <= 1, then it is a fraction of the total number
  of training steps.
Args:
step: a tf.scalar representing the step we want the learning rate for.
total_train_steps: a number, the total number of training steps.
steps_or_fraction: a number
Returns:
a tf.Scalar, the learning rate for the step.
"""
warmup_steps = steps_or_fraction
if steps_or_fraction <= 1:
warmup_steps *= total_train_steps
step = tf.cast(step, tf.float32)
return tf.minimum(1.0, step / warmup_steps)
@gin.configurable
def truncated_rsqrt(step,
total_train_steps,
warmup_steps=10000):
"""Noam's favorite learning-rate schedule.
  rsqrt(max(step_num, warmup_steps))
Args:
step: a tf.scalar representing the step we want the learning rate for.
total_train_steps: a number, the total number of training steps.
warmup_steps: a number
Returns:
a tf.Scalar, the learning rate for the step.
"""
del total_train_steps
step_num = tf.cast(step, tf.float32)
return tf.math.rsqrt(tf.maximum(step_num, warmup_steps))
@gin.configurable
def constant(step, total_train_steps, value=1.0):
"""Constant learning rate (multiplier).
Args:
step: a tf.Scalar
total_train_steps: a number
value: a number or tf.Scalar
Returns:
a tf.Scalar, the learning rate for the step.
"""
del step, total_train_steps
return value
@gin.configurable
def constant_learning_rate(step, total_train_steps, learning_rate=gin.REQUIRED):
"""Learning rate independent of step.
DEPRECATED: use constant() or pass a float directly to utils.run.learning_rate
Args:
step: a tf.Scalar
total_train_steps: a number
learning_rate: a number or tf.Scalar
Returns:
a tf.Scalar, the learning rate for the step.
"""
del step, total_train_steps
return tf.cast(learning_rate, tf.float32)
@gin.configurable
def linear_decay_learning_rate(step,
total_train_steps,
initial_lr=0.1,
offset=0):
"""Linearly decay the learning rate to 0.
DEPRECATED - use product_learning_rate instead with factors:
[<initial_lr>,
@learning_rate_schedules.linear_decay]
  learning_rate_schedules.linear_decay.steps_or_fraction = 1.0
Args:
step: a tf.scalar representing the step we want the learning rate for.
total_train_steps: a number, the total number of training steps.
initial_lr: initial learning rate. Decays from here.
offset: a number used for finetuning. Starts the learning-rate decay
schedule from this step forwards.
Returns:
a tf.Scalar, the learning rate for the step.
"""
offset = tf.cast(offset, tf.float32)
step = tf.cast(step, tf.float32)
return initial_lr * tf.minimum(1.0, (total_train_steps - step) /
(total_train_steps - offset))
@gin.configurable
def learning_rate_schedule_noam(step,
total_train_steps,
warmup_steps=10000,
linear_decay_fraction=0.1,
multiplier=1.0,
offset=0):
"""Noam's favorite learning-rate schedule.
DEPRECATED - use product_learning_rate instead with factors:
[<multiplier>,
@learning_rate_schedules.truncated_rsqrt,
@learning_rate_schedules.linear_decay]
(rsqrt(max(step_num, warmup_steps))
* multiplier
* min(1.0, (train_steps-step_num)/(train_steps*linear_decay_fraction)))
Args:
step: a tf.scalar representing the step we want the learning rate for.
total_train_steps: a number, the total number of training steps.
warmup_steps: a number
linear_decay_fraction: a number
multiplier: a number
offset: a number used for finetuning. Starts the learning-rate decay
schedule from this step forwards. Prior to this step, the learning rate is
the same as if it were a warmup step.
Returns:
a tf.Scalar, the learning rate for the step.
"""
train_steps = float(total_train_steps) - offset
step_num = tf.cast(step, tf.float32) - offset
learning_rate = tf.math.rsqrt(tf.maximum(step_num, warmup_steps))
learning_rate *= multiplier
if linear_decay_fraction > 0:
learning_rate *= tf.minimum(1.0, (train_steps - step_num) /
(train_steps * linear_decay_fraction))
return learning_rate
@gin.configurable
def slanted_triangular(step,
total_train_steps,
cut_fraction=0.1,
ratio=32,
max_learning_rate=0.01,
start_step=0):
"""Triangular learning rate with short increase and long decay.
TODO(noam): add minimum_value arguments to linear_decay() and linear_warmup()
so that this function can be replaced.
Taken from "Universal Language Model Fine-tuning for Text Classification",
see https://arxiv.org/abs/1801.06146. Default parameters are those specified
in the paper.
Args:
step: a tf.scalar representing the step we want the learning rate for.
total_train_steps: a number, the total number of training steps.
cut_fraction: a number between 0 and 1, fraction of iterations for which we
are increasing the learning rate.
ratio: a number greater than 1, the ratio from the smallest learning rate to
the max learning rate.
max_learning_rate: a number, the highest learning rate reached during
training.
start_step: a number, the step training starts at. Useful when fine-tuning
from a checkpoint that hasn't had its global step reset.
Returns:
a tf.Scalar, the learning rate for the step.
"""
train_steps = float(total_train_steps)
start_step = float(start_step)
step_num = tf.cast(step, tf.float32) - start_step
cut = math.floor(train_steps * cut_fraction)
p = tf.cond(
step_num < cut,
lambda: step_num / cut,
lambda: 1 - (step_num - cut) / (cut * (1 / cut_fraction - 1)),
)
return max_learning_rate * (1 + p * (ratio - 1)) / ratio
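# Worked example (illustrative): with total_train_steps=10000 and the
# defaults (cut_fraction=0.1, ratio=32, max_learning_rate=0.01), cut=1000;
# at step 500, p=0.5 and the rate is 0.01 * (1 + 0.5*31) / 32 ~= 0.0052, the
# peak 0.01 is reached at step 1000, and the rate then decays linearly back
# toward 0.01/32 over the remaining 9000 steps.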
| mesh-master | mesh_tensorflow/transformer/learning_rate_schedules.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Implementation of Flat and ProductKey Memory Layers.
See the paper https://arxiv.org/abs/1907.05242 and the reference
implementation https://github.com/facebookresearch/XLM/blob/master/PKM-layer.ipynb
"""
from typing import Tuple
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow.transformer import transformer
@gin.configurable
class ProductKeyValueMemory(transformer.TransformerLayer):
"""Memory network with product key-value structure."""
def __init__(self,
key_size: int = gin.REQUIRED,
n_keys: int = gin.REQUIRED,
n_heads: int = gin.REQUIRED,
knn: int = gin.REQUIRED):
"""Creates a ProductKeyValueMemory layer."""
self.key_size = key_size
self.n_keys = n_keys
self.n_values = n_keys**2
self.n_heads = n_heads
self.knn = knn
def call(self, context, x: mtf.Tensor) -> mtf.Tensor:
"""Call the layer."""
# Initialize Memory Keys and Values
n_key_dim = mtf.Dimension("n_keys", self.n_keys)
n_value_dim = mtf.Dimension("n_values", self.n_values)
key_dim = mtf.Dimension("key", self.key_size // 2)
value_dim = x.shape.dims[-1]
head_dim = mtf.Dimension("n_heads", self.n_heads)
product_dim = mtf.Dimension("product_key", 2)
keys = mtf.get_variable(
context.mesh,
name="keys",
shape=mtf.Shape([head_dim, product_dim, n_key_dim, key_dim]),
dtype=context.variable_dtype)
values = mtf.layers.embedding_weights(
context.mesh,
vocab_dim=n_value_dim,
output_dim=value_dim,
variable_dtype=context.variable_dtype,
name="values")
# Compute query
new_dims = [head_dim, product_dim, key_dim]
reduce_dims = x.shape.dims[-1:]
query = mtf.layers.dense(
x,
new_dims,
reduced_dims=reduce_dims,
activation=None,
use_bias=True,
variable_dtype=context.variable_dtype,
name="query") # [b, l, h, 2, k]
# Note: We use layer norm instead of batch norm to normalize queries.
# The main advantage is that layer norm works well with the codebase
# whereas the implementation of batch norm requires handling of tf ops.
query = mtf.layers.layer_norm(query, query.shape.dims[-1])
# Retrieve indices and scores
scores, indices = self.get_indices(keys, query) # [b, l, h, k]
scores = mtf.softmax(scores, reduced_dim=scores.shape.dims[-1])
top_values = mtf.gather(values, indices, n_value_dim) # [b, l, h, k, v]
out_values = mtf.einsum([top_values, scores],
reduced_dims=scores.shape.dims[-2:]) # [b, l, v]
return out_values
def get_indices(self, keys: mtf.Tensor,
query: mtf.Tensor) -> Tuple[mtf.Tensor, mtf.Tensor]:
"""Generate score and indices for the query."""
score_shape = mtf.Shape(query.shape.dims[:-1] + keys.shape.dims[2:3])
scores = mtf.einsum([query, keys],
output_shape=score_shape) # [b, l, h, 2, n_keys]
knn_dim = mtf.Dimension("knn", self.knn)
scores, indices = mtf.top_k(scores, score_shape.dims[-1],
knn_dim) # [b, l, h, 2, knn]
# Computes the top cartesian products and their indices
knn_square_dim = mtf.Dimension("knn_square_dim", self.knn**2)
scores1, scores2 = mtf.unstack(scores, scores.shape.dims[-2])
scores2 = mtf.rename_dimension(scores2, "knn", "knn2")
out_shape = mtf.Shape(scores1.shape.dims + scores2.shape.dims[-1:])
all_scores = mtf.add(scores1, scores2, output_shape=out_shape)
all_scores = mtf.replace_dimensions(all_scores, out_shape[-2:],
knn_square_dim)
indices1, indices2 = mtf.unstack(indices, indices.shape.dims[-2])
indices1 = mtf.multiply(indices1, self.n_keys)
indices2 = mtf.rename_dimension(indices2, "knn", "knn2")
all_indices = mtf.add(indices1, indices2, output_shape=out_shape)
all_indices = mtf.replace_dimensions(all_indices, out_shape[-2:],
knn_square_dim)
scores, best_indices = mtf.top_k(all_scores, all_scores.shape.dims[-1],
knn_dim)
return scores, mtf.gather(all_indices, best_indices, knn_square_dim)
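# Illustrative sketch of the product-key trick above (assumed sizes, not
# from the original file): with n_keys=512 the layer addresses
# n_values = 512**2 = 262144 memory slots while scoring only 2*512
# half-keys per head. After taking the top knn candidates in each half, the
# knn**2 candidate sums are formed and the full value index is recovered as
# index1 * n_keys + index2, from which the final top-knn slots are gathered.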
| mesh-master | mesh_tensorflow/transformer/memory_layers.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layers for the Transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import math
import gin
import mesh_tensorflow as mtf
from mesh_tensorflow import layers
from mesh_tensorflow.transformer import attention
from mesh_tensorflow.transformer import transformer
import tensorflow.compat.v1 as tf
@gin.configurable
class DenseReluDense(transformer.TransformerLayer):
"""Two dense layers with ReLU or other activation on hidden layer."""
def __init__(self, hidden_size=4096, dropout_rate=0.0, activation="relu",
use_bias=False):
"""Create a DenseReluDense.
Args:
hidden_size: an integer - size of the hidden layer
dropout_rate: a floating-point number
activation: an activation function or a list of activation functions.
see documentation for mtf.layers.dense_product()
use_bias: a boolean, whether to use bias in the dense layers.
"""
self.hidden_size = hidden_size
self.dropout_rate = dropout_rate
self.activation = activation
self.use_bias = use_bias
def call(self, context, x, losses=None):
"""Call the layer."""
io_channels = x.shape.dims[-1]
hidden_channels = mtf.Dimension("d_ff", self.hidden_size)
h = mtf.layers.dense_product(x,
reduced_dims=x.shape.dims[-1:],
new_dims=hidden_channels,
activation_functions=self.activation,
use_bias=self.use_bias,
variable_dtype=context.variable_dtype,
name="wi",
expert_dims=context.model.ensemble_dims)
if context.train and self.dropout_rate != 0.0:
h = mtf.dropout(h, 1.0 - self.dropout_rate,
noise_shape=h.shape - context.length_dim)
return mtf.layers.dense(h, io_channels,
use_bias=self.use_bias,
activation=None,
variable_dtype=context.variable_dtype,
reduced_dims=h.shape.dims[-1:],
name="wo",
expert_dims=context.model.ensemble_dims)
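# A minimal gin sketch (assumed values, not from the original file): a 4x
# expansion with a gated activation could be requested as
#
#   DenseReluDense.hidden_size = 4096
#   DenseReluDense.activation = ["gelu", "linear"]
#
# since mtf.layers.dense_product creates one hidden projection per listed
# activation function and multiplies them before the output projection "wo".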
def attention_params(context,
kv_dim,
num_heads,
num_memory_heads=0,
shared_kv=False,
no_query=False,
combine_dims=True,
keep_query_heads_dims=False,
fold_scaling_into_initializer=True):
"""Attention Parameters for Transformer Layers.
The num_heads argument indicates the number of read-heads.
For the familiar behavior described in "Attention Is All You Need", set
num_memory_heads=0.
  If num_memory_heads==1, then there is only a single write-head, and multiple
  read-heads. This leads to faster incremental decoding, since the
  recurrent state is smaller.
  If num_memory_heads > 1, then num_memory_heads indicates the number of
  write-heads. A fraction of the read-heads read each write-head.
  num_memory_heads must divide num_heads. This behavior has not yet been tested.
  The no_query flag is set to true when we do not want to create query
  parameters (for the synthesizer model).
Args:
context: a transformer.Context
kv_dim: a dimension (for key and value channels)
num_heads: an integer
num_memory_heads: an optional integer
shared_kv: a boolean
no_query: a boolean
combine_dims: a boolean
keep_query_heads_dims: a boolean
fold_scaling_into_initializer: a boolean
Returns:
an attention.AttentionParams object
"""
if num_heads == 1:
query_heads_dims = None
memory_heads_dims = None
elif num_memory_heads == 0:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = query_heads_dims
elif num_memory_heads == 1:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = None
else:
if num_heads % num_memory_heads != 0:
raise ValueError("num_memory_heads must divide num_heads")
memory_heads_dims = [mtf.Dimension("heads", num_memory_heads)]
query_heads_dims = memory_heads_dims + [
mtf.Dimension("query_heads", num_heads // num_memory_heads)]
return attention.AttentionParams(
context.mesh,
query_input_dim=context.model.model_dim,
memory_input_dim=context.model.model_dim,
output_dim=context.model.model_dim,
key_dim=kv_dim,
value_dim=kv_dim,
query_heads_dims=query_heads_dims,
memory_heads_dims=memory_heads_dims,
variable_dtype=context.variable_dtype,
shared_kv=shared_kv,
no_query=no_query,
ensemble_dim=context.model.ensemble_dim,
combine_dims=combine_dims,
keep_query_heads_dims=keep_query_heads_dims,
fold_scaling_into_initializer=fold_scaling_into_initializer)
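# Illustrative example (not part of the original file): with num_heads=8 and
# num_memory_heads=2, queries get dims [("heads", 2), ("query_heads", 4)]
# while keys/values get only [("heads", 2)], so each write-head is shared by
# four read-heads; with num_memory_heads=1 the keys/values carry no heads
# dimension at all, which shrinks the recurrent state during incremental
# decoding.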
@gin.configurable
class SelfAttention(transformer.TransformerLayer):
"""Multi-head self-attention layer."""
def __init__(self,
num_heads=8,
num_memory_heads=0,
key_value_size=128,
shared_kv=False,
dropout_rate=0.0,
attention_kwargs=None,
relative_attention_type=None,
relative_attention_num_buckets=32,
attention_func=None,
combine_dims=True,
keep_query_heads_dims=False,
fold_scaling_into_initializer=True):
"""Create a SelfAttention Layer.
Args:
num_heads: an integer
num_memory_heads: an optional integer
key_value_size: an integer
shared_kv: a boolean
dropout_rate: a float
attention_kwargs: a dictionary of kwargs for attention.attention
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
attention_func: attention function: None/'hybrid'.
combine_dims: a boolean
keep_query_heads_dims: a boolean
fold_scaling_into_initializer: a boolean
"""
self.num_heads = num_heads
self.num_memory_heads = num_memory_heads
self.key_value_size = key_value_size
self.shared_kv = shared_kv
self.dropout_rate = dropout_rate
self.attention_kwargs = attention_kwargs or {}
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
self.attention_func = attention_func
self.combine_dims = combine_dims
self.keep_query_heads_dims = keep_query_heads_dims
self.fold_scaling_into_initializer = fold_scaling_into_initializer
def layer_output_from_attention_output(self, context, attention_output,
losses):
return attention_output
def expected_attention_output_shape(self, x, params):
if self.keep_query_heads_dims:
return mtf.Shape(x.shape[:-1] + params.query_heads_dims + x.shape[-1:])
return x.shape
def attention_kwargs_from_context(self, context):
kwargs = copy.copy(self.attention_kwargs)
kwargs["dropout_rate"] = self.dropout_rate if context.train else 0.0
if "dropout_broadcast_dims" not in kwargs:
kwargs["dropout_broadcast_dims"] = [context.length_dim]
return kwargs
def make_params(self, context):
return attention_params(
context=context,
kv_dim=self.kv_dim,
num_heads=self.num_heads,
num_memory_heads=self.num_memory_heads,
shared_kv=self.shared_kv,
combine_dims=self.combine_dims,
keep_query_heads_dims=self.keep_query_heads_dims,
fold_scaling_into_initializer=self.fold_scaling_into_initializer)
def call(self, context, x, losses=None):
"""Call the layer."""
params = self.make_params(context)
q = params.compute_q(x)
memory_length = self.memory_length(context)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
if self.shared_kv:
kv = params.compute_kv(m)
else:
k = params.compute_k(m)
v = params.compute_v(m)
if context.mode == "incremental":
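# Incremental decoding: the cached k/v (or kv) state spans all memory
# positions; multiplying by (1 - one_hot) clears the slot at the current
# decoding position, and the freshly computed projection is written there.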
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
if self.shared_kv:
old_kv = context.get_states(1)
kv = old_kv * inv_one_hot + kv * one_hot
else:
old_k, old_v = context.get_states(2)
k = old_k * inv_one_hot + k * one_hot
v = old_v * inv_one_hot + v * one_hot
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([kv] if self.shared_kv else [k, v])
if self.shared_kv:
k = kv
v = kv
o = self.attention_fn(
q, k, v, context=context, memory_length_dim=memory_length,
key_dim=self.kv_dim, value_dim=self.kv_dim,
bias=self.compute_bias(context, memory_position, x,
params.query_heads_dims, q),
**self.attention_kwargs_from_context(context))
attention_output_shape = self.expected_attention_output_shape(x, params)
attention_output = params.compute_output(
o, output_shape=attention_output_shape)
return self.layer_output_from_attention_output(context, attention_output,
losses)
def compute_bias(self, context, memory_position, x, heads_dims, q):
"""Compute attention bias.
Args:
context: a transformer.Context
memory_position: an int32 tensor containing memory_length dimension.
x: a Tensor - the query antecedent - required for relative attention
heads_dims: a list of dimensions
q: a Tensor - the queries - required for contextual relative attention
Returns:
a Tensor or None
"""
min_relative_position = self.min_relative_position(context) # pylint: disable=assignment-from-none
max_relative_position = self.max_relative_position(context) # pylint: disable=assignment-from-none
biases = []
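# relative_position[q, m] = m - q: positive when the attended-to (memory)
# position lies after the attending (query) position.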
relative_position = memory_position - context.position
if min_relative_position is not None:
visible = mtf.greater_equal(relative_position, min_relative_position)
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if max_relative_position is not None:
visible = mtf.less_equal(relative_position, max_relative_position)
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if context.read_priority is not None:
visible = mtf.greater_equal(
context.read_priority,
mtf.layers.rename_length_to_memory_length(context.write_priority))
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
sequence_id = None
# Subsequence id should only be set if we are in the decoder and have
# multiple targets per input. This will allow each sub-target to only attend
# to itself.
if isinstance(context.subsequence_id, mtf.Tensor):
sequence_id = context.subsequence_id
elif isinstance(context.sequence_id, mtf.Tensor):
sequence_id = context.sequence_id
if (sequence_id is not None and context.length_dim in sequence_id.shape):
visible = mtf.equal(
sequence_id,
self.rename_length_to_memory_length(sequence_id, context))
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if self.relative_attention_type is not None:
buckets_dim = mtf.Dimension(
"buckets", self.relative_attention_num_buckets)
bidirectional = not context.model.fully_autoregressive
rp_bucket = _relative_position_bucket(
relative_position,
bidirectional=bidirectional,
num_buckets=buckets_dim.size)
if (self.relative_attention_type == "bias" or
self.relative_attention_type == "bias_shared"):
bias_shape = context.model.ensemble_dims + heads_dims + [buckets_dim]
values = None
cache = self.relative_attention_type == "bias_shared"
if cache:
cache_key = ("self_attention_bias",
min_relative_position,
max_relative_position,
tuple(heads_dims))
if cache_key in context.cache:
values = context.cache[cache_key]
if values is None:
values = mtf.get_variable(
context.mesh, "relative_attention_bias",
bias_shape, dtype=context.variable_dtype)
if cache:
context.cache[cache_key] = values
elif self.relative_attention_type == "contextual":
values = layers.dense(
q, reduced_dims=[self.kv_dim],
new_dims=[buckets_dim],
variable_dtype=context.variable_dtype,
name="relative_attention_ak",
use_bias=False,
expert_dims=context.model.ensemble_dims + heads_dims)
else:
raise ValueError("unrecognized relative_attention_type \"%s\"" %
self.relative_attention_type)
biases.append(mtf.gather(values, rp_bucket, buckets_dim))
return mtf.add_n(biases) if biases else None
@property
def kv_dim(self):
return mtf.Dimension("d_kv", self.key_value_size)
def memory_length(self, context):
return mtf.Dimension("memory_length", context.length_dim.size)
def rename_length_to_memory_length(self, x, context):
return mtf.replace_dimensions(
x, context.length_dim, self.memory_length(context))
def min_relative_position(self, context):
return None
def max_relative_position(self, context):
return None
@property
def attention_fn(self):
if self.attention_func == "hybrid":
return attention.hybrid_attention
else:
return attention.attention
@gin.configurable
class ExpertsSelfAttention(SelfAttention):
"""Expert-layers for SelfAttention computations."""
def __init__(self,
num_experts=16,
loss_coef=1e-2,
group_size=1024,
capacity_factor_train=1.25,
capacity_factor_eval=2.0,
moe_gating="switch",
min_expert_capacity=4,
switch_policy_train="input_jitter",
switch_policy_eval="input_jitter",
switch_dropout=0.0,
switch_temperature=1.0,
switch_jitter=1e-2,
switch_top_k=4,
hidden_size=3072,
use_experts_attention=True,
**kwargs):
super(ExpertsSelfAttention, self).__init__(**kwargs)
self._hparams = mtf.transformer.moe.HParams(
moe_gating=moe_gating,
num_experts=num_experts,
loss_coef=loss_coef,
group_size=group_size,
min_expert_capacity=min_expert_capacity,
capacity_factor_train=capacity_factor_train,
capacity_factor_eval=capacity_factor_eval,
switch_policy_train=switch_policy_train,
switch_policy_eval=switch_policy_eval,
switch_dropout=switch_dropout,
switch_temperature=switch_temperature,
switch_jitter=switch_jitter,
switch_top_k=switch_top_k,
hidden_size=hidden_size,
use_experts_attention=use_experts_attention)
def make_params(self, context):
num_heads = self.num_heads
num_memory_heads = self.num_memory_heads
if num_heads == 1:
query_heads_dims = None
memory_heads_dims = None
elif num_memory_heads == 0:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = query_heads_dims
elif num_memory_heads == 1:
query_heads_dims = [mtf.Dimension("heads", num_heads)]
memory_heads_dims = None
else:
if num_heads % num_memory_heads != 0:
raise ValueError("num_memory_heads must divide num_heads")
memory_heads_dims = [mtf.Dimension("heads", num_memory_heads)]
query_heads_dims = memory_heads_dims + [
mtf.Dimension("query_heads", num_heads // num_memory_heads)]
return attention.ExpertsAttentionParams(
context.mesh,
query_input_dim=context.model.model_dim,
memory_input_dim=context.model.model_dim,
output_dim=context.model.model_dim,
key_dim=self.kv_dim,
value_dim=self.kv_dim,
query_heads_dims=query_heads_dims,
memory_heads_dims=memory_heads_dims,
variable_dtype=context.variable_dtype,
shared_kv=self.shared_kv,
no_query=False,
ensemble_dim=context.model.ensemble_dim,
combine_dims=self.combine_dims,
keep_query_heads_dims=self.keep_query_heads_dims,
fold_scaling_into_initializer=self.fold_scaling_into_initializer,
context=context,
experts_hparams=self._hparams)
@gin.configurable
class ExpertsEncDecAttention(ExpertsSelfAttention):
"""Expert-layers for EncDecAttention computations."""
def __init__(self, relative_attention_type=None, **kwargs):
super(ExpertsEncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
return enc_dec_attention(self, self._get_memory_antecedent(context),
context, x, losses)
@gin.configurable
class Synthesizer(SelfAttention):
"""Multi-head Synthesizer layer https://arxiv.org/abs/2005.00743."""
def __init__(self,
num_heads=8,
num_memory_heads=0,
key_value_size=128,
shared_kv=False,
dropout_rate=0.0,
attention_kwargs=None,
relative_attention_type=None,
relative_attention_num_buckets=32,
attention_func=None,
combine_dims=True,
keep_query_heads_dims=False,
synthesize_mode="random_plus_alpha",
fold_scaling_into_initializer=True,
**kwargs):
"""Create a Synthesizer Layer.
Args:
num_heads: an integer
num_memory_heads: an optional integer
key_value_size: an integer
shared_kv: a boolean
dropout_rate: a float
attention_kwargs: a dictionary of kwargs for attention.attention
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
attention_func: attention function: None/'hybrid'.
combine_dims: a boolean
keep_query_heads_dims: a boolean
synthesize_mode: a string to select synthesizer variant
fold_scaling_into_initializer: a boolean
**kwargs: additional constructor params
"""
super(Synthesizer, self).__init__(**kwargs)
self.num_heads = num_heads
self.num_memory_heads = num_memory_heads
self.key_value_size = key_value_size
self.shared_kv = shared_kv
self.dropout_rate = dropout_rate
self.attention_kwargs = attention_kwargs or {}
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
self.attention_func = attention_func
self.combine_dims = combine_dims
self.keep_query_heads_dims = keep_query_heads_dims
self.synthesize_mode = synthesize_mode
self.fold_scaling_into_initializer = fold_scaling_into_initializer
self.no_query = False
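# synthesize_mode selects which projections exist: "plus" variants keep
# separate k/v projections, "minus" variants share k and v but keep the
# query projection, and the remaining variants share k/v and drop the query
# entirely.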
if "plus" in self.synthesize_mode:
self.shared_kv = False
self.no_query = False
elif "minus" in self.synthesize_mode:
# We still keep the query as first projection
self.shared_kv = True
self.no_query = False
else:
self.shared_kv = True
self.no_query = True
def make_params(self, context):
return attention_params(
context=context,
kv_dim=self.kv_dim,
num_heads=self.num_heads,
num_memory_heads=self.num_memory_heads,
shared_kv=self.shared_kv,
no_query=self.no_query,
fold_scaling_into_initializer=self.fold_scaling_into_initializer)
def call(self, context, x, losses=None):
"""Call the layer."""
params = self.make_params(context)
memory_length = self.memory_length(context)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
if self.shared_kv:
kv = params.compute_kv(m)
else:
k = params.compute_k(m)
v = params.compute_v(m)
if self.no_query:
# we don't use q for some synthesizer modes that don't use QKV at all.
q = x
else:
q = params.compute_q(x)
if self.shared_kv:
k = kv
v = kv
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
old_k, old_v = context.get_states(2)
k = old_k * inv_one_hot + k * one_hot
v = old_v * inv_one_hot + v * one_hot
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([k, v])
o = attention.synthetic_attention(q, k, v, memory_length,
self.kv_dim, self.kv_dim,
self.compute_bias(context,
memory_position,
x,
params.query_heads_dims,
q),
synthesize=True,
synthesize_mode=self.synthesize_mode,
context=context,
**self.attention_kwargs_from_context(
context))
attention_output_shape = self.expected_attention_output_shape(x, params)
attention_output = params.compute_output(
o, output_shape=attention_output_shape)
return self.layer_output_from_attention_output(context, attention_output,
losses)
@gin.configurable
def relative_position_spans(context, num_sentinels=gin.REQUIRED):
"""Compute relative positions between inputs and targets.
Used by enc_dec_attention_bias.
Assumes that inputs and targets were generated by a span-filling objective:
The inputs consist of the original text with some spans removed and replaced
by single sentinels.
The targets consist of the dropped spans, each preceded by a single sentinel.
Sentinels are the last tokens in the vocabulary.
e.g.
inputs: A B C <S> F G H <S>
shifted-targets: <BOS> <S> D E <S> I J K
Relative positions are computed by identifying a target token with the
corresponding sentinel in the input and returning the distance between these
two tokens in the input.
Target tokens which precede all sentinels get identified with the beginning of
the input. So if we apply this to a problem with no sentinels, all target
tokens will be identified with the beginning of the input. We assume this is
the case during incremental decoding, so this code will not work properly to
incrementally decode a problem with sentinels. This may not be an issue,
since the span-filling objective is primarily used for unsupervised
pre-training.
Args:
context: a Context
num_sentinels: an integer. Should have the same value as
SentencePieceVocabulary.extra_ids
Returns:
a Tensor
"""
decoder_id = context.inputs
encoder_id = context.encoder_inputs
decoder_length = context.length_dim
encoder_length = context.encoder_length_dim
mesh = encoder_id.mesh
encoder_pos = mtf.range(mesh, encoder_length, tf.int32)
if decoder_length not in decoder_id.shape.dims:
# we are doing incremental decoding.
# Map the target token to the beginning of the input.
dec_to_enc_pos = 0
else:
vocab_size = context.model.input_vocab_size_unpadded
def sentinel_mask(t):
return mtf.cast(mtf.greater_equal(
t, vocab_size - num_sentinels), tf.int32)
decoder_is_sentinel = sentinel_mask(decoder_id)
encoder_is_sentinel = sentinel_mask(encoder_id)
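# Cumulative sums of the sentinel indicators assign each token a segment id;
# the reduce_sum further below then counts, for each target token, how many
# encoder positions fall in earlier segments, which is the index of the
# matching sentinel in the input.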
encoder_segment_id = mtf.cumsum(encoder_is_sentinel, encoder_length)
decoder_segment_id = mtf.cumsum(decoder_is_sentinel, decoder_length)
encoder_sequence_id = context.encoder_sequence_id
decoder_sequence_id = context.sequence_id
if encoder_sequence_id is not None:
# distinguish segments from different sequences
multiplier = max(encoder_length.size, decoder_length.size)
encoder_segment_id += encoder_sequence_id * multiplier
decoder_segment_id += decoder_sequence_id * multiplier
dec_to_enc_pos = mtf.reduce_sum(
mtf.cast(mtf.less(encoder_segment_id, decoder_segment_id), tf.int32),
reduced_dim=encoder_length)
return dec_to_enc_pos - encoder_pos
@gin.configurable
def enc_dec_attention_bias(layer,
context,
heads_dims,
relative_position_fn=relative_position_spans):
"""Compute bias term for encoder-decoder attention.
Args:
layer: a TransformerLayer
context: a Context
heads_dims: a list of Dimension
relative_position_fn: an optional function
Returns:
a Tensor
"""
biases = []
if context.encoder_sequence_id and context.sequence_id:
visible = mtf.equal(context.sequence_id, context.encoder_sequence_id)
biases.append(attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype))
if (layer.relative_attention_type == "bias" or
layer.relative_attention_type == "bias_shared"):
buckets_dim = mtf.Dimension(
"buckets", layer.relative_attention_num_buckets)
bias_shape = context.model.ensemble_dims + heads_dims + [buckets_dim]
values = None
cache = layer.relative_attention_type == "bias_shared"
if cache:
cache_key = ("enc_dec_relative_attention_bias", tuple(heads_dims))
if cache_key in context.cache:
values = context.cache[cache_key]
if values is None:
values = mtf.get_variable(
context.mesh, "enc_dec_relative_attention_bias",
bias_shape, dtype=context.variable_dtype)
if cache:
context.cache[cache_key] = values
rel_pos = relative_position_fn(context)
rp_bucket = _relative_position_bucket(
rel_pos,
bidirectional=True,
num_buckets=buckets_dim.size)
biases.append(mtf.gather(values, rp_bucket, buckets_dim))
elif layer.relative_attention_type is not None:
raise ValueError("unrecognized relative_attention_type \"%s\"" %
layer.relative_attention_type)
return mtf.add_n(biases) if biases else None
@gin.configurable
def enc_dec_attention(self_attention_layer, memory_antecedent, context, x,
losses, attention_fn=attention.attention):
"""Multi-head attention over the encoder outputs."""
memory_input_dim = memory_antecedent.shape[-1]
if memory_input_dim != context.model.model_dim:
raise NotImplementedError(
"TODO(noam): support different model_dim in encoder and decoder.")
params = self_attention_layer.make_params(context)
q = params.compute_q(x)
if context.mode == "incremental":
k, v, memory_length = context.get_constant_state()
else:
m = memory_antecedent
if self_attention_layer.shared_kv:
kv = params.compute_kv(m)
k = kv
v = kv
else:
k = params.compute_k(m)
v = params.compute_v(m)
memory_length, = [d for d in m.shape.dims if d.name == "memory_length"]
if context.mode == "first_part":
context.record_constant_state((k, v, memory_length))
bias = enc_dec_attention_bias(self_attention_layer,
context,
params.query_heads_dims)
a = attention_fn(
q, k, v, memory_length, self_attention_layer.kv_dim,
self_attention_layer.kv_dim, bias,
context=context,
**self_attention_layer.attention_kwargs_from_context(context))
attention_output_shape = self_attention_layer.expected_attention_output_shape(
x, params)
attention_output = params.compute_output(
a, output_shape=attention_output_shape)
return self_attention_layer.layer_output_from_attention_output(
context, attention_output, losses)
@gin.configurable
class EncDecAttention(SelfAttention):
"""Multi-head attention over encoder output."""
def __init__(self, relative_attention_type=None, **kwargs):
super(EncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
return enc_dec_attention(self, self._get_memory_antecedent(context),
context, x, losses,
attention_fn=self.attention_fn)
@property
def attention_fn(self):
return attention.attention
@gin.configurable
class TransparentEncDecAttention(EncDecAttention):
"""Transparent multi-head attention over encoder output."""
def __init__(self,
layers_per_encoder_module=gin.REQUIRED,
layers_per_decoder_module=gin.REQUIRED,
encoder_num_modules=gin.REQUIRED,
decoder_num_modules=gin.REQUIRED,
dropout_rate=0.0,
**kwargs):
"""Create a transparent attention EncDec Layer.
Args:
layers_per_encoder_module: positive integer telling how many layers are in
each repeated module in the encoder
layers_per_decoder_module: positive integer telling how many layers are in
each repeated module in the decoder
encoder_num_modules: positive integer of how many repeated modules there
are in the encoder
decoder_num_modules: positive integer of how many repeated modules there
are in the decoder
dropout_rate: positive float, the dropout rate for the matrix relating
encoder outputs to decoder inputs
**kwargs: additional constructor params
"""
super(TransparentEncDecAttention, self).__init__(**kwargs)
self.layers_per_encoder_module = layers_per_encoder_module
self.layers_per_decoder_module = layers_per_decoder_module
self.encoder_num_modules = encoder_num_modules
self.decoder_num_modules = decoder_num_modules
self.dropout_rate = dropout_rate
def _get_memory_antecedent(self, context):
decoder_module_index = context.layer_index // self.layers_per_decoder_module
decoder_inputs = self._get_decoder_inputs(context)
return decoder_inputs[decoder_module_index]
def _get_decoder_inputs(self, context):
"""Computes the inputs to the decoder when using transparent attention.
We must cache on the context in order to ensure that we are not replicating
variables when the layer's call function is called in different tf variable
scopes.
Args:
context: a Context
Returns:
a list containing `self.num_decoder_modules` of tensors with shape
[<batch_dims>, length_dim, output_vocab_dim]
"""
if hasattr(context, "decoder_layers_per_module"):
return context.decoder_layers_per_module
encoder_layer_outputs = [
mtf.layers.rename_length_to_memory_length(output)
for output in context.encoder_layer_outputs
]
layers_per_module = self.layers_per_encoder_module
encoder_module_outputs_dim = mtf.Dimension(
"encoder_module_outputs", size=self.encoder_num_modules + 1)
decoder_module_inputs_dim = mtf.Dimension(
"decoder_module_inputs", size=self.decoder_num_modules)
encoder_module_outputs = mtf.stack(
[encoder_layer_outputs[0]] +
encoder_layer_outputs[layers_per_module::layers_per_module],
dim_name="encoder_module_outputs")
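# Each decoder module receives a softmax-weighted mixture (via w below) of
# the encoder embedding plus the output of every encoder module.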
stddev = 1.0
if not mtf.layers.unit_scaling_convention():
stddev *= encoder_module_outputs_dim.size ** -0.5
w = mtf.get_variable(
context.mesh,
"w",
mtf.Shape([encoder_module_outputs_dim, decoder_module_inputs_dim]),
initializer=tf.random_normal_initializer(stddev=stddev),
dtype=context.variable_dtype)
if context.train and self.dropout_rate != 0.0:
w = mtf.dropout(w, 1.0 - self.dropout_rate)
s = mtf.softmax(w, reduced_dim=encoder_module_outputs_dim)
z = mtf.layers.us_einsum([s, encoder_module_outputs],
reduced_dims=[encoder_module_outputs_dim])
input_per_decoder = mtf.split(
z,
split_dim=decoder_module_inputs_dim,
num_or_size_splits=decoder_module_inputs_dim.size)
context.decoder_layers_per_module = [
mtf.reshape(inpt, z.shape.dims[1:]) for inpt in input_per_decoder
]
return context.decoder_layers_per_module
@gin.configurable
class LocalSelfAttention(SelfAttention):
"""Multi-head local self-attention layer."""
def __init__(self,
radius=128,
num_heads=8,
num_memory_heads=0,
key_value_size=128,
shared_kv=False,
dropout_rate=0.0,
attention_kwargs=None,):
super(LocalSelfAttention, self).__init__(
num_heads,
num_memory_heads,
key_value_size,
shared_kv,
dropout_rate,
attention_kwargs)
self.radius = radius
def call(self, context, x, losses=None):
"""Call the layer."""
params = self.make_params(context)
q = params.compute_q(x)
if self.shared_kv:
kv = params.compute_kv(x)
k = kv
v = kv
else:
k = params.compute_k(x)
v = params.compute_v(x)
if context.mode == "incremental":
if self.shared_kv:
prev_kv, = context.get_states(1)
else:
prev_k, prev_v = context.get_states(2)
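# The recurrent state is a rolling window over the last `radius` positions;
# mod(position, radius) picks the slot in that window to overwrite with the
# current step's keys/values.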
current_position = mtf.equal(
mtf.range(context.mesh, self.window_dim, dtype=tf.int32),
mtf.mod(context.position, self.radius))
if self.shared_kv:
kv = mtf.where(current_position, kv, prev_kv,
output_shape=prev_kv.shape)
k = kv
v = kv
context.record_new_states([kv])
else:
k = mtf.where(current_position, params.compute_k(x), prev_k,
output_shape=prev_k.shape)
v = mtf.where(current_position, params.compute_v(x), prev_v,
output_shape=prev_v.shape)
context.record_new_states([k, v])
window_pos = mtf.range(context.mesh, self.window_dim, tf.int32)
visible = mtf.greater_equal(context.position, window_pos)
bias = attention.visibility_mask_to_attention_bias(
visible, context.activation_dtype)
o = attention.attention(
q,
k,
v,
self.window_dim,
self.kv_dim,
self.kv_dim,
bias,
**self.attention_kwargs_from_context(context))
elif context.length_dim.size <= max(256, self.radius * 4):
# nothing fancy - just do full attention and mask
memory_length = self.rename_length_to_memory_length(
context.position, context)
o = attention.attention(
q, self.rename_length_to_memory_length(k, context),
self.rename_length_to_memory_length(v, context),
self.memory_length(context), self.kv_dim, self.kv_dim,
self.compute_bias(context, memory_length, x, params.query_heads_dims,
q), **self.attention_kwargs_from_context(context))
else:
# fancy local attention algorithm
o = attention.local_attention_1d(
q=q,
k=k,
v=None if self.shared_kv else v,
length_dim=context.length_dim,
key_dim=self.kv_dim,
value_dim=self.kv_dim,
length_dim_num_splits=1, # TODO(noam): look at the layout
autoregressive=context.model.fully_autoregressive,
radius=self.radius,
sequence_id=context.sequence_id,
write_priority=context.write_priority,
read_priority=context.read_priority,
attention_kwargs=self.attention_kwargs_from_context(context))
if context.mode == "first_part":
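# Build the initial rolling-window state from the partial sequence: select
# the last `radius` positions before initial_position and einsum them into a
# window-shaped state, mirroring the circular-buffer layout used in
# incremental mode.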
window_pos = mtf.range(context.mesh, self.window_dim, tf.int32)
pos = mtf.range(context.mesh, context.length_dim, tf.int32)
select_recent = mtf.cast(
mtf.equal(mtf.mod(pos, self.radius), window_pos), x.dtype)
select_recent *= mtf.cast(
mtf.less(pos, context.initial_position), x.dtype)
select_recent *= mtf.cast(
mtf.greater_equal(
pos, context.initial_position - self.radius), x.dtype)
state_shape = (k.shape - [context.length_dim, self.kv_dim]
+ [self.window_dim, self.kv_dim])
k_state = mtf.einsum(
[k, select_recent], output_shape=state_shape,
reduced_dims=[context.length_dim])
context.new_states.append(k_state)
if not self.shared_kv:
v_state = mtf.einsum(
[v, select_recent], output_shape=state_shape,
reduced_dims=[context.length_dim])
context.new_states.append(v_state)
return params.compute_output(o, output_shape=x.shape)
def min_relative_position(self, context):
return 1 - self.radius
def max_relative_position(self, context):
return None if context.model.fully_autoregressive else self.radius
@property
def window_dim(self):
return mtf.Dimension("window", self.radius)
def _relative_position_bucket(relative_position,
bidirectional=True,
num_buckets=32,
max_distance=128):
"""Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are
invalid.
We use smaller buckets for small absolute relative_position and larger buckets
for larger absolute relative_positions. All relative positions >=max_distance
map to the same bucket. All relative positions <=-max_distance map to the
same bucket. This should allow for more graceful generalization to longer
sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += mtf.to_int32(mtf.less(n, 0)) * num_buckets
n = mtf.abs(n)
else:
n = mtf.maximum(n, 0)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = mtf.less(n, max_exact)
val_if_large = max_exact + mtf.to_int32(
mtf.log(mtf.to_float(n) / max_exact)
/ math.log(max_distance / max_exact) * (num_buckets - max_exact))
val_if_large = mtf.minimum(val_if_large, num_buckets - 1)
ret += mtf.where(is_small, n, val_if_large)
return ret
@gin.configurable
class TalkingHeadsSelfAttention(SelfAttention):
"""Experimental Talking-heads self-attention layer.
https://arxiv.org/abs/2003.02436
This is a variant where there are (optionally) extra learned linear
projections on the attention logits and attention weights. These linear
projections are across attention heads (but not across different query or
memory positions).
The user specifies three sets of mtf.Dimension:
key_heads_dims: "heads" dimensions for the queries, keys and their dot-product
softmax_heads_dims: "heads" dimensions for the logits and their softmax
value_heads_dims: "heads" dimensions for the values
If these three sets are identical, then this layer is identical to ordinary
multi-head attention.
If key_heads_dims != softmax_heads_dims, then a learned linear projection
is applied to compute the logits. This projection reduces out dimensions
in (key_heads_dims-softmax_heads_dims) and inserts dimensions in
(softmax_heads_dims-key_heads_dims).
If softmax_heads_dims != value_heads_dims, then a learned linear
projection is applied to the weights (the output of the softmax). This
projection reduces out dimensions in (softmax_heads_dims-value_heads_dims)
and inserts dimensions in (value_heads_dims-softmax_heads_dims).
TPU performance is lousy due to small matrix sizes.
Early experiments show that quality can be significantly better than baseline.
An additional supported option is dynamic talking-heads projections where the
talking-heads projections themselves contain terms that depend on the inputs.
Each of the logits-projection and the weights-projection can depend on either
or both of the query-antecedent X or the memory-antecedent Y. This gives
a total of four dynamic projections which can be enabled individually.
To enable, set the dynamic_projections argument to a list containing
some or all of the strings ["x2l", "m2l", "x2w", "m2w"].
Example:
TalkingHeadsSelfAttention.key_heads_dims = [("key_heads", 12)]
TalkingHeadsSelfAttention.softmax_heads_dims = [("heads", 32)]
TalkingHeadsSelfAttention.value_heads_dims = [("value_heads", 12)]
TalkingHeadsSelfAttention.key_size = 64
TalkingHeadsSelfAttention.value_size = 64
d_model = 1024
We start with an input x
x: [length, d_model]
The input is first transformed into queries, keys and values:
queries: [query_length, key_heads, key_size]
keys: [memory_length, key_heads, key_size]
values: [memory_length, value_heads, value_size]
queries and keys get einsummed to produce a tensor p:
p: [query_length, memory_length, key_heads]
p gets linearly transformed with a learned weight matrix with shape
[key_heads, softmax_heads] to produce logits
logits: [query_length, memory_length, softmax_heads]
take the softmax of the logits (across memory_length) to produce h
h: [query_length, memory_length, softmax_heads]
Now a learned linear projection with shape [softmax_heads, value_heads]
on h produces the weights.
weights: [query_length, memory_length, value_heads]
As usual, we einsum the weights with the values.
o: [query_length, value_heads, value_size]
Finally, project o back to the desired output dimension
y: [query_length, d_model]
Also, this doesn't model-parallelize trivially. To model-parallelize, you
should add one heads-dimension that is present in all of key_heads_dims,
softmax_heads_dims, value_heads_dims. Call this dimension "heads" and shard
that over multiple devices. Then also include additional different
heads-dimension for the keys, softmax, and values.
"""
def __init__(self, # pylint: disable=super-init-not-called
key_heads_dims=(("heads", 12),),
softmax_heads_dims=(("heads", 12),),
value_heads_dims=(("heads", 12),),
key_size=64,
value_size=64,
dropout_rate=0.0,
relative_attention_type=None,
relative_attention_num_buckets=32,
dynamic_projections=None,
dynamic_projections_init_scale=1e-2):
"""Create a SelfAttention Layer.
Args:
key_heads_dims: a list of mtf.Dimension or (name, size) pairs
softmax_heads_dims: a list of mtf.Dimension or (name, size) pairs
value_heads_dims: a list of mtf.Dimension or (name, size) pairs
key_size: an integer
value_size: an integer
dropout_rate: a float
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
dynamic_projections: an optional sequence containing a subset of
["x2l", "m2l", "x2w", "m2w"] (see class comments)
dynamic_projections_init_scale: a float - initializer variance scaling
factor for these dynamic projections. We have observed learning
difficulties when this value is too large.
"""
self.key_heads_dims = [mtf.convert_to_dimension(d) for d in key_heads_dims]
self.softmax_heads_dims = [
mtf.convert_to_dimension(d) for d in softmax_heads_dims]
self.value_heads_dims = [
mtf.convert_to_dimension(d) for d in value_heads_dims]
self.key_dim = mtf.Dimension("d_k", key_size)
self.value_dim = mtf.Dimension("d_v", value_size)
self.dropout_rate = dropout_rate
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dynamic_projections = dynamic_projections or []
self.dynamic_projections_init_scale = dynamic_projections_init_scale
def compute_q(self, context, x):
# Scale the initializer variance by 1.0/d_k
# This scales the initializer by rsqrt(d_k)
init_scale = 1.0
if not mtf.layers.unit_scaling_convention():
init_scale /= self.key_dim.size
kernel_initializer = mtf.layers.VarianceScalingInitializer(init_scale)
return mtf.layers.dense(
x, reduced_dims=[context.model.model_dim],
new_dims=self.key_heads_dims + [self.key_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="q", expert_dims=context.model.ensemble_dims,
kernel_initializer=kernel_initializer)
def compute_k(self, context, x):
return mtf.layers.dense(
x, reduced_dims=[context.model.model_dim],
new_dims=self.key_heads_dims + [self.key_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="k", expert_dims=context.model.ensemble_dims)
def compute_v(self, context, x):
return mtf.layers.dense(
x, reduced_dims=[context.model.model_dim],
new_dims=self.value_heads_dims + [self.value_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="v", expert_dims=context.model.ensemble_dims)
def compute_y(self, context, u):
return mtf.layers.dense(
u, reduced_dims=self.value_heads_dims + [self.value_dim],
new_dims=[context.model.model_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="y", expert_dims=context.model.ensemble_dims)
def call(self, context, x, losses=None):
"""Call the layer."""
memory_length = self.memory_length(context)
q = self.compute_q(context, x)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
k = self.compute_k(context, m)
v = self.compute_v(context, m)
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
old_k, old_v = context.get_states(2)
k = old_k * inv_one_hot + k * one_hot
v = old_v * inv_one_hot + v * one_hot
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([k, v])
bias = self.compute_bias(context, memory_position, x,
self.softmax_heads_dims, q)
return self.attention_internal(context, x, m, q, k, v, memory_length, bias)
def attention_internal(self, context, x, m, q, k, v, memory_length, bias):
p = mtf.layers.us_einsum([q, k], reduced_dims=[self.key_dim])
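# p has shape [..., query_length, memory_length, key_heads]; the two
# talking_heads() calls below project across the heads dimensions only,
# turning key_heads into softmax_heads (logits) and softmax_heads into
# value_heads (weights).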
logits = self.talking_heads(
context, p, "logits", self.key_heads_dims, self.softmax_heads_dims,
dynamic_projections_from=(
([x] if "x2l" in self.dynamic_projections else []) +
([m] if "m2l" in self.dynamic_projections else [])))
if bias is not None:
logits += bias
h = mtf.softmax(logits, memory_length)
weights = self.talking_heads(
context, h, "weights", self.softmax_heads_dims, self.value_heads_dims,
dynamic_projections_from=(
([x] if "x2w" in self.dynamic_projections else []) +
([m] if "m2w" in self.dynamic_projections else [])))
# TODO(noam): make dropout_broadcast_dims configurable
dropout_broadcast_dims = [context.length_dim]
weights = mtf.dropout(
weights, rate=self.dropout_rate if context.train else 0.0,
noise_shape=weights.shape - dropout_broadcast_dims)
u = mtf.einsum([weights, v], reduced_dims=[memory_length])
return self.compute_y(context, u)
def talking_heads(
self, context, inp, name, input_heads_dims, output_heads_dims,
dynamic_projections_from=None):
shared_dims = [d for d in input_heads_dims if d in output_heads_dims]
reduced_dims = [d for d in input_heads_dims if d not in output_heads_dims]
new_dims = [d for d in output_heads_dims if d not in input_heads_dims]
if not (reduced_dims or new_dims):
# Output dimensions are same as input dimensions. Return the input
return inp
elif dynamic_projections_from:
# There are one or more dynamic talking-heads-projections
with tf.variable_scope(name):
# static projection - this is the same as the static projection in the
# "else" case below. We create the weight matrix with get_variable
# instead of calling mtf.layers.dense() so that we can fold the
# static projection into one of the dynamic projections.
static_p_initializer = mtf.layers.VarianceScalingInitializer()(
reduced_dims, new_dims)
static_p_shape = (
context.model.ensemble_dims + shared_dims + reduced_dims + new_dims)
static_p = mtf.get_variable(inp.mesh,
"kernel",
static_p_shape,
initializer=static_p_initializer,
dtype=context.variable_dtype)
ps = []
for i, dp_from in enumerate(dynamic_projections_from):
init_scale = self.dynamic_projections_init_scale
if not mtf.layers.unit_scaling_convention():
init_scale /= mtf.Shape(reduced_dims).size
kernel_initializer = mtf.layers.VarianceScalingInitializer(
init_scale)
ps.append(
mtf.layers.dense(
dp_from, reduced_dims=[context.model.model_dim],
new_dims=shared_dims + reduced_dims + new_dims,
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="%s_dynamic_%d" % (name, i),
expert_dims=context.model.ensemble_dims,
kernel_initializer=kernel_initializer))
# Fold the static projection into one of the dynamic projections.
# Mathematically, we could add all the dynamic projections together
# here, but it would create a very large tensor which contained
# both the query-length and memory-length dimensions, and would
# probably be slower in practice.
ps[0] += static_p
return mtf.add_n(
[mtf.layers.us_einsum([inp, p], reduced_dims=reduced_dims)
for p in ps])
else:
# No dynamic projections. Static talking-heads projection only
return mtf.layers.dense(
inp, reduced_dims=reduced_dims,
new_dims=new_dims,
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name=name, expert_dims=context.model.ensemble_dims + shared_dims)
@gin.configurable
class TalkingHeadsEncDecAttention(TalkingHeadsSelfAttention):
"""Talking-heads attention over encoder output.
See comments on TalkingHeadsSelfAttention.
"""
def __init__(self, relative_attention_type=None, **kwargs):
super(TalkingHeadsEncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
m = self._get_memory_antecedent(context)
memory_input_dim = m.shape[-1]
if memory_input_dim != context.model.model_dim:
raise NotImplementedError(
"TODO(noam): support different model_dim in encoder and decoder.")
q = self.compute_q(context, x)
if context.mode == "incremental":
k, v, memory_length = context.get_constant_state()
else:
k = self.compute_k(context, m)
v = self.compute_v(context, m)
memory_length, = [d for d in m.shape.dims if d.name == "memory_length"]
if context.mode == "first_part":
context.record_constant_state((k, v, memory_length))
bias = enc_dec_attention_bias(self,
context,
self.softmax_heads_dims)
return self.attention_internal(context, x, m, q, k, v, memory_length, bias)
@gin.configurable
class GeneralBilinearSelfAttention(SelfAttention):
"""General Bilinear Self-Attention.
Described in the forthcoming talking-heads paper.
Equivalent to multi-head attention where d_kv == d_model.
It is redundant to have projections on both q and k.
It is redundant to have projections on both v and output.
We therefore omit the projections on k and v, making the two identical.
"""
def __init__(self, # pylint: disable=super-init-not-called
heads_dims=(("heads", 12),),
dropout_rate=0.0,
relative_attention_type=None,
relative_attention_num_buckets=32):
"""Create a GeneralBilinearSelfAttention Layer.
Args:
heads_dims: a list of mtf.Dimension or (name, size) pairs
dropout_rate: a float
relative_attention_type: an optional string - one of
(None, "bias", "bias_shared", "contextual")
relative_attention_num_buckets: an integer
"""
self.heads_dims = [
mtf.convert_to_dimension(d) for d in heads_dims]
self.dropout_rate = dropout_rate
self.relative_attention_type = relative_attention_type
self.relative_attention_num_buckets = relative_attention_num_buckets
def compute_q(self, context, x):
# Scale the initializer variance by 1.0/d_k
# This scales the initializer by rsqrt(d_k)
init_scale = 1.0
if not mtf.layers.unit_scaling_convention():
init_scale /= context.model.model_dim.size
return mtf.layers.dense(
x, reduced_dims=[context.model.model_dim],
new_dims=self.heads_dims + [context.model.model_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="q", expert_dims=context.model.ensemble_dims,
kernel_initializer=mtf.layers.VarianceScalingInitializer(init_scale))
def compute_y(self, context, u):
return mtf.layers.dense(
u, reduced_dims=self.heads_dims + [context.model.model_dim],
new_dims=[context.model.model_dim],
use_bias=False, activation=None,
variable_dtype=context.variable_dtype,
name="y", expert_dims=context.model.ensemble_dims)
def call(self, context, x, losses=None):
"""Call the layer."""
memory_length = self.memory_length(context)
q = self.compute_q(context, x)
if context.mode == "incremental":
m = x
else:
m = mtf.replace_dimensions(x, context.length_dim, memory_length)
if context.mode == "incremental":
one_hot = mtf.one_hot(
context.position, memory_length, dtype=context.activation_dtype)
inv_one_hot = 1.0 - one_hot
old_m, = context.get_states(1)
m = old_m * inv_one_hot + one_hot * m
memory_position = mtf.range(context.mesh, memory_length, tf.int32)
else:
memory_position = self.rename_length_to_memory_length(
context.position, context)
if context.mode == "incremental" or context.mode == "first_part":
context.record_new_states([m])
bias = self.compute_bias(context, memory_position, x, self.heads_dims, q)
return self.attention_internal(context, q, m, memory_length, bias)
def attention_internal(self, context, q, m, memory_length, bias):
logits = mtf.layers.us_einsum(
[q, m], reduced_dims=[context.model.model_dim])
if bias is not None:
logits += bias
weights = mtf.softmax(logits, memory_length)
# TODO(noam): make dropout_broadcast_dims configurable
dropout_broadcast_dims = [context.length_dim]
weights = mtf.dropout(
weights, rate=self.dropout_rate if context.train else 0.0,
noise_shape=weights.shape - dropout_broadcast_dims)
u = mtf.einsum([weights, m], reduced_dims=[memory_length])
return self.compute_y(context, u)
@gin.configurable
class GeneralBilinearEncDecAttention(GeneralBilinearSelfAttention):
"""Talking-heads attention over encoder output.
See comments on GBMSelfAttention.
"""
def __init__(self, relative_attention_type=None, **kwargs):
super(GeneralBilinearEncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
memory_antecedent = self._get_memory_antecedent(context)
memory_input_dim = memory_antecedent.shape[-1]
if memory_input_dim != context.model.model_dim:
raise NotImplementedError(
"TODO(noam): support different model_dim in encoder and decoder.")
q = self.compute_q(context, x)
if context.mode == "incremental":
m, memory_length = context.get_constant_state()
else:
m = memory_antecedent
memory_length, = [d for d in m.shape.dims if d.name == "memory_length"]
if context.mode == "first_part":
context.record_constant_state((m, memory_length))
bias = enc_dec_attention_bias(self,
context,
self.heads_dims)
return self.attention_internal(context, q, m, memory_length, bias)
@gin.configurable
class BranchedSelfAttention(SelfAttention):
"""Branched self attention."""
def __init__(self, **kwargs):
super(BranchedSelfAttention, self).__init__(
combine_dims=False, keep_query_heads_dims=True, **kwargs)
if self.num_memory_heads != 0:
raise ValueError("Set num_memory_heads to 0 for branched attention.")
self.dense_layer = DenseReluDense()
self.kappa_init = tf.random_uniform_initializer(minval=0.0, maxval=1.0)
self.alpha_init = tf.random_uniform_initializer(minval=0.0, maxval=1.0)
def _constraint(self, z):
"""Keep z non-negative and summing to 1."""
z = mtf.relu(z)
return z / mtf.reduce_sum(z + 10**-4)
def layer_output_from_attention_output(self, context, attention_output,
losses):
heads_dim = mtf.Dimension("heads", self.num_heads)
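# kappa and alpha are per-head mixing weights (kept non-negative and roughly
# normalized by _constraint): kappa scales each head's attention output
# before the shared DenseReluDense block, and alpha re-weights the result
# before summing over heads.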
kappa = mtf.get_variable(
context.mesh,
"kappa",
mtf.Shape([heads_dim]),
initializer=self.kappa_init,
dtype=context.variable_dtype,
constraint=self._constraint)
alpha = mtf.get_variable(
context.mesh,
"alpha",
mtf.Shape([heads_dim]),
initializer=self.alpha_init,
dtype=context.variable_dtype,
constraint=self._constraint)
o = mtf.einsum([attention_output, kappa],
output_shape=attention_output.shape)
o = self.dense_layer.call(context, o, losses)
o = mtf.einsum([o, alpha], output_shape=o.shape)
o = mtf.reduce_sum(o, reduced_dim=heads_dim)
return o
@gin.configurable
class BranchedEncDecAttention(BranchedSelfAttention):
"""Branched attention over encoder output."""
def __init__(self, relative_attention_type=None, **kwargs):
super(BranchedEncDecAttention, self).__init__(
relative_attention_type=relative_attention_type, **kwargs)
def _get_memory_antecedent(self, context):
return context.encoder_output
def call(self, context, x, losses=None):
"""Call the layer."""
return enc_dec_attention(self, self._get_memory_antecedent(context),
context, x, losses)
class Conv1D(transformer.TransformerLayer):
"""Parent class for convolutional layers for common decoding logics.
When convolutional layers are used in the decoder, the incremental decoding
requires common features such as storing and accessing the recurrent state
information. These features do not depend on the specifics of the
convolutional layer (e.g., depthwise convolution, lightweight) as long as they
have the fixed receptive field defined by the filter size. This class
provides the methods for such features.
"""
def record_states_first_part_mode(self,
context,
x,
filter_size,
length_dim_name="length"):
"""Record the states during the first part mode.
l: current layer index
k: convolution filter size
x(l): input tensor to layer `l` for the first_part mode with the shape
[<batch_dims>, length, d_model].
The first_part mode is called once before the incremental mode is called for
the actual decoding process. The purpose is to set the recurrent states in
context.states, which are accessed during the incremental mode via
context.get_states. There are two cases depending on partial sequences are
present or not.
1) with partial sequences
When partial sequences are present, we decode from the position after the
partial sequence, but we need to use the information contained in the
partial sequence.
x(l) = [x1, x2, 0, 0, 0]
context.initial_position = 2 (the actual decoding should start from index
2).
Then we record the state = [0, x1, x2]. If partial sequences are shorter
than the filter size, we zero pad from the left.
2) Without partial sequences
x(l) = [0, 0, 0, 0, 0]
context.initial_position = 0
Then we record the state = [0, 0, 0]
These two cases can be handled with the following pseudocode. Let
i = context.initial_position.
state = x[:, i-filter_size:i, :] and store this as state.
Equivalently we can shift x by filter_size and slice
shifted_x = shift(x, length_dim)
state = shifted_x[:, i:i + filter_size, :]
Args:
context: a transformer.Context.
x: a Tensor.
filter_size: an integer - convolution filter size.
length_dim_name: a string - a dimension name for the length mtf.Dimension.
"""
length_dim = x.shape.dims[-2]
# Slice shifted_x[:, i:i + self.filter_size, :]
filter_dim = mtf.Dimension(length_dim_name, filter_size)
indices = mtf.range(x.mesh, filter_dim, dtype=tf.int32)
indices = context.initial_position + indices
# Assumes that x.shape = [<batch_dims>, length_dim, model_dim]
output_shape = mtf.Shape(x.shape.dims[:-2] + [filter_dim] +
x.shape.dims[-1:])
shifted_x = mtf.shift(x, filter_size, length_dim, wrap=False)
state = mtf.gather(
shifted_x, indices, length_dim, output_shape=output_shape)
context.record_new_states([state])
def record_states_incremental_mode(self, context, x, filter_size,
length_dim_name="length"):
"""Record the states during the first part mode.
l: current layer index
t: current decoding time step
k: convolution filter size
x(l, t): input vector to layer `l` at time step `t` for the incremental
mode with the shape [<batch_dims>, d_model].
During the incremental mode, the input to the conv layer x(l, t) does not
have the length dim because the input vector x corresponds to the current
decoding time step. We want to restore the input to the current layer in the
previous time steps (stored in the context.states) and combine with the
input at the current time step. This method does the following.
1) Restore the states: [x(l, t-k), ..., x(l, t-1)]
2) Combine with the current input: [x(l, t-k+1), ..., x(l, t-1), x(l, t)]
3) Store the new state and return it to be used as an input to the conv
layer.
It is important to note that the state being recorded is not used by the
next layer; it is used by the same layer but at the future time steps.
Args:
context: a transformer.Context.
x: a Tensor.
filter_size: an integer - convolution filter size.
length_dim_name: a string - a dimension name for the length mtf.Dimension.
Returns:
x: a Tensor of shape [<batch_dims>, filter_size, d_model].
"""
# Augment x with the states
filter_dim = mtf.Dimension(length_dim_name, filter_size)
input_state = context.get_states(1)[0]
position = mtf.constant(
x.mesh,
filter_size - 1, # Always use the last position.
shape=mtf.Shape(x.shape.dims[:-1]), # Pick out batch dims.
dtype=tf.int32)
# [batch, d_model] -> [batch, filter, d_model]
x = self.update_state(
input_state, x, position, filter_dim, dtype=context.activation_dtype)
# The new state includes the inputs for steps [t - filter_size + 1, ..., t].
context.record_new_states([x])
return x
def update_state(self, old_state, x, position, filter_dim, dtype):
"""Augment the current input to the old state.
[x(l, t-k), ..., x(l, t-1)], x(l, t) ->
[x(l, t-k+1), ..., x(l, t-1), x(l, t)]
Args:
old_state: a Tensor of shape [<batch_dims>, filter_size, d_model]
x: a Tensor of shape [<batch_dims>, d_model]
position: a Tensor of shape [<batch_dims>]
filter_dim: an mtf.Dimension corresponding to the filter size.
dtype: a mtf.VariableDType
Returns:
new_state: a Tensor of shape [<batch_dims>, filter_size, d_model].
"""
# [<batch_dims>, length, d_model]
shifted_state = mtf.shift(old_state, -1, filter_dim, wrap=False)
# [<batch_dims>, length]
one_hot = mtf.one_hot(position, filter_dim, dtype=dtype)
# [<batch_dims>, length, d_model]
shifted_x = one_hot * x
new_state = shifted_state + shifted_x
return new_state
@gin.configurable
class Conv1DLayer(Conv1D):
"""1D convolution over sequence length with model dim as channels.
One caveat is that this layer does nothing to stop information from bleeding
across packed examples.
"""
def __init__(self, filter_size, output_size, activation="linear"): # pylint: disable=super-init-not-called
"""Create a Conv1DLayer.
Args:
filter_size: a positive integer, the size of convolutional kernel.
output_size: a positive integer, the number of channels in the output.
activation: an optional string function name from namespace mtf, a
function to be applied to the layer output. If not provided or set to
"linear", then no function will be applied.
"""
self._filter_size = filter_size
self._output_size = output_size
self._activation = activation
def call(self, context, x, losses=None):
"""Call the layer."""
if context.mode == "first_part":
self.record_states_first_part_mode(context, x, self.filter_size)
if context.mode == "incremental":
x = self.record_states_incremental_mode(context, x, self.filter_size)
padding = "VALID"
else:
# The first_part mode also needs masking because it may have partial
# sequences.
mask = mtf.cast(
mtf.not_equal(context.inputs, 0), context.activation_dtype)
x *= mask
padding = "SAME"
model_dim = x.shape.dims[-1]
input_dim = mtf.Dimension("input_dim", model_dim.size)
x = mtf.replace_dimensions(x, model_dim, input_dim)
output_dim = mtf.Dimension(model_dim.name, self._output_size)
output = mtf.layers.conv1d(
x,
output_dim=output_dim,
filter_size=self._filter_size,
padding=padding,
filter_initializer=tf.glorot_uniform_initializer())
if context.mode == "incremental":
filter_dim = mtf.Dimension("length", self.filter_size)
# [batch_dims, 1, output_dim] -> [batch_dims, output_dim]
output = mtf.reduce_sum(
output, reduced_dim=mtf.Dimension(filter_dim.name, 1))
if self._activation != "linear":
activation_fn = getattr(mtf, self._activation)
output = activation_fn(output)
return output
@property
def filter_size(self):
return self._filter_size
@gin.configurable
class SeparableConv1DLayer(Conv1D):
"""1D separable convolution over sequence length with model dim as channels.
One caveat is that this layer does nothing to stop information from bleeding
across packed examples.
"""
def __init__(self,
min_relative_pos,
max_relative_pos,
output_size,
depthwise_filter_initializer_scale=1.0,
pointwise_filter_initializer_scale=1.0,
activation="linear"):
"""Create a SeparableConv1DLayer.
The filter size will be `max_relative_pos - min_relative_pos + 1`.
Args:
min_relative_pos: an integer, the inclusive minimum relative positive of
the depthwise filter, where a relative position of zero means the left
end of the filter aligns with the left end of the input.
max_relative_pos: an integer, the inclusive maximum relative position of
the depthwise filter, where a relative position of zero means the right
end of the filter aligns with the right end of the input.
output_size: a positive integer, the number of channels in the output.
depthwise_filter_initializer_scale: a positive integer, the scale for the
initializer for the depthwise filter.
pointwise_filter_initializer_scale: a positive integer, the scale for the
initializer for the pointwise filter.
activation: an optional string function name from namespace mtf, a
function to be applied to the layer output. If not provided or set to
"linear", then no function will be applied.
"""
self._min_relative_pos = min_relative_pos
self._max_relative_pos = max_relative_pos
self._output_size = output_size
self._depthwise_filter_initializer_scale = depthwise_filter_initializer_scale
self._pointwise_filter_initializer_scale = pointwise_filter_initializer_scale
self._activation = activation
def call(self, context, x, losses=None, all_kernel_wts=None):
"""Call the layer."""
if context.mode == "first_part":
self.record_states_first_part_mode(context, x, self.filter_size)
if context.mode == "incremental":
x = self.record_states_incremental_mode(context, x, self.filter_size)
else:
# Mask padding.
# TODO(karishmamalkan): Change the inputs_for_mask_creation to use decoder
# when using with decoder
inputs_for_mask_creation = context.inputs
mask = mtf.cast(
mtf.not_equal(inputs_for_mask_creation, 0), context.activation_dtype)
x *= mask
model_dim = x.shape.dims[-1]
output_dim = mtf.Dimension(model_dim.name, self._output_size)
output = mtf.layers.separable_conv1d(
x,
output_dim=output_dim,
min_relative_pos=self._min_relative_pos,
max_relative_pos=self._max_relative_pos,
depthwise_filter_initializer_scale=self
._depthwise_filter_initializer_scale,
pointwise_filter_initializer_scale=self
._pointwise_filter_initializer_scale,
use_bias=True,
kernel_depth_weights=all_kernel_wts)
if context.mode == "incremental":
filter_dim = mtf.Dimension("length", self.filter_size)
# Drop unnecessary portion [batch, length, d_model] -> [batch, d_model]
# Only the last sequence position is relevant.
output = mtf.gather(output, [self.filter_size - 1], filter_dim)
if self._activation != "linear":
activation_fn = getattr(mtf, self._activation)
output = activation_fn(output)
return output
@property
def filter_size(self):
return self._max_relative_pos - self._min_relative_pos + 1
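# Illustrative sketch, not part of the original library: the filter width used
# by SeparableConv1DLayer is max_relative_pos - min_relative_pos + 1, so the
# hypothetical settings below (looking back three positions and ending at the
# current one) correspond to a causal-style filter of width 4.
def _separable_conv_filter_size_example(min_relative_pos=-3, max_relative_pos=0):
  """Returns the filter width implied by the relative-position bounds."""
  return max_relative_pos - min_relative_pos + 1  # 0 - (-3) + 1 == 4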
@gin.configurable
class Conv1DLocalAttn(SeparableConv1DLayer):
"""Lightweight 1D separable convolution over sequence length with d_model as channels.
Lightweight 1D separable convolution over sequence length, with separated over
model_dim as channels, containing a fixed number of unique channels
repeated/stacked over the model_dim.
"""
def __init__(self,
min_relative_pos,
max_relative_pos,
output_size,
depthwise_filter_initializer_scale=1.0,
pointwise_filter_initializer_scale=1.0,
activation="linear",
num_unique_depth_filters=1):
"""Create a LightweightConv1DLayer.
The filter size will be `max_relative_pos - min_relative_pos + 1`
The value of the Filter is depthwise separable, and the filter is tied and
repeats at every "num_unique_depth_filters" elements.
Args:
min_relative_pos: an integer, the inclusive minimum relative position of
the depthwise filter, where a relative position of zero means the left
end of the filter aligns with the left end of the input.
max_relative_pos: an integer, the inclusive maximum relative position of
the depthwise filter, where a relative position of zero means the right
end of the filter aligns with the right end of the input.
output_size: a positive integer, the number of channels in the output.
depthwise_filter_initializer_scale: a positive integer, the scale for the
initializer for the depthwise filter.
pointwise_filter_initializer_scale: a positive integer, the scale for the
initializer for the pointwise filter.
activation: an optional string function name from namespace mtf, a
function to be applied to the layer output. If not provided or set to
"linear", then no function will be applied.
num_unique_depth_filters: The number of unique depth filter values. The
unique filter is repeated along the depth dim every
num_unique_depth_filters elements.
"""
super(Conv1DLocalAttn,
self).__init__(min_relative_pos, max_relative_pos, output_size,
depthwise_filter_initializer_scale,
pointwise_filter_initializer_scale, activation)
self._num_unique_depth_filters = num_unique_depth_filters
assert (self._output_size % self._num_unique_depth_filters == 0), (
"The number of elements in the unique depth filter should exactly "
"divide the number of output channels. You set "
"num_unique_depth_filters=%d, output_size(num_output_channels)=%d") % (
self._num_unique_depth_filters, self._output_size)
def call(self, context, x, losses=None):
"""Call the layer."""
depth_dim = x.shape.dims[-1]
initializer_scale = self._depthwise_filter_initializer_scale or 1.0
kernel_size = self._max_relative_pos - self._min_relative_pos + 1
kernel_initializer = mtf.layers.VarianceScalingInitializer(
scale=initializer_scale / kernel_size)
repeated_kernel_dim = [
mtf.Dimension(
"repeated_kernel_dim",
size=int(depth_dim.size / self._num_unique_depth_filters))
]
all_kernel_wts = []
for i in range(kernel_size):
# get a kernel variable of size num_unique_depth_filters, and replicate it
# to span the size of the complete depth(d_model) of x
kernel_wt = self.get_kernel_wt(
x,
repeated_kernel_dim=repeated_kernel_dim,
kernel_initializer=kernel_initializer,
i=i,
context=context,
variable_dtype=context.variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32)
repeated_kernel_wts = [kernel_wt] * self._num_unique_depth_filters
repeated_kernel_wts_concat = mtf.concat(
repeated_kernel_wts, concat_dim_name="repeated_kernel_dim")
repeated_kernel_wts_concat = mtf.rename_dimension(
repeated_kernel_wts_concat, "repeated_kernel_dim", "d_model")
all_kernel_wts.append(repeated_kernel_wts_concat)
# modify the kernel weights, such that they are softmaxed over the width of
# the kernel. We do this by stacking the individual kernel positions,
# performing the softmax, and then re-separating the stack.
stacked_kernel_weights = mtf.stack(all_kernel_wts, "new_stack_dim")
softmaxed_kernel_weights = mtf.softmax(
stacked_kernel_weights, reduced_dim=stacked_kernel_weights.shape[0]
) # the softmax is calculated over the new_stack_dim we created
unstacked_kernel_weights = mtf.unstack(softmaxed_kernel_weights,
stacked_kernel_weights.shape[0])
return super(Conv1DLocalAttn, self).call(context, x, losses,
unstacked_kernel_weights)
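# Illustrative NumPy sketch, not part of the original library: the
# stack/softmax/unstack above normalizes the depthwise kernel weights across
# the kernel-width axis, so each channel's weights over the filter taps sum to
# one. The array shape below is a hypothetical [kernel_size, d_model].
def _softmax_kernel_weights_example(kernel_wts):
  """kernel_wts: NumPy float array of shape [kernel_size, d_model]."""
  import numpy as np  # local import keeps the sketch self-contained
  shifted = kernel_wts - np.max(kernel_wts, axis=0, keepdims=True)
  exp = np.exp(shifted)
  return exp / np.sum(exp, axis=0, keepdims=True)  # columns sum to 1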
@gin.configurable
class LightweightConv1DLocalAttn(Conv1DLocalAttn):
"""Lightweight 1D separable convolution over seq_len with d_model as channels.
Lightweight 1D separable convolution over sequence length, separated over
model_dim as channels, containing a fixed number of unique depthwise filters
repeated/stacked over the model_dim.
"""
def get_kernel_wt(self,
x,
repeated_kernel_dim,
kernel_initializer,
i,
context,
variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32):
kernel_wt = mtf.layers.get_dense_kernel_weights(
x,
new_dims=[],
reduced_dims=[],
expert_dims=repeated_kernel_dim,
kernel_initializer=kernel_initializer,
name="lightwt_depthwise_dense_%d" % (i),
variable_dtype=context.variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32)
return kernel_wt
@gin.configurable
class DynamicConv1DLocalAttn(Conv1DLocalAttn):
"""Dynamic 1D separable convolution over seq_len with d_model as channels.
Dynamic kernels are predicted from the input at each position of the seq_len.
The conv operation is separated over model_dim as channels, containing a
fixed number of unique depthwise filters repeated/stacked over the model_dim.
"""
def get_kernel_wt(self,
x,
repeated_kernel_dim,
kernel_initializer,
i,
context,
variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32):
kernel_wt = mtf.layers.dense(
x,
new_dims=repeated_kernel_dim,
reduced_dims=[context.model.model_dim],
expert_dims=[],
kernel_initializer=kernel_initializer,
name="dyn_conv_depthwise_dense_%d" % (i),
variable_dtype=context.variable_dtype,
master_dtype=tf.float32,
slice_dtype=tf.float32)
return kernel_wt
@gin.configurable
class LocalConvAttnBlock(transformer.TransformerLayer):
"""Conv Attention Block for Lightweight and dynamic conv attention.
Lightweight/Dynamic separable convolution over sequence length as described in
https://arxiv.org/pdf/1901.10430.pdf.
"""
def __init__(self,
min_relative_pos,
max_relative_pos,
output_size,
depthwise_filter_initializer_scale=1.0,
pointwise_filter_initializer_scale=1.0,
activation="linear",
num_unique_depth_filters=1,
attention_type="lightweight_conv"):
"""Create a LightweightConv1DAttnBlock.
The filter size will be `max_relative_pos - min_relative_pos + 1`
The value of the Filter is depthwise separable, and the filter is tied and
repeats at every "num_unique_depth_filters" elements.
Args:
min_relative_pos: an integer, the inclusive minimum relative position of
the depthwise filter, where a relative position of zero means the left
end of the filter aligns with the left end of the input.
max_relative_pos: an integer, the inclusive maximum relative position of
the depthwise filter, where a relative position of zero means the right
end of the filter aligns with the right end of the input.
output_size: a positive integer, the number of channels in the output.
depthwise_filter_initializer_scale: a positive integer, the scale for the
initializer for the depthwise filter.
pointwise_filter_initializer_scale: a positive integer, the scale for the
initializer for the pointwise filter.
activation: an optional string function name from namespace mtf, a
function to be applied to the layer output. If not provided or set to
"linear", then no function will be applied.
num_unique_depth_filters: The number of unique depth filter values. The
unique filter is repeated along the depth dim every
num_unique_depth_filters elements.
attention_type: a string, the type of conv attention, either
"lightweight_conv" or "dynamic_conv".
"""
if attention_type == "lightweight_conv":
self.conv_local_attn_layer = LightweightConv1DLocalAttn(
min_relative_pos, max_relative_pos, output_size,
depthwise_filter_initializer_scale,
pointwise_filter_initializer_scale, activation)
elif attention_type == "dynamic_conv":
self.conv_local_attn_layer = DynamicConv1DLocalAttn(
min_relative_pos, max_relative_pos, output_size,
depthwise_filter_initializer_scale,
pointwise_filter_initializer_scale, activation)
else:
raise NotImplementedError(
"Attention type %s is not implemented" % attention_type)
def call(self, context, x, losses=None):
"""Call the layer."""
gated_ip = mtf.layers.dense_product(
x,
reduced_dims=[context.model.model_dim],
new_dims=[context.model.model_dim],
activation_functions=["linear", "sigmoid"],
variable_dtype=context.variable_dtype,
name="local_conv_inp")
attn_output = self.conv_local_attn_layer.call(context, gated_ip, losses)
op_projection = mtf.layers.dense(
attn_output,
reduced_dims=[context.model.model_dim],
new_dims=[context.model.model_dim],
activation=None,
variable_dtype=context.variable_dtype,
name="local_conv_attn_op_projection")
return op_projection
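# Illustrative NumPy sketch, not part of the original library: the
# dense_product call above with activations ["linear", "sigmoid"] acts as a
# GLU-style gate on the block input. The weight matrices here are hypothetical
# stand-ins for the learned kernels.
def _glu_gate_example(x, w_linear, w_gate):
  """x: [batch, d_model]; w_linear, w_gate: [d_model, d_model] NumPy arrays."""
  import numpy as np  # local import keeps the sketch self-contained
  linear_part = x @ w_linear
  gate = 1.0 / (1.0 + np.exp(-(x @ w_gate)))
  return linear_part * gate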
@gin.configurable
class ParallelLayer(transformer.TransformerLayer):
"""Multiple layers in parallel.
Outputs are summed and divided by sqrt(n).
"""
def __init__(self, layer_classes=(DenseReluDense, SelfAttention)):
"""Create a ParallelLayer.
Args:
layer_classes: a list of TransformerLayer classes
"""
self.layer_classes = [l() for l in layer_classes]
def call(self, context, x, losses=None):
"""Call the layer."""
return (
mtf.add_n(
[l.call(context, x, losses=losses) for l in self.layer_classes])
* (len(self.layer_classes) ** -0.5))
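# Illustrative NumPy sketch, not part of the original library: summing n
# roughly independent, zero-mean layer outputs and dividing by sqrt(n), as in
# ParallelLayer.call above, keeps the combined variance close to that of a
# single layer. The sizes and seed below are hypothetical.
def _parallel_scaling_variance_check(n=4, dim=1024, seed=0):
  """Returns (variance of scaled sum, variance of one output); both near 1."""
  import numpy as np  # local import keeps the sketch self-contained
  rng = np.random.RandomState(seed)
  outputs = [rng.randn(dim) for _ in range(n)]
  combined = sum(outputs) * (n ** -0.5)
  return float(np.var(combined)), float(np.var(outputs[0]))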
| mesh-master | mesh_tensorflow/transformer/transformer_layers.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| mesh-master | mesh_tensorflow/transformer/gin/__init__.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Functions and classes related to optimization (weight updates)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mesh_tensorflow as mtf
import mesh_tensorflow.optimize as mtf_optimize
import tensorflow.compat.v1 as tf
def clip_by_global_norm(grads, clip_norm):
"""Clip the grads by global norm."""
global_norm = mtf.sqrt(
mtf.add_n([mtf.reduce_sum(mtf.square(t)) for t in grads if t is not None
]))
multiplier = clip_norm / mtf.maximum(global_norm, clip_norm)
clipped_grads = [None if t is None else t * multiplier for t in grads]
return clipped_grads, global_norm
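# Illustrative NumPy sketch, not part of the original library: the multiplier
# above equals min(1, clip_norm / global_norm), so gradients are rescaled only
# when their global norm exceeds clip_norm and are left unchanged otherwise.
def _clip_by_global_norm_numpy_example(grads, clip_norm):
  """grads: list of NumPy arrays; returns (clipped_grads, global_norm)."""
  import numpy as np  # local import keeps the sketch self-contained
  global_norm = np.sqrt(sum(np.sum(np.square(g)) for g in grads))
  multiplier = clip_norm / max(global_norm, clip_norm)
  return [g * multiplier for g in grads], global_norm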
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps,
max_optimized_variable_size=None,
optimizer="adam",
clip_gradients=True):
"""Creates an optimizer training op."""
global_step = tf.train.get_or_create_global_step()
mesh = loss.mesh
if init_lr:
# Implements linear decay of the learning rate.
learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
num_train_steps,
end_learning_rate=0.0,
power=1.0,
cycle=False)
# Implements linear warmup. I.e., if global_step < num_warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
if num_warmup_steps:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_learning_rate = init_lr * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
learning_rate = ((1.0 - is_warmup) * learning_rate +
is_warmup * warmup_learning_rate)
mtf_learning_rate = mtf.import_tf_tensor(mesh, learning_rate, [])
else:
if optimizer == "adam":
raise ValueError("Adam does not have a default learning rate")
learning_rate = None
mtf_learning_rate = None
# It is recommended that you use this optimizer for fine tuning, since this
# is how the model was trained (note that the Adam m/v variables are NOT
# loaded from init_checkpoint.)
if optimizer == "adam":
optimizer = mtf_optimize.AdamWeightDecayOptimizer(
learning_rate=mtf_learning_rate,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
elif optimizer == "adafactor":
optimizer = mtf_optimize.AdafactorOptimizer(
learning_rate=learning_rate,
min_dim_size_to_factor=32)
else:
raise ValueError("unknown optimizer")
trainable_variables = mesh.graph.trainable_variables
if max_optimized_variable_size:
trainable_variables = [t for t in trainable_variables
if t.shape.size <= max_optimized_variable_size]
var_grads = mtf.gradients(
[loss], [v.outputs[0] for v in trainable_variables])
# This is how the model was pre-trained.
if clip_gradients:
(var_grads, _) = clip_by_global_norm(
var_grads, clip_norm=mtf.constant(mesh, 1.0, dtype=tf.float32))
update_ops = optimizer.apply_grads(var_grads, trainable_variables)
return learning_rate, update_ops
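# Illustrative sketch, not part of the original library: a pure-Python analogue
# of the learning-rate schedule built above, assuming power=1.0 and
# end_learning_rate=0.0 as configured: linear warmup from 0 to init_lr over
# num_warmup_steps, then linear decay to 0 at num_train_steps.
def _warmup_then_linear_decay_example(step, init_lr, num_train_steps,
                                      num_warmup_steps):
  """Returns the scalar learning rate for a given integer global step."""
  if num_warmup_steps and step < num_warmup_steps:
    return init_lr * float(step) / float(num_warmup_steps)
  fraction_remaining = 1.0 - min(float(step), num_train_steps) / num_train_steps
  return init_lr * fraction_remaining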
| mesh-master | mesh_tensorflow/bert/optimization.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os
import random
import mesh_tensorflow as mtf
import mesh_tensorflow.bert.bert as bert_lib
import mesh_tensorflow.bert.optimization as optimization_lib
import mesh_tensorflow.bert.tokenization as tokenization
import six
from six.moves import range
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string(
"cached_train_file", None, "Prepared training file.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_float(
"max_optimized_variable_size", 1e7,
"Do not optimize variables larger than this.")
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
"Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_bool("clip_gradients", True, "Apply gradient clipping.")
flags.DEFINE_string("optimizer", "adam", "adam/adafactor")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer(
"n_best_size", 20,
"The total number of n-best predictions to generate in the "
"nbest_predictions.json output file.")
flags.DEFINE_integer(
"max_answer_length", 30,
"The maximum length of an answer that can be generated. This is needed "
"because the start and end predictions are not conditioned on one another.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string("mesh_shape", "batch:8", "mesh shape")
tf.flags.DEFINE_string(
"layout",
"batch:batch,vocab:model,intermediate:model,num_heads:model,experts:batch",
"layout rules")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_float(
"null_score_diff_threshold", 0.0,
"If null_score - best_non_null is greater than the threshold predict null.")
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
if self.start_position:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if FLAGS.version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length -
1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
tf.logging.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
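# Illustrative sketch, not part of the original file: a miniature version of
# the char_to_word_offset bookkeeping above, using the hypothetical context
# "a bc d". Character-level answer_start offsets index into this list to
# recover word-level start/end positions.
def _char_to_word_offset_example(paragraph_text="a bc d"):
  """Returns (["a", "bc", "d"], [0, 0, 1, 1, 1, 2]) for the default input."""
  doc_tokens, char_to_word_offset = [], []
  prev_is_whitespace = True
  for c in paragraph_text:
    if c in (" ", "\t", "\r", "\n"):
      prev_is_whitespace = True
    else:
      if prev_is_whitespace:
        doc_tokens.append(c)
      else:
        doc_tokens[-1] += c
      prev_is_whitespace = False
    char_to_word_offset.append(len(doc_tokens) - 1)
  return doc_tokens, char_to_word_offset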
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
output_fn):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
if example_index < 20:
tf.logging.info("*** Example ***")
tf.logging.info("unique_id: %s" % (unique_id))
tf.logging.info("example_index: %s" % (example_index))
tf.logging.info("doc_span_index: %s" % (doc_span_index))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
tf.logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
tf.logging.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
tf.logging.info("start_position: %d" % (start_position))
tf.logging.info("end_position: %d" % (end_position))
tf.logging.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
# Run callback
output_fn(feature)
unique_id += 1
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electronics?
# Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
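# Illustrative sketch, not part of the original file: the span-tightening
# search above, run on a hypothetical pre-tokenized document standing in for
# the WordPiece output of "(1895-1943)." with the answer "1895". The wide
# initial span (0, 5) is narrowed to (1, 1).
def _improve_answer_span_example():
  doc_tokens = ["(", "1895", "-", "1943", ")", "."]
  tok_answer_text = "1895"
  input_start, input_end = 0, 5
  for new_start in range(input_start, input_end + 1):
    for new_end in range(input_end, new_start - 1, -1):
      if " ".join(doc_tokens[new_start:(new_end + 1)]) == tok_answer_text:
        return (new_start, new_end)  # (1, 1)
  return (input_start, input_end)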
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
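# Illustrative sketch, not part of the original file: the "max context" score
# used above is min(left_context, right_context) + 0.01 * span_length. For the
# word "bought" in the comment's example, span B ("to the store and bought")
# scores min(4, 0) + 0.05 = 0.05 while span C ("and bought a gallon of")
# scores min(1, 3) + 0.05 = 1.05, so span C is the max-context span.
def _max_context_score_example(num_left_context, num_right_context, span_length):
  return min(num_left_context, num_right_context) + 0.01 * span_length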
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids):
"""Creates a classification model."""
model = bert_lib.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
mesh_shape=FLAGS.mesh_shape,
layout=FLAGS.layout)
final_hidden = model.get_sequence_output()
model_dim = model.model_dim
class_dim = mtf.Dimension("class", 2)
logits = mtf.layers.dense(
final_hidden,
reduced_dims=[model_dim],
new_dims=[class_dim],
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name="cls/squad/output")
unstacked_logits = mtf.unstack(logits, dim=class_dim)
(start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
return (start_logits, end_logits)
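# Illustrative NumPy sketch, not part of the original file: the dense layer
# above maps the final hidden states [batch, seq, d_model] to two logit
# tensors of shape [batch, seq], one for the answer start and one for the
# answer end. The weight matrix here is a hypothetical stand-in for the
# learned "cls/squad/output" kernel.
def _span_logits_example(final_hidden, w):
  """final_hidden: [batch, seq, d_model]; w: [d_model, 2] NumPy arrays."""
  import numpy as np  # local import keeps the sketch self-contained
  logits = np.einsum("bsd,dc->bsc", final_hidden, w)
  return logits[..., 0], logits[..., 1]  # (start_logits, end_logits)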
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
# MTF setup.
graph = mtf.Graph()
mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
ctx = params["context"]
num_hosts = ctx.num_hosts
host_placement_fn = ctx.tpu_host_placement_function
device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)]
tf.logging.info("device_list = %s" % device_list,)
replica_cache_size = 300 * 1000000 # 300M per replica
# Worker 0 caches all the TPU binaries.
worker0_mem = replica_cache_size * ctx.num_replicas
devices_memory_usage = [worker0_mem] + [0] * (num_hosts - 1)
var_placer = mtf.utils.BalancedVariablePlacer(device_list,
devices_memory_usage)
mesh_devices = [""] * mesh_shape.size
mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(mesh_shape, layout_rules,
mesh_devices,
ctx.device_assignment)
mesh = mtf.Mesh(graph, "bert_mesh", var_placer)
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
batch_size = input_ids.get_shape()[0].value
batch_dim = mtf.Dimension("batch", batch_size)
seq_length = input_ids.get_shape()[1].value
seq_dim = mtf.Dimension("seq", seq_length)
mtf_input_ids = mtf.import_tf_tensor(mesh, input_ids, [batch_dim, seq_dim])
mtf_input_mask = mtf.import_tf_tensor(mesh, input_mask,
[batch_dim, seq_dim])
mtf_segment_ids = mtf.import_tf_tensor(mesh, segment_ids,
[batch_dim, seq_dim])
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(start_logits, end_logits) = create_model(
bert_config=bert_config,
is_training=is_training,
input_ids=mtf_input_ids,
input_mask=mtf_input_mask,
segment_ids=mtf_segment_ids)
if mode == tf.estimator.ModeKeys.TRAIN:
def compute_loss(logits, positions):
one_hot_positions = mtf.one_hot(positions, output_dim=seq_dim)
log_probs = mtf.log_softmax(logits, seq_dim)
loss = -mtf.reduce_mean(
mtf.reduce_sum(one_hot_positions * log_probs, reduced_dim=seq_dim))
return loss
start_positions = features["start_positions"]
mtf_start_positions = mtf.import_tf_tensor(mesh, start_positions,
[batch_dim])
end_positions = features["end_positions"]
mtf_end_positions = mtf.import_tf_tensor(mesh, end_positions, [batch_dim])
start_loss = compute_loss(start_logits, mtf_start_positions)
end_loss = compute_loss(end_logits, mtf_end_positions)
total_loss = (start_loss + end_loss) / 2.0
_, update_ops = optimization_lib.create_optimizer(
total_loss,
learning_rate,
num_train_steps,
num_warmup_steps,
max_optimized_variable_size=FLAGS.max_optimized_variable_size,
optimizer=FLAGS.optimizer,
clip_gradients=FLAGS.clip_gradients)
elif mode == tf.estimator.ModeKeys.PREDICT:
start_logits = mtf.anonymize(start_logits)
end_logits = mtf.anonymize(end_logits)
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
if mode == tf.estimator.ModeKeys.TRAIN:
tf_loss = tf.to_float(lowering.export_to_tf_tensor(total_loss))
global_step = tf.train.get_global_step()
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
tf.logging.info("tf_update_ops: {}".format(tf_update_ops))
train_op = tf.group(tf_update_ops)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = bert_lib.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
with mtf.utils.outside_all_rewrites():
# Copy master variables to slices. Must be called first.
restore_hook = mtf.MtfRestoreHook(lowering)
if mode == tf.estimator.ModeKeys.TRAIN:
saver = tf.train.Saver(
tf.global_variables(),
sharded=True,
max_to_keep=10,
keep_checkpoint_every_n_hours=2,
defer_build=False,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
saver_hook = tf.train.CheckpointSaverHook(
FLAGS.output_dir,
save_steps=1000,
saver=saver,
listeners=[saver_listener])
return tf.estimator.tpu.TPUEstimatorSpec(
mode,
loss=tf_loss,
train_op=train_op,
training_hooks=[restore_hook, saver_hook],
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
"unique_ids": unique_ids,
"start_logits": lowering.export_to_tf_tensor(start_logits),
"end_logits": lowering.export_to_tf_tensor(end_logits),
}
return tf.estimator.tpu.TPUEstimatorSpec(
mode,
prediction_hooks=[restore_hook],
predictions=predictions,
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and PREDICT modes are supported: %s" %
(mode))
return model_fn
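# Illustrative NumPy sketch, not part of the original file, of the span loss
# computed inside model_fn above: the mean negative log-probability of the
# gold positions under a softmax over the sequence dimension (the final loss
# averages the start and end variants of this quantity).
def _span_loss_example(logits, positions):
  """logits: [batch, seq] NumPy array; positions: [batch] int array."""
  import numpy as np  # local import keeps the sketch self-contained
  shifted = logits - logits.max(axis=-1, keepdims=True)
  log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
  return -np.mean(log_probs[np.arange(len(positions)), positions])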
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"unique_ids": tf.FixedLenFeature([], tf.int64),
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
if is_training:
name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file):
"""Write final predictions to the json file and log-odds of null if needed."""
tf.logging.info("Writing predictions to: %s" % (output_prediction_file))
tf.logging.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if FLAGS.version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if FLAGS.version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if FLAGS.version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not FLAGS.version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > FLAGS.null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with tf.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if FLAGS.version_2_with_negative:
with tf.gfile.GFile(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
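# Illustrative sketch, not part of the original file, of the SQuAD 2.0 decision
# rule applied above: predict the empty answer exactly when the null score
# exceeds the best non-null span score by more than the configured threshold.
def _predict_null_answer_example(score_null, best_start_logit, best_end_logit,
                                 threshold=0.0):
  """Returns True when the empty ("no answer") prediction should be emitted."""
  return (score_null - best_start_logit - best_end_logit) > threshold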
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if FLAGS.verbose_logging:
tf.logging.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if FLAGS.verbose_logging:
tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
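# Illustrative NumPy sketch, not part of the original file: subtracting the
# maximum score before exponentiating, as _compute_softmax does above, avoids
# overflow while leaving the resulting probabilities unchanged.
def _softmax_stability_check(scores=(1000.0, 1001.0)):
  """Returns probabilities approximately equal to [0.269, 0.731]."""
  import numpy as np  # local import keeps the sketch self-contained
  s = np.asarray(scores)
  exp = np.exp(s - s.max())
  return exp / exp.sum()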
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
self._writer = tf.python_io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
def validate_flags_or_throw(bert_config):
"""Validate the input FLAGS or throw an exception."""
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if FLAGS.do_train:
if not FLAGS.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
raise ValueError(
"The max_seq_length (%d) must be greater than max_query_length "
"(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
bert_config = bert_lib.BertConfig.from_json_file(FLAGS.bert_config_file)
validate_flags_or_throw(bert_config)
tf.gfile.MakeDirs(FLAGS.output_dir)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
run_config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.estimator.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_cores_per_replica=1,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.BROADCAST))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = read_squad_examples(
input_file=FLAGS.train_file, is_training=True)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
# Pre-shuffle the input to avoid having to make a very large shuffle
# buffer in the `input_fn`.
rng = random.Random(12345)
rng.shuffle(train_examples)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.estimator.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
# We write to a temporary file to avoid storing very large constant tensors
# in memory.
if FLAGS.cached_train_file:
train_tfrecords_file = FLAGS.cached_train_file
else:
train_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "train.tf_record"),
is_training=True)
convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=True,
output_fn=train_writer.process_feature)
train_writer.close()
train_tfrecords_file = train_writer.filename
tf.logging.info("***** Running training *****")
tf.logging.info(" Num orig examples = %d", len(train_examples))
tf.logging.info(" Num split examples = %d", train_writer.num_features)
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
del train_examples
train_input_fn = input_fn_builder(
input_file=train_tfrecords_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_predict:
eval_examples = read_squad_examples(
input_file=FLAGS.predict_file, is_training=False)
eval_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "eval.tf_record"),
is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature)
eval_writer.close()
tf.logging.info("***** Running predictions *****")
tf.logging.info(" Num orig examples = %d", len(eval_examples))
tf.logging.info(" Num split examples = %d", len(eval_features))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
all_results = []
predict_input_fn = input_fn_builder(
input_file=eval_writer.filename,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False)
# If running eval on the TPU, you will need to specify the number of
# steps.
all_results = []
for result in estimator.predict(
predict_input_fn, yield_single_examples=True):
if len(all_results) % 1000 == 0:
tf.logging.info("Processing example: %d" % (len(all_results)))
unique_id = int(result["unique_ids"])
start_logits = [float(x) for x in result["start_logits"].flat]
end_logits = [float(x) for x in result["end_logits"].flat]
all_results.append(
RawResult(
unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits))
output_prediction_file = os.path.join(FLAGS.output_dir, "predictions.json")
output_nbest_file = os.path.join(FLAGS.output_dir, "nbest_predictions.json")
output_null_log_odds_file = os.path.join(FLAGS.output_dir, "null_odds.json")
write_predictions(eval_examples, eval_features, all_results,
FLAGS.n_best_size, FLAGS.max_answer_length,
FLAGS.do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file)
if __name__ == "__main__":
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| mesh-master | mesh_tensorflow/bert/run_squad.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| mesh-master | mesh_tensorflow/bert/__init__.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
from absl import flags
import six
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
flags.DEFINE_bool(
"preserve_unused_tokens", False,
"If True, Wordpiece tokenization will not be applied to words in the vocab."
)
_UNUSED_TOKEN_RE = re.compile("^\\[unused\\d+\\]$")
def preserve_token(token, vocab):
"""Returns True if the token should forgo tokenization and be preserved."""
if not FLAGS.preserve_unused_tokens:
return False
if token not in vocab:
return False
return bool(_UNUSED_TOKEN_RE.search(token))
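# Illustrative sketch (not part of the original file): the pattern above only
# matches bracketed "[unusedN]" placeholders, so ordinary vocabulary words are
# never preserved by `preserve_token`, even when the flag is enabled.
def _example_unused_token_pattern():
  assert _UNUSED_TOKEN_RE.search("[unused0]") is not None
  assert _UNUSED_TOKEN_RE.search("[unused99]") is not None
  assert _UNUSED_TOKEN_RE.search("hello") is None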
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." %
(actual_flag, init_checkpoint, model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
if token not in vocab:
vocab[token] = len(vocab)
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
for item in items:
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case, vocab=self.vocab)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
if preserve_token(token, self.vocab):
split_tokens.append(token)
continue
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True, vocab=tuple()):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
vocab: A container of tokens to not mutate during tokenization.
"""
self.do_lower_case = do_lower_case
self.vocab = vocab
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if preserve_token(token, self.vocab):
split_tokens.append(token)
continue
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
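# Illustrative sketch (not part of the original file): the greedy
# longest-match-first behaviour described in the docstring above, using a tiny
# hand-written vocabulary (the entries are assumptions for the example).
def _example_wordpiece_tokenize():
  toy_vocab = {"un": 0, "##aff": 1, "##able": 2}
  tokenizer = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")
  # Longest matching prefix first, then "##"-prefixed continuations.
  assert tokenizer.tokenize("unaffable") == ["un", "##aff", "##able"]
  # A token with no matching pieces collapses to the unknown token.
  assert tokenizer.tokenize("xyz") == ["[UNK]"]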
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
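# Illustrative sketch (not part of the original file): the ASCII-symbol rule
# described above means characters outside the Unicode "P*" categories, such
# as "$", are still treated as punctuation.
def _example_is_punctuation():
  assert _is_punctuation("$")      # ASCII symbol, not in a Unicode "P*" class
  assert _is_punctuation(",")      # ordinary punctuation
  assert not _is_punctuation("a")  # letters are never punctuation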
| mesh-master | mesh_tensorflow/bert/tokenization.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Run masked LM/next sentence masked_lm pre-training for BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import mesh_tensorflow as mtf
import mesh_tensorflow.bert.bert as bert_lib
import mesh_tensorflow.bert.optimization as optimization_lib
from six.moves import range
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_train_files", None,
"Input TF example files for training (can be a glob or comma separated).")
flags.DEFINE_string(
"input_eval_files", None,
"Input TF example files for evaluation (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer(
"max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.")
flags.DEFINE_string(
"mode", "train_and_eval",
"One of {\"train_and_eval\", \"train\", \"eval\"}.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_bool("clip_gradients", True, "Apply gradient clipping.")
flags.DEFINE_string("optimizer", "adam", "adam/adafactor")
flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
flags.DEFINE_integer("steps_per_eval", 5000,
"How often to evaluate the checkpoint.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string("mesh_shape", "batch:8", "mesh shape")
tf.flags.DEFINE_string(
"layout",
"batch:batch,vocab:model,intermediate:model,num_heads:model,experts:batch",
"layout rules")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
# pylint: disable=unused-argument
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
# MTF setup.
graph = mtf.Graph()
mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
if FLAGS.use_tpu:
ctx = params["context"]
num_hosts = ctx.num_hosts
host_placement_fn = ctx.tpu_host_placement_function
device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)]
tf.logging.info("device_list = %s" % device_list,)
replica_cache_size = 300 * 1000000 # 300M per replica
# Worker 0 caches all the TPU binaries.
worker0_mem = replica_cache_size * ctx.num_replicas
      devices_memory_usage = [worker0_mem] + [0] * (num_hosts - 1)
      var_placer = mtf.utils.BalancedVariablePlacer(device_list,
                                                    devices_memory_usage)
mesh_devices = [""] * mesh_shape.size
physical_shape = list(ctx.device_assignment.topology.mesh_shape)
logical_to_physical = mtf.simd_mesh_impl.auto_logical_to_physical_tpu(
mesh_shape.to_integer_list, physical_shape)
mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(
mesh_shape,
layout_rules,
mesh_devices,
ctx.device_assignment,
logical_to_physical=logical_to_physical)
else:
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
mesh_shape, layout_rules, [""] * mesh_shape.size)
var_placer = None
mesh = mtf.Mesh(graph, "bert_mesh", var_placer)
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
next_sentence_labels = tf.squeeze(features["next_sentence_labels"], 1)
batch_size = input_ids.get_shape()[0].value
batch_dim = mtf.Dimension("batch", batch_size)
seq_length = input_ids.get_shape()[1].value
seq_dim = mtf.Dimension("seq", seq_length)
max_predictions_per_seq = masked_lm_positions.get_shape()[1].value
max_predictions_per_seq_dim = mtf.Dimension("max_pred_seq",
max_predictions_per_seq)
mtf_input_ids = mtf.import_tf_tensor(mesh, input_ids, [batch_dim, seq_dim])
mtf_input_mask = mtf.import_tf_tensor(mesh, input_mask,
[batch_dim, seq_dim])
mtf_segment_ids = mtf.import_tf_tensor(mesh, segment_ids,
[batch_dim, seq_dim])
mtf_masked_lm_positions = mtf.import_tf_tensor(
mesh, masked_lm_positions, [batch_dim, max_predictions_per_seq_dim])
mtf_masked_lm_ids = mtf.import_tf_tensor(
mesh, masked_lm_ids, [batch_dim, max_predictions_per_seq_dim])
mtf_masked_lm_weights = mtf.import_tf_tensor(
mesh, masked_lm_weights, [batch_dim, max_predictions_per_seq_dim])
mtf_next_sentence_labels = mtf.import_tf_tensor(
mesh, next_sentence_labels, [batch_dim])
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = bert_lib.BertModel(
config=bert_config,
is_training=is_training,
input_ids=mtf_input_ids,
input_mask=mtf_input_mask,
token_type_ids=mtf_segment_ids,
layout=layout_rules,
mesh_shape=mesh_shape)
(masked_lm_loss, masked_lm_example_loss,
masked_lm_logits) = model.get_masked_lm_output(
mtf_masked_lm_positions, mtf_masked_lm_ids, mtf_masked_lm_weights)
(next_sentence_loss, next_sentence_example_loss,
next_sentence_logits) = model.get_next_sentence_output(
mtf_next_sentence_labels)
extra_loss = model.get_extra_loss()
total_loss = masked_lm_loss + next_sentence_loss
total_loss = mtf.anonymize(total_loss)
masked_lm_example_loss = mtf.anonymize(masked_lm_example_loss)
masked_lm_logits = mtf.anonymize(masked_lm_logits)
next_sentence_example_loss = mtf.anonymize(next_sentence_example_loss)
next_sentence_logits = mtf.anonymize(next_sentence_logits)
# TRAIN mode
if mode == tf.estimator.ModeKeys.TRAIN:
_, update_ops = optimization_lib.create_optimizer(
total_loss + extra_loss,
learning_rate,
num_train_steps,
num_warmup_steps,
optimizer=FLAGS.optimizer,
clip_gradients=FLAGS.clip_gradients)
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
tf_loss = tf.to_float(lowering.export_to_tf_tensor(total_loss))
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
tf.logging.info("tf_update_ops: {}".format(tf_update_ops))
train_op = tf.group(tf_update_ops)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_logits, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_logits, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_logits = tf.reshape(masked_lm_logits,
[-1, masked_lm_logits.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_logits, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_logits = tf.reshape(
next_sentence_logits, [-1, next_sentence_logits.shape[-1]])
next_sentence_predictions = tf.argmax(
next_sentence_logits, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
eval_metrics = (metric_fn, [
lowering.export_to_tf_tensor(masked_lm_example_loss),
lowering.export_to_tf_tensor(masked_lm_logits), masked_lm_ids,
masked_lm_weights,
lowering.export_to_tf_tensor(next_sentence_example_loss),
lowering.export_to_tf_tensor(next_sentence_logits),
next_sentence_labels
])
with mtf.utils.outside_all_rewrites():
# Copy master variables to slices. Must be called first.
restore_hook = mtf.MtfRestoreHook(lowering)
if mode == tf.estimator.ModeKeys.TRAIN:
saver = tf.train.Saver(
tf.global_variables(),
sharded=True,
max_to_keep=10,
keep_checkpoint_every_n_hours=2,
defer_build=False,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
saver_hook = tf.train.CheckpointSaverHook(
FLAGS.output_dir,
save_steps=1000,
saver=saver,
listeners=[saver_listener])
return tf.estimator.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.TRAIN,
loss=tf_loss,
train_op=train_op,
training_hooks=[restore_hook, saver_hook])
elif mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.tpu.TPUEstimatorSpec(
tf.estimator.ModeKeys.EVAL,
evaluation_hooks=[restore_hook],
loss=tf_loss,
eval_metrics=eval_metrics)
return model_fn
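# Illustrative sketch (not part of the original file): how the --mesh_shape and
# --layout flag strings used in model_fn above are interpreted by Mesh
# TensorFlow. The strings below follow the flag defaults; the assertions are
# assumptions written for the example.
def _example_mesh_shape_and_layout():
  mesh_shape = mtf.convert_to_shape("batch:8")
  layout_rules = mtf.convert_to_layout_rules(
      "batch:batch,vocab:model,intermediate:model,num_heads:model,experts:batch")
  # "batch:8" declares a single mesh dimension named "batch" of size 8.
  assert mesh_shape.size == 8
  assert [d.name for d in mesh_shape.dims] == ["batch"]
  # The layout maps tensor dimensions (left) onto mesh dimensions (right).
  del layout_rules  # Only constructed here to show the parsing entry points.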
def input_fn_builder(input_files,
max_seq_length,
max_predictions_per_seq,
is_training,
num_cpu_threads=4):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask":
tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.FixedLenFeature([1], tf.int64),
}
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.data.experimental.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
d = d.shuffle(buffer_size=100)
else:
d = tf.data.TFRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
    # and we *don't* want to drop the remainder, otherwise we won't cover
# every sample.
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True))
return d
return input_fn
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
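# Illustrative sketch (not part of the original file): the cast performed above
# applies to every integer feature, because tf.train.Example stores integers as
# int64 while the TPU operates on int32 tensors.
def _example_int64_to_int32_cast():
  t = tf.constant([1, 2, 3], dtype=tf.int64)
  assert tf.to_int32(t).dtype == tf.int32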
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
bert_config = bert_lib.BertConfig.from_json_file(FLAGS.bert_config_file)
tf.gfile.MakeDirs(FLAGS.output_dir)
input_train_files = []
for input_pattern in FLAGS.input_train_files.split(","):
input_train_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Training Files ***")
for input_train_file in input_train_files:
tf.logging.info(" %s" % input_train_file)
input_eval_files = []
for input_pattern in FLAGS.input_eval_files.split(","):
input_eval_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Evaluation Files ***")
for input_eval_file in input_eval_files:
tf.logging.info(" %s" % input_eval_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
run_config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.estimator.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_cores_per_replica=1,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.BROADCAST))
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.num_train_steps,
num_warmup_steps=FLAGS.num_warmup_steps)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.estimator.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.mode in ("train_and_eval", "train"):
tf.logging.info("Set train batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_train_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True)
if FLAGS.mode in ("train_and_eval", "eval"):
tf.logging.info("Set eval batch size = %d", FLAGS.eval_batch_size)
eval_input_fn = input_fn_builder(
input_files=input_eval_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False)
try:
current_step = tf.train.load_variable(FLAGS.output_dir,
tf.GraphKeys.GLOBAL_STEP)
except (TypeError, ValueError, tf.errors.NotFoundError):
current_step = 0
while current_step < FLAGS.num_train_steps:
if FLAGS.mode == "train_and_eval":
# Train for up to steps_per_eval number of steps.
# At the end of training, a checkpoint will be written to --model_dir.
next_checkpoint = min(current_step + FLAGS.steps_per_eval,
FLAGS.num_train_steps)
elif FLAGS.mode == "train":
next_checkpoint = FLAGS.num_train_steps
if FLAGS.mode in ("train_and_eval", "train"):
start_timestamp = time.time() # This time will include compilation time
tf.logging.info("Starting to train.")
estimator.train(input_fn=train_input_fn, max_steps=next_checkpoint)
current_step = next_checkpoint
tf.logging.info("Finished training up to step %d. Elapsed seconds %d.",
current_step, int(time.time() - start_timestamp))
if FLAGS.mode in ("train_and_eval", "eval"):
tf.logging.info("Starting to evaluate.")
result = estimator.evaluate(
input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
output_eval_file = os.path.join(
FLAGS.output_dir, "eval_results_{}.txt".format(current_step))
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.mode == "eval":
tf.logging.info("Exit eval mode")
break
if __name__ == "__main__":
flags.mark_flag_as_required("input_train_files")
flags.mark_flag_as_required("input_eval_files")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.disable_v2_behavior()
tf.app.run()
| mesh-master | mesh_tensorflow/bert/run_pretraining.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import mesh_tensorflow as mtf
import mesh_tensorflow.bert.bert as bert_lib
import mesh_tensorflow.bert.optimization as optimization_lib
import mesh_tensorflow.bert.tokenization as tokenization
from six.moves import range
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string(
"cached_train_file", None, "Prepared training file.")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_float(
"max_optimized_variable_size", 1e7,
"Do not optimize variables larger than this.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 32, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_bool("clip_gradients", True, "Apply gradient clipping.")
flags.DEFINE_string("optimizer", "adam", "adam/adafactor")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string("mesh_shape", "batch:8", "mesh shape")
tf.flags.DEFINE_string(
"layout",
"batch:batch,vocab:model,intermediate:model,num_heads:model,experts:batch",
"layout rules")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" %
" ".join([tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
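# Illustrative sketch (not part of the original file): the [CLS]/[SEP] layout
# built above for a pair of sequences, with hand-picked token counts
# (3 wordpieces in text_a, 2 in text_b) and max_seq_length=10 as assumptions.
def _example_segment_ids_layout():
  len_a, len_b, max_seq_length = 3, 2, 10
  segment_ids = [0] * (1 + len_a + 1) + [1] * (len_b + 1)
  input_mask = [1] * len(segment_ids)
  # Zero-pad both up to max_seq_length, mirroring the padding loop above.
  pad = max_seq_length - len(segment_ids)
  segment_ids += [0] * pad
  input_mask += [0] * pad
  assert segment_ids == [0, 0, 0, 0, 0, 1, 1, 1, 0, 0]
  assert input_mask == [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]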
def file_based_convert_examples_to_features(examples, label_list,
max_seq_length, tokenizer,
output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(
output_file, tf.python_io.TFRecordOptions(output_buffer_size=2 ** 24))
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
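# Illustrative sketch (not part of the original file): the longer sequence is
# trimmed one token at a time until the pair fits the budget. The token lists
# below are assumptions chosen for the example.
def _example_truncate_seq_pair():
  tokens_a = ["the", "quick", "brown", "fox", "jumps"]
  tokens_b = ["hello", "world"]
  _truncate_seq_pair(tokens_a, tokens_b, max_length=5)
  # 5 + 2 = 7 tokens exceed the budget of 5, so two pops from tokens_a only.
  assert tokens_a == ["the", "quick", "brown"]
  assert tokens_b == ["hello", "world"]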
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels_dim, layout, mesh_shape):
"""Creates a classification model."""
model = bert_lib.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
layout=layout,
mesh_shape=mesh_shape)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_dim = output_layer.shape[-1]
mesh = input_ids.mesh
output_weights = mtf.get_variable(
mesh,
"output_weights",
shape=[num_labels_dim, hidden_dim],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = mtf.get_variable(
mesh,
"output_bias",
shape=[num_labels_dim],
initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = mtf.dropout(output_layer, keep_prob=0.9)
logits = mtf.einsum([output_layer, output_weights],
reduced_dims=[hidden_dim])
logits = logits + output_bias
probabilities = mtf.softmax(logits, reduced_dim=num_labels_dim)
per_example_loss = mtf.layers.softmax_cross_entropy_with_logits(
logits, labels, vocab_dim=num_labels_dim)
loss = mtf.reduce_mean(per_example_loss) + model.get_extra_loss()
return (loss, per_example_loss, logits, probabilities)
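# Illustrative sketch (not part of the original file): the classification head
# above contracts the hidden dimension between the pooled output and the output
# weights, leaving [batch, num_labels] logits. The toy dimension sizes below
# are assumptions for the example.
def _example_classifier_logits_shape():
  graph = mtf.Graph()
  mesh = mtf.Mesh(graph, "example_mesh")
  batch_dim = mtf.Dimension("batch", 2)
  hidden_dim = mtf.Dimension("hidden", 4)
  labels_dim = mtf.Dimension("labels", 3)
  pooled = mtf.zeros(mesh, [batch_dim, hidden_dim])
  weights = mtf.zeros(mesh, [labels_dim, hidden_dim])
  logits = mtf.einsum([pooled, weights],
                      output_shape=mtf.Shape([batch_dim, labels_dim]))
  assert logits.shape == mtf.Shape([batch_dim, labels_dim])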
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
# MTF setup.
graph = mtf.Graph()
mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
ctx = params["context"]
num_hosts = ctx.num_hosts
host_placement_fn = ctx.tpu_host_placement_function
device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)]
tf.logging.info("device_list = %s" % device_list,)
replica_cache_size = 300 * 1000000 # 300M per replica
# Worker 0 caches all the TPU binaries.
worker0_mem = replica_cache_size * ctx.num_replicas
    devices_memory_usage = [worker0_mem] + [0] * (num_hosts - 1)
    var_placer = mtf.utils.BalancedVariablePlacer(device_list,
                                                  devices_memory_usage)
mesh_devices = [""] * mesh_shape.size
mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(mesh_shape, layout_rules,
mesh_devices,
ctx.device_assignment)
mesh = mtf.Mesh(graph, "bert_mesh", var_placer)
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
batch_size = input_ids.get_shape()[0].value
batch_dim = mtf.Dimension("batch", batch_size)
seq_length = input_ids.get_shape()[1].value
seq_dim = mtf.Dimension("seq", seq_length)
num_labels_dim = mtf.Dimension("seq", num_labels)
mtf_input_ids = mtf.import_tf_tensor(mesh, input_ids, [batch_dim, seq_dim])
mtf_input_mask = mtf.import_tf_tensor(mesh, input_mask,
[batch_dim, seq_dim])
mtf_segment_ids = mtf.import_tf_tensor(mesh, segment_ids,
[batch_dim, seq_dim])
mtf_label_ids = mtf.import_tf_tensor(mesh, label_ids, [batch_dim])
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits,
probabilities) = create_model(bert_config, is_training, mtf_input_ids,
mtf_input_mask, mtf_segment_ids,
mtf_label_ids, num_labels_dim,
layout_rules, mesh_shape)
total_loss = mtf.anonymize(total_loss)
per_example_loss = mtf.anonymize(per_example_loss)
logits = mtf.anonymize(logits)
if mode == tf.estimator.ModeKeys.TRAIN:
_, update_ops = optimization_lib.create_optimizer(
total_loss,
learning_rate,
num_train_steps,
num_warmup_steps,
max_optimized_variable_size=FLAGS.max_optimized_variable_size,
optimizer=FLAGS.optimizer,
clip_gradients=FLAGS.clip_gradients)
lowering = mtf.Lowering(graph, {mesh: mesh_impl})
tf_loss = tf.to_float(lowering.export_to_tf_tensor(total_loss))
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
tf.logging.info("tf_update_ops: {}".format(tf_update_ops))
train_op = tf.group(tf_update_ops)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn, [
lowering.export_to_tf_tensor(per_example_loss), label_ids,
lowering.export_to_tf_tensor(logits), is_real_example
])
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = bert_lib.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
with mtf.utils.outside_all_rewrites():
# Copy master variables to slices. Must be called first.
restore_hook = mtf.MtfRestoreHook(lowering)
if mode == tf.estimator.ModeKeys.TRAIN:
saver = tf.train.Saver(
tf.global_variables(),
sharded=True,
max_to_keep=10,
keep_checkpoint_every_n_hours=2,
defer_build=False,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
saver_hook = tf.train.CheckpointSaverHook(
FLAGS.output_dir,
save_steps=1000,
saver=saver,
listeners=[saver_listener])
return tf.estimator.tpu.TPUEstimatorSpec(
mode,
loss=tf_loss,
train_op=train_op,
training_hooks=[restore_hook, saver_hook],
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.tpu.TPUEstimatorSpec(
mode,
evaluation_hooks=[restore_hook],
loss=tf_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
return tf.estimator.tpu.TPUEstimatorSpec(
mode,
prediction_hooks=[restore_hook],
predictions={
"probabilities": lowering.export_to_tf_tensor(probabilities)
},
scaffold_fn=scaffold_fn)
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = bert_lib.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
run_config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.estimator.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_cores_per_replica=1,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
.BROADCAST))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.estimator.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
if FLAGS.cached_train_file:
train_file = FLAGS.cached_train_file
else:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(train_examples, label_list,
FLAGS.max_seq_length, tokenizer,
train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(eval_examples, label_list,
FLAGS.max_seq_length, tokenizer,
eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.disable_v2_behavior()
tf.app.run()
| mesh-master | mesh_tensorflow/bert/run_classifier.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""MeshTensorFlow implementation of BERT.
The code is ported from https://github.com/google-research/bert.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import mesh_tensorflow as mtf
import mesh_tensorflow.transformer.moe as moe
import six
import tensorflow.compat.v1 as tf
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
d_model=768,
position_signal="embedding",
max_position_embeddings=512,
num_blocks=12,
block_layers="attention,feedforward",
layer_output_dropout_prob=0.1,
residual_structure="original",
use_bias=True,
attention_num_heads=12,
attention_head_size=None,
attention_num_key_heads=None,
attention_key_head_size=None,
attention_num_value_heads=None,
attention_value_head_size=None,
attention_probs_dropout_prob=0.1,
feedforward_intermediate_size=3072,
feedforward_intermediate_act="gelu",
feedforward_intermediate_dropout_prob=0.0,
moe_num_experts=32,
moe_intermediate_size=6144,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
residual_structure="original"
TODO(noam): describe
residual_structure="direct"
TODO(noam): describe
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
d_model: Number of channels in input/output of each layer.
position_signal: A string specifying the type of position signal.
Implemented values are "embedding", "relative_attention_bias".
max_position_embeddings: For models using positional embeddings,
this is the maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
num_blocks: Number of (attention+feed-forward) blocks in the Transformer
encoder.
block_layers: a comma-separated string specifying the sequence of layers
in each block.
layer_output_dropout_prob: The dropout probability for the output of
each layer.
residual_structure: a string. Legal values are "original" and "direct".
use_bias: a boolean - If true, then we use biases for dense layers and
in layer normalization, and subtract off the mean in layer
normalization.
attention_num_heads: Number of attention heads for each attention layer in
the Transformer encoder.
attention_head_size: Size of attention keys and values. If set to None,
a default value is used equal to (d_model / attention_num_heads)
attention_num_key_heads: Number of attention key heads.
attention_key_head_size: Size of attention keys.
attention_num_value_heads: Number of attention value heads.
attention_value_head_size: Size of attention values.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
feedforward_intermediate_size: The size of the "intermediate" layer in the
feed-forward layer in the Transformer encoder (a.k.a. d_ff).
feedforward_intermediate_act: The non-linear activation function
(function or string) applied to the feedforward intermediate layer
and the pooler layer.
feedforward_intermediate_dropout_prob: The dropout probability for
feed-forward intermediate layer.
moe_num_experts: an integer - number of experts in moe layer
moe_intermediate_size: an integer - size of intermediate layer in each
expert
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.d_model = d_model
self.position_signal = position_signal
self.max_position_embeddings = max_position_embeddings
self.num_blocks = num_blocks
self.block_layers = block_layers.split(",")
self.layer_output_dropout_prob = layer_output_dropout_prob
self.residual_structure = residual_structure
self.use_bias = use_bias
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.attention_num_heads = attention_num_heads
self.attention_head_size = attention_head_size
self.attention_num_key_heads = attention_num_key_heads
self.attention_key_head_size = attention_key_head_size
self.attention_num_value_heads = attention_num_value_heads
self.attention_value_head_size = attention_value_head_size
self.feedforward_intermediate_size = feedforward_intermediate_size
self.feedforward_intermediate_act = feedforward_intermediate_act
self.feedforward_intermediate_dropout_prob = (
feedforward_intermediate_dropout_prob)
self.moe_num_experts = moe_num_experts
self.moe_intermediate_size = moe_intermediate_size
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
if self.position_signal not in ["embedding", "relative_attention_bias"]:
raise ValueError("unknown position_signal")
if self.residual_structure not in ["original", "direct"]:
raise ValueError("unknown residual_structure")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
# Dictionary for compatibility for tf BertConfig files.
hparam_name_conversion = {
"hidden_size": "d_model",
"num_hidden_layers": "num_blocks",
"num_attention_heads": "attention_num_heads",
"intermediate_size": "feedforward_intermediate_size",
"hidden_act": "feedforward_intermediate_act",
"hidden_dropout_prob": "layer_output_dropout_prob",
}
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[hparam_name_conversion.get(key, key)] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
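# Illustrative usage note (not part of the original file): because of the
# hparam-name conversion in `BertConfig.from_dict`, a standard BERT
# `bert_config.json` can be loaded directly, e.g. (values shown are the usual
# BERT-Base ones and are only an example):
#
#   config = BertConfig.from_dict({
#       "vocab_size": 30522,
#       "hidden_size": 768,         # stored as config.d_model
#       "num_hidden_layers": 12,    # stored as config.num_blocks
#       "num_attention_heads": 12,  # stored as config.attention_num_heads
#       "intermediate_size": 3072,  # stored as config.feedforward_intermediate_size
#   })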
class BertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers")."""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
scope=None,
mesh_shape="",
layout=""):
self.config = copy.deepcopy(config)
del config
if not is_training:
self.config.layer_output_dropout_prob = 0.0
self.config.attention_probs_dropout_prob = 0.0
self.config.feedforward_intermediate_dropout_prob = 0.0
input_shape = input_ids.shape
assert input_shape.ndims == 2
self._seq_dim = input_shape.dims[1]
self._memory_seq_dim = mtf.Dimension("memory_seq", self.seq_dim.size)
self._extra_losses = []
mesh = input_ids.mesh
if token_type_ids is None:
token_type_ids = mtf.zeros(mesh, input_shape, dtype=tf.int32)
with tf.variable_scope(scope, default_name="bert"):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
self.embedding_table = mtf.get_variable(
mesh, "word_embeddings",
mtf.Shape([self.vocab_dim, self.model_dim]),
initializer=self.embedding_initializer)
self.word_embedding_output = mtf.gather(
self.embedding_table, input_ids, self.vocab_dim)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = self.word_embedding_output
token_type_table = mtf.get_variable(
mesh, "token_type_embeddings",
mtf.Shape([self.token_type_vocab_dim, self.model_dim]),
initializer=self.embedding_initializer)
if token_type_ids is not None:
self.embedding_output += mtf.gather(
token_type_table, token_type_ids, self.token_type_vocab_dim)
if self.config.position_signal == "embedding":
full_position_table = mtf.get_variable(
mesh, "position_embeddings",
mtf.Shape([self.max_position_embeddings_dim, self.model_dim]),
initializer=self.embedding_initializer)
short_position_table = mtf.rename_dimension(
mtf.slice(full_position_table, 0, self.seq_dim.size,
self.max_position_embeddings_dim.name),
self.max_position_embeddings_dim.name, self.seq_dim.name)
self.embedding_output += short_position_table
self.embedding_output = self.normalize(self.embedding_output)
self.embedding_output = mtf.dropout(
self.embedding_output,
keep_prob=1.0 - self.config.layer_output_dropout_prob)
with tf.variable_scope("encoder"):
attention_biases = []
if input_mask:
# [batch_dim, memory_seq_dim]
attention_biases.append(
(1.0 - mtf.to_float(mtf.replace_dimensions(
input_mask, self.seq_dim, self.memory_seq_dim))) * -10000.0)
if self.config.position_signal == "relative_attention_bias":
buckets_dim = mtf.Dimension("buckets", 32)
rp_bucket = _relative_position_bucket(
mtf.range(mesh, self.memory_seq_dim, tf.int32)
- mtf.range(mesh, self.seq_dim, tf.int32),
num_buckets=buckets_dim.size)
bias_var = mtf.get_variable(
mesh, "relative_attention_bias",
[self.num_heads_dim, buckets_dim],
initializer=tf.zeros_initializer())
attention_biases.append(mtf.gather(bias_var, rp_bucket, buckets_dim))
attention_bias = mtf.add_n(attention_biases)
prev_layer_output = self.embedding_output
self.all_encoder_layers = []
for block_num in range(self.config.num_blocks):
with tf.variable_scope("block_%d" % block_num):
for layer_idx, layer_type in enumerate(self.config.block_layers):
layer_name = layer_type
count = self.config.block_layers[:layer_idx].count(layer_type)
if count:
layer_name += "_%d" % count
with tf.variable_scope(layer_name):
x = prev_layer_output
if self.config.residual_structure == "direct":
x = self.normalize(x)
if layer_type == "attention":
x = self.self_attention(x, attention_bias)
elif layer_type == "feedforward":
x = self.feedforward(x)
elif layer_type == "moe":
x = self.moe(x, layout, mesh_shape, input_mask, is_training)
else:
raise ValueError("unknown layer type " + layer_type)
x = mtf.dropout(
x, keep_prob=1.0 - self.config.layer_output_dropout_prob)
layer_output = prev_layer_output + x
if self.config.residual_structure == "original":
layer_output = self.normalize(layer_output)
prev_layer_output = layer_output
self.all_encoder_layers.append(layer_output)
self.sequence_output = prev_layer_output
if self.config.residual_structure == "direct":
self.sequence_output = self.normalize(self.sequence_output)
# The "pooler" converts the encoded sequence tensor of shape
# [batch_dim, seq_dim, hidden_size] to a tensor of shape
# [batch_dim, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = mtf.gather(self.sequence_output, 0, self.seq_dim)
self.pooled_output = mtf.layers.dense(
first_token_tensor,
reduced_dims=[self.model_dim],
new_dims=[self.model_dim],
activation=mtf.tanh,
kernel_initializer=self.dense_initializer,
use_bias=self.config.use_bias)
def self_attention(self, x, attention_bias):
"""Performs multi-headed self-attention with output projection.
Args:
x: output of previous layer
attention_bias: optional float32 Tensor broadcastable to shape
x.shape - self.model_dim + self.memory_seq_dim
to be added to attention logits.
This may be used to mask out padding regions of the memory.
Returns:
float Tensor with the same shape as x
"""
queries = mtf.layers.dense(
x,
reduced_dims=[self.model_dim],
new_dims=[self.num_heads_dim, self.size_per_head_dim],
kernel_initializer=self.dense_initializer,
name="query",
use_bias=self.config.use_bias)
keys = mtf.layers.dense(
mtf.replace_dimensions(x, self.seq_dim, self.memory_seq_dim),
reduced_dims=[self.model_dim],
new_dims=[self.num_heads_dim, self.size_per_head_dim],
kernel_initializer=self.dense_initializer,
name="key",
use_bias=self.config.use_bias)
values = mtf.layers.dense(
mtf.replace_dimensions(x, self.seq_dim, self.memory_seq_dim),
reduced_dims=[self.model_dim],
new_dims=[self.num_heads_dim, self.size_per_head_dim],
kernel_initializer=self.dense_initializer,
name="value",
use_bias=self.config.use_bias)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
attention_scores = mtf.einsum(
[queries, keys], reduced_dims=[self.size_per_head_dim])
attention_scores *= self.size_per_head_dim.size ** -0.5
if attention_bias is not None:
attention_scores += attention_bias
# Normalize the attention scores to probabilities.
attention_probs = mtf.softmax(attention_scores, self.memory_seq_dim)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = mtf.dropout(
attention_probs,
keep_prob=1.0 - self.config.attention_probs_dropout_prob)
output = mtf.einsum([attention_probs, values],
reduced_dims=[self.memory_seq_dim])
# linear transformation back to shape of query_antecedent
output = mtf.layers.dense(
output,
reduced_dims=[self.num_heads_dim, self.size_per_head_dim],
new_dims=[self.model_dim],
kernel_initializer=self.dense_initializer,
name="output",
use_bias=self.config.use_bias)
output = mtf.transpose(output, x.shape)
return output
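  # Illustrative note (not part of the original file): in `self_attention`
  # above, queries keep the `seq` dimension while keys and values are computed
  # from a copy of x whose `seq` dimension is renamed to `memory_seq`. The
  # first einsum (reducing the per-head size dimension) therefore produces
  # logits over [..., num_heads, seq, memory_seq], the softmax normalizes over
  # `memory_seq`, and the second einsum (reducing `memory_seq`) returns to
  # per-query activations before the final projection back to the `hidden`
  # dimension.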
def feedforward(self, x):
intermediate = mtf.layers.dense(
x, reduced_dims=[self.model_dim],
new_dims=[self.feedforward_intermediate_dim],
activation=get_activation(self.config.feedforward_intermediate_act),
kernel_initializer=self.dense_initializer,
name="dense_1", use_bias=self.config.use_bias)
return mtf.layers.dense(
intermediate,
reduced_dims=[self.feedforward_intermediate_dim],
new_dims=[self.model_dim],
kernel_initializer=self.dense_initializer,
name="dense_2", use_bias=self.config.use_bias)
def moe(self, x, layout, mesh_shape, input_mask, is_training):
"""Mixture of experts layer.
TODO(noam): clean up the mixture-of-experts code in Transformer.
Args:
x: layer input
layout: a mtf.LayoutRules
mesh_shape: a mtf.Shape
input_mask: a mtf.Tensor
is_training: a boolean
Returns:
a mtf.Tensor (the layer output)
"""
hparams = moe.HParams(
moe_gating="top_2",
moe_num_experts=self.config.moe_num_experts,
moe_loss_coef=1e-3,
moe_hidden_size=self.config.moe_intermediate_size,
moe_group_size=2048,
moe_capacity_factor_train=1.25,
moe_capacity_factor_eval=8.0,
moe_use_second_place_loss=False,
moe_second_policy_train="random",
moe_second_policy_eval="random",
moe_second_threshold_train=0.2,
moe_second_threshold_eval=0.2)
layer_output, loss = moe.transformer_moe_layer_v1(
inputs=x,
output_dim=self.model_dim,
hparams=hparams,
train=is_training,
variable_dtype=tf.float32,
layout=layout,
mesh_shape=mesh_shape,
nonpadding=(mtf.cast(input_mask, tf.float32) if input_mask else None),
activation=get_activation(self.config.feedforward_intermediate_act))
self._extra_losses.append(loss)
return layer_output
def get_masked_lm_output(self, positions, label_ids, label_weights):
"""Get loss and logits for the masked LM."""
input_tensor = self.get_sequence_output()
output_weights = self.get_embedding_table()
# [batch_size, num_position, hidden]
input_tensor = mtf.gather(input_tensor, positions, self.seq_dim)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = mtf.layers.dense(
input_tensor,
reduced_dims=[self.model_dim],
new_dims=[self.model_dim],
activation=get_activation(self.config.feedforward_intermediate_act),
kernel_initializer=self.dense_initializer,
use_bias=self.config.use_bias)
input_tensor = self.normalize(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = mtf.get_variable(
input_tensor.mesh,
name="output_bias",
shape=[self.vocab_dim],
initializer=tf.zeros_initializer())
logits = mtf.einsum([input_tensor, output_weights],
reduced_dims=[self.model_dim]) + output_bias
per_example_loss = mtf.layers.softmax_cross_entropy_with_logits(
logits, label_ids, self.vocab_dim, z_loss=1e-4)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
numerator = mtf.reduce_sum(label_weights * per_example_loss)
denominator = mtf.reduce_sum(label_weights) + mtf.constant(
input_tensor.mesh, 1e-5, dtype=tf.float32)
loss = numerator / denominator
return (loss, per_example_loss, logits)
def get_next_sentence_output(self, labels):
"""Get loss and logits for the next sentence prediction."""
class_dim = mtf.Dimension("class", 2)
input_tensor = self.get_pooled_output()
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
logits = mtf.layers.dense(
input_tensor,
reduced_dims=[self.model_dim],
new_dims=[class_dim],
kernel_initializer=self.dense_initializer,
name="cls/seq_relationship",
use_bias=self.config.use_bias)
per_example_loss = mtf.layers.softmax_cross_entropy_with_logits(
logits, labels, class_dim, z_loss=1e-4)
loss = mtf.reduce_mean(per_example_loss)
return (loss, per_example_loss, logits)
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_dim, seq_dim, model_dim] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_word_embedding_output(self):
"""Get output of the word(piece) embedding lookup.
This is BEFORE positional embeddings and token type embeddings have been
added.
Returns:
float Tensor of shape [batch_dim, seq_dim, model_dim] corresponding
to the output of the word(piece) embedding layer.
"""
return self.word_embedding_output
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_dim, seq_dim, model_dim] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def normalize(self, x):
return layer_norm(x, self.model_dim,
subtract_mean=self.config.use_bias,
use_bias=self.config.use_bias)
def get_embedding_table(self):
return self.embedding_table
def get_extra_loss(self):
return mtf.add_n(self._extra_losses)
@property
def vocab_dim(self):
# pad vocab to a multiple of 128 so as to be splittable.
# TODO(noam): This creates issues in checkpoint compatibility
n = self.config.vocab_size
return mtf.Dimension("vocab", n + (-n % 128))
@property
def model_dim(self):
return mtf.Dimension("hidden", self.config.d_model)
@property
def token_type_vocab_dim(self):
return mtf.Dimension("token_type_vocab", self.config.type_vocab_size)
@property
def feedforward_intermediate_dim(self):
return mtf.Dimension("intermediate",
self.config.feedforward_intermediate_size)
@property
def num_heads_dim(self):
return mtf.Dimension("num_heads", self.config.attention_num_heads)
@property
def softmax_heads_dims(self):
return self.num_heads_dim
@property
def max_position_embeddings_dim(self):
return mtf.Dimension("max_position_embeddings",
self.config.max_position_embeddings)
@property
def seq_dim(self):
return self._seq_dim
@property
def memory_seq_dim(self):
return self._memory_seq_dim
@property
def dense_initializer(self):
if self.config.initializer_range:
return tf.truncated_normal_initializer(
stddev=self.config.initializer_range)
else:
return mtf.layers.VarianceScalingInitializer(scale=0.4)
@property
def embedding_initializer(self):
initializer = self.dense_initializer
if isinstance(initializer, mtf.layers.DenseInitializer):
# embedding matrix is also used as classifier weight matrix.
# scale it appropriately.
return initializer(
reduced_dims=[self.model_dim], new_dims=[self.vocab_dim])
else:
return initializer
@property
def size_per_head_dim(self):
"""Dimensionality of attention queries/keys/values."""
if self.config.attention_head_size:
attention_head_size = self.config.attention_head_size
else:
if self.model_dim.size % self.num_heads_dim.size != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (self.model_dim.size, self.num_heads_dim.size))
attention_head_size = int(self.model_dim.size / self.num_heads_dim.size)
return mtf.Dimension("attention_head", attention_head_size)
@property
def key_dim(self):
"""Dimensionality of attention key."""
if self.config.attention_key_head_size is None:
raise ValueError("The key head size is not defined.")
return mtf.Dimension("d_k", self.config.attention_key_head_size)
@property
def key_heads_dims(self):
"""Dimensionality of number of key heads."""
if self.config.attention_num_key_heads is None:
raise ValueError("The number of key heads is not defined.")
return mtf.Dimension("key_heads", self.config.attention_num_key_heads)
@property
def value_dim(self):
"""Dimensionality of attention value."""
if self.config.attention_value_head_size is None:
raise ValueError("The value head size is not defined.")
return mtf.Dimension("d_v", self.config.attention_value_head_size)
@property
def value_heads_dims(self):
"""Dimensionality of number of value heads."""
if self.config.attention_num_value_heads is None:
raise ValueError("The number of value heads is not defined.")
return mtf.Dimension("value_heads", self.config.attention_num_value_heads)
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `mtf.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "gelu":
return mtf.gelu
elif act == "relu":
return mtf.relu
elif act == "tanh":
return mtf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
if "global_step" in name or "adam_" in name or "slot_" in name:
continue
name_to_variable[name] = var
tf.logging.info("init_checkpoint:{} ".format(init_checkpoint))
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def _relative_position_bucket(relative_position,
bidirectional=True,
num_buckets=32,
max_distance=128):
"""Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are
invalid.
We use smaller buckets for small absolute relative_position and larger buckets
for larger absolute relative_positions. All relative positions >=max_distance
map to the same bucket. All relative positions <=-max_distance map to the
same bucket. This should allow for more graceful generalization to longer
sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += mtf.to_int32(mtf.less(n, 0)) * num_buckets
n = mtf.abs(n)
else:
n = mtf.maximum(n, 0)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = mtf.less(n, max_exact)
val_if_large = max_exact + mtf.to_int32(
mtf.log(mtf.to_float(n) / max_exact)
/ math.log(max_distance / max_exact) * (num_buckets - max_exact))
val_if_large = mtf.minimum(val_if_large, num_buckets - 1)
ret += mtf.where(is_small, n, val_if_large)
return ret
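# Illustrative note (not part of the original file), derived from the code
# above with its default arguments (bidirectional=True, num_buckets=32,
# max_distance=128): buckets 0-15 cover one sign of the relative position and
# buckets 16-31 the other. Within each half, distances 0-7 map one-to-one to
# buckets, distances 8-127 are bucketed logarithmically over the remaining
# eight buckets, and all distances >= 128 share the last bucket of that half.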
def layer_norm(x, dim, epsilon=1e-6,
subtract_mean=True,
use_scale=True,
use_bias=True,
name=None):
"""Layer normalization over dimension dim.
TODO(noam): This is cleaner than the version in mtf.layers
Move this version into mtf.layers to replace the one there.
Args:
x: a mtf.Tensor whose shape contains dim.
dim: a mtf.Dimension
epsilon: a floating point number
subtract_mean: a boolean
use_scale: a boolean
use_bias: a boolean
name: a string used for tf.variable_scope.
Returns:
a mtf.Tensor with same shape as x.
"""
with tf.variable_scope(name, default_name="layer_norm"):
if subtract_mean:
x -= mtf.reduce_mean(x, reduced_dim=dim)
variance = mtf.reduce_mean(mtf.square(x), reduced_dim=dim)
x *= mtf.rsqrt(variance + epsilon)
if use_scale:
x *= mtf.get_variable(
x.mesh,
"scale",
mtf.Shape([dim]),
initializer=tf.ones_initializer(),
activation_dtype=x.dtype)
if use_bias:
x += mtf.get_variable(
x.mesh,
"bias",
mtf.Shape([dim]),
initializer=tf.zeros_initializer(),
activation_dtype=x.dtype)
return x
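# Illustrative reference (not part of the original file): a plain-NumPy
# restatement of the computation in `layer_norm` above for the fully-featured
# case (subtract_mean=True, use_scale=True, use_bias=True). `scale` and `bias`
# stand in for the learned variables; this sketch exists only to make the math
# explicit and is not used anywhere in this module.
def _layer_norm_reference(x, scale, bias, epsilon=1e-6, axis=-1):
  """NumPy sketch of layer_norm over one axis (illustrative only)."""
  import numpy as np  # local import; this module does not otherwise use numpy
  x = x - np.mean(x, axis=axis, keepdims=True)
  variance = np.mean(np.square(x), axis=axis, keepdims=True)
  return x / np.sqrt(variance + epsilon) * scale + bias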
| mesh-master | mesh_tensorflow/bert/bert.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph representation returned by cost estimators.
Cost estimators need to return a notion of computational graph, but it can
be complicated and expensive to work with tf.Graph and mtf.Graph. The
GraphInterface class serves as this return value. The base class returns
information corresponding to a tf.Graph or mtf.Graph, but subclasses may
return information corresponding to a mix of graphs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf
from tensorflow.core.framework import cost_graph_pb2
class GraphInterface(object):
"""tf.Graph & mtf.Graph common representation which produces a CostGraphDef.
Attributes:
canonical_device: string or None, the name of the canonical device for
is_tensor_on_canonical_device
Usage Example:
mtf_graph = mtf.Graph()
# Add operations to mtf_graph using Mesh TensorFlow.
graph = graph_interface.GraphInterface(mtf_graph)
for operation_name in graph.get_all_operation_names():
print("Operation: {}".format(operation_name))
for input_name in graph.get_operation_input_names(operation_name):
print(" Input: {}".format(input_name))
for output_name in graph.get_operation_output_names(operation_name):
print(" Output: {}".format(output_name))
# Tensor names can also be used to retrieve data type, shape, and Mesh
# TensorFlow dimension names.
# Operation names can also be used to get Mesh TensorFlow dimension names.
cost_graph = graph.compute_cost_graph()
# Give cost_graph to a scheduler to compute schedule.
memory_contents = graph.compute_memory_contents_under_schedule(schedule)
"""
def __init__(self, graph, canonical_device=None):
"""Initializer.
Args:
graph: either a tf.Graph or mtf.Graph.
canonical_device: optional string, the name of the canonical device for
is_tensor_on_canonical_device.
"""
self._graph = graph
self.canonical_device = canonical_device
self._operations = self._initialize_operations()
self._operation_name_to_id = self._initialize_operation_name_to_id()
self._tensor_name_to_ids = self._initialize_tensor_name_to_ids()
self._final_tensors = set() # set(tf.Tensor or mtf.Tensor)
def get_num_operations(self):
"""The number of operations in the graph.
Returns:
an integer, the number of operations.
"""
return len(self._operations)
def get_all_operation_names(self):
"""Generates the names of all operations in the graph.
Yields:
a string, the name of an operation.
"""
for operation in self._operations:
yield operation.name
def get_operation_input_names(self, operation_name):
"""Generates the names of all input tensors of an operation.
Args:
operation_name: a string, the name of an operation in the graph.
Yields:
a string, the name of an input tensor.
"""
for input_tensor in self._name_to_operation(operation_name).inputs:
yield input_tensor.name
def get_operation_output_names(self, operation_name):
"""Generates the names of all output tensors of an operation.
Args:
operation_name: a string, the name of an operation in the graph.
Yields:
a string, the name of an output tensor.
"""
for output_tensor in self._name_to_operation(operation_name).outputs:
yield output_tensor.name
def get_all_tensor_names(self):
"""Generates the names of all tensors in the graph.
Yields:
a string, the name of a tensor.
"""
for tensor in self._get_tensors():
yield tensor.name
def get_tensor_dtype(self, tensor_name):
"""The tf.Dtype of a tensor.
Args:
tensor_name: string, the name of a tensor in the graph.
Returns:
a tf.DType
"""
return self._name_to_tensor(tensor_name).dtype
def get_tensor_shape(self, tensor_name):
"""The tf.TensorShape of a tensor.
Args:
tensor_name: string, the name of a tensor in the graph.
Returns:
a tf.TensorShape
"""
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, mtf.Tensor):
return tf.TensorShape(tensor.shape.to_integer_list)
else: # tf.Tensor
return tensor.shape
def get_tensor_num_entries(self, tensor_name, partial_layout=None,
mesh_dimension_to_size=None):
"""The number of entries in a tensor.
If partial_layout is specified, then mesh_dimension_to_size must also be. In
this case, the number of entries on a single device is returned.
Args:
tensor_name: a string, name of a tensor in the graph.
partial_layout: an optional {string: string}, from MTF dimension name to
mesh dimension name.
mesh_dimension_to_size: an optional {string: int}, from mesh dimension
name to size.
Returns:
an integer
"""
shape = self.get_tensor_shape(tensor_name)
# We don't have to worry about divisibility issues because Mesh TensorFlow
# only allows evenly divisible assignments.
num_entries = 1
for dim in shape.dims:
num_entries = num_entries * dim.value
if not partial_layout:
return num_entries
for mtf_dimension_name in self.get_tensor_mtf_dimension_names(tensor_name):
if mtf_dimension_name not in partial_layout:
continue
mesh_dimension_name = partial_layout[mtf_dimension_name]
mesh_dimension_size = mesh_dimension_to_size[mesh_dimension_name]
num_entries = int(math.ceil(num_entries / mesh_dimension_size))
return num_entries
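  # Illustrative note (not part of the original file): for a tensor with MTF
  # shape [("batch", 8), ("hidden", 16)], calling get_tensor_num_entries with
  # partial_layout={"batch": "mesh_x"} and mesh_dimension_to_size={"mesh_x": 4}
  # returns ceil((8 * 16) / 4) = 32 entries per device, while omitting
  # partial_layout returns the full 128 entries.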
def get_tensor_size(self, tensor_name, partial_layout=None,
mesh_dimension_to_size=None):
"""The size of a tensor in bytes.
If partial_layout is specified, then mesh_dimension_to_size must also be. In
this case, the size on a single device is returned.
Args:
tensor_name: a string, name of a tensor in the graph.
partial_layout: an optional {string: string}, from MTF dimension name to
mesh dimension name.
mesh_dimension_to_size: an optional {string: int}, from mesh dimension
name to size.
Returns:
an integer
"""
return (self.get_tensor_dtype(tensor_name).size *
self.get_tensor_num_entries(tensor_name, partial_layout,
mesh_dimension_to_size))
def get_tensor_device(self, tensor_name):
"""The device of a tensor.
Note that only tf tensors have device assignments.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a string or None, representing the device name.
"""
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, tf.Tensor):
return tensor.device
else: # mtf.Tensor
return None
def is_tensor_on_canonical_device(self, tensor_name):
"""Whether the tensor is on the first (canonical) device.
Tensors not assigned to a device are assumed to be on all devices, including
the canonical device.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a boolean indicating whether the tensor is on the first device.
"""
device = self.get_tensor_device(tensor_name)
return not device or device == self.canonical_device
def get_operation_device(self, operation_name):
"""The device of an operation.
Note that only tf operations have device assignments.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a string or None, representing the device name.
"""
operation = self._name_to_operation(operation_name)
if isinstance(operation, tf.Operation):
return operation.device
else: # mtf.Operation
return None
def get_tensor_mtf_dimension_names(self, tensor_name):
"""The Mesh TensorFlow dimensions associated with a tensor.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a [string], the names of Mesh TensorFlow dimensions.
"""
tensor = self._name_to_tensor(tensor_name)
if isinstance(tensor, mtf.Tensor):
return tensor.shape.dimension_names
else: # tf.Tensor
return []
def get_operation_mtf_dimension_names(self, operation_name):
"""The Mesh TensorFlow dimensions associated with an operation.
Args:
operation_name: a string, name of an operation in the graph.
Returns:
a set(string), the names of Mesh TensorFlow dimensions.
"""
mtf_dimension_names = set()
for tensor_name in self.get_operation_input_names(operation_name):
mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(
tensor_name))
for tensor_name in self.get_operation_output_names(operation_name):
mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(
tensor_name))
return mtf_dimension_names
def set_tensor_final(self, tensor_name):
"""Denotes a tensor as a final output of the computation.
Args:
tensor_name: a string, name of a tensor in the graph.
"""
tensor = self._name_to_tensor(tensor_name)
self._final_tensors.add(tensor)
def is_tensor_final(self, tensor_name):
"""Whether a tensor is a final output of the computation.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a boolean indicating whether the tensor was a final output.
"""
tensor = self._name_to_tensor(tensor_name)
return tensor in self._final_tensors
def compute_cost_graph(self, devices=None):
"""Computes a CostGraphDef protobuf based on this graph.
Defined in tensorflow/core/framework/cost_graph.proto.
Args:
devices: optional [string], the names of devices to consider. If
specified, any tensor on a device not listed is given a size of zero.
Any device-less tensor (e.g. Mesh TensorFlow tensor) is not affected.
Returns:
a CostGraphDef protobuf with a Node for every operation in the graph, each
of which is populated with size/dtype information for its inputs and
outputs (which match the input/output order of the operation).
"""
cost_graph_def = cost_graph_pb2.CostGraphDef()
for i, operation_name in enumerate(self.get_all_operation_names()):
node = cost_graph_def.node.add(
name=operation_name,
device=self.get_operation_device(operation_name),
id=i)
for input_name in self.get_operation_input_names(operation_name):
id1, id2 = self._tensor_name_to_ids[input_name]
node.input_info.add(preceding_node=id1, preceding_port=id2)
for output_name in self.get_operation_output_names(operation_name):
tensor_device = self.get_tensor_device(output_name)
# devices = [] is not the same as None, and tensor_device = '' is also
# not the same as None.
if devices is None or tensor_device is None or tensor_device in devices:
node.output_info.add(
size=self.get_tensor_size(output_name),
alias_input_port=-1,
dtype=self.get_tensor_dtype(output_name).as_datatype_enum,
shape=self.get_tensor_shape(output_name).as_proto(),
)
else:
node.output_info.add(
size=0,
alias_input_port=-1,
dtype=self.get_tensor_dtype(output_name).as_datatype_enum,
)
# NOTE(joshuawang): Unfortunately, the CostGraphDef protobuf has final
# operations, not tensors. As a result, we have to declare any operation
# that outputs a final tensor as final, which may expand the final set
# of tensors to keep in memory. This issue also arises in the scheduler
# code we will interface with.
if self.is_tensor_final(output_name):
node.is_final = True
return cost_graph_def
def compute_memory_contents_under_schedule(self, schedule):
"""The in-memory tensors present when executing each operation in schedule.
Simulates running operations in the order given by a schedule. Keeps track
of the tensors in memory at every point in time, and outputs a list (one
entry for each point in time) of all sets of all memory contents (i.e. a
frozenset of strings) ever seen in this execution.
It is assumed (but not checked) that schedule is a valid topological sort of
the operations in this graph.
Args:
schedule: A list of integer ids; the order to run operations in.
Returns:
a list of frozenset of strings, where the ith entry describes the tensors
in memory when executing operation i (where schedule[i] is an index into
get_all_operation_names()).
"""
out_degree = self._compute_initial_out_degree()
curr_memory_contents = set()
memory_contents_for_each_operation = []
for operation_id in schedule:
operation_name = self._operations[operation_id].name
# Allocate new memory to perform the computation at this node.
for output_name in self.get_operation_output_names(operation_name):
curr_memory_contents.add(output_name)
memory_contents_for_each_operation.append(frozenset(curr_memory_contents))
# Free any tensors which are no longer needed.
for output_name in self.get_operation_output_names(operation_name):
if out_degree[output_name] == 0:
curr_memory_contents.remove(output_name)
for input_name in self.get_operation_input_names(operation_name):
out_degree[input_name] -= 1
if out_degree[input_name] == 0:
curr_memory_contents.remove(input_name)
return memory_contents_for_each_operation
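  # Illustrative note (not part of the original file): for a two-operation
  # graph in which X produces one tensor consumed only by Y, and Y's output is
  # marked final, the schedule [X, Y] yields
  # [frozenset({"X:0"}), frozenset({"X:0", "Y:0"})]; X:0 is only freed after Y
  # runs, because that is when its remaining out-degree reaches zero.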
def _initialize_operations(self):
"""Initializer for _operations.
Raises:
TypeError: _graph is not a tf.Graph or mtf.Graph.
Returns:
a list of (tf.Operation or mtf.Operation)
"""
if isinstance(self._graph, tf.Graph):
return self._graph.get_operations()
elif isinstance(self._graph, mtf.Graph):
return self._graph.operations
else:
raise TypeError('Graph is not tf.Graph or mtf.Graph: {}'
.format(type(self._graph)))
def _initialize_operation_name_to_id(self):
"""Initializer for _operation_name_to_id.
Returns:
a {string: int}, mapping operation names to their index in _operations.
"""
operation_name_to_id = {}
for i, operation in enumerate(self._operations):
operation_name_to_id[operation.name] = i
return operation_name_to_id
def _initialize_tensor_name_to_ids(self):
"""Initializer for _tensor_name_to_ids.
Returns:
a {string: (int, int)}, mapping the name of tensor T to the index of T's
operation in _operations and T's index in T's operation's outputs.
"""
tensor_name_to_ids = {}
for i, operation in enumerate(self._operations):
for j, tensor in enumerate(operation.outputs):
tensor_name_to_ids[tensor.name] = (i, j)
return tensor_name_to_ids
def _get_tensors(self):
"""Generator for all tensors.
Yields:
a tf.Tensor or mtf.Tensor
"""
for operation in self._operations:
for tensor in operation.outputs:
yield tensor
def _name_to_operation(self, operation_name):
"""The operation with the given name.
Args:
operation_name: a string, name of a operation in the graph.
Returns:
a tf.Operation or mtf.Operation
"""
return self._operations[self._operation_name_to_id[operation_name]]
def _name_to_tensor(self, tensor_name):
"""The tensor with the given name.
Args:
tensor_name: a string, name of a tensor in the graph.
Returns:
a tf.Tensor or mtf.Tensor
"""
id1, id2 = self._tensor_name_to_ids[tensor_name]
return self._operations[id1].outputs[id2]
def _compute_initial_out_degree(self):
"""The number of operations which use each tensor as input.
Returns:
a {string, int} mapping tensor name to the number of operations which use
it as input, or one plus that quantity if the tensor is final.
"""
out_degree = collections.defaultdict(int)
# Pretend that final tensors have an additional degree so they are not
# freed.
for tensor_name in self.get_all_tensor_names():
if self.is_tensor_final(tensor_name):
out_degree[tensor_name] = 1
for operation_name in self.get_all_operation_names():
for input_name in self.get_operation_input_names(operation_name):
out_degree[input_name] += 1
return out_degree
| mesh-master | mesh_tensorflow/auto_mtf/graph_interface.py |
# coding=utf-8
# Copyright 2021 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mesh_tensorflow.auto_mtf.scheduler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import mesh_tensorflow as mtf
from mesh_tensorflow.auto_mtf import graph_interface
from mesh_tensorflow.auto_mtf import scheduler
import tensorflow.compat.v1 as tf
class SchedulerTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters('NAIVE', 'LIST')
def testReturnsTopoSort(self, scheduler_alg):
mtf_graph = mtf.Graph()
mesh = mtf.Mesh(mtf_graph, 'my_mesh')
x = mtf.Constant(mesh, 0,
shape=mtf.convert_to_shape('a:3,b:4'),
dtype=tf.int32,
name='X').outputs[0]
y = mtf.Constant(mesh, 0,
shape=mtf.convert_to_shape('b:4,c:5'),
dtype=tf.int32,
name='Y').outputs[0]
mtf.EinsumOperation([x, y], mtf.convert_to_shape('a:3,c:5'), name='Z1')
mtf.EinsumOperation([x, y], mtf.convert_to_shape('a:3,c:5'), name='Z2')
graph = graph_interface.GraphInterface(mtf_graph)
graph.set_tensor_final('Z1:0')
graph.set_tensor_final('Z2:0')
schedule = list(scheduler.minimize_peak_memory(graph, scheduler_alg))
self.assertCountEqual(schedule[0:2], [0, 1])
self.assertCountEqual(schedule[2:4], [2, 3])
def testMinimizePeakMemoryList(self):
mtf_graph = mtf.Graph()
mesh = mtf.Mesh(mtf_graph, 'my_mesh')
x = mtf.Constant(mesh, 0,
shape=mtf.convert_to_shape('a:3,b:4'),
dtype=tf.int32,
name='X').outputs[0]
y = mtf.Constant(mesh, 0,
shape=mtf.convert_to_shape('b:4,c:5'),
dtype=tf.int32,
name='Y').outputs[0]
mtf.EinsumOperation([x, y], mtf.convert_to_shape('a:3,b:4,c:5'), name='Z')
w = mtf.EinsumOperation([x, y], mtf.convert_to_shape('a:3,c:5'),
name='W').outputs[0]
mtf.BroadcastOperation(w, mtf.convert_to_shape('a:3,b:4,c:5'), name='V')
graph = graph_interface.GraphInterface(mtf_graph)
graph.set_tensor_final('Z:0')
graph.set_tensor_final('V:0')
schedule = list(scheduler.minimize_peak_memory(graph, 'LIST'))
# List Scheduler prefers to schedule things that free the most memory.
# When nothing is scheduled:
# X frees -12 entries.
# Y frees -20 entries.
# After [X] scheduled:
# Y frees -20 entries.
# After [X, Y] scheduled:
# Z frees -60 entries.
# W frees -15 entries.
# After [X, Y, W] scheduled:
# Z frees -28 entries.
# V frees -45 entries.
# Hence the schedule should be [X, Y, W, Z, V].
self.assertEqual(schedule, [0, 1, 3, 2, 4])
def testMinimizePeakMemoryList_SingleUseTensor(self):
mtf_graph = mtf.Graph()
mesh = mtf.Mesh(mtf_graph, 'my_mesh')
mtf.Constant(mesh, 0, shape=mtf.convert_to_shape('a:4'), dtype=tf.int32,
name='X')
y = mtf.Constant(mesh, 0, shape=mtf.convert_to_shape('b:3'), dtype=tf.int32,
name='Y').outputs[0]
mtf.BroadcastOperation(y, mtf.convert_to_shape('b:3,c:2'), name='Z')
graph = graph_interface.GraphInterface(mtf_graph)
graph.set_tensor_final('X:0')
graph.set_tensor_final('Z:0')
schedule = list(scheduler.minimize_peak_memory(graph, 'LIST'))
# When nothing is scheduled:
# X frees -4 entries
# Y frees -3 entries
# After [Y] scheduled:
# X frees -4 entries
# Z frees -3 entries
# Hence the schedule should be [Y, Z, X].
self.assertEqual(schedule, [1, 2, 0])
def testMinimizePeakMemoryList_ZeroUseTensor(self):
mtf_graph = mtf.Graph()
mesh = mtf.Mesh(mtf_graph, 'my_mesh')
mtf.Constant(mesh, 0, shape=mtf.convert_to_shape('a:4'), dtype=tf.int32,
name='X')
y = mtf.Constant(mesh, 0, shape=mtf.convert_to_shape('b:3'), dtype=tf.int32,
name='Y').outputs[0]
mtf.BroadcastOperation(y, mtf.convert_to_shape('b:3,c:2'), name='Z')
graph = graph_interface.GraphInterface(mtf_graph)
schedule = list(scheduler.minimize_peak_memory(graph, 'LIST'))
# When nothing is scheduled:
# X frees 0 entries
# Y frees -3 entries
# Hence the schedule should be [X, Y, Z].
self.assertEqual(schedule, [0, 1, 2])
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
| mesh-master | mesh_tensorflow/auto_mtf/scheduler_test.py |