python_code (stringlengths 0–679k) | repo_name (stringlengths 9–41) | file_path (stringlengths 6–149)
---|---|---|
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Test TE Paddle Recompute"""
from pathlib import Path
import re
import subprocess
import numpy as np
import pytest
from transformer_engine.paddle.fp8 import is_fp8_available
test_root = Path(__file__).resolve().parent
is_fp8_supported, reason = is_fp8_available()
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('use_reentrant', [False, True])
def test_transformer_encoder_recompute(use_reentrant):
"""
Test TransformerLayer encoder recompute
"""
rtol = 1e-5
atol = 1e-5
def launch_subprocess_and_check_output(enable_recompute):
"""Launch training in subprocess and check output"""
try:
cmd = [
'python',
str(test_root / 'recompute_tests' / 'recompute_transformer_encoder.py'),
str(int(enable_recompute)),
str(int(use_reentrant))
]
result = subprocess.check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True)
print(result)
loss_match = re.search(r'Loss:\s+(-?\d+\.\d+)', result)
memory_match = re.search(r'Peak memory:\s+(\d+)', result)
loss_value = float(loss_match.group(1))
memory_value = int(memory_match.group(1))
return loss_value, memory_value
except subprocess.CalledProcessError as e:
raise ValueError(f"Subprocess failed with error: {e}") from e
loss_recompute, peak_memory_recompute = launch_subprocess_and_check_output(True)
loss_ref, peak_memory_ref = launch_subprocess_and_check_output(False)
assert peak_memory_recompute < peak_memory_ref
np.testing.assert_allclose(loss_recompute, loss_ref, rtol=rtol, atol=atol)
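# Illustrative sketch (not part of the test file above): the two regexes assume the
# training script prints lines in the format shown below; the numbers here are made up.
import re
sample_stdout = "Loss:  0.12345\nPeak memory:  123456789"
assert float(re.search(r'Loss:\s+(-?\d+\.\d+)', sample_stdout).group(1)) == 0.12345
assert int(re.search(r'Peak memory:\s+(\d+)', sample_stdout).group(1)) == 123456789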
| TransformerEngine-main | tests/paddle/test_recompute.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Test TE Paddle Parallel"""
from pathlib import Path
import unittest
from dist_launcher import TestDistributed
from utils import is_devices_enough
from transformer_engine.paddle.fp8 import is_fp8_available
test_root = Path(__file__).resolve().parent
gpu_has_fp8, reason = is_fp8_available()
class TestParallelLinear(TestDistributed):
"""Test Linear in Parallel mode"""
@unittest.skipIf(not is_devices_enough(2), "TestParallelLinear needs 2 GPUs")
@unittest.skipIf(not gpu_has_fp8, reason)
def test_linear_tp(self):
"""Tests linear with tensor parallel in BF16"""
self.run_2gpu(str(test_root / 'parallel_tests' / 'linear_tp.py'))
class TestParallelLayerNormLinear(TestDistributed):
"""Test LayerNormLinear in Parallel mode"""
@unittest.skipIf(not is_devices_enough(2), "TestParallelLayerNormLinear needs 2 GPUs")
@unittest.skipIf(not gpu_has_fp8, reason)
def test_layernorm_linear_tp(self):
"""Tests layernorm_linear with tensor parallel in BF16"""
self.run_2gpu(str(test_root / 'parallel_tests' / 'layernorm_linear_tp.py'))
class TestParallelLayerNormMLP(TestDistributed):
"""Test LayerNormMLP in Parallel mode"""
@unittest.skipIf(not is_devices_enough(2), "TestParallelLayerNormMLP needs 2 GPUs")
@unittest.skipIf(not gpu_has_fp8, reason)
def test_layernorm_mlp_tp(self):
"""Tests layernorm_mlp with tensor parallel in BF16"""
self.run_2gpu(str(test_root / 'parallel_tests' / 'layernorm_mlp_tp.py'))
class TestAmaxReduction(TestDistributed):
"""Test amax reduction in dp mode"""
@unittest.skipIf(not is_devices_enough(2), "TestAmaxReduction needs 2 GPUs")
@unittest.skipIf(not gpu_has_fp8, reason)
def test_amax_reduction(self):
"""Tests amax reduction"""
self.run_2gpu(str(test_root / 'parallel_tests' / 'amax_reduction.py'))
class TestPipelineParallel(TestDistributed):
"""Test pipeline parallel"""
@unittest.skipIf(not is_devices_enough(2), "TestPipelineParallel needs 2 GPUs")
@unittest.skipIf(not gpu_has_fp8, reason)
def test_pipeline_parallel(self):
"""Tests pipeline parallel"""
self.run_2gpu(str(test_root / 'parallel_tests' / 'linear_pp.py'))
class TestGroupSharding(TestDistributed):
"""Test group sharding"""
@unittest.skipIf(not is_devices_enough(2), "TestGroupSharding needs 2 GPUs")
@unittest.skipIf(not gpu_has_fp8, reason)
def test_group_sharding(self):
"""Tests group sharding"""
self.run_2gpu(str(test_root / 'parallel_tests' / 'group_sharding.py'))
class TestParallelTransformerLayer(TestDistributed):
"""Test Transformer Layer in Parallel mode"""
@unittest.skipIf(not is_devices_enough(2), "TestParallelTransformerLayer needs 2 GPUs")
@unittest.skipIf(not gpu_has_fp8, reason)
def test_transformer_tp(self):
"""Tests Transformer Layer with tensor parallel in BF16"""
self.run_2gpu(str(test_root / 'parallel_tests' / 'transformer_tp.py'))
if __name__ == '__main__':
unittest.main()
| TransformerEngine-main | tests/paddle/test_parallel.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Utils for testing"""
import random
import numpy as np
import paddle
from paddle.distributed import fleet
from paddle.distributed.fleet.meta_parallel import get_rng_state_tracker
import transformer_engine # pylint: disable=unused-import
from transformer_engine.paddle.fp8 import FP8TensorMeta
def create_fp8_meta(num_gemms=1, amax_history_len=10):
"""
Create and initialize FP8TensorMeta
"""
fp8_meta = FP8TensorMeta(is_forward=True)
fp8_meta.prepare(num_gemms, amax_history_len)
return fp8_meta
def assert_allclose(actual,
desired,
rtol=1e-05,
atol=1e-08,
equal_nan=True,
err_msg='',
verbose=True):
"""Compare two input paddle tensors"""
if isinstance(actual, paddle.Tensor):
actual = paddle.cast(actual, 'float32').numpy()
if isinstance(desired, paddle.Tensor):
desired = paddle.cast(desired, 'float32').numpy()
np.testing.assert_allclose(actual, desired, rtol, atol, equal_nan, err_msg, verbose)
def assert_shape(inp, expected_shape):
"""Assert the shape of input tensor equals to expected shape"""
assert inp.shape == expected_shape, f"Expected tensor shape: {expected_shape} != " \
f"actual tensor shape: {inp.shape}"
def is_devices_enough(required):
"""If the number of device is enough"""
return paddle.device.cuda.device_count() >= required
def set_random_seed(seed):
"""Set random seed for reproducability."""
hcg = fleet.get_hybrid_communicate_group()
if paddle.distributed.get_world_size() > 1:
# obtain the rank information of the hybrid parallel groups
mp_rank = hcg.get_model_parallel_rank()
mp_size = hcg.get_model_parallel_world_size()
pp_rank = hcg.get_stage_id()
pp_size = hcg.get_pipe_parallel_world_size()
dp_rank = hcg.get_data_parallel_rank()
dp_size = hcg.get_data_parallel_world_size()
sharding_rank = hcg.get_sharding_parallel_rank()
else:
mp_rank, mp_size = 0, 1
pp_rank, pp_size = 0, 1
dp_rank, dp_size = 0, 1
sharding_rank, _ = 0, 1
random.seed(seed + 100 * pp_rank)
np.random.seed(seed + 100 * pp_rank)
seed_offset = seed + 1024 + paddle.distributed.get_world_size()
global_seed = (seed_offset + pp_rank * (mp_size) + dp_rank * (mp_size * pp_size) +
sharding_rank * (mp_size * pp_size * dp_size))
seed_offset += paddle.distributed.get_world_size()
local_seed = (seed_offset + mp_rank + pp_rank * (mp_size) + dp_rank * (mp_size * pp_size) +
sharding_rank * (mp_size * pp_size * dp_size))
tracker = get_rng_state_tracker()
# tracker.reset()
if "global_seed" not in tracker.states_:
tracker.add("global_seed", global_seed)
if "local_seed" not in tracker.states_:
tracker.add("local_seed", local_seed)
paddle.seed(global_seed)
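# Illustrative sketch (assumed 2-way tensor parallel, 2-way data parallel): the offsets
# above give every rank in the same tensor-parallel group an identical "global_seed"
# (it does not depend on mp_rank), while "local_seed" differs per tensor-parallel rank
# so that per-rank randomness such as dropout is not replicated.
def _example_seeds(seed, world_size, mp_rank, mp_size, dp_rank, pp_rank=0,
                   pp_size=1, dp_size=2, sharding_rank=0):
    seed_offset = seed + 1024 + world_size
    global_seed = (seed_offset + pp_rank * mp_size + dp_rank * (mp_size * pp_size) +
                   sharding_rank * (mp_size * pp_size * dp_size))
    local_seed = (seed_offset + world_size + mp_rank + pp_rank * mp_size +
                  dp_rank * (mp_size * pp_size) +
                  sharding_rank * (mp_size * pp_size * dp_size))
    return global_seed, local_seed
assert _example_seeds(1024, 4, mp_rank=0, mp_size=2, dp_rank=0)[0] == \
    _example_seeds(1024, 4, mp_rank=1, mp_size=2, dp_rank=0)[0]
assert _example_seeds(1024, 4, mp_rank=0, mp_size=2, dp_rank=0)[1] != \
    _example_seeds(1024, 4, mp_rank=1, mp_size=2, dp_rank=0)[1]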
| TransformerEngine-main | tests/paddle/utils.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Test basic installation of Paddle extensions"""
def test_import():
"""
Test if Paddle extension can be imported normally
"""
import transformer_engine.paddle # pylint: disable=unused-import
| TransformerEngine-main | tests/paddle/test_install.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Helper functions to launch distributed tests"""
import copy
import os
from pathlib import Path
import subprocess
import time
import unittest
from paddle import fluid
from paddle.distributed.utils.launch_utils import (
TrainerProc,
find_free_ports,
get_cluster,
watch_local_trainers,
)
__all__ = ['TestDistributed']
def get_cluster_from_args(selected_gpus):
"""Get node information from selected GPUs"""
cluster_node_ips = '127.0.0.1'
node_ip = '127.0.0.1'
node_ips = [x.strip() for x in cluster_node_ips.split(',')]
node_ips.index(node_ip)
free_ports = None
free_ports = find_free_ports(len(selected_gpus))
if free_ports is not None:
free_ports = list(free_ports)
trainer_endpoints = []
for ip in node_ips:
trainer_endpoints.append([f"{ip}:{port}" for port in free_ports])
return get_cluster(node_ips, node_ip, trainer_endpoints, selected_gpus)
def get_gpus(selected_gpus):
"""Get selected GPU string"""
selected_gpus = [x.strip() for x in selected_gpus.split(',')]
return selected_gpus
def start_local_trainers(
cluster,
pod,
training_script,
training_script_args,
allocator_strategy="auto_growth",
):
"""Launch trainers"""
current_env = copy.copy(os.environ.copy())
# Paddle broadcasts ncclUniqueId over sockets, and a proxy may make the
# trainers unreachable, so remove the proxy variables. Setting them to ""
# instead makes gRPC log a "bad uri" error, so they must be deleted.
current_env.pop("http_proxy", None)
current_env.pop("https_proxy", None)
procs = []
for t in pod.trainers:
proc_env = {
"FLAGS_selected_gpus": ",".join([str(g) for g in t.gpus]),
"PADDLE_TRAINER_ID": f"{t.rank}",
"PADDLE_CURRENT_ENDPOINT": f"{t.endpoint}",
"PADDLE_TRAINERS_NUM": f"{cluster.trainers_nranks()}",
"PADDLE_TRAINER_ENDPOINTS": ",".join(cluster.trainers_endpoints()),
"PYTHONPATH": str(Path(__file__).resolve().parent),
}
proc_env["FLAGS_allocator_strategy"] = allocator_strategy
if allocator_strategy == "auto_growth":
proc_env["FLAGS_fraction_of_gpu_memory_to_use"] = "0.1"
current_env.update(proc_env)
print(f"trainer proc env:{current_env}")
if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
cmd = "python -m coverage run --branch -p " + training_script
else:
cmd = "python -u " + training_script
print(f"start trainer proc:{cmd} env:{proc_env}")
fn = None
proc = subprocess.Popen(cmd.split(" ") + training_script_args, env=current_env) # pylint: disable=consider-using-with
tp = TrainerProc()
tp.proc = proc
tp.rank = t.rank
tp.log_fn = fn
tp.cmd = cmd
procs.append(tp)
return procs
class TestDistributed(unittest.TestCase):
"""Base class for distributed test"""
@staticmethod
def run_2gpu(
target_file_name,
allocator_strategy="auto_growth",
):
"""Run target file in subprocesses"""
if (not fluid.core.is_compiled_with_cuda() or fluid.core.get_cuda_device_count() == 0):
return
selected_gpus = get_gpus('0,1')
cluster = None
pod = None
cluster, pod = get_cluster_from_args(selected_gpus)
procs = start_local_trainers(
cluster,
pod,
allocator_strategy=allocator_strategy,
training_script=target_file_name,
training_script_args=[],
)
while True:
alive = watch_local_trainers(procs, cluster.trainers_endpoints())
if not alive:
print(f"Local procs complete, POD info:{pod}")
break
time.sleep(3)
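# Illustrative sketch (made-up port numbers and path): the per-trainer environment that
# start_local_trainers assembles for a 2-GPU pod on localhost.
example_proc_env = {
    "FLAGS_selected_gpus": "0",                                  # this trainer's GPU
    "PADDLE_TRAINER_ID": "0",                                    # rank within the pod
    "PADDLE_CURRENT_ENDPOINT": "127.0.0.1:6070",
    "PADDLE_TRAINERS_NUM": "2",
    "PADDLE_TRAINER_ENDPOINTS": "127.0.0.1:6070,127.0.0.1:6071",
    "PYTHONPATH": "/path/to/tests/paddle",                       # hypothetical path
    "FLAGS_allocator_strategy": "auto_growth",
    "FLAGS_fraction_of_gpu_memory_to_use": "0.1",
}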
| TransformerEngine-main | tests/paddle/dist_launcher.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Test TransformerLayer encoder recompute"""
import sys
import paddle
import transformer_engine.paddle as te
class Net(paddle.nn.Layer):
"""Network use for recompute testing"""
def __init__(self, layers):
super().__init__()
self.layers = layers
def forward(self, inp, mask, enable_recompute, use_reentrant):
for layer in self.layers:
if enable_recompute:
out = te.recompute(layer, inp, mask, use_reentrant=use_reentrant)
else:
out = layer(inp, mask)
return out
def main():
"""Main function"""
paddle.seed(10)
batch_size = 16
hidden_size = 4096
num_heads = 32
ffn_hidden_size = 16384
q_seqlen = 512
kv_seqlen = 512
num_layers = 4
enable_recompute = int(sys.argv[1])
use_reentrant = int(sys.argv[2])
layers = paddle.nn.LayerList([
te.TransformerLayer(
hidden_size,
ffn_hidden_size,
num_heads,
layer_type='encoder',
) for _ in range(num_layers)
])
model = Net(layers)
optimizer = paddle.optimizer.AdamW(learning_rate=0.001, parameters=model.parameters())
for _ in range(10):
inp = paddle.uniform([batch_size, q_seqlen, hidden_size])
inp.stop_gradient = False
mask = paddle.zeros(shape=(batch_size, 1, q_seqlen, kv_seqlen), dtype='bool')
with te.fp8_autocast(enabled=True):
out = model(inp, mask, enable_recompute, use_reentrant)
loss = out.mean()
loss.backward()
optimizer.step()
optimizer.clear_grad()
print("Loss: ", float(loss))
print("Peak memory: ", paddle.device.cuda.max_memory_allocated(0))
if __name__ == "__main__":
main()
| TransformerEngine-main | tests/paddle/recompute_tests/recompute_transformer_encoder.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Unittest for LayerNormMLP layer in tensor parallel"""
import unittest
import paddle
from paddle.distributed import fleet
from utils import assert_allclose, assert_shape, set_random_seed
import transformer_engine.paddle as te
class TestLayerNormMLPTp(unittest.TestCase):
"""Tests LayerNormMLP layer with model parallel in BF16"""
def setUp(self):
self.set_attr()
self.init_dist_env()
paddle.set_default_dtype(self.global_dtype)
def init_dist_env(self):
"""Init Paddle Fleet environment"""
strategy = fleet.DistributedStrategy()
self.model_parallel_size = 2
strategy.hybrid_configs = {
"dp_degree": 1,
"mp_degree": self.model_parallel_size,
"pp_degree": 1,
}
fleet.init(is_collective=True, strategy=strategy)
self.hcg = fleet.get_hybrid_communicate_group()
self.tp_group = self.hcg.get_model_parallel_group()
def set_attr(self):
"""Set test configs"""
self.batch_size = 16
self.hidden_size = 32
self.ffn_hidden_size = 64
self.global_dtype = 'bfloat16'
self.rtol = 1e-3
self.atol = 1e-3
self.eps = 1e-3
self.fp8 = False
def test_parallel_layer(self):
"""Tests parallel LayerNormMLP"""
set_random_seed(1024)
layer_te = te.LayerNormMLP(
hidden_size=self.hidden_size,
ffn_hidden_size=self.ffn_hidden_size,
eps=self.eps,
set_parallel_mode=True,
)
layer_pd = te.LayerNormMLP(
hidden_size=self.hidden_size,
ffn_hidden_size=self.ffn_hidden_size,
eps=self.eps,
set_parallel_mode=False,
backend='paddle',
)
def _get_total_weight(local_weight, tp_group, axis):
total_weight = []
partial_weight = local_weight.clone().detach()
paddle.distributed.all_gather(total_weight, partial_weight, group=tp_group)
total_weight = paddle.concat(total_weight, axis=axis)
return total_weight
# Get total weight
total_fc1_weight = _get_total_weight(layer_te.fc1_weight, tp_group=self.tp_group, axis=0)
total_fc2_weight = _get_total_weight(layer_te.fc2_weight, tp_group=self.tp_group, axis=1)
layer_pd.fc1_weight.copy_(total_fc1_weight.T, True)
layer_pd.fc2_weight.copy_(total_fc2_weight.T, True)
assert_shape(layer_te.fc1_weight,
[self.ffn_hidden_size // self.model_parallel_size, self.hidden_size])
assert_shape(layer_te.fc1_bias, [self.ffn_hidden_size // self.model_parallel_size])
assert_shape(layer_te.fc2_weight,
[self.hidden_size, self.ffn_hidden_size // self.model_parallel_size])
assert_shape(layer_te.fc2_bias, [self.hidden_size])
optimizer_te = paddle.optimizer.SGD(learning_rate=0.001, parameters=layer_te.parameters())
optimizer_pd = paddle.optimizer.SGD(learning_rate=0.001, parameters=layer_pd.parameters())
layer_te = fleet.distributed_model(layer_te)
optimizer_te = fleet.distributed_optimizer(optimizer_te)
def train_one_step(layer, inp, optimizer):
inp = paddle.to_tensor(inp)
inp.stop_gradient = False
out = layer(inp)
loss = out.mean()
loss.backward()
optimizer.step()
optimizer.clear_grad()
return loss, inp.grad
for _ in range(5):
inp = paddle.uniform([self.batch_size, self.hidden_size], self.global_dtype)
with te.fp8_autocast(enabled=self.fp8):
loss_tp, grad_input = train_one_step(layer_te, inp, optimizer_te)
loss_ref, grad_input_ref = train_one_step(layer_pd, inp, optimizer_pd)
assert_allclose(loss_tp, loss_ref, rtol=self.rtol, atol=self.atol)
assert_allclose(grad_input, grad_input_ref, rtol=self.rtol, atol=self.atol)
class TestLayerNormMLPTpFp8(TestLayerNormMLPTp):
"""Tests LayerNormMLP layer with tensor parallelism in FP8"""
def set_attr(self):
"""Set test configs"""
self.batch_size = 16
self.hidden_size = 32
self.ffn_hidden_size = 64
self.global_dtype = 'bfloat16'
self.rtol = 1e-2
self.atol = 1e-2
self.eps = 1e-3
self.fp8 = True
if __name__ == '__main__':
unittest.main()
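# Illustrative numpy sketch (toy sizes, no activation shown): the tensor-parallel
# decomposition the LayerNormMLP test above relies on. fc1 is column parallel (output
# features sharded), fc2 is row parallel (input features sharded); summing the partial
# fc2 outputs across ranks reproduces the single-device MLP.
import numpy as np
hidden, ffn, tp = 4, 8, 2
x = np.random.rand(3, hidden)
w1 = np.random.rand(ffn, hidden)                      # TE layout: [out, in]
w2 = np.random.rand(hidden, ffn)
ref = (x @ w1.T) @ w2.T                               # full MLP
w1_shards = np.split(w1, tp, axis=0)                  # column parallel
w2_shards = np.split(w2, tp, axis=1)                  # row parallel
partial = [(x @ w1_s.T) @ w2_s.T for w1_s, w2_s in zip(w1_shards, w2_shards)]
assert np.allclose(sum(partial), ref)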
| TransformerEngine-main | tests/paddle/parallel_tests/layernorm_mlp_tp.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Unittest for Transformer layer in tensor parallel"""
import unittest
import paddle
from paddle.distributed import fleet
from utils import assert_allclose, set_random_seed
import transformer_engine.paddle as te
class TestTransformerTp(unittest.TestCase):
"""Tests Transformer layer with model parallel in BF16"""
def setUp(self):
self.set_attr()
self.init_dist_env()
paddle.set_default_dtype(self.global_dtype)
def init_dist_env(self):
"""Init Paddle Fleet environment"""
strategy = fleet.DistributedStrategy()
self.model_parallel_size = 2
strategy.hybrid_configs = {
"dp_degree": 1,
"mp_degree": self.model_parallel_size,
"pp_degree": 1,
}
fleet.init(is_collective=True, strategy=strategy)
self.hcg = fleet.get_hybrid_communicate_group()
self.tp_group = self.hcg.get_model_parallel_group()
def set_attr(self):
"""Set test configs"""
self.batch_size = 16
self.hidden_size = 1024
self.num_heads = 16
self.ffn_hidden_size = 4096
self.q_seqlen = 128
self.kv_seqlen = 128
self.mask_type = 'padding'
self.layer_type = 'encoder'
self.global_dtype = 'bfloat16'
self.rtol = 5e-2
self.atol = 5e-2
self.eps = 1e-3
self.fp8 = False
def test_parallel_layer(self):
"""Tests parallel Transformer"""
set_random_seed(1024)
common_args = [
self.hidden_size,
self.ffn_hidden_size,
self.num_heads,
]
common_kwargs = {
'layernorm_epsilon': self.eps,
'hidden_dropout': 0.0,
'attention_dropout': 0.0,
'self_attn_mask_type': self.mask_type,
'layer_type': self.layer_type,
}
layer_tp = te.TransformerLayer(*common_args, **common_kwargs, set_parallel_mode=True)
layer_single = te.TransformerLayer(*common_args, **common_kwargs, set_parallel_mode=False)
def _get_total_weight(local_weight, tp_group, axis):
total_weight = []
partial_weight = local_weight.clone().detach()
paddle.distributed.all_gather(total_weight, partial_weight, group=tp_group)
total_weight = paddle.concat(total_weight, axis=axis)
return total_weight
def _get_weight(obj, weight_names):
for name in weight_names:
obj = getattr(obj, name)
return obj
def copy_weight(layer_src, layer_dst, partition_mode, weight_names):
weight_src = _get_weight(layer_src, weight_names)
weight_dst = _get_weight(layer_dst, weight_names)
if partition_mode is None:
total_weight = weight_src
elif partition_mode == 'column':
total_weight = _get_total_weight(weight_src, tp_group=self.tp_group, axis=0)
elif partition_mode == 'row':
total_weight = _get_total_weight(weight_src, tp_group=self.tp_group, axis=1)
else:
raise ValueError(f"Partition Mode {partition_mode} is not supported.")
assert weight_dst.shape == total_weight.shape, \
f"Shapes of src:{total_weight.shape} and dst:{weight_dst.shape} do not match."
weight_dst.copy_(total_weight, True)
copy_weight(layer_tp, layer_single, None, ['self_attention', 'layernorm_qkv', 'ln_weight'])
copy_weight(layer_tp, layer_single, 'column', ['self_attention', 'layernorm_qkv', 'weight'])
copy_weight(layer_tp, layer_single, 'row', ['self_attention', 'proj', 'weight'])
copy_weight(layer_tp, layer_single, None, ['layernorm_mlp', 'ln_weight'])
copy_weight(layer_tp, layer_single, 'column', ['layernorm_mlp', 'fc1_weight'])
copy_weight(layer_tp, layer_single, 'row', ['layernorm_mlp', 'fc2_weight'])
optimizer_tp = paddle.optimizer.SGD(learning_rate=0.1, parameters=layer_tp.parameters())
optimizer_single = paddle.optimizer.SGD(learning_rate=0.1,
parameters=layer_single.parameters())
layer_tp = fleet.distributed_model(layer_tp)
optimizer_tp = fleet.distributed_optimizer(optimizer_tp)
def train_one_step(layer, inp_list, optimizer, fp8_enabled):
with te.fp8_autocast(enabled=fp8_enabled):
out = layer(*inp_list)
loss = out.mean()
loss.backward()
optimizer.step()
optimizer.clear_grad()
return loss
for _ in range(5):
inp = paddle.uniform([self.batch_size, self.q_seqlen, self.hidden_size],
self.global_dtype)
mask = paddle.zeros(shape=(self.batch_size, 1, self.q_seqlen, self.kv_seqlen),
dtype='bool')
loss_tp = train_one_step(layer_tp, [inp, mask], optimizer_tp, self.fp8)
loss_single = train_one_step(layer_single, [inp, mask], optimizer_single, self.fp8)
assert_allclose(loss_tp, loss_single, rtol=self.rtol, atol=self.atol)
class TestTransformerTpFp8(TestTransformerTp):
"""Tests Transformer layer with tensor parallelism in FP8"""
def set_attr(self):
"""Set test configs"""
self.batch_size = 16
self.hidden_size = 1024
self.num_heads = 16
self.ffn_hidden_size = 4096
self.q_seqlen = 128
self.kv_seqlen = 128
self.mask_type = 'padding'
self.layer_type = 'encoder'
self.global_dtype = 'bfloat16'
self.rtol = 5e-2
self.atol = 5e-2
self.eps = 1e-3
self.fp8 = True
if __name__ == '__main__':
unittest.main()
| TransformerEngine-main | tests/paddle/parallel_tests/transformer_tp.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Unittest for Linear layer in pipeline parallel"""
import unittest
import numpy as np
import paddle
from paddle.distributed import fleet
from paddle.distributed.fleet.meta_parallel import (
LayerDesc,
PipelineLayer,
)
from utils import assert_allclose, set_random_seed
import transformer_engine.paddle as te
class TEPipelineModel(PipelineLayer):
"""Model for pipeline parallel test"""
def __init__(self,
in_features,
hidden_features,
weight_attrs,
use_te=True,
use_fp8=False,
**kwargs):
self.in_features = in_features
self.hidden_features = hidden_features
self.fp8 = use_fp8
hcg = fleet.get_hybrid_communicate_group()
self.dp_group = hcg.get_data_parallel_group()
Linear = te.Linear if use_te else paddle.nn.Linear
model_desc = [
LayerDesc(Linear, self.in_features, self.hidden_features, weight_attr=weight_attrs[0]),
LayerDesc(Linear, self.hidden_features, self.in_features, weight_attr=weight_attrs[1]),
]
super().__init__(layers=model_desc, loss_fn=paddle.nn.CrossEntropyLoss(), **kwargs)
def forward(self, *args, **kwargs):
with te.fp8_autocast(enabled=self.fp8, fp8_group=self.dp_group):
return super().forward(*args, **kwargs)
class StandaloneModel(paddle.nn.Layer):
"""Model for pipeline parallel test"""
def __init__(self, in_features, hidden_features, weight_attrs):
super().__init__()
self.in_features = in_features
self.hidden_features = hidden_features
Linear = paddle.nn.Linear
self.layer = paddle.nn.Sequential(
Linear(self.in_features, self.hidden_features, weight_attr=weight_attrs[0]),
Linear(self.hidden_features, self.in_features, weight_attr=weight_attrs[1]),
)
self.loss = paddle.nn.CrossEntropyLoss()
def forward(self, inp):
out = self.layer(inp[0])
loss = self.loss(out, inp[1])
return loss
class TestLinearPipelineParallel(unittest.TestCase):
"""Tests Linear layer with pipeline parallel"""
def setUp(self):
self.set_attr()
self.init_dist_env()
paddle.set_default_dtype(self.global_dtype)
def init_dist_env(self):
"""Init Paddle Fleet environment"""
strategy = fleet.DistributedStrategy()
self.pipeline_parallel_size = 2
strategy.hybrid_configs = {
"dp_degree": 1,
"mp_degree": 1,
"pp_degree": self.pipeline_parallel_size,
}
strategy.pipeline_configs = {
"accumulate_steps": self.batch_size // self.micro_batch_size,
"micro_batch_size": self.micro_batch_size,
}
fleet.init(is_collective=True, strategy=strategy)
self.rank = fleet.worker_index()
self.hcg = fleet.get_hybrid_communicate_group()
def set_attr(self):
"""Set test configs"""
self.batch_size = 32
self.micro_batch_size = 16
self.in_features = 32
self.hidden_features = 64
self.global_dtype = 'float32'
self.rtol = 1e-5
self.atol = 1e-5
self.iter = 10
self.fp8 = False
def test_pipeline_train(self):
"""Test pipeline parallel training"""
set_random_seed(1024)
np.random.seed(1024)
weight1_np = np.random.normal(size=[self.in_features, self.hidden_features])
weight2_np = np.random.normal(size=[self.hidden_features, self.in_features])
weight_attrs = [
paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(weight1_np)),
paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(weight2_np)),
]
weight_attrs_transposed = [
paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(weight1_np.T)),
paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(weight2_np.T)),
]
pipe_model = TEPipelineModel(
self.in_features,
self.hidden_features,
weight_attrs_transposed,
use_te=True,
use_fp8=self.fp8,
seg_method="layer:Linear",
num_stages=self.pipeline_parallel_size,
)
# Check if model is split across ranks as expected
for name, sublayer in pipe_model.named_sublayers():
if name in ('_loss_fn', 'shared_layers'):
continue
if self.rank == 0:
assert tuple(sublayer.weight.shape) == weight1_np.T.shape, \
f"Shape does not match, expect: {weight1_np.T.shape}, " \
f"actual: {tuple(sublayer.weight.shape)}"
elif self.rank == 1:
assert tuple(sublayer.weight.shape) == weight2_np.T.shape, \
f"Shape does not match, expect: {weight2_np.T.shape}, " \
f"actual: {tuple(sublayer.weight.shape)}"
standalone_model = StandaloneModel(
self.in_features,
self.hidden_features,
weight_attrs,
)
optimizer_te = paddle.optimizer.SGD(learning_rate=0.1, parameters=pipe_model.parameters())
optimizer_pd = paddle.optimizer.SGD(learning_rate=0.1,
parameters=standalone_model.parameters())
pipe_model = fleet.distributed_model(pipe_model)
optimizer_te = fleet.distributed_optimizer(optimizer_te)
def train_one_step(layer, inp, optimizer):
loss = layer(inp)
loss.backward()
optimizer.step()
optimizer.clear_grad()
return loss
for i in range(self.iter):
inp = paddle.to_tensor(np.random.normal(size=[self.batch_size, self.in_features]),
dtype=self.global_dtype)
label = paddle.to_tensor(np.random.randint(self.in_features, size=[self.batch_size, 1]))
loss_te = pipe_model.train_batch([inp, label], optimizer_te)
loss_pd = train_one_step(standalone_model, [inp, label], optimizer_pd)
print(f"Iter: {i}, loss_te: {loss_te.item()}, loss_pd: {loss_pd.item()}")
assert_allclose(loss_te, loss_pd, rtol=self.rtol, atol=self.atol)
class TestLinearPipelineParallelFP8(TestLinearPipelineParallel):
"""Tests Linear layer with column/row parallelism in FP8"""
def set_attr(self):
"""Set test configs"""
self.batch_size = 32
self.micro_batch_size = 16
self.in_features = 32
self.hidden_features = 64
self.global_dtype = 'float32'
self.rtol = 5e-2
self.atol = 5e-2
self.iter = 10
self.fp8 = True
if __name__ == '__main__':
unittest.main()
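# Illustrative numpy sketch (toy sizes): why the pipeline test above feeds the TE model
# the transposed weight_attrs. paddle.nn.Linear stores its kernel as
# [in_features, out_features] and computes x @ W, while te.Linear stores
# [out_features, in_features] and computes x @ W.T, so initializing one from the
# transpose of the other makes the two models start out identical.
import numpy as np
x = np.random.rand(2, 3)
w_pd = np.random.rand(3, 5)        # paddle.nn.Linear layout: [in, out]
w_te = w_pd.T                      # te.Linear layout: [out, in]
assert np.allclose(x @ w_pd, x @ w_te.T)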
| TransformerEngine-main | tests/paddle/parallel_tests/linear_pp.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Unittest for Linear layer in tensor parallel"""
import unittest
import paddle
from paddle.distributed import fleet
from paddle.distributed.fleet.layers.mpu import mp_ops
from utils import assert_allclose, assert_shape, set_random_seed
import transformer_engine.paddle as te
class TestLinearTp(unittest.TestCase):
"""Tests Linear layer with column/row parallelism in BF16"""
def setUp(self):
self.set_attr()
self.init_dist_env()
paddle.set_default_dtype(self.global_dtype)
def init_dist_env(self):
"""Init Paddle Fleet environment"""
strategy = fleet.DistributedStrategy()
self.model_parallel_size = 2
strategy.hybrid_configs = {
"dp_degree": 1,
"mp_degree": self.model_parallel_size,
"pp_degree": 1,
}
fleet.init(is_collective=True, strategy=strategy)
self.rank = fleet.worker_index()
self.hcg = fleet.get_hybrid_communicate_group()
self.tp_group = self.hcg.get_model_parallel_group()
self.world_size = self.hcg.get_model_parallel_world_size()
def set_attr(self):
"""Set test configs"""
self.batch_size = 16
self.in_features = 32
self.out_features = 64
self.global_dtype = 'bfloat16'
self.rtol = 1e-3
self.atol = 1e-3
self.fp8 = False
def test_column_parallel_layer(self):
"""Tests column parallel linear"""
set_random_seed(1024)
layer_te = te.Linear(
self.in_features,
self.out_features,
parallel_mode='column',
)
layer_pd = te.Linear(
self.in_features,
self.out_features,
backend='paddle',
)
# Get total weight
total_weight = []
partial_weight = layer_te.weight.clone().detach()
paddle.distributed.all_gather(total_weight, partial_weight, group=self.tp_group)
total_weight = paddle.concat(total_weight, axis=0)
layer_pd.weight.copy_(total_weight.T, True)
assert_shape(layer_te.weight,
[self.out_features // self.model_parallel_size, self.in_features])
assert_shape(layer_te.bias, [self.out_features // self.model_parallel_size])
optimizer_te = paddle.optimizer.SGD(learning_rate=0.001, parameters=layer_te.parameters())
optimizer_pd = paddle.optimizer.SGD(learning_rate=0.001, parameters=layer_pd.parameters())
layer_te = fleet.distributed_model(layer_te)
optimizer_te = fleet.distributed_optimizer(optimizer_te)
def train_one_step(layer, inp, optimizer, gather=False):
inp = paddle.to_tensor(inp)
inp.stop_gradient = False
out = layer(inp)
if gather:
total_out = mp_ops._c_concat(out, group=self.tp_group)
else:
total_out = out
loss = total_out.mean()
loss.backward()
optimizer.step()
optimizer.clear_grad()
return loss, inp.grad
for _ in range(5):
inp = paddle.uniform([self.batch_size, self.in_features], self.global_dtype)
with te.fp8_autocast(enabled=self.fp8):
loss_tp, grad_input = train_one_step(layer_te, inp, optimizer_te, gather=True)
loss_ref, grad_input_ref = train_one_step(layer_pd, inp, optimizer_pd)
assert_allclose(loss_tp, loss_ref, rtol=self.rtol, atol=self.atol)
assert_allclose(grad_input, grad_input_ref, rtol=self.rtol, atol=self.atol)
def test_row_parallel_layer(self):
"""Tests row parallel linear"""
set_random_seed(1024)
layer_te = te.Linear(
self.in_features,
self.out_features,
parallel_mode='row',
)
layer_pd = te.Linear(
self.in_features,
self.out_features,
backend='paddle',
)
# Get total weight
total_weight = []
partial_weight = layer_te.weight.clone().detach()
paddle.distributed.all_gather(total_weight, partial_weight, group=self.tp_group)
total_weight = paddle.concat(total_weight, axis=1)
layer_pd.weight.copy_(total_weight.T, True)
assert_shape(layer_te.weight,
[self.out_features, self.in_features // self.model_parallel_size])
assert_shape(layer_te.bias, [self.out_features])
optimizer_te = paddle.optimizer.SGD(learning_rate=0.001, parameters=layer_te.parameters())
optimizer_pd = paddle.optimizer.SGD(learning_rate=0.001, parameters=layer_pd.parameters())
# Note(tizheng): For this test, we cannot wrap the model with fleet.distributed_model,
# because it would broadcast the inputs across the mp group. However, the row-parallel
# linear (RPL) expects split inputs, which differ on each rank.
def train_one_step(layer, inp, optimizer, split=False):
inp = paddle.to_tensor(inp, stop_gradient=True)
if split:
# TODO(tizheng): Why not working?
# issue: https://github.com/PaddlePaddle/Paddle/issues/55565
# input_parallel = mp_ops._c_split(inp, group=layer.tp_group)
split_size = inp.shape[1] // self.world_size
input_parallel = inp[:, split_size * self.rank:split_size * (self.rank + 1)]
else:
input_parallel = inp
input_parallel.stop_gradient = False
out = layer(input_parallel)
loss = out.mean()
loss.backward()
optimizer.step()
optimizer.clear_grad()
if split:
grad_input = []
paddle.distributed.all_gather(grad_input, input_parallel.grad, group=self.tp_group)
grad_input = paddle.concat(grad_input, axis=1)
else:
grad_input = input_parallel.grad
return loss, grad_input
for _ in range(5):
inp = paddle.uniform([self.batch_size, self.in_features], self.global_dtype)
with te.fp8_autocast(enabled=self.fp8):
loss_tp, grad_input = train_one_step(layer_te, inp, optimizer_te, split=True)
loss_ref, grad_input_ref = train_one_step(layer_pd, inp, optimizer_pd)
assert_allclose(loss_tp, loss_ref, rtol=self.rtol, atol=self.atol)
assert_allclose(grad_input, grad_input_ref, rtol=self.rtol, atol=self.atol)
class TestLinearTpFP8(TestLinearTp):
"""Tests Linear layer with column/row parallelism in FP8"""
def set_attr(self):
"""Set test configs"""
self.batch_size = 16
self.in_features = 32
self.out_features = 64
self.global_dtype = 'bfloat16'
self.rtol = 1e-2
self.atol = 1e-2
self.fp8 = True
if __name__ == '__main__':
unittest.main()
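# Illustrative numpy sketch (toy sizes): the row-parallel identity behind
# test_row_parallel_layer above. Each rank gets a column slice of the input and the
# matching column slice of the [out, in] weight; summing the partial GEMMs across ranks
# reproduces the full output, which is why the test splits the input manually instead
# of broadcasting it.
import numpy as np
batch, in_features, out_features, tp = 3, 4, 6, 2
x = np.random.rand(batch, in_features)
w = np.random.rand(out_features, in_features)          # TE layout: [out, in]
x_shards = np.split(x, tp, axis=1)                      # per-rank input slice
w_shards = np.split(w, tp, axis=1)                      # per-rank weight slice
partial = [xs @ ws.T for xs, ws in zip(x_shards, w_shards)]
assert np.allclose(sum(partial), x @ w.T)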
| TransformerEngine-main | tests/paddle/parallel_tests/linear_tp.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Unittest for LayerNormLinear layer in tensor parallel"""
import unittest
import paddle
from paddle.distributed import fleet
from paddle.distributed.fleet.layers.mpu import mp_ops
from utils import assert_allclose, assert_shape, set_random_seed
import transformer_engine.paddle as te
class TestLayerNormLinearTp(unittest.TestCase):
"""Tests LayerNormLinear layer with column/row parallelism in BF16"""
def setUp(self):
self.set_attr()
self.init_dist_env()
paddle.set_default_dtype(self.global_dtype)
def init_dist_env(self):
"""Init Paddle Fleet environment"""
strategy = fleet.DistributedStrategy()
self.model_parallel_size = 2
strategy.hybrid_configs = {
"dp_degree": 1,
"mp_degree": self.model_parallel_size,
"pp_degree": 1,
}
fleet.init(is_collective=True, strategy=strategy)
self.hcg = fleet.get_hybrid_communicate_group()
self.tp_group = self.hcg.get_model_parallel_group()
def set_attr(self):
"""Set test configs"""
self.batch_size = 16
self.in_features = 32
self.out_features = 64
self.global_dtype = 'bfloat16'
self.rtol = 1e-3
self.atol = 1e-3
self.eps = 1e-3
self.fp8 = False
def test_column_parallel_layer(self):
"""Tests column parallel LayerNormLinear"""
set_random_seed(1024)
layer_te = te.LayerNormLinear(
self.in_features,
self.out_features,
eps=self.eps,
parallel_mode='column',
)
layer_pd = te.LayerNormLinear(
self.in_features,
self.out_features,
eps=self.eps,
backend='paddle',
)
# Get total weight
total_weight = []
partial_weight = layer_te.weight.clone().detach()
paddle.distributed.all_gather(total_weight, partial_weight, group=self.tp_group)
total_weight = paddle.concat(total_weight, axis=0)
layer_pd.weight.copy_(total_weight.T, True)
assert_shape(layer_te.weight,
[self.out_features // self.model_parallel_size, self.in_features])
assert_shape(layer_te.bias, [self.out_features // self.model_parallel_size])
optimizer_te = paddle.optimizer.SGD(learning_rate=0.001, parameters=layer_te.parameters())
optimizer_pd = paddle.optimizer.SGD(learning_rate=0.001, parameters=layer_pd.parameters())
layer_te = fleet.distributed_model(layer_te)
optimizer_te = fleet.distributed_optimizer(optimizer_te)
def train_one_step(layer, inp, optimizer, gather=False):
inp = paddle.to_tensor(inp)
inp.stop_gradient = False
out = layer(inp)
if gather:
total_out = mp_ops._c_concat(out, group=self.tp_group)
else:
total_out = out
loss = total_out.mean()
loss.backward()
optimizer.step()
optimizer.clear_grad()
return loss, inp.grad
for _ in range(5):
inp = paddle.uniform([self.batch_size, self.in_features], self.global_dtype)
with te.fp8_autocast(enabled=self.fp8):
loss_tp, grad_input = train_one_step(layer_te, inp, optimizer_te, gather=True)
loss_ref, grad_input_ref = train_one_step(layer_pd, inp, optimizer_pd)
assert_allclose(loss_tp, loss_ref, rtol=self.rtol, atol=self.atol)
assert_allclose(grad_input, grad_input_ref, rtol=self.rtol, atol=self.atol)
class TestLayerNormLinearTpFp8(TestLayerNormLinearTp):
"""Tests LayernormLinear layer with column/row parallelism in FP8"""
def set_attr(self):
"""Set test configs"""
self.batch_size = 16
self.in_features = 32
self.out_features = 64
self.global_dtype = 'bfloat16'
self.rtol = 1e-2
self.atol = 1e-2
self.eps = 1e-3
self.fp8 = True
if __name__ == '__main__':
unittest.main()
| TransformerEngine-main | tests/paddle/parallel_tests/layernorm_linear_tp.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Unittest for group sharding"""
import unittest
import paddle
from paddle.distributed import fleet
from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer import (
DygraphShardingOptimizer,)
from utils import assert_allclose, set_random_seed
import transformer_engine.paddle as te
class TestGroupSharding(unittest.TestCase):
"""Tests group sharding"""
def setUp(self):
self.set_attr()
self.init_dist_env()
paddle.set_default_dtype(self.global_dtype)
def set_attr(self):
"""Set test configs"""
self.sharding_degree = 2
self.global_dtype = 'float32'
self.rtol = 1e-5
self.atol = 1e-5
self.batch_size = 16
self.in_channels = 16
self.out_channels = 32
self.fp8 = False
def init_dist_env(self):
"""Init Paddle Fleet environment"""
strategy = fleet.DistributedStrategy()
strategy.hybrid_configs = {
"dp_degree": 1,
"mp_degree": 1,
"pp_degree": 1,
"sharding_degree": self.sharding_degree,
}
self.strategy = strategy
fleet.init(is_collective=True, strategy=strategy)
def _get_model_and_optimizer(self, model, stage):
if stage == 1:
optimizer = DygraphShardingOptimizer(
hcg=fleet.get_hybrid_communicate_group(),
user_defined_strategy=self.strategy,
params=model.parameters(),
inner_optimizer_class=paddle.optimizer.AdamW,
learning_rate=0.01,
)
model = fleet.distributed_model(model)
optimizer = fleet.distributed_optimizer(optimizer)
elif stage in [2, 3]:
optimizer = paddle.optimizer.AdamW(learning_rate=0.01, parameters=model.parameters())
group = fleet.get_hybrid_communicate_group().get_sharding_parallel_group()
class ShardingLevel: # pylint: disable=too-few-public-methods,
"""Paddle sharding options"""
kStage1 = 'os'
kStage2 = 'os_g'
kStage3 = 'p_g_os'
level = ShardingLevel.kStage3 if stage == 3 else ShardingLevel.kStage2
model, optimizer, _ = paddle.distributed.sharding.group_sharded_parallel(
model=model,
optimizer=optimizer,
level=level,
group=group,
segment_size=256,
)
else:
raise ValueError(f"Stage {stage} not supported")
return model, optimizer
def test_group_sharding_stage1(self):
"""Tests group sharding training"""
set_random_seed(1024)
model_te = te.Linear(self.in_channels, self.out_channels)
model_pd = paddle.nn.Linear(self.in_channels, self.out_channels)
model_pd.weight.copy_(model_te.weight.T, True)
model_pd.bias.copy_(model_te.bias, True)
model_te, optimizer_te = self._get_model_and_optimizer(model_te, stage=1)
model_pd, optimizer_pd = self._get_model_and_optimizer(model_pd, stage=1)
rank_id = paddle.distributed.get_rank()
paddle.seed(rank_id)
def train_one_step(model, inp, optimizer):
out = model(inp)
loss = out.mean()
loss.backward()
optimizer.step()
optimizer.clear_grad()
return loss
for _ in range(5):
inp = paddle.uniform([self.batch_size, self.in_channels], self.global_dtype)
with te.fp8_autocast(enabled=False):
loss_te = train_one_step(model_te, inp, optimizer_te)
loss_pd = train_one_step(model_pd, inp, optimizer_pd)
assert_allclose(loss_te, loss_pd, rtol=self.rtol, atol=self.atol)
assert len(optimizer_te.state_dict()) == 4, \
"Expect each rank to hold 4 optimizer state entries."
def test_group_sharding_stage2(self):
"""Tests group sharding training"""
set_random_seed(1024)
model_te = te.Linear(self.in_channels, self.out_channels)
model_pd = paddle.nn.Linear(self.in_channels, self.out_channels)
model_pd.weight.copy_(model_te.weight.T, True)
model_pd.bias.copy_(model_te.bias, True)
model_te, optimizer_te = self._get_model_and_optimizer(model_te, stage=2)
model_pd, optimizer_pd = self._get_model_and_optimizer(model_pd, stage=2)
rank_id = paddle.distributed.get_rank()
paddle.seed(rank_id)
def train_one_step(model, inp, optimizer):
out = model(inp)
loss = out.mean()
loss.backward()
# Check gradients are split to different trainers
if rank_id == 0:
assert model.bias.grad is None and model.weight.grad is not None
elif rank_id == 1:
assert model.weight.grad is None and model.bias.grad is not None
optimizer.step()
optimizer.clear_grad()
return loss
for _ in range(5):
inp = paddle.uniform([self.batch_size, self.in_channels], self.global_dtype)
with te.fp8_autocast(enabled=False):
loss_te = train_one_step(model_te, inp, optimizer_te)
loss_pd = train_one_step(model_pd, inp, optimizer_pd)
assert_allclose(loss_te, loss_pd, rtol=self.rtol, atol=self.atol)
assert len(optimizer_te.state_dict()) == 4, \
"Expect each rank to hold 4 optimizer state entries."
def test_group_sharding_stage3(self):
"""Tests group sharding training"""
set_random_seed(1024)
model_te = te.Linear(self.in_channels, self.out_channels)
model_pd = paddle.nn.Linear(self.in_channels, self.out_channels)
model_pd.weight.copy_(model_te.weight.T, True)
model_pd.bias.copy_(model_te.bias, True)
model_te, optimizer_te = self._get_model_and_optimizer(model_te, stage=3)
model_pd, optimizer_pd = self._get_model_and_optimizer(model_pd, stage=3)
rank_id = paddle.distributed.get_rank()
paddle.seed(rank_id)
def train_one_step(model, inp, optimizer):
out = model(inp)
loss = out.mean()
loss.backward()
optimizer.step()
optimizer.clear_grad()
return loss
for _ in range(5):
inp = paddle.uniform([self.batch_size, self.in_channels], self.global_dtype)
with te.fp8_autocast(enabled=False):
loss_te = train_one_step(model_te, inp, optimizer_te)
loss_pd = train_one_step(model_pd, inp, optimizer_pd)
assert_allclose(loss_te, loss_pd, rtol=self.rtol, atol=self.atol)
for name, value in optimizer_te.state_dict().items():
if name.endswith('w_0_moment1_0'):
assert value.numel() == \
self.in_channels * self.out_channels // self.sharding_degree, \
"Expect optimizer state to be sharded across trainers."
if __name__ == '__main__':
unittest.main()
| TransformerEngine-main | tests/paddle/parallel_tests/group_sharding.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Unittest for Linear layer in tensor parallel"""
import unittest
import paddle
from paddle.distributed import fleet
from utils import assert_allclose, set_random_seed
import transformer_engine.paddle as te
def assert_allclose_across_ranks(tensor, group=None):
"""Assert tensor is identical in all ranks"""
gathered_list = []
paddle.distributed.all_gather(gathered_list, tensor, group=group)
assert len(gathered_list) > 1
for gathered_tensor in gathered_list:
assert_allclose(tensor, gathered_tensor)
class TestAmaxReduction(unittest.TestCase):
"""Tests Amax reduction"""
def setUp(self):
self.data_parallel_size = 2
self.init_dist_env()
self.global_dtype = 'bfloat16'
paddle.set_default_dtype(self.global_dtype)
def init_dist_env(self):
"""Init Paddle Fleet environment"""
strategy = fleet.DistributedStrategy()
strategy.hybrid_configs = {
"dp_degree": self.data_parallel_size,
"mp_degree": 1,
"pp_degree": 1,
}
fleet.init(is_collective=True, strategy=strategy)
def test_amax_reduction(self):
"""Tests column parallel linear"""
set_random_seed(1024)
layer1 = te.Linear(16, 16)
layer2 = te.Linear(16, 16)
model = paddle.nn.Sequential(layer1, layer2)
model = fleet.distributed_model(model)
rank_id = paddle.distributed.get_rank()
set_random_seed(rank_id)
optimizer = paddle.optimizer.SGD(learning_rate=10.0, parameters=model.parameters())
optimizer = fleet.distributed_optimizer(optimizer)
def train_one_step(layer, inp, optimizer):
inp = paddle.to_tensor(inp)
inp.stop_gradient = False
out = layer(inp)
loss = out.mean()
loss.backward()
optimizer.step()
optimizer.clear_grad()
return loss
for _ in range(5):
inp = paddle.uniform([16, 16], self.global_dtype)
with te.fp8_autocast(enabled=True):
train_one_step(model, inp, optimizer)
assert_allclose_across_ranks(layer1.fp8_meta["scaling_fwd"].amax_history[-1])
assert_allclose_across_ranks(layer1.fp8_meta["scaling_fwd"].scale)
assert_allclose_across_ranks(layer1.fp8_meta["scaling_fwd"].scale_inv)
assert_allclose_across_ranks(layer2.fp8_meta["scaling_fwd"].amax_history[-1])
assert_allclose_across_ranks(layer2.fp8_meta["scaling_fwd"].scale)
assert_allclose_across_ranks(layer2.fp8_meta["scaling_fwd"].scale_inv)
assert_allclose_across_ranks(layer1.fp8_meta["scaling_bwd"].amax_history[-1])
assert_allclose_across_ranks(layer1.fp8_meta["scaling_bwd"].scale)
assert_allclose_across_ranks(layer1.fp8_meta["scaling_bwd"].scale_inv)
assert_allclose_across_ranks(layer2.fp8_meta["scaling_bwd"].amax_history[-1])
assert_allclose_across_ranks(layer2.fp8_meta["scaling_bwd"].scale)
assert_allclose_across_ranks(layer2.fp8_meta["scaling_bwd"].scale_inv)
if __name__ == '__main__':
unittest.main()
| TransformerEngine-main | tests/paddle/parallel_tests/amax_reduction.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Tests for the cpp extensions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import transformer_engine # pylint: disable=unused-import
import transformer_engine_tensorflow as tex
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from transformer_engine.tensorflow import TE_DType
from transformer_engine.tensorflow import get_stream_id
class ExtensionsTest(test.TestCase):
@test_util.run_gpu_only
def testCastFp8(self):
if not tf.test.is_gpu_available(True, (9, 0)):
self.skipTest('Fp8 requires Hopper+ GPU')
input_shape = (16, 32)
x = tf.random.uniform(input_shape)
scale, amax, scale_inv = tf.ones([]), tf.zeros([]), tf.ones([])
offset = 0
for fp8_dtype in [tex.DType.kFloat8E4M3, tex.DType.kFloat8E5M2]:
stream_id = get_stream_id()
x_fp8 = tex.cast_to_fp8(
x, scale, fp8_dtype, amax, scale_inv, offset, stream_id)
y = tex.cast_from_fp8(
x_fp8, scale_inv, fp8_dtype, TE_DType[x.dtype],
offset, stream_id)
self.assertAllClose(y, x, rtol=0.1, atol=0.01)
@test_util.run_gpu_only
def testTransposeFp8(self):
stream_id = get_stream_id()
x = tf.constant(np.random.uniform(-128, 127, (16, 32)), dtype=tf.int8)
y = tex.fp8_transpose(x, tex.DType.kFloat8E4M3, stream_id)
y_ref = tf.transpose(x, [1, 0])
self.assertAllEqual(y, y_ref)
@test_util.run_gpu_only
def testMatmulFp8(self):
if not tf.test.is_gpu_available(True, (9, 0)):
self.skipTest('Fp8 requires Hopper+ GPU')
stream_id = get_stream_id()
fp8_dtype = tex.DType.kFloat8E4M3
out_dtype = tex.DType.kFloat32
a = tf.random.uniform([32, 16])
a_scale, a_amax, a_scale_inv = tf.ones([]), tf.zeros([]), tf.ones([])
a_offset = 0
a_casted = tex.cast_to_fp8(a, a_scale, fp8_dtype, a_amax, a_scale_inv,
a_offset, stream_id)
b = tf.random.uniform([16, 16])
b_scale, b_amax, b_scale_inv = tf.ones([]), tf.zeros([]), tf.ones([])
b_offset = 0
b_casted = tex.cast_to_fp8(b, b_scale, fp8_dtype, b_amax, b_scale_inv,
b_offset, stream_id)
use_bias = False
bias = tf.zeros(())
workspace = tf.zeros([33_554_432], dtype=tf.int8)
# cublasLt inside tex.te_gemm assumes the inputs are column major.
# Mathematically, A@B=C is equivalent to B^T@A^T=C^T, where X^T is the
# transpose of X. Since viewing X in column-major order is the same as taking
# X^T, no explicit transpose is needed.
# Note: for FP8 matmul, the first matrix has to be in transposed format.
d = tex.te_gemm(b_casted, b_scale_inv, fp8_dtype, b_offset, a_casted,
a_scale_inv, fp8_dtype, a_offset, workspace, use_bias,
bias, False, None, True, False, False, False, False,
out_dtype, stream_id)
# We assume b is in transposed format (see above). So we transpose it
# back to apply the ordinary row-major matmul.
bt = tf.transpose(b)
d_ref = tf.matmul(a, bt)
self.assertAllClose(d, d_ref, rtol=0.1, atol=0.01)
@test_util.run_gpu_only
def testLayerNormFwdFp8(self):
if not tf.test.is_gpu_available(True, (9, 0)):
self.skipTest('Fp8 requires Hopper+ GPU')
stream_id = get_stream_id()
fp8_dtype = tex.DType.kFloat8E4M3
N, H = (16, 32)
eps = 1e-3
x = tf.random.uniform((N, H))
gamma = tf.random.uniform((H,))
beta = tf.random.uniform((H,))
offset = 0
scale, amax, scale_inv = tf.ones([]), tf.zeros([]), tf.ones([])
y_ref, mu_ref, rsigma_ref = tex.layernorm_fwd(
x, gamma, beta, eps, stream_id)
y_fp8, mu, rsigma = tex.layernorm_fwd_fp8(
x, gamma, beta, eps, scale, fp8_dtype, amax, scale_inv, offset,
stream_id)
y = tex.cast_from_fp8(y_fp8, scale_inv, fp8_dtype, TE_DType[x.dtype],
offset, stream_id)
self.assertAllClose(y, y_ref, rtol=0.1, atol=0.01)
self.assertAllClose(mu, mu_ref)
self.assertAllClose(rsigma, rsigma_ref)
@test_util.run_gpu_only
def testGeluForwardFp8(self):
if not tf.test.is_gpu_available(True, (9, 0)):
self.skipTest('Fp8 requires Hopper+ GPU')
stream_id = get_stream_id()
fp8_dtype = tex.DType.kFloat8E4M3
M, N = (16, 32)
x = tf.random.uniform((M, N))
offset = 0
scale, amax, scale_inv = tf.ones([]), tf.zeros([]), tf.ones([])
y_ref = tf.nn.gelu(x, approximate=True)
y_fp8 = tex.te_gelu(x, scale, fp8_dtype, amax,
scale_inv, offset, stream_id)
y = tex.cast_from_fp8(y_fp8, scale_inv, fp8_dtype, TE_DType[x.dtype],
offset, stream_id)
self.assertAllClose(y, y_ref, rtol=0.1, atol=0.01)
@test_util.run_gpu_only
def testGeluForward(self):
if not tf.test.is_gpu_available(True, (9, 0)):
self.skipTest('Fp8 requires Hopper+ GPU')
stream_id = get_stream_id()
M, N = (16, 32)
x = tf.random.uniform((M, N))
y_ref = tf.nn.gelu(x, approximate=True)
y = tex.te_gelu(x, None, TE_DType[x.dtype], None, None, 0, stream_id)
self.assertAllClose(y, y_ref, rtol=0.00001, atol=0.00001)
@test_util.run_gpu_only
def testGeluBackwardFp8(self):
if not tf.test.is_gpu_available(True, (9, 0)):
self.skipTest('Fp8 requires Hopper+ GPU')
stream_id = get_stream_id()
fp8_dtype = tex.DType.kFloat8E5M2
M, K, N = (16, 32, 32)
x = tf.random.uniform((M, K))
bias = tf.random.uniform((K, ))
dy = tf.random.uniform((M, K))
offset = 0
scale, amax, scale_inv = tf.ones([]), tf.zeros([]), tf.ones([])
with tf.GradientTape(persistent=True) as tape:
tape.watch([x, bias])
x_gelu = tf.nn.bias_add(x, bias)
y = tf.nn.gelu(x_gelu, approximate=True)
loss = y * dy
dgelu_ref, dbias_ref = tape.gradient(loss, [x_gelu, bias])
dbias, dgelu_c, dgelu_t = tex.fp8_fused_cast_transpose_bgrad_dgelu(
dy, x_gelu, scale, fp8_dtype, amax, scale_inv, offset, stream_id)
dgelu = tex.cast_from_fp8(
dgelu_c, scale_inv, fp8_dtype, TE_DType[x.dtype], offset, stream_id)
self.assertAllClose(dgelu, dgelu_ref, rtol=0.1, atol=0.01)
self.assertAllClose(dbias, dbias_ref)
self.assertAllEqual(dgelu_c, tf.transpose(dgelu_t, [1, 0]))
@test_util.run_gpu_only
def testScaledUpperTriangMaskedSoftmaxFwd(self):
stream_id = get_stream_id()
B, F = (16, 32)
scale = 0.8
x = tf.random.uniform((B, F, F), dtype=tf.half)
mask_operator = tf.linalg.LinearOperatorLowerTriangular(
tf.ones((F, F), dtype=tf.bool))
mask = mask_operator.to_dense()
mask_output = tf.where(mask, scale * x, -10000.0)
y_ref = tf.nn.softmax(mask_output, axis=-1)
y = tex.scaled_upper_triang_masked_softmax_forward(x, scale, stream_id)
self.assertAllClose(y, y_ref, rtol=0.001, atol=0.001)
@test_util.run_gpu_only
def testScaledUpperTriangMaskedSoftmaxBwd(self):
stream_id = get_stream_id()
B, F = (16, 32)
scale = 0.8
x = tf.random.uniform((B, F, F), dtype=tf.half)
dy = tf.random.uniform((B, F, F), dtype=tf.half)
mask_operator = tf.linalg.LinearOperatorLowerTriangular(
tf.ones((F, F), dtype=tf.bool))
mask = mask_operator.to_dense()
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
mask_output = tf.where(mask, scale * x, -10000.0)
y = tf.nn.softmax(mask_output, axis=-1)
y = tf.cast(y, dtype=tf.half)
loss = y * dy
dx_ref = tape.gradient(loss, x)
dx = tex.scaled_upper_triang_masked_softmax_backward(
dy, y, scale, stream_id)
self.assertAllClose(dx, dx_ref, rtol=0.001, atol=0.001)
@test_util.run_gpu_only
def testScaledMaskedSoftmaxFwd(self):
stream_id = get_stream_id()
B, N, F = (16, 4, 32)
scale = 0.8
x = tf.random.uniform((B, N, F, F), dtype=tf.half)
# In NVTE, if the mask is True, the corresponding value is zeroed,
# whereas TF does the opposite. In addition, NVTE requires the mask to have
# the same number of dims as the input.
mask = tf.reshape(x[0, 0] > 0.3, shape=(1, 1, F, F))
flipped_mask = x[0, 0] <= 0.3
y_ref = tf.keras.layers.Softmax(axis=-1)(scale * x, flipped_mask)
y = tex.scaled_masked_softmax_forward(x, mask, scale, stream_id)
self.assertAllClose(y, y_ref, rtol=0.001, atol=0.001)
@test_util.run_gpu_only
def testScaledMaskedSoftmaxBwd(self):
stream_id = get_stream_id()
B, N, F = (16, 4, 32)
scale = 0.8
x = tf.random.uniform((B, N, F, F), dtype=tf.half)
dy = tf.random.uniform((B, N, F, F), dtype=tf.half)
mask = tf.reshape(x[0, 0] > 0.3, shape=(1, 1, F, F))
flipped_mask = x[0, 0] <= 0.3
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
y = tf.keras.layers.Softmax(axis=-1)(scale * x, flipped_mask)
y = tf.cast(y, dtype=tf.half)
loss = y * dy
dx_ref = tape.gradient(loss, x)
dx = tex.scaled_masked_softmax_backward(dy, y, scale, stream_id)
self.assertAllClose(dx, dx_ref, rtol=0.001, atol=0.001)
@test_util.run_gpu_only
def testScaledSoftmaxFwd(self):
stream_id = get_stream_id()
B, N, F = (16, 4, 32)
scale = 0.8
x = tf.random.uniform((B, N, F, F), dtype=tf.half)
y_ref = tf.keras.layers.Softmax(axis=-1)(scale * x)
y = tex.scaled_softmax_forward(x, scale, stream_id)
self.assertAllClose(y, y_ref, rtol=0.001, atol=0.001)
@test_util.run_gpu_only
def testScaledSoftmaxBwd(self):
stream_id = get_stream_id()
B, N, F = (16, 4, 32)
scale = 0.8
x = tf.random.uniform((B, N, F, F), dtype=tf.half)
dy = tf.random.uniform((B, N, F, F), dtype=tf.half)
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
y = tf.keras.layers.Softmax(axis=-1)(scale * x)
y = tf.cast(y, tf.half)
loss = y * dy
dx_ref = tape.gradient(loss, x)
dx = tex.scaled_softmax_backward(dy, y, scale, stream_id)
self.assertAllClose(dx, dx_ref, rtol=0.001, atol=0.001)
if __name__ == '__main__':
test.main()
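# Two illustrative numpy sketches (not TE-specific) for comments in the tests above.
import numpy as np

# testMatmulFp8: interpreting a row-major array as column major is the same as
# transposing it, so computing B^T @ A^T in the column-major view yields A @ B
# without any explicit transpose.
a, b = np.random.rand(3, 4), np.random.rand(4, 5)
assert np.allclose((b.T @ a.T).T, a @ b)

# testScaledMaskedSoftmaxFwd: NVTE zeroes the positions where its mask is True,
# while tf.keras.layers.Softmax keeps the positions where its mask is True,
# which is why the test passes `mask` to NVTE and `flipped_mask` to TF.
nvte_mask = np.array([True, False, False])     # True = masked out in NVTE
tf_keep_mask = ~nvte_mask                      # True = kept in TF
assert np.array_equal(tf_keep_mask, np.array([False, True, True]))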
| TransformerEngine-main | tests/tensorflow/test_extensions.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Tests for the fp8 layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import transformer_engine.tensorflow as te
from itertools import product
from tensorflow.keras import initializers, layers
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from transformer_engine.tensorflow import (
Dense,
DelayedScaling,
Format,
LayerNorm,
LayerNormDense,
LayerNormMLP,
)
def get_fp8_recipe(override_wgrad=False):
fp8_recipe = DelayedScaling(
margin=0, interval=1, fp8_format=Format.HYBRID,
amax_compute_algo='max', amax_history_len=3,
override_linear_precision=(False, False, override_wgrad))
return fp8_recipe
def compute_scale(amax, scale, fp8_max, margin):
"""Default function to convert amax to scaling factor."""
exp = tf.math.floor(tf.experimental.numpy.log2(fp8_max / amax)) - margin
sf = tf.math.round(tf.math.pow(2., tf.math.abs(exp)))
sf = tf.where(amax > 0.0, sf, scale)
sf = tf.where(tf.math.is_finite(amax), sf, scale)
sf = tf.where(exp < 0, 1.0 / sf, sf)
return sf
def update_scale(amax_h, scale, fp8_meta, is_fwd):
key = "fp8_max_fwd" if is_fwd else "fp8_max_bwd"
amax = tf.reduce_max(amax_h, axis=0)
fp8_max = fp8_meta[key]
margin = fp8_meta["recipe"].margin
scale = compute_scale(amax, scale, fp8_max, margin)
scale_inv = 1. / scale
return scale, scale_inv
def roll_and_update(amax_h, update):
amax_h = tf.roll(amax_h, shift=-1, axis=0)
amax_h = tf.tensor_scatter_nd_update(amax_h, [[0]], [update])
return amax_h
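# Illustrative sketch (assuming an E4M3 max of 448 and margin 0): what compute_scale
# produces for a single amax value. With amax = 0.5, exp = floor(log2(448 / 0.5)) = 9,
# so the scale becomes 2**9 = 512 and the inverse scale used for dequantization
# is 1 / 512.
example_scale = compute_scale(tf.constant(0.5), tf.constant(1.0),
                              fp8_max=448.0, margin=0)
assert float(example_scale) == 512.0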
# This function recomputes the reference results of the LayerNorm backward pass.
def get_adjusted_layernorm_dx(x, ln_dy, init):
assert x.shape == ln_dy.shape
ln_layer = layers.LayerNormalization(
gamma_initializer=init,
beta_initializer=init,
)
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
y = ln_layer(x)
loss = y * ln_dy
ln_dx, (ln_dgamma, ln_dbeta) = tape.gradient(loss, [x, ln_layer.variables])
return ln_dx, ln_dgamma, ln_dbeta
class LayersTest(test.TestCase):
def setUp(self):
super().setUp()
tf.keras.mixed_precision.set_global_policy('mixed_float16')
@test_util.run_gpu_only
def testDenseFwd(self):
B, M, K, N = 4, 8, 16, 32
init = initializers.RandomUniform(minval=0., maxval=1.)
dense_kwargs = {
"units": N,
"use_bias": True,
"kernel_initializer": init,
"bias_initializer": init,
}
dense_ref = layers.Dense(**dense_kwargs)
dense = Dense(**dense_kwargs)
x = tf.random.uniform((B, M, K))
fp8_recipe = get_fp8_recipe()
for use_fp8 in [False, True]:
if use_fp8 and not tf.test.is_gpu_available(True, (9, 0)):
continue
y_ref = dense_ref(x)
with te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
y = dense(x)
# The TE higher-precision calls use the bias fusion, so they are not
# exactly the same as the TF calls.
atol, rtol = (0.01, 0.05) if use_fp8 else (1e-3, 1e-3)
self.assertAllClose(y, y_ref, rtol, atol, msg=f"use_fp8={use_fp8}")
@test_util.run_gpu_only
def testDenseBwd(self):
B, M, K, N = 4, 8, 16, 32
init = initializers.RandomUniform(minval=0., maxval=1.)
dense_kwargs = {
"units": N,
"use_bias": True,
"kernel_initializer": init,
"bias_initializer": init,
}
dense_ref = layers.Dense(**dense_kwargs)
dense = Dense(**dense_kwargs)
dy = tf.random.uniform((B, M, N))
def _train_step(x, model, use_fp8=False, fp8_recipe=None):
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
with te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
y = model(x, training=True)
loss = y * tf.cast(dy, y.dtype)
dx, (dw, db) = tape.gradient(loss, [x, model.trainable_variables])
return dx, dw, db
x = tf.random.uniform((B, M, K))
for use_fp8, use_override in product([False, True], repeat=2):
if use_fp8 and not tf.test.is_gpu_available(True, (9, 0)):
continue
recipe = get_fp8_recipe(use_override)
dx_ref, dw_ref, db_ref = _train_step(x, dense_ref)
dx, dw, db = _train_step(
x, dense, use_fp8=use_fp8, fp8_recipe=recipe)
assert_msg = f"use_fp8={use_fp8},use_override={use_override}"
atol, rtol = (0.01, 0.05) if use_fp8 else (1e-6, 1e-6)
self.assertAllClose(dx, dx_ref, rtol, atol, msg="dx," + assert_msg)
self.assertAllClose(db, db_ref, rtol, atol, msg="db," + assert_msg)
atol, rtol = \
(0.01, 0.05) if use_fp8 and not use_override else (1e-6, 1e-6)
self.assertAllClose(dw, dw_ref, rtol, atol, msg="dw," + assert_msg)
@test_util.run_gpu_only
def testDenseSkipWeight(self):
B, M, K, N = 4, 8, 16, 32
init = initializers.RandomUniform(minval=0., maxval=1.)
dense_kwargs = {
"units": N,
"use_bias": True,
"kernel_initializer": init,
"bias_initializer": init,
}
dense_ref = layers.Dense(**dense_kwargs)
dense = Dense(**dense_kwargs, skip_weight_param_allocation=True)
x = tf.random.uniform((B, M, K))
fp8_recipe = get_fp8_recipe()
for use_fp8 in [False, True]:
if use_fp8 and not tf.test.is_gpu_available(True, (9, 0)):
continue
y_ref = dense_ref(x)
with te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
y = dense(x, kernel=dense_ref.kernel, bias=dense_ref.bias)
atol, rtol = (0.01, 0.05) if use_fp8 else (1e-3, 1e-3)
self.assertAllClose(y, y_ref, rtol, atol, msg=f"use_fp8={use_fp8}")
@test_util.run_gpu_only
def testDenseBookkeeping(self):
if not tf.test.is_gpu_available(True, (9, 0)):
self.skipTest('Fp8 requires Hopper+ GPU')
M, K, N = 16, 16, 32
init = initializers.RandomNormal(mean=0., stddev=1.)
dense = Dense(N, kernel_initializer=init)
fp8_recipe = get_fp8_recipe()
def _train_step(x, dy):
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
y = dense(x, training=True)
loss = y * tf.cast(dy, y.dtype)
dx, dw = tape.gradient(loss, [x, dense.kernel])
return dx, dw
scale_fwd_ref = tf.ones((2,))
scale_bwd_ref = tf.ones((1,))
scale_inv_fwd_ref = 1. / scale_fwd_ref
scale_inv_bwd_ref = 1. / scale_bwd_ref
amax_h_fwd_ref = tf.zeros((fp8_recipe.amax_history_len, 2))
amax_h_bwd_ref = tf.zeros((fp8_recipe.amax_history_len, 1))
atol, rtol = 0.001, 0.001
for step in range(5):
x = tf.random.normal((M, K))
dy = tf.random.normal((M, N))
dx, dw = _train_step(x, dy)
amax_x = tf.math.reduce_max(tf.math.abs(x))
amax_w = tf.math.reduce_max(tf.math.abs(dense.kernel))
amax_dy = tf.math.reduce_max(tf.math.abs(dy))
amax_h_fwd_ref = roll_and_update(amax_h_fwd_ref, [amax_x, amax_w])
amax_h_bwd_ref = roll_and_update(amax_h_bwd_ref, [amax_dy])
amax_h_fwd = dense.fp8_meta['scaling_fwd']['amax_history']
amax_h_bwd = dense.fp8_meta['scaling_bwd']['amax_history']
scale_fwd = dense.fp8_meta['scaling_fwd']['scale']
scale_bwd = dense.fp8_meta['scaling_bwd']['scale']
scale_inv_fwd = dense.fp8_meta['scaling_fwd']['scale_inv']
scale_inv_bwd = dense.fp8_meta['scaling_bwd']['scale_inv']
self.assertAllClose(
amax_h_fwd, amax_h_fwd_ref, rtol, atol, msg="amax_history_fwd")
self.assertAllClose(
amax_h_bwd, amax_h_bwd_ref, rtol, atol, msg="amax_history_bwd")
self.assertAllClose(scale_fwd, scale_fwd_ref,
rtol, atol, msg="scale_fwd")
self.assertAllClose(scale_bwd, scale_bwd_ref,
rtol, atol, msg="scale_bwd")
self.assertAllClose(
scale_inv_fwd, scale_inv_fwd_ref, rtol, atol,
msg="scale_inv_fwd")
self.assertAllClose(
scale_inv_bwd, scale_inv_bwd_ref, rtol, atol,
msg="scale_inv_bwd")
scale_fwd_ref, scale_inv_fwd_ref = update_scale(
amax_h_fwd_ref, scale_fwd_ref, dense.fp8_meta, is_fwd=True)
scale_bwd_ref, scale_inv_bwd_ref = update_scale(
amax_h_bwd_ref, scale_bwd_ref, dense.fp8_meta, is_fwd=False)
# Apply an update to the kernel to mimic the gradient descent.
dense.kernel.assign_add(tf.cast(dw, tf.float32) * 0.1)
@test_util.run_gpu_only
def testLayerNormFwd(self):
B, M, N = 4, 16, 32
init = initializers.RandomNormal(mean=0., stddev=1.)
        # The Keras layer norm actually uses fp32 computation in mixed-precision
        # mode. So, for a better comparison, we use fp32 in both the reference
        # and target layers.
ln_kwargs = {
"gamma_initializer": init,
"beta_initializer": init,
"dtype": 'float32',
}
ln_ref = layers.LayerNormalization(**ln_kwargs)
ln = LayerNorm(**ln_kwargs)
x = tf.random.normal((B, M, N))
y_ref = ln_ref(x)
y = ln(x)
self.assertAllClose(y, y_ref, msg="fwd_layer_norm:y")
@test_util.run_gpu_only
def testLayerNormBwd(self):
B, M, N = 4, 16, 32
init = initializers.RandomNormal(mean=0., stddev=1.)
ln_kwargs = {
"gamma_initializer": init,
"beta_initializer": init,
"dtype": 'float32',
}
ln_ref = layers.LayerNormalization(**ln_kwargs)
ln = LayerNorm(**ln_kwargs)
dy = tf.random.uniform((B, M, N))
def _train_step(x, model):
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
y = model(x, training=True)
loss = y * tf.cast(dy, y.dtype)
dx, (dg, dB) = tape.gradient(loss, [x, model.trainable_variables])
return dx, dg, dB
x = tf.random.uniform((B, M, N))
dx_ref, dg_ref, dB_ref = _train_step(x, ln_ref)
dx, dg, dB = _train_step(x, ln)
self.assertAllClose(dx, dx_ref, msg="bwd_layer_norm:dx")
self.assertAllClose(dB, dB_ref, msg="bwd_layer_norm:dbeta")
self.assertAllClose(dg, dg_ref, msg="bwd_layer_norm:dgamma")
@test_util.run_gpu_only
def testLayerNormDenseFwd(self):
B, M, K, N = 4, 8, 16, 32
init = initializers.RandomUniform(minval=0., maxval=1.)
ln_kwargs = {
"gamma_initializer": init,
"beta_initializer": init,
}
dense_kwargs = {
"units": N,
"use_bias": True,
"kernel_initializer": init,
"bias_initializer": init,
}
ln_ref = layers.LayerNormalization(**ln_kwargs)
dense_ref = layers.Dense(**dense_kwargs)
x = tf.random.uniform((B, M, K))
fp8_recipe = get_fp8_recipe()
for use_fp8, output_ln in product([False, True], repeat=2):
if use_fp8 and not tf.test.is_gpu_available(True, (9, 0)):
continue
ln_dense = LayerNormDense(
**ln_kwargs,
**dense_kwargs,
return_layernorm_output=output_ln,
)
y_ln_ref = ln_ref(x)
y_ref = dense_ref(y_ln_ref)
with te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
ys = ln_dense(x)
if output_ln:
y, y_ln = ys
else:
y = ys
assert_msg = f"use_fp8={use_fp8},output_ln={output_ln}"
atol, rtol = (0.01, 0.1) if use_fp8 else (1e-3, 1e-3)
self.assertAllClose(y, y_ref, rtol, atol, msg="y," + assert_msg)
if output_ln:
self.assertAllClose(
y_ln, y_ln_ref, rtol, atol, msg="y_ln," + assert_msg)
@test_util.run_gpu_only
def testLayerNormDenseBwd(self):
B, M, K, N = 4, 8, 16, 32
init = initializers.RandomUniform(minval=0., maxval=.1)
dy = tf.random.uniform((B, M, N), minval=0., maxval=1.)
x = tf.random.uniform((B, M, K), minval=0., maxval=1.)
ln_kwargs = {
"gamma_initializer": init,
"beta_initializer": init,
}
dense_kwargs = {
"units": N,
"use_bias": True,
"kernel_initializer": init,
"bias_initializer": init,
}
ln_ref = layers.LayerNormalization(**ln_kwargs)
dense_ref = layers.Dense(**dense_kwargs)
ln_dense = LayerNormDense(**ln_kwargs, **dense_kwargs)
def _train_step(x, model, use_fp8=False, fp8_recipe=None):
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
with te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
y = model(x, training=True)
loss = y * tf.cast(dy, y.dtype)
dx, (dg, dB, dw, db) = tape.gradient(
loss, [x, model.trainable_variables])
return dx, dg, dB, dw, db
def _train_step_ref(x):
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
t = ln_ref(x)
y = dense_ref(t)
loss = y * tf.cast(dy, y.dtype)
var_list = ln_ref.variables + dense_ref.variables
dx, dt, (dg, dB, dw, db) = tape.gradient(loss, [x, t, var_list])
return dx, dt, dg, dB, dw, db
for use_fp8, use_override in product([False, True], repeat=2):
if use_fp8 and not tf.test.is_gpu_available(True, (9, 0)):
continue
recipe = get_fp8_recipe(use_override)
dx_ref, ln_dy_ref, dg_ref, dB_ref, dw_ref, db_ref = _train_step_ref(
x)
dx, dg, dB, dw, db = _train_step(
x, ln_dense, use_fp8=use_fp8, fp8_recipe=recipe)
assert_msg = f"use_fp8={use_fp8},use_override={use_override}"
atol, rtol = (0.01, 0.1) if use_fp8 else (1e-3, 1e-3)
self.assertAllClose(db, db_ref, rtol, atol,
msg="dbias," + assert_msg)
self.assertAllClose(dw, dw_ref, rtol, atol,
msg="dkernel," + assert_msg)
atol, rtol = (0.1, 0.1) if use_fp8 else (1e-2, 1e-2)
self.assertAllClose(dx, dx_ref, rtol, atol,
msg="ln_dx," + assert_msg)
self.assertAllClose(dg, dg_ref, rtol, atol,
msg="dgamma," + assert_msg)
self.assertAllClose(dB, dB_ref, rtol, atol,
msg="dbeta," + assert_msg)
@test_util.run_gpu_only
def testLayerNormDenseSkipWeight(self):
B, M, K, N = 4, 8, 16, 32
init = initializers.RandomUniform(minval=0., maxval=1.)
ln_kwargs = {
"gamma_initializer": init,
"beta_initializer": init,
}
dense_kwargs = {
"units": N,
"use_bias": True,
"kernel_initializer": init,
"bias_initializer": init,
}
ln_ref = layers.LayerNormalization(**ln_kwargs)
dense_ref = layers.Dense(**dense_kwargs)
ln_dense = LayerNormDense(
**ln_kwargs,
**dense_kwargs,
skip_weight_param_allocation=True,
)
x = tf.random.uniform((B, M, K))
fp8_recipe = get_fp8_recipe()
for use_fp8 in [False, True]:
if use_fp8 and not tf.test.is_gpu_available(True, (9, 0)):
continue
y_ref = dense_ref(ln_ref(x))
with te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
y = ln_dense(x, kernel=dense_ref.kernel, bias=dense_ref.bias)
atol, rtol = (0.01, 0.1) if use_fp8 else (1e-3, 1e-3)
self.assertAllClose(y, y_ref, rtol, atol, msg=f"use_fp8={use_fp8}")
@test_util.run_gpu_only
def testLayerNormMLPFwd(self):
B, M, K, N, O = 4, 8, 16, 32, 64
init = initializers.RandomUniform(minval=0., maxval=1.)
ln_kwargs = {
"gamma_initializer": init,
"beta_initializer": init,
}
dense_common_kwargs = {
"use_bias": True,
"kernel_initializer": init,
"bias_initializer": init,
}
ln_ref = layers.LayerNormalization(**ln_kwargs)
dense1_ref = layers.Dense(**dense_common_kwargs, units=N)
dense2_ref = layers.Dense(**dense_common_kwargs, units=O)
x = tf.random.uniform((B, M, K))
fp8_recipe = get_fp8_recipe()
for use_fp8, output_ln in product([False, True], repeat=2):
if use_fp8 and not tf.test.is_gpu_available(True, (9, 0)):
continue
ln_mlp = LayerNormMLP(
**ln_kwargs,
**dense_common_kwargs,
units=N,
ffn_units=O,
ffn_kernel_initializer=init,
return_layernorm_output=output_ln,
)
y_ln_ref = ln_ref(x)
y_dense1_ref = dense1_ref(y_ln_ref)
y_gelu_ref = tf.nn.gelu(y_dense1_ref, approximate=True)
y_ref = dense2_ref(y_gelu_ref)
with te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
ys = ln_mlp(x)
if output_ln:
y, y_ln = ys
else:
y = ys
assert_msg = f"use_fp8={use_fp8},output_ln={output_ln}"
atol, rtol = (0.01, 0.1) if use_fp8 else (1e-3, 2e-3)
self.assertAllClose(y, y_ref, rtol, atol, msg="y," + assert_msg)
if output_ln:
self.assertAllClose(
y_ln, y_ln_ref, rtol, atol, msg="y_ln," + assert_msg)
@test_util.run_gpu_only
def testLayerNormMLPBwd(self):
B, M, K, N, O = 4, 8, 16, 32, 64
init = initializers.RandomUniform(minval=0., maxval=.1)
dy = tf.random.uniform((B, M, O), minval=0., maxval=1.)
x = tf.random.uniform((B, M, K), minval=0., maxval=1.)
ln_kwargs = {
"gamma_initializer": init,
"beta_initializer": init,
}
dense_common_kwargs = {
"use_bias": True,
"kernel_initializer": init,
"bias_initializer": init,
}
ln_ref = layers.LayerNormalization(**ln_kwargs)
dense1_ref = layers.Dense(**dense_common_kwargs, units=N)
dense2_ref = layers.Dense(**dense_common_kwargs, units=O)
ln_mlp = LayerNormMLP(
**ln_kwargs,
**dense_common_kwargs,
units=N,
ffn_units=O,
ffn_kernel_initializer=init,
)
def _train_step(x, model, use_fp8=False, fp8_recipe=None):
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
with te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
y = model(x, training=True)
loss = y * tf.cast(dy, y.dtype)
dx, (dg, dB, dw1, db1, dw2, db2) = tape.gradient(
loss, [x, model.trainable_variables])
return dx, dg, dB, dw1, db1, dw2, db2
def _train_step_ref(x):
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
t = ln_ref(x)
y_gelu = tf.nn.gelu(dense1_ref(t), approximate=True)
y = dense2_ref(y_gelu)
loss = y * tf.cast(dy, y.dtype)
var_list = ln_ref.variables + dense1_ref.variables + \
dense2_ref.variables
dx, dt, (dg, dB, dw1, db1, dw2, db2) = tape.gradient(
loss, [x, t, var_list])
return dx, dt, dg, dB, dw1, db1, dw2, db2
for use_fp8, use_override in product([False, True], repeat=2):
if use_fp8 and not tf.test.is_gpu_available(True, (9, 0)):
continue
recipe = get_fp8_recipe(use_override)
dx_ref, ln_dy_ref, dg_ref, dB_ref, dw1_ref, db1_ref, dw2_ref, \
db2_ref = _train_step_ref(x)
dx, dg, dB, dw1, db1, dw2, db2 = _train_step(
x, ln_mlp, use_fp8=use_fp8, fp8_recipe=recipe)
assert_msg = f"use_fp8={use_fp8},use_override={use_override}"
atol, rtol = (0.01, 0.1) if use_fp8 else (1e-3, 1e-3)
self.assertAllClose(
db2, db2_ref, rtol, atol, msg="fc2_dbias," + assert_msg)
self.assertAllClose(
dw2, dw2_ref, rtol, atol, msg="fc2_dw," + assert_msg)
self.assertAllClose(
db1, db1_ref, rtol, atol, msg="fc1_dbias," + assert_msg)
self.assertAllClose(
dw1, dw1_ref, rtol, atol, msg="fc1_dw," + assert_msg)
atol, rtol = (0.1, 0.1) if use_fp8 else (1e-2, 1e-2)
self.assertAllClose(dx, dx_ref, rtol, atol,
msg="ln_dx," + assert_msg)
self.assertAllClose(dg, dg_ref, rtol, atol,
msg="dgamma," + assert_msg)
self.assertAllClose(dB, dB_ref, rtol, atol,
msg="dbeta," + assert_msg)
if __name__ == '__main__':
test.main()
| TransformerEngine-main | tests/tensorflow/test_layers.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Tests for the MHA layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import transformer_engine.tensorflow as te
from tensorflow.keras.layers import EinsumDense
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from transformer_engine.tensorflow import (
DelayedScaling,
Format,
MultiHeadAttention,
)
def train_step(dy, x_q, x_kv, x_mask, model, attn_type, use_fp8=False,
fp8_recipe=None):
with tf.GradientTape(persistent=True) as tape:
tape.watch(x_q)
if attn_type == 'cross':
tape.watch(x_kv)
with te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
            # The MHA does not apply the bias addition for the last projection
            # but returns the bias instead, so we apply the bias addition here
            # at the end.
y, b = model(x_q, x_mask, x_kv, training=True)
y = y + tf.cast(b, y.dtype)
loss = y * tf.cast(dy, dtype=y.dtype)
xs = [x_q]
if attn_type == 'cross':
xs.append(x_kv)
dxs, dvars = tape.gradient(loss, [xs, model.trainable_variables])
return y, dxs, dvars
class MultiHeadAttentionKeras(tf.keras.Model):
def __init__(self, hidden_size, num_heads, attention_type, init_method):
super(MultiHeadAttentionKeras, self).__init__()
assert hidden_size % num_heads == 0
assert attention_type in ('self', 'cross')
self.num_heads = num_heads
self.hidden_size = hidden_size
self.depth = hidden_size // self.num_heads
self.attention_type = attention_type
# Einsum symbols:
# F=seq_q, T=seq_kv, B=batches, H=hidden_states, D=hidden_size,
# N=num_heads, E=depth
if attention_type == 'self':
self.QKV = EinsumDense('FBH,HD->FBD',
output_shape=(None, 3 * hidden_size),
bias_axes='D',
kernel_initializer=init_method)
else:
self.Q = EinsumDense('FBH,HD->FBD',
output_shape=(None, hidden_size),
bias_axes='D',
kernel_initializer=init_method)
self.KV = EinsumDense('TBH,HD->TBD',
output_shape=(None, 2 * hidden_size),
bias_axes='D',
kernel_initializer=init_method)
# The bias in the projection layer will be applied separately outside
# the MHA. So, we disable the bias in the Einsum but handle the bias at
# the end.
self.dense = EinsumDense('FBNE,NED->FBD',
output_shape=(None, hidden_size),
bias_axes=None,
kernel_initializer=init_method)
b_init = tf.zeros_initializer()
self.dense_bias = tf.Variable(
initial_value=b_init(shape=(hidden_size,),
dtype="float32"),
trainable=True,
)
def __call__(self, q_input, mask=None, kv_input=None, training=None):
if self.attention_type == 'self':
# [F, B, 3 * D]
qkv = self.QKV(q_input)
# [F, B, N, 3 * E]
qkv = tf.reshape(
qkv, (*qkv.shape[: -1],
self.num_heads, 3 * self.depth))
# 3 * [F, B, N, E]
q, k, v = tf.split(qkv, num_or_size_splits=3, axis=-1)
else:
# [F, B, D]
q = self.Q(q_input)
# [F, B, N, E]
q = tf.reshape(q, (*q.shape[:-1], self.num_heads, self.depth))
# [F, B, 2 * D]
kv = self.KV(kv_input)
# [F, B, N, 2 * E]
kv = tf.reshape(
kv, (*kv.shape[: -1],
self.num_heads, 2 * self.depth))
# 2 * [F, B, N, E]
k, v = tf.split(kv, num_or_size_splits=2, axis=-1)
dk = tf.cast(tf.shape(k)[-1], self._compute_dtype_object)
matmul_qk = tf.einsum('FBNE,TBNE->BNFT', q, k)
scaled_attn_logits = matmul_qk / tf.math.sqrt(dk)
if mask is not None:
scaled_attn_logits = tf.where(mask, scaled_attn_logits, -10000.0)
# [B, N, F, T]
attention_weights = tf.nn.softmax(scaled_attn_logits, axis=-1)
# [B, N, F, E]
scaled_attention = tf.einsum('BNFT,TBNE->BNFE', attention_weights, v)
# [F, B, N, E]
scaled_attention = tf.transpose(scaled_attention, perm=(2, 0, 1, 3))
# [F, B, D]
output = self.dense(scaled_attention)
return output, self.dense_bias
class MHATest(test.TestCase):
def setUp(self):
super().setUp()
tf.keras.mixed_precision.set_global_policy('mixed_float16')
@test_util.run_gpu_only
def testMHAForward(self):
use_fp8 = tf.test.is_gpu_available(True, (9, 0))
batches, seq_q, seq_kv, hidden_states = 16, 32, 32, 64
num_heads, depth = 4, 16
hidden_size = num_heads * depth
q_shape = (seq_q, batches, hidden_states)
kv_shape = (seq_kv, batches, hidden_states)
init = tf.keras.initializers.RandomUniform(minval=0., maxval=.1)
x_q = tf.random.uniform(q_shape, minval=0., maxval=.1)
x_kv = tf.random.uniform(kv_shape, minval=0., maxval=.1)
for attn_type in ('self', 'cross'):
for use_mask in (True, False):
mha_einsum = MultiHeadAttentionKeras(
hidden_size, num_heads, attn_type, init)
                # The attention mask type needs to be `padding`, which uses the
                # provided mask. Alternatively, `causal` ignores the provided
                # mask and uses an upper triangular mask.
mha = MultiHeadAttention(
hidden_size=hidden_size,
num_attention_heads=num_heads,
kv_channels=depth,
attention_dropout=0.0,
attention_softmax_in_fp32=True,
init_method=init,
output_layer_init_method=init,
input_layernorm=False,
attention_type=attn_type,
attn_mask_type='padding',
)
x_mask = tf.random.uniform(
(seq_q, seq_kv)) > 0.5 if use_mask else None
y_ref, y_b_ref = mha_einsum(x_q, x_mask, x_kv)
fp8_recipe = DelayedScaling(
margin=0, interval=1, fp8_format=Format.HYBRID,
amax_compute_algo='max', amax_history_len=3)
with te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
y, y_b = mha(x_q, x_mask, x_kv)
self.assertAllClose(y, y_ref, rtol=0.01, atol=0.01, msg='y')
self.assertAllClose(y_b, y_b_ref, msg='y_bias')
@test_util.run_gpu_only
def testMHABackward(self):
use_fp8 = tf.test.is_gpu_available(True, (9, 0))
batches, seq_q, seq_kv, hidden_states = 4, 8, 8, 32
num_heads, depth = 4, 8
hidden_size = num_heads * depth
q_shape = (seq_q, batches, hidden_states)
kv_shape = (seq_kv, batches, hidden_states)
out_shape = (seq_q, batches, hidden_size)
init = tf.keras.initializers.RandomUniform(minval=0., maxval=.1)
x_q = tf.random.uniform(q_shape, minval=0., maxval=.1)
x_kv = tf.random.uniform(kv_shape, minval=0., maxval=.1)
dy = tf.random.uniform(out_shape, minval=0., maxval=1.)
for attn_type in ('self', 'cross'):
for use_mask in (False, True):
mha_einsum = MultiHeadAttentionKeras(
hidden_size, num_heads, attn_type, init)
mha = MultiHeadAttention(
hidden_size=hidden_size,
num_attention_heads=num_heads,
kv_channels=depth,
attention_dropout=0.0,
attention_softmax_in_fp32=True,
init_method=init,
output_layer_init_method=init,
input_layernorm=False,
attention_type=attn_type,
attn_mask_type='padding',
)
x_mask = tf.random.uniform(
(seq_q, seq_kv)) > 0.5 if use_mask else None
y_ref, dxs_ref, dvars_ref = train_step(
dy, x_q, x_kv, x_mask, mha_einsum, attn_type)
fp8_recipe = DelayedScaling(
margin=0, interval=1, fp8_format=Format.HYBRID,
amax_compute_algo='max', amax_history_len=3)
y, dxs, dvars = train_step(
dy, x_q, x_kv, x_mask, mha, attn_type, use_fp8, fp8_recipe)
for dx, dx_ref in zip(dxs, dxs_ref):
self.assertAllClose(
dx, dx_ref, rtol=0.1, atol=0.1, msg='dx')
if attn_type == 'cross':
# The variable lists are:
# [q_w, kv_w, q_b, kv_b, proj_w, proj_b] (target)
# [q_w, q_b, kv_w, kv_b, proj_w, proj_b] (reference)
self.assertEqual(len(dvars), 6)
self.assertEqual(len(dvars), len(dvars_ref))
dws = [dvars[i] for i in [0, 1, 4]]
dws_ref = [dvars_ref[i] for i in [0, 2, 4]]
dbs = [dvars[i] for i in [2, 3, 5]]
dbs_ref = [dvars_ref[i] for i in [1, 3, 5]]
else:
# The variable lists are:
# [qkv_w, qkv_b, proj_w, proj_b] (target)
# [qkv_w, qkv_b, proj_w, proj_b] (reference)
self.assertEqual(len(dvars), 4)
self.assertEqual(len(dvars), len(dvars_ref))
dws = [dvars[i] for i in [0, 2]]
dws_ref = [dvars_ref[i] for i in [0, 2]]
dbs = [dvars[i] for i in [1, 3]]
dbs_ref = [dvars_ref[i] for i in [1, 3]]
for dv, dv_ref in zip(dws, dws_ref):
self.assertAllClose(
dv, tf.reshape(dv_ref, dv.shape),
rtol=0.1, atol=0.1, msg='dkernel')
for dv, dv_ref in zip(dbs, dbs_ref):
self.assertAllClose(dv, dv_ref, rtol=0.2,
atol=0.2, msg='dbias')
if __name__ == '__main__':
test.main()
| TransformerEngine-main | tests/tensorflow/test_mha.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import transformer_engine.tensorflow
print("OK")
| TransformerEngine-main | tests/tensorflow/test_sanity_import.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Tests for the Transformer layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import transformer_engine.tensorflow as te
from tensorflow.keras.layers import EinsumDense
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from transformer_engine.tensorflow import (
DelayedScaling,
Format,
TransformerLayer,
)
def train_step(dy, x, x_mask, x_dec, x_dec_mask, model, use_fp8=False,
fp8_recipe=None):
with tf.GradientTape(persistent=True) as tape:
tape.watch(x)
with te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
y = model(
hidden_states=x,
attention_mask=x_mask,
encoder_output=x_dec,
enc_dec_attn_mask=x_dec_mask,
training=True,
)
loss = y * tf.cast(dy, dtype=y.dtype)
dx, dvars = tape.gradient(loss, [x, model.trainable_variables])
return y, dx, dvars
class TransformerLayerTest(test.TestCase):
def setUp(self):
super().setUp()
tf.keras.mixed_precision.set_global_policy('mixed_float16')
@test_util.run_gpu_only
def testTransformerSanity(self):
use_fp8 = tf.test.is_gpu_available(True, (9, 0))
# F=seq_len, B=batch, H=hidden_states, N=num_heads
F, B, H, N = 8, 4, 32, 2
# E=depth
E = H // N
# D=hidden_size
D = N * E
input_shape = (F, B, H)
output_shape = (F, B, D)
init = tf.keras.initializers.RandomUniform(minval=0., maxval=.1)
x = tf.random.uniform(input_shape, minval=0., maxval=.1)
x_dec = tf.random.uniform(input_shape, minval=0., maxval=10.)
dy = tf.random.uniform(output_shape, minval=0., maxval=.1)
transformer = TransformerLayer(
hidden_size=D,
ffn_hidden_size=D,
num_attention_heads=N,
layernorm_epsilon=1e-5,
hidden_dropout=0.01,
attention_dropout=0.0,
init_method=init,
output_layer_init_method=init,
layer_number=None,
kv_channels=None,
self_attn_mask_type="padding",
apply_query_key_layer_scaling=True,
attention_softmax_in_fp32=False,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
layer_type="decoder",
drop_path_rate=0.1,
fuse_qkv_params=False,
)
fp8_recipe = DelayedScaling(
margin=0, interval=1, fp8_format=Format.HYBRID,
amax_compute_algo='max', amax_history_len=3)
y_ref, dx_ref, dvars_ref = train_step(
dy, x, None, x_dec, None, transformer, use_fp8=False)
y, dx, dvars = train_step(dy, x, None, x_dec, None, transformer,
use_fp8=use_fp8, fp8_recipe=fp8_recipe)
self.assertAllClose(y, y_ref, rtol=0.1, atol=0.01, msg="fwd-y")
self.assertAllClose(dx, dx_ref, rtol=0.5, atol=0.7, msg="bwd-dx")
self.assertEqual(len(dvars), len(dvars_ref))
dvs = []
for v, dv, dv_ref in zip(
transformer.trainable_variables, dvars, dvars_ref):
dvs.append((v.name, dv, dv_ref))
for v_name, dv, dv_ref in reversed(dvs):
            # The ranges of these two biases are relatively large, so we choose
            # larger atols here.
if v_name == 'multi_head_attention/dense/bias:0':
self.assertAllClose(dv, dv_ref, rtol=.1,
atol=4., msg="bwd-" + v_name)
continue
if v_name == 'multi_head_attention/qkv_bias:0':
self.assertAllClose(dv, dv_ref, rtol=.1,
atol=2., msg="bwd-" + v_name)
continue
atol, rtol = (0.5, 0.6) if tf.reduce_max(
dv_ref) > 1. else (0.05, 0.05)
self.assertAllClose(dv, dv_ref, rtol=rtol,
atol=atol, msg="bwd-" + v_name)
if __name__ == '__main__':
test.main()
| TransformerEngine-main | tests/tensorflow/test_transformer.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""
This file contains tests for exporting TransformerEngine models to ONNX.
The purpose of these tests is to validate that TE models are converted to their correct ONNX
representation. Toward this end, each test captures the output of a TE module forward pass,
converts the TE module to ONNX, and uses ONNX Runtime (ORT) to execute the ONNX graph and
validate the output against TE's output.
Until FP8 is introduced to the ONNX standard, FP8 QuantizeLinear/DequantizeLinear is implemented
using custom ORT operations.
To run many repetitive tests use pytest-loop:
$ python3 -m pip install pytest-loop
$ pytest --loop 1000 tests/pytorch/test_onnx_export.py::test_export_layernorm
For reproducibility use: torch.manual_seed(0)
"""
import os
import tempfile
import pytest
import warnings
import numpy as np
import onnxruntime as ort
import torch
from torch import nn as nn
from typing import Optional, Union, Tuple, List
import transformer_engine.pytorch as te
from transformer_engine.common import recipe
import transformer_engine_extensions as tex
from transformer_engine.pytorch.cpp_extensions import gemm, fp8_gemm, gelu, cast_to_fp8, cast_from_fp8
from transformer_engine.pytorch.module.base import get_workspace
import transformer_engine.pytorch.cpp_extensions as texcpp
import transformer_engine.pytorch.softmax as softmax_defs
from transformer_engine.pytorch.utils import get_default_init_method
from transformer_engine.pytorch.export import is_in_onnx_export_mode
from transformer_engine.pytorch.fp8 import FP8GlobalStateManager
# Global test configuration knobs.
# Enable this to serialize test inputs and outputs to file (as a Polygraphy RunResults instance).
SAVE_TEST_IO = bool(int(os.getenv("NVTE_ONNX_EXPORT_SAVE_TEST_IO", "0")))
if SAVE_TEST_IO:
from polygraphy.json import save_json
from polygraphy.comparator import RunResults
# The directory where generated ONNX test models are stored.
NVTE_TEST_ARTIFACTS_DIR = os.environ.get('NVTE_TEST_ARTIFACTS_DIR')
NVTE_TEST_ARTIFACTS_DIR = NVTE_TEST_ARTIFACTS_DIR or os.path.join(tempfile.gettempdir(), "./gen_onnx_models")
# The directory where this file is stored.
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
# ScaledUpperTriangMaskedSoftmax is exported via ONNX::Trilu which was introduced in opset 14.
TRILU_OPSET = 14
# Opset used in the ONNX files generated by the tests.
OPSET = 17
assert OPSET >= TRILU_OPSET
# Shared library implementing custom FP8 Q/DQ operators for ONNX Runtime (ORT).
ORT_CUSTOM_OPS_LIB = os.path.join(TESTS_DIR, "./libcustom_ort_fp8_qdq_ops.so")
fp8_available, reason_for_no_fp8 = FP8GlobalStateManager.is_fp8_available()
skip_FP8 = pytest.mark.skipif(not fp8_available, reason=reason_for_no_fp8)
supported_activations = ["gelu", "relu", "reglu", "geglu", "swiglu"]
all_normalizations = ["LayerNorm", "RMSNorm"]
@pytest.fixture()
def seed_default_rng():
"""Reseed the PRNG for test reproducibility"""
torch.random.seed()
@pytest.fixture()
def set_max_seq_len(max_seq_len=128):
"""Set the maximum sequence length that can be used for attention masking"""
os.environ["NVTE_ONNX_KVCACHE_MAX_SEQ_LEN"] = f"{max_seq_len}"
def create_fp8_recipe():
return recipe.DelayedScaling(margin=0, interval=1, fp8_format=recipe.Format.E4M3)
def do_export(
model: torch.nn.Module,
inp: torch.Tensor,
fname: str,
use_fp8: bool=True,
opset: int=OPSET,
input_names: List[str]=None,
output_names: List[str]=None,
dynamic_axes: List[str]=None
):
"""Export to ONNX"""
fp8_recipe = create_fp8_recipe()
input_names = input_names or ["input"]
output_names = output_names or ["output"]
with torch.inference_mode(), te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe), warnings.catch_warnings():
warnings.filterwarnings(
action='ignore',
category=torch.jit.TracerWarning,
module=r'.*'
)
model.cuda().eval()
os.makedirs(NVTE_TEST_ARTIFACTS_DIR, exist_ok=True)
fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)
inps = inp if isinstance(inp, list) or isinstance(inp, tuple) else (inp,)
assert len(inps) == len(input_names)
inds_to_del = [i for i in range(len(inps)) if inps[i] is None]
input_names = [input_names[i] for i in range(len(inps)) if i not in inds_to_del]
with te.onnx_export(True):
torch.onnx.export(
model,
inps,
fname,
verbose=True,
dynamic_axes=dynamic_axes,
opset_version=opset,
input_names=input_names,
output_names=output_names,
do_constant_folding=True,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH)
def to_numpy(tensor):
if isinstance(tensor, torch.Tensor):
if tensor.dtype == torch.bfloat16:
tensor = tensor.type(torch.float32)
tensor = tensor.detach().cpu().numpy()
return tensor
def set_layer_scale(module: torch.nn.Module, scale: float, num_gemms: int):
"""Initialize the FP8 quantization scales in module"""
NB_SCALES_PER_GEMM = 3 # One scale per: input, weights, and output GEMM tensors.
nb_total_scales = num_gemms * NB_SCALES_PER_GEMM
module.fp8_init(num_gemms)
module.fp8_meta["scaling_fwd"].scale = torch.ones(
nb_total_scales, dtype=torch.float32, device="cuda") / scale
module.fp8_meta["scaling_fwd"].scale_inv = torch.ones(
nb_total_scales, dtype=torch.float32, device="cuda") * scale
def te_infer(model: torch.nn.Module, inps: Union[Tuple[torch.tensor], torch.tensor], is_fp8: bool):
"""Transformer Engine forward propagation."""
fp8_recipe = create_fp8_recipe()
with torch.inference_mode(), te.fp8_autocast(enabled=is_fp8, fp8_recipe=fp8_recipe), warnings.catch_warnings():
te_outputs = model(*inps if isinstance(inps, tuple) else (inps,))
if not isinstance(te_outputs, tuple):
te_outputs = (te_outputs,)
return te_outputs
def compare_outputs(onnx_outputs, te_outputs, atol, rtol, max_errors_printed, allow_cnt_errors, fname):
""" Compare ORT and TE outputs."""
assert len(onnx_outputs) == len(te_outputs)
# Compare ORT and PyTorch outputs.
for onnx_output, te_output in zip(onnx_outputs, te_outputs):
# np.isclose: abs(a - b) <= (atol + rtol * abs(b))
te_output = to_numpy(te_output)
onnx_output = to_numpy(onnx_output)
ac = ~np.isclose(onnx_output, te_output, atol=atol, rtol=rtol)
mismatches = ac.nonzero()
mismatched_ids = [loc for loc in zip(*mismatches)]
if mismatched_ids:
# Log some information in case of error.
print("*" * 100)
nb_errors = len(mismatched_ids)
nb_vals = min(nb_errors, max_errors_printed)
print(f"Detected {nb_errors} diverging values (output shape={onnx_output.shape})")
print(f"Showing first {nb_vals} errors (ONNX -- TE):")
abs_err = np.abs(onnx_output - te_output)
errors = abs_err[mismatches]
for loc in mismatched_ids[:nb_vals]:
ref = te_output[loc]
print(f"{onnx_output[loc]} -- {te_output[loc]} err={abs_err[loc]} > {atol + rtol * abs(ref)}")
print(f"Max error: {np.max(errors)}")
if nb_errors > allow_cnt_errors:
raise ValueError(f"Output validation of {fname} failed with {nb_errors} errors")
def serialize_inputs_outputs(
fname: str,
inputs: Union[Tuple[torch.Tensor], torch.Tensor],
te_outputs: List[torch.Tensor],
input_names: Optional[List[str]] = None,
output_names: Optional[List[str]] = None,
):
if not SAVE_TEST_IO:
return
fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)
input_names = input_names or ["input"]
output_names = output_names or ["output"]
inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else (inputs,)
named_inputs = zip(input_names, inputs)
input_data = [{k: v.cpu() for k, v in named_inputs if v is not None}]
json_fname = fname[:-len(".onnx")] + "_inputs.json"
save_json(input_data, json_fname, description="custom input data")
json_fname = fname[:-len(".onnx")] + "_output.json"
named_outputs = zip(output_names, te_outputs)
output_data = {k: v.detach().cpu() for k, v in named_outputs if v is not None}
custom_outputs = RunResults()
custom_outputs.add([output_data], runner_name="custom_runner")
custom_outputs.save(json_fname)
def validate_result(
fname: str,
inps: Union[Tuple[torch.Tensor], torch.Tensor],
model: torch.nn.Module,
atol: float=1.e-8, # np.isclose default atol
rtol: float=1.e-5, # np.isclose default rtol
max_errors_printed: int=10,
is_fp8: bool=False,
allow_cnt_errors: int=0,
input_names: List[str]=None,
output_names: List[str]=None,
te_outputs: List[torch.Tensor]=None,
):
"""Compare the outputs of a Transformer Engine (TE) module vs the outputs of its ONNX
representation using ONNX Runtime (ORT) and ensure they are close.
The purpose of the output comparison is to validate that TE models are converted to
their correct ONNX representation by testing that TE and ORT outputs match within some
small threshold (allowing for finite precision errors).
    Argument `allow_cnt_errors` reduces test failure noise due to spurious errors by ignoring
a very small number (0-3) of outliers. This is fine to do because these outliers are due to
small kernel implementation differences between TE and ORT and do not imply an incorrect ONNX
    representation (the tests assume both the ORT and TE kernels are correct).
Argument `te_outputs` can be used to provide pre-computed TE outputs.
"""
    def create_ort_session(fname: str, is_fp8: bool):
        """Create an ONNX Runtime session for validation."""
        def load_custom_ops(session_opts: ort.SessionOptions):
            """For FP8 validation with ORT we need to load our custom FP8 Q/DQ extension."""
            if not os.path.exists(ORT_CUSTOM_OPS_LIB):
                raise FileNotFoundError(f"Unable to find {ORT_CUSTOM_OPS_LIB}")
            session_opts.register_custom_ops_library(ORT_CUSTOM_OPS_LIB)
            print("registered custom FP8 Q/DQ ops!")
kwargs = {"providers": ['CUDAExecutionProvider', 'CPUExecutionProvider']}
if is_fp8:
sess_options = ort.SessionOptions()
load_custom_ops(sess_options)
kwargs["sess_options"] = sess_options
s = ort.InferenceSession(fname, **kwargs)
return s
def create_ort_input_dict(session, inputs):
inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else (inputs,)
input_names = [x.name for x in session.get_inputs()]
inps = [to_numpy(x) for x in inputs if x is not None]
inp_dict = dict(zip(input_names, inps))
return inp_dict
input_names = input_names or ["input"]
output_names = output_names or ["output"]
# Run ORT session and TE model.
fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)
if not te_outputs:
te_outputs = te_infer(model, inps, is_fp8)
ort_s = create_ort_session(fname, is_fp8)
input_feed = create_ort_input_dict(ort_s, inps)
onnx_outputs = ort_s.run(None, input_feed=input_feed)
compare_outputs(onnx_outputs, te_outputs, atol, rtol, max_errors_printed, allow_cnt_errors, fname)
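# Hypothetical usage sketch (this helper is an illustration and is not called by
# the tests): every test case below follows the same pattern built from the
# helpers above -- export the module to ONNX, run TE inference, optionally
# serialize the I/O, and validate the ONNX graph against the TE outputs.
def _export_and_validate_sketch(model, inp, fname, use_fp8=False, atol=1e-3):
    do_export(model, inp, fname, use_fp8=use_fp8)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs)
    validate_result(fname, inp, model, atol=atol, is_fp8=use_fp8, te_outputs=te_outputs)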
def create_meta(scale_factor: float, size: int=1):
meta = tex.FP8TensorMeta()
meta.amax_history = torch.zeros(1, size, dtype=torch.float32, device="cuda")
meta.scale_inv = torch.ones(size, dtype=torch.float32, device="cuda") / scale_factor
meta.scale = torch.ones(size, dtype=torch.float32, device="cuda") * scale_factor
return meta
def dtype2str(dtype: torch.dtype, fake_bf16_io=False):
if fake_bf16_io:
assert dtype == torch.bfloat16
return "_fake_bf16"
return {
torch.float32: "_fp32",
torch.float16: "_fp16",
torch.bfloat16: "_bf16",
}[dtype]
def as_te_type(dtype: torch.dtype):
return {
torch.float32: tex.DType.kFloat32,
torch.float16: tex.DType.kFloat16,
torch.bfloat16: tex.DType.kBFloat16,
}[dtype]
def get_attn_mask_str(use_mask, attn_mask_type):
# See FusedScaleMaskSoftmax::forward_fused_softmax for logic behind names.
if attn_mask_type is None:
return "_mask" if use_mask else "_no-mask"
attn_mask_str = "_padding-no-mask"
attn_mask_str = "_causal-mask" if attn_mask_type == "causal" else attn_mask_str
attn_mask_str = "_padding-mask" if use_mask and attn_mask_type == "padding" else attn_mask_str
return attn_mask_str
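# Worked examples (derived from the logic above): get_attn_mask_str(True, "padding")
# returns "_padding-mask", get_attn_mask_str(False, "causal") returns
# "_causal-mask", and get_attn_mask_str(False, None) returns "_no-mask".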
"""
Test cases begin here.
"""
@skip_FP8
@pytest.mark.parametrize("scale_factor", [1, 224])
@pytest.mark.parametrize(
"precision, atol", [
[torch.float32, 1e-7],
[torch.float16, 1e-7],
[torch.bfloat16, 5e-3],
["fake-torch.bfloat16", 5e-3],
])
def test_export_cast_ops(seed_default_rng, scale_factor: float, atol: float, precision: torch.dtype):
fake_bf16_io = precision == "fake-torch.bfloat16"
# reset precision to torch.bfloat16 after capturing fake BF16 mode
precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision
class TestFP8_QDQ(nn.Module):
def __init__(self, fake_bf16_io):
super().__init__()
self.fp8_tensor = 0
self.meta = create_meta(scale_factor)
self.highprec_type = as_te_type(precision)
self.fp8_type = tex.DType.kFloat8E4M3
self.fake_bf16_io = fake_bf16_io
def forward(self, inp):
ret = cast_to_fp8(
inp,
self.meta,
self.fp8_tensor,
self.fp8_type)
ret = cast_from_fp8(
ret,
self.meta,
self.fp8_tensor,
self.fp8_type,
self.highprec_type)
if self.fake_bf16_io:
ret = ret.type(torch.float32)
return ret
# Set dimensions (these are arbitrary).
in_features = 64
hidden_size = 256
inp = torch.randn(hidden_size, in_features, device="cuda",
dtype=torch.float if fake_bf16_io else precision)
high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
fname = f"te.cast_fp8_{scale_factor}{high_prec_str}.onnx"
model = TestFP8_QDQ(fake_bf16_io)
do_export(model, inp, fname)
te_outputs = te_infer(model, inp, is_fp8=True)
serialize_inputs_outputs(fname, inp, te_outputs)
if fake_bf16_io or precision != torch.bfloat16:
validate_result(fname, inp, model, atol=atol, is_fp8=True, te_outputs=te_outputs)
@skip_FP8
@pytest.mark.parametrize("scale_factor", [448])
@pytest.mark.parametrize(
"precision, atol", [
[torch.float32, 1e-5],
[torch.float16, 1e-5],
[torch.bfloat16, 5e-3],
["fake-torch.bfloat16", 5e-3]
])
def test_export_gelu_fp8(scale_factor: float, precision: torch.dtype, atol: float):
fake_bf16_io = precision == "fake-torch.bfloat16"
# reset precision to torch.bfloat16 after capturing fake BF16 mode
precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision
class TestFP8_Gelu(nn.Module):
def __init__(self, fake_bf16_io):
super().__init__()
self.fp8_tensor = 0
self.meta = create_meta(scale_factor)
self.highprec_type = as_te_type(precision)
self.fp8_type = tex.DType.kFloat8E4M3
self.fake_bf16_io = fake_bf16_io
def forward(self, inp):
ret = gelu(
inp,
self.meta,
self.fp8_tensor,
self.fp8_type)
ret = cast_from_fp8(
ret,
self.meta,
self.fp8_tensor,
self.fp8_type,
self.highprec_type)
if self.fake_bf16_io:
ret = ret.type(torch.float32)
return ret
# Set dimensions (these are arbitrary).
in_features = 64
hidden_size = 256
inp = torch.randn(hidden_size, in_features, device="cuda",
dtype=torch.float if fake_bf16_io else precision)
high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
fname = f"te.gelu_fp8_{scale_factor}{high_prec_str}.onnx"
model = TestFP8_Gelu(fake_bf16_io)
do_export(model, inp, fname)
te_outputs = te_infer(model, inp, is_fp8=True)
serialize_inputs_outputs(fname, inp, te_outputs)
if fake_bf16_io or precision != torch.bfloat16:
validate_result(fname, inp, model, rtol=0, atol=atol, is_fp8=True, allow_cnt_errors=2, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factors",
[(224, 224,),
])
@pytest.mark.parametrize(
"precision, use_fp8, use_bias, use_gelu", [
(torch.float32, False, False, False),
(torch.float16, False, False, False),
(torch.bfloat16, False, False, False),
(torch.float32, False, True, False),
(torch.float16, False, True, False),
(torch.bfloat16, False, True, False),
(torch.float32, False, True, True),
(torch.float16, False, True, True),
(torch.bfloat16, False, True, True),
# For FP8 GEMM GeLU is not used.
(torch.float32, True, False, False),
(torch.float16, True, False, False),
(torch.bfloat16, True, False, False),
# When enabling bias we must use float16 or bfloat16 (because of kernel limitations)
(torch.float16, True, True, False),
(torch.bfloat16, True, True, False),
])
def test_export_gemm(
seed_default_rng,
precision, # Precision of inputs, weights, output and bias
use_fp8,
use_bias,
use_gelu,
scale_factors
):
# Skip FP8 tests on non-hopper devices
if use_fp8 and not fp8_available:
pytest.skip(reason_for_no_fp8)
class TestFP8_GEMM(nn.Module):
def __init__(self, precision, use_bias, gelu, scale_factors):
super().__init__()
self.use_bias = use_bias
self.gelu = gelu
self.precision = precision
self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT
self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT
nb_inp_scales, nb_weight_scales = 1, out_features
act_scale_factor, weight_scale_factor = scale_factors
self.meta_inp = create_meta(act_scale_factor, nb_inp_scales)
self.meta_weight = create_meta(weight_scale_factor, nb_weight_scales)
bias_size = nb_weight_scales
self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")
self.inp_type = tex.DType.kFloat8E4M3
self.weights_type = tex.DType.kFloat8E4M3
self.outp_type = precision
def forward(self, inp, weight):
inp_fp8 = cast_to_fp8(
inp,
self.meta_inp,
self.fp8_tensor_inp,
self.inp_type)
weight_fp8 = cast_to_fp8(
weight,
self.meta_weight,
self.fp8_tensor_weight,
self.weights_type)
ret = fp8_gemm(
weight_fp8,
self.meta_weight.scale_inv,
self.fp8_tensor_weight,
self.inp_type,
inp_fp8,
self.meta_inp.scale_inv,
self.fp8_tensor_inp,
self.weights_type,
self.outp_type,
get_workspace(),
bias=self.bias,
use_bias=self.use_bias,
use_split_accumulator=False)
return ret
class Test_GEMM(nn.Module):
def __init__(self, precision, use_bias=False, gelu=False):
super().__init__()
self.use_bias = use_bias
self.gelu = gelu
self.precision = precision
bias_size = out_features
self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")
def forward(self, inp, weight):
outp_type = self.precision
            # Note: due to the logic in lines 104:116 and L129 in cpp_extensions.py,
            # it appears that either bias OR gelu can be activated, but not both.
ret, _, _ = gemm(
weight,
inp,
outp_type,
get_workspace(),
# test bias
bias=self.bias,
use_bias=self.use_bias,
# test gelu
gelu=self.gelu,
gelu_input=self.gelu_input,
grad=False, # only True for backward pass
accumulate=False,
)
return ret
# If gelu is applied then bias must be added, as defined by TE kernel.
if use_gelu: assert use_bias
# Set dimensions (these are arbitrary).
out_features = 128
hidden_size = 256
in_features = 64
inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision)
weight = torch.randn(out_features, in_features, device="cuda", dtype=precision)
fp8_str = "_fp8" if use_fp8 else ""
bias_str = "_bias" if use_bias else ""
gelu_str = "_gelu" if use_gelu else ""
high_prec_str = dtype2str(precision)
fname = f"te.gemm{fp8_str}{bias_str}{gelu_str}{high_prec_str}.onnx"
input_names = ['input', 'weight']
if use_fp8:
model = TestFP8_GEMM(precision, use_bias, use_gelu, scale_factors)
do_export(model, (inp, weight), fname, use_fp8, input_names=input_names)
te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
if precision != torch.bfloat16:
validate_result(fname, (inp, weight), model, rtol=1e-2, atol=2e-2,
is_fp8=True, input_names=input_names, te_outputs=te_outputs)
else:
model = Test_GEMM(precision, use_bias, use_gelu)
do_export(model, (inp, weight), fname, use_fp8, input_names=input_names)
te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
if precision != torch.bfloat16:
validate_result(fname, (inp, weight), model, rtol=1e-2, atol=2e-2,
input_names=input_names, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [448, 112])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize(
"use_fp8, precision, atol", [
[False, torch.float32, 1e-7],
[False, torch.float16, 1e-7],
[False, torch.bfloat16, 1e-7],
[False, "fake-torch.bfloat16", 1e-7],
[True, torch.float32, 1e-7],
[True, torch.float16, 1e-7],
[True, torch.bfloat16, 1e-2],
[True, "fake-torch.bfloat16", 1e-2]
])
def test_export_layernorm(
seed_default_rng,
use_fp8: bool,
scale_factor: float,
precision: torch.dtype,
zero_centered_gamma: bool,
atol: float
):
fake_bf16_io = precision == "fake-torch.bfloat16"
# reset precision to torch.bfloat16 after capturing fake BF16 mode
precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision
# Skip FP8 tests on non-hopper devices
if use_fp8 and not fp8_available:
pytest.skip(reason_for_no_fp8)
# Set dimensions (these are arbitrary).
inp_shape = [64, 32]
class Test_Layernorm(nn.Module):
def __init__(self) -> None:
super().__init__()
eps = 1e-6 # An arbitrary small value
dtype = torch.float if fake_bf16_io else precision
self.ln = te.LayerNorm(inp_shape[1], eps, params_dtype=dtype,
zero_centered_gamma=False).eval().cuda()
def forward(self, inp):
ret = self.ln(inp)
return ret
class TestFP8_Layernorm(nn.Module):
def __init__(self) -> None:
super().__init__()
normalized_shape = torch.Size(inp.shape[1:])
self.weight = torch.randn(*normalized_shape, device="cuda",
dtype=torch.float32 if fake_bf16_io else precision)
self.bias = torch.zeros(*normalized_shape, device="cuda",
dtype=torch.float32 if fake_bf16_io else precision)
self.eps = 1e-6 # An arbitrary small value
self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
self.meta = create_meta(scale_factor)
self.fp8_type = tex.DType.kFloat8E4M3
def forward(self, inp):
ret = texcpp.layernorm_fwd_fp8_inf(
inp,
self.weight,
self.bias,
self.eps,
self.meta,
self.fp8_tensor,
self.fp8_type,
zero_centered_gamma)
ret = cast_from_fp8(
ret,
self.meta,
self.fp8_tensor,
self.fp8_type,
as_te_type(precision))
if fake_bf16_io:
ret = ret.type(torch.float32)
return ret
inp = torch.randn(*inp_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision)
model = TestFP8_Layernorm() if use_fp8 else Test_Layernorm()
high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
fp8_str = f"_fp8-{scale_factor}" if use_fp8 else ""
fname = f"te.layernorm{fp8_str}{high_prec_str}.onnx"
do_export(model, inp, fname, use_fp8=use_fp8)
te_outputs = te_infer(model, inp, is_fp8=use_fp8)
serialize_inputs_outputs(fname, inp, te_outputs)
if fake_bf16_io or precision != torch.bfloat16:
validate_result(
fname, inp, model, atol=atol, is_fp8=use_fp8, allow_cnt_errors=3, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [448, 112])
@pytest.mark.parametrize(
"use_fp8, precision, atol", [
[False, torch.float32, 1e-7],
[False, torch.float16, 1e-7],
[False, torch.bfloat16, 1e-7],
[False, "fake-torch.bfloat16", 1e-7],
[True, torch.float32, 1e-7],
[True, torch.float16, 1e-7],
[True, torch.bfloat16, 1e-2],
[True, "fake-torch.bfloat16", 1e-2]
])
def test_export_rmsnorm(
seed_default_rng,
use_fp8: bool,
scale_factor: float,
precision: torch.dtype,
atol: float
):
fake_bf16_io = precision == "fake-torch.bfloat16"
# reset precision to torch.bfloat16 after capturing fake BF16 mode
precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision
# Skip FP8 tests on non-hopper devices
if use_fp8 and not fp8_available:
pytest.skip(reason_for_no_fp8)
# Set dimensions (these are arbitrary).
inp_shape = [64, 32]
class Test_RMSnorm(nn.Module):
def __init__(self) -> None:
super().__init__()
eps = 1e-6 # An arbitrary small value
dtype = torch.float if fake_bf16_io else precision
self.ln = te.RMSNorm(inp_shape[1], eps, params_dtype=dtype).eval().cuda()
def forward(self, inp):
ret = self.ln(inp)
return ret
class TestFP8_RMSnorm(nn.Module):
def __init__(self) -> None:
super().__init__()
normalized_shape = torch.Size(inp.shape[1:])
self.weight = torch.randn(*normalized_shape, device="cuda",
dtype=torch.float32 if fake_bf16_io else precision)
self.eps = 1e-6 # An arbitrary small value
self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
self.meta = create_meta(scale_factor)
self.fp8_type = tex.DType.kFloat8E4M3
def forward(self, inp):
ret = texcpp.rmsnorm_fwd_fp8_inf(
inp,
self.weight,
self.eps,
self.meta,
self.fp8_tensor,
self.fp8_type,
False)
ret = cast_from_fp8(
ret,
self.meta,
self.fp8_tensor,
self.fp8_type,
as_te_type(precision))
if fake_bf16_io:
ret = ret.type(torch.float32)
return ret
inp = torch.randn(*inp_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision)
model = TestFP8_RMSnorm() if use_fp8 else Test_RMSnorm()
high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
fp8_str = f"_fp8-{scale_factor}" if use_fp8 else ""
fname = f"te.layernorm{fp8_str}{high_prec_str}.onnx"
do_export(model, inp, fname, use_fp8=use_fp8)
te_outputs = te_infer(model, inp, is_fp8=use_fp8)
serialize_inputs_outputs(fname, inp, te_outputs)
if fake_bf16_io or precision != torch.bfloat16:
validate_result(
fname, inp, model, atol=atol, is_fp8=use_fp8, allow_cnt_errors=3, te_outputs=te_outputs)
@skip_FP8
@pytest.mark.parametrize("softmax_fn", [
softmax_defs.ScaledUpperTriangMaskedSoftmax,
softmax_defs.ScaledMaskedSoftmax,
softmax_defs.ScaledSoftmax,
te.softmax.FusedScaleMaskSoftmax,
])
# Softmax kernel only supports FP16 or BF16!
@pytest.mark.parametrize("precision", [torch.float16, torch.bfloat16, "fake-torch.bfloat16"])
def test_export_softmax(seed_default_rng, set_max_seq_len, softmax_fn, precision):
class Test_Softmax(nn.Module):
def __init__(self, softmax_fn, fake_bf16_io, mask_inp=False):
super().__init__()
self.softmax_fn = softmax_fn
self.scale = 8 # arbitrary value
self.mask_inp = mask_inp
self.fused_scaled_softmax = None
self.fake_bf16_io = fake_bf16_io
if self.softmax_fn == te.softmax.FusedScaleMaskSoftmax:
self.fused_scaled_softmax = te.softmax.FusedScaleMaskSoftmax(
mask_func=te.utils.attention_mask_func,
softmax_in_fp32=True,
)
def forward(self, inp, mask):
if self.fake_bf16_io:
inp = inp.type(torch.bfloat16)
if self.fused_scaled_softmax:
ret = self.fused_scaled_softmax(inp, mask, "causal", self.scale)
else:
if self.mask_inp:
ret = self.softmax_fn.apply(inp, mask, self.scale)
else:
ret = self.softmax_fn.apply(inp, self.scale)
if self.fake_bf16_io:
ret = ret.type(torch.float32)
return ret
fake_bf16_io = precision == "fake-torch.bfloat16"
precision = torch.bfloat16 if fake_bf16_io else precision
# Set dimensions (these are arbitrary).
batch_size, n_heads, seq_len_q, seq_len_k = 64, 96, 32, 32
mask = None
input_names = ["input", "mask"]
inp_shape = [batch_size, n_heads, seq_len_q, seq_len_k]
if softmax_fn == softmax_defs.ScaledUpperTriangMaskedSoftmax:
inp_shape = [batch_size, seq_len_q, seq_len_k]
kernel_str = "ScaledUpperTriangMaskedSoftmax"
model = Test_Softmax(softmax_fn, fake_bf16_io)
elif softmax_fn == softmax_defs.ScaledMaskedSoftmax:
# Generate a random mask with 50% probability for 0 or 1.
probs = 0.5 * torch.ones(1, 1, seq_len_q, seq_len_k, device="cuda", dtype=precision)
mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
kernel_str = "ScaledMaskedSoftmax"
model = Test_Softmax(softmax_fn, fake_bf16_io, mask_inp=True)
elif softmax_fn == softmax_defs.ScaledSoftmax:
kernel_str = "ScaledSoftmax"
model = Test_Softmax(softmax_fn, fake_bf16_io)
elif softmax_fn == te.softmax.FusedScaleMaskSoftmax:
kernel_str = "TorchSoftmax"
model = Test_Softmax(softmax_fn, fake_bf16_io)
input_tensor = torch.randn(*inp_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision)
high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
fname = f"{kernel_str}{high_prec_str}.onnx"
inp = (input_tensor, mask)
dynamic_axes = {}
if mask is not None:
dynamic_axes = {"mask": {2:"seq_len_q", 3:"seq_len_k"}}
do_export(model, inp, fname, input_names=input_names, dynamic_axes=dynamic_axes)
te_outputs = te_infer(model, inp, is_fp8=False)
serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
if fake_bf16_io or precision != torch.bfloat16:
atol = 5e-2 if fake_bf16_io else 1e-3
validate_result(fname, inp, model, atol=atol, input_names=input_names, te_outputs=te_outputs)
# Test dynamically generated softmax mask.
# Softmax kernel only supports FP16 or BF16!
@skip_FP8
@pytest.mark.parametrize("precision", [torch.float16, torch.bfloat16, "fake-torch.bfloat16"])
def test_softmax_mask_fn(seed_default_rng, precision):
fake_bf16_io = precision == "fake-torch.bfloat16"
# reset precision to torch.bfloat16 after capturing fake BF16 mode
precision = torch.bfloat16 if fake_bf16_io else precision
class Test_Softmax(nn.Module):
def __init__(self, use_default_te_mask_fn: bool, fake_bf16_io: bool):
super().__init__()
self.scale = 1 # arbitrary value
self.fake_bf16_io = fake_bf16_io
if use_default_te_mask_fn:
os.environ["NVTE_ONNX_KVCACHE_MAX_SEQ_LEN"] = "0"
else:
os.environ["NVTE_ONNX_KVCACHE_MAX_SEQ_LEN"] = f"{seq_len_q}"
# Use NVTE_MASKED_SOFTMAX_FUSION to force TE to use forward_torch_softmax
# even when is_in_onnx_export_mode()==False.
os.environ["NVTE_MASKED_SOFTMAX_FUSION"] = "0"
self.fused_scaled_softmax = te.softmax.FusedScaleMaskSoftmax(
mask_func=te.utils.attention_mask_func,
softmax_in_fp32=True,
)
def forward(self, inp, mask):
if self.fake_bf16_io:
inp = inp.type(torch.bfloat16)
ret = self.fused_scaled_softmax(inp, mask, "causal", scale=self.scale)
if self.fake_bf16_io:
ret = ret.type(torch.float)
return ret
# Set dimensions (these are arbitrary).
mask = None
batch_size, n_heads, seq_len_q, seq_len_k = 64, 96, 32, 32
assert seq_len_q == seq_len_k # This is a causal (TRILU) mask
inp_shape = [batch_size, n_heads, seq_len_q, seq_len_k]
input_tensor = torch.randn(
*inp_shape, device="cuda", dtype=torch.float if fake_bf16_io else precision)
inp = (input_tensor, mask)
high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
# Compare the outputs of TE when using the default softmax mask
# to the TE outputs produced when using the ONNX-compatible causal mask.
# This verifies that _get_onnx_export_causal_mask generates a correct mask.
model = Test_Softmax(use_default_te_mask_fn=True, fake_bf16_io=fake_bf16_io)
te_outputs_default_mask = te_infer(model, inp, is_fp8=True)
with te.onnx_export(True):
# ONNX export mode forces use of the ONNX-compatible causal mask.
model_onnx_mask = Test_Softmax(use_default_te_mask_fn=False, fake_bf16_io=fake_bf16_io)
te_outputs_onnx_mask = te_infer(model_onnx_mask, inp, is_fp8=True)
compare_outputs(te_outputs_default_mask, te_outputs_onnx_mask,
atol=0, rtol=0, max_errors_printed=10, allow_cnt_errors=0, fname="softmax masking")
# Compare the outputs of TE when using the default softmax mask
# to the ORT ONNX outputs produced when using the ONNX-compatible causal mask.
input_names = ["input", "mask"]
kernel_str = "FusedScaleMaskSoftmax"
fname = f"{kernel_str}{high_prec_str}.onnx"
do_export(model, inp, fname, input_names=input_names)
serialize_inputs_outputs(fname, inp, te_outputs=te_outputs_default_mask, input_names=input_names)
if fake_bf16_io or precision != torch.bfloat16:
atol = 1e-2 if fake_bf16_io else 1e-3
validate_result(
fname, inp, model_onnx_mask, atol=atol,
input_names=input_names, te_outputs=te_outputs_default_mask)
@pytest.mark.parametrize("scale_factor", [1])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize(
"precision, use_bias",[
(torch.float32, False),
(torch.float32, True),
(torch.float16, False),
(torch.float16, True),
# Todo: cannot configure BF16 when bias is disabled (ORT issue?)
(torch.bfloat16, False),
# Todo: cannot configure BF16 when bias is enabled (ORT issue?)
(torch.bfloat16, True),
])
def test_export_linear(
seed_default_rng,
scale_factor: float,
use_fp8: bool,
use_bias: bool,
return_bias: bool,
precision: torch.dtype
):
    # Skip FP8 tests on devices that do not support FP8
if use_fp8 and not fp8_available:
pytest.skip(reason_for_no_fp8)
# Set dimensions (these are arbitrary).
in_features = 64
out_features = 256
hidden_size = 256
class Test_Linear(nn.Module):
def __init__(self,
in_features,
out_features,
use_bias,
return_bias,
precision
):
super().__init__()
self.linear = te.Linear(
in_features,
out_features,
bias=use_bias,
return_bias=return_bias,
params_dtype=precision
)
def forward(self, inp):
ret = self.linear(inp)
return ret
inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision)
fp8_str = "_fp8" if use_fp8 else ""
bias_str = "_bias" if use_bias else ""
high_prec_str = dtype2str(precision)
fname = f"te.linear{fp8_str}{bias_str}{high_prec_str}.onnx"
with te.fp8_autocast(enabled=use_fp8):
model = Test_Linear(
in_features,
out_features,
use_bias,
return_bias,
precision
).to(device='cuda')
if use_fp8:
set_layer_scale(model.linear, scale_factor, num_gemms=1)
do_export(model, inp, fname, use_fp8)
te_outputs = te_infer(model, inp, is_fp8=use_fp8)
serialize_inputs_outputs(fname, inp, te_outputs)
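    # BF16 outputs are not validated against ORT (see the BF16 TODO notes in the
    # parametrization above).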
if precision in (torch.bfloat16, ):
return
if not use_fp8:
validate_result(fname, inp, model, atol=1e-3, te_outputs=te_outputs)
else:
validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [112])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
"precision, use_bias",[
(torch.float32, False),
(torch.float32, True),
(torch.float16, True),
(torch.float16, False),
(torch.bfloat16, True),
(torch.bfloat16, False),
])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("normalization", all_normalizations)
def test_export_layernorm_linear(
seed_default_rng,
scale_factor: float,
use_fp8: bool,
use_bias: bool,
return_bias: bool,
return_layernorm_output: bool,
precision: torch.dtype,
zero_centered_gamma: bool,
normalization: str,
):
    # Skip FP8 tests on devices that do not support FP8
if use_fp8 and not fp8_available:
pytest.skip(reason_for_no_fp8)
if normalization == "RMSNorm" and zero_centered_gamma:
pytest.skip("RMSNorm does not support zero_centered_gamma yet!")
# Set dimensions (these are arbitrary).
in_features = 64
out_features = 256
hidden_size = 256
inp = torch.randn(in_features, out_features, device="cuda", dtype=precision)
fp8_str = "_fp8" if use_fp8 else ""
bias_str = "_bias" if use_bias else ""
high_prec_str = dtype2str(precision)
fname = f"te.layernorm_linear{fp8_str}{bias_str}{high_prec_str}.onnx"
with te.fp8_autocast(enabled=use_fp8):
model = te.LayerNormLinear(
hidden_size,
3 * hidden_size,
bias=use_bias,
return_bias=return_bias,
return_layernorm_output=return_layernorm_output,
params_dtype=precision,
zero_centered_gamma=zero_centered_gamma,
normalization=normalization,
).to(device='cuda')
if use_fp8:
set_layer_scale(model, scale_factor, num_gemms=1)
do_export(model, inp, fname, use_fp8)
te_outputs = te_infer(model, inp, is_fp8=use_fp8)
serialize_inputs_outputs(fname, inp, te_outputs)
if precision in (torch.bfloat16, ):
return
if not use_fp8:
validate_result(fname, inp, model, atol=1e-3, te_outputs=te_outputs)
elif precision != torch.bfloat16:
validate_result(fname, inp, model, atol=1e-6, is_fp8=use_fp8, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [112])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
"precision, use_bias",[
(torch.float32, False),
(torch.float32, True),
(torch.float16, True),
(torch.float16, False),
(torch.bfloat16, True),
(torch.bfloat16, False),
])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("activation", supported_activations)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_export_layernorm_mlp(
seed_default_rng,
scale_factor: float,
use_fp8: bool,
use_bias: bool,
return_bias: bool,
return_layernorm_output: bool,
precision: torch.dtype,
zero_centered_gamma: bool,
activation: str,
normalization: str,
):
    # Skip FP8 tests on devices that do not support FP8
if use_fp8 and not fp8_available:
pytest.skip(reason_for_no_fp8)
if normalization == "RMSNorm" and zero_centered_gamma:
pytest.skip("RMSNorm does not support zero_centered_gamma yet!")
# Set dimensions (these are arbitrary).
in_features = 64
out_features = 256
hidden_size = 256
ffn_hidden_size = 256
inp = torch.randn(in_features, out_features, device="cuda", dtype=precision)
fp8_str = "_fp8" if use_fp8 else ""
bias_str = "_bias" if use_bias else ""
high_prec_str = dtype2str(precision)
fname = f"te.layernorm_mlp{fp8_str}{bias_str}{high_prec_str}_{activation}.onnx"
with te.fp8_autocast(enabled=use_fp8):
model = te.LayerNormMLP(
hidden_size,
ffn_hidden_size,
bias=use_bias,
return_bias=return_bias,
return_layernorm_output=return_layernorm_output,
params_dtype=precision,
zero_centered_gamma=zero_centered_gamma,
activation=activation,
normalization=normalization,
).to(device='cuda')
if use_fp8:
set_layer_scale(model, scale_factor, num_gemms=2)
do_export(model, inp, fname, use_fp8)
te_outputs = te_infer(model, inp, is_fp8=use_fp8)
serialize_inputs_outputs(fname, inp, te_outputs)
if precision in (torch.bfloat16, ):
return
atol = 1e-6 if use_fp8 else (5e-1 if activation=="swiglu" else 1e-3)
validate_result(fname, inp, model, atol=atol, is_fp8=use_fp8, te_outputs=te_outputs)
@skip_FP8
@pytest.mark.parametrize(
"precision, use_mask, attn_mask_type", [
(torch.float32, True, "padding"), # calls forward_torch_softmax (apply user mask)
(torch.float32, False, "no_mask"), # calls forward_torch_softmax (apply no mask)
(torch.float16, False, "causal"), # calls forward_torch_softmax (apply dynamic onnx mask)
(torch.float16, True, "padding"), # calls forward_torch_softmax (apply user mask)
(torch.float16, False, "no_mask"), # calls forward_torch_softmax (apply no mask)
(torch.bfloat16, False, "causal"), # calls forward_torch_softmax (apply dynamic onnx mask)
(torch.bfloat16, True, "padding"), # calls forward_torch_softmax (apply user mask)
(torch.bfloat16, False, "no_mask"), # calls forward_torch_softmax (apply no mask)
])
def test_export_core_attention(
seed_default_rng,
set_max_seq_len,
precision: torch.dtype,
use_mask: bool,
attn_mask_type: str,
):
# Set dimensions (these are arbitrary).
seq_len, batch_size, num_attention_heads, kv_channels = (64, 4, 1, 64)
qkv_size = (seq_len, batch_size, num_attention_heads, kv_channels)
query_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
key_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
value_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
input_names = ["query", "key", "value", "attention_mask", "attn_mask_type"]
attention_mask = None
if use_mask:
# Generate a random mask with 50% probability for 0 or 1.
probs = 0.5 * torch.ones(qkv_size[1], qkv_size[2], qkv_size[0], qkv_size[0], device="cuda", dtype=precision)
attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
inp = (query_layer, key_layer, value_layer, attention_mask, attn_mask_type)
mask_str = get_attn_mask_str(use_mask, attn_mask_type)
high_prec_str = dtype2str(precision)
fname = f"te.core_attention{mask_str}{high_prec_str}.onnx"
model = te.attention.DotProductAttention(
num_attention_heads=num_attention_heads,
kv_channels=kv_channels,
attention_dropout=0.5,
).to(device='cuda')
do_export(model,
inp,
fname,
input_names=input_names,
use_fp8=True)
te_outputs = te_infer(model, inp, is_fp8=True)
serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
if precision in (torch.bfloat16, ):
return
validate_result(fname, inp, model, is_fp8=True, atol=1e-2, input_names=input_names, te_outputs=te_outputs)
test_configs_multihead_attention = [
#"use_mask, attn_mask_type"
(False, "no_mask"), # calls ScaledUpperTriangMaskedSoftmax
(True, "padding"), # calls ScaledMaskedSoftmax
]
test_configs_attention_type = [
#"input_layernorm, attention_type, fuse_qkv_params"
(True, "self", True),
(False, "self", True),
(True, "self", False),
(False, "self", False),
(True, "cross", True),
(False, "cross", True),
(True, "cross", False),
(False, "cross", False),
]
@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention)
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize("input_layernorm, attention_type, fuse_qkv_params", test_configs_attention_type)
def test_export_multihead_attention(
seed_default_rng,
set_max_seq_len,
use_fp8: bool,
use_mask: bool,
attn_mask_type: str,
precision: torch.dtype,
return_layernorm_output: bool,
input_layernorm: bool,
attention_type: str,
fuse_qkv_params: bool
):
    # Skip FP8 tests on devices that do not support FP8
if use_fp8 and not fp8_available:
pytest.skip(reason_for_no_fp8)
hidden_size = 256
sequence_length = 128
batch_size = 4
num_attention_heads = 32
kv_channels = 8
attention_dropout = 0.1
layernorm_epsilon = 1e-5
init_method = output_layer_init_method = get_default_init_method()
attention_args = (
hidden_size,
num_attention_heads,
kv_channels,
attention_dropout,
layernorm_epsilon,
init_method,
output_layer_init_method,
)
hidden_states_context = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
attention_mask = None
if use_mask and attn_mask_type != "causal":
# Generate a random mask with 50% probability for 0 or 1.
probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision)
attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
encoder_output = None
if attention_type == "cross":
encoder_output = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
fp8_str = "_fp8" if use_fp8 else ""
dtype_str = dtype2str(precision)
attn_type_str = "_self-attention" if attention_type == "self" else "_cross-attention"
fuse_qkv_str = "_fused-qkv" if fuse_qkv_params else ""
attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
input_ln_str = "_input-ln" if input_layernorm else ""
fname = f"te.multihead_attention{fp8_str}{attn_mask_str}{attn_type_str}{input_ln_str}{fuse_qkv_str}{dtype_str}.onnx"
model = te.MultiheadAttention(
*attention_args,
params_dtype=precision,
return_layernorm_output=return_layernorm_output,
input_layernorm=input_layernorm,
attention_type=attention_type,
fuse_qkv_params=fuse_qkv_params,
return_bias=True,
).to(device='cuda')
inp_context = (hidden_states_context, attention_mask, encoder_output, attn_mask_type)
input_names = ["hidden_states", "attention_mask", "encoder_output", "attn_mask_type"]
output_names=["attention_output", "attention_bias"]
do_export(model, inp_context, fname, use_fp8, input_names=input_names, output_names=output_names,
dynamic_axes={"hidden_states": {0: "seq", 1:"bs"},
"attention_output": {0: "seq", 1:"bs"}})
te_outputs = te_infer(model, inp_context, is_fp8=use_fp8)
serialize_inputs_outputs(fname, inp_context, te_outputs, input_names=input_names, output_names=output_names)
if precision in (torch.bfloat16, ):
return
if not use_fp8:
validate_result(fname, inp_context, model, atol=1e-3, input_names=input_names,
output_names=output_names, te_outputs=te_outputs)
else:
validate_result(fname, inp_context, model, atol=1e-2, is_fp8=use_fp8,
input_names=input_names, output_names=output_names, allow_cnt_errors=3,
te_outputs=te_outputs)
# In GPT generative phase (inference) the input sequence is smaller than the maximum
# allowed sequence length and we want to test this condition.
# Pretend that we're in generative phase when it makes sense (causal mask and self-attention).
is_generative_phase = (attn_mask_type == "causal" and attention_type == "self")
if is_generative_phase:
seq_len_offset = 8
hidden_states_generative = torch.randn(sequence_length-seq_len_offset, batch_size, hidden_size, dtype=precision, device="cuda")
inp_generative = (hidden_states_generative, attention_mask, encoder_output)
if not use_fp8:
validate_result(fname, inp_generative, model, atol=1e-3, input_names=input_names, output_names=output_names)
else:
validate_result(fname, inp_generative, model, atol=1e-2, is_fp8=use_fp8,
input_names=input_names, output_names=output_names, allow_cnt_errors=3)
@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention)
@pytest.mark.parametrize("output_layernorm", [
#True, # TO DO: handle this
False
])
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("fuse_qkv_params", [False, True])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("activation", supported_activations)
def test_export_transformer_layer(
seed_default_rng,
set_max_seq_len,
use_fp8: bool,
use_mask: bool,
attn_mask_type: str,
output_layernorm: bool,
precision: torch.dtype,
fuse_qkv_params: bool,
zero_centered_gamma: bool,
activation: str,
):
    # Skip FP8 tests on devices that do not support FP8
if use_fp8 and not fp8_available:
pytest.skip(reason_for_no_fp8)
# Layer configuration
hidden_size = 64
sequence_length = 128
batch_size = 1
ffn_hidden_size = 256
num_attention_heads = 4
input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
input_names = ["input", "attention_mask", "self_attn_mask_type"]
attention_mask = None
if use_mask and attn_mask_type != "causal":
# Generate a random mask with 50% probability for 0 or 1.
probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision)
attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
inp = (input_tensor, attention_mask, attn_mask_type)
fp8_str = "_fp8" if use_fp8 else ""
fuse_qkv_params_str = "_fused-qkv" if fuse_qkv_params else ""
high_prec_str = dtype2str(precision)
attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
fname = f"te.transformer_layer{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{high_prec_str}_{activation}.onnx"
model = te.TransformerLayer(
hidden_size,
ffn_hidden_size,
num_attention_heads,
output_layernorm=output_layernorm,
params_dtype=precision,
fuse_qkv_params=fuse_qkv_params,
zero_centered_gamma=zero_centered_gamma,
activation=activation).to(device='cuda')
do_export(model, inp, fname, use_fp8, input_names=input_names)
te_outputs = te_infer(model, inp, is_fp8=use_fp8)
serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
if precision in (torch.bfloat16, ):
return
atol = 5e-1 if use_fp8 else (5e-1 if activation=="swiglu" else 1e-3)
validate_result(fname, inp, model, atol=atol, is_fp8=use_fp8, input_names=input_names, te_outputs=te_outputs)
@pytest.mark.parametrize("use_fp8", [True])
@pytest.mark.parametrize("ln_scale_factor", [448*2])
@pytest.mark.parametrize("gemm_scale_factors", [(224, 224,),])
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
def test_export_gemm_layernorm(
seed_default_rng,
use_fp8: bool,
ln_scale_factor: float,
gemm_scale_factors: Tuple[float, float],
precision: torch.dtype,
zero_centered_gamma: bool
):
"""This is a regression test for testing that all LN inputs have the same type.
The test sets up GEMM with FP32 output which feeds into an LN that is configured
with FP16 or BF16 weights and bias.
"""
    # Skip FP8 tests on devices that do not support FP8
if use_fp8 and not fp8_available:
pytest.skip(reason_for_no_fp8)
class TestFP8_GemmLayernorm(nn.Module):
def __init__(self) -> None:
super().__init__()
normalized_shape = torch.Size(inp.shape[1:])
self.weight = torch.randn(*normalized_shape, dtype=precision, device="cuda")
self.bias = torch.zeros(*normalized_shape, dtype=precision, device="cuda")
self.eps = 1e-6 # An arbitrary small value
self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
self.meta = create_meta(ln_scale_factor)
self.fp8_type = tex.DType.kFloat8E4M3
self.gemm = TestFP8_GEMM(
precision, use_bias=False, gelu=False, scale_factors=gemm_scale_factors)
def forward(self, inp, weight):
x = self.gemm(inp, weight)
x = texcpp.layernorm_fwd_fp8_inf(
x,
self.weight,
self.bias,
self.eps,
self.meta,
self.fp8_tensor,
self.fp8_type,
zero_centered_gamma)
x = cast_from_fp8(
x,
self.meta,
self.fp8_tensor,
self.fp8_type,
tex.DType.kFloat32 if precision == torch.float32 else tex.DType.kFloat16)
return x
out_features = 128
hidden_size = 128
in_features = 128
class TestFP8_GEMM(nn.Module):
def __init__(self, precision, use_bias, gelu, scale_factors):
super().__init__()
self.use_bias = use_bias
self.gelu = gelu
self.precision = precision
self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT
self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT
nb_inp_scales, nb_weight_scales = 1, out_features
act_scale_factor, weight_scale_factor = scale_factors
self.meta_inp = create_meta(act_scale_factor, nb_inp_scales)
self.meta_weight = create_meta(weight_scale_factor, nb_weight_scales)
bias_size = nb_weight_scales
self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")
self.inp_type = tex.DType.kFloat8E4M3
self.weights_type = tex.DType.kFloat8E4M3
self.outp_type = precision
def forward(self, inp, weight):
inp_fp8 = cast_to_fp8(
inp,
self.meta_inp,
self.fp8_tensor_inp,
self.inp_type)
weight_fp8 = cast_to_fp8(
weight,
self.meta_weight,
self.fp8_tensor_weight,
self.weights_type)
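            # FP8 GEMM of the casted operands; effectively inp @ weight.T,
            # returned in the higher-precision output type (self.outp_type).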
ret = fp8_gemm(
weight_fp8,
self.meta_weight.scale_inv,
self.fp8_tensor_weight,
                self.weights_type,
inp_fp8,
self.meta_inp.scale_inv,
self.fp8_tensor_inp,
                self.inp_type,
self.outp_type,
get_workspace(),
bias=self.bias,
use_bias=self.use_bias,
use_split_accumulator=False)
return ret
inp = torch.randn(hidden_size, in_features, dtype=precision, device="cuda")
weight = torch.randn(out_features, in_features, dtype=precision, device="cuda")
model = TestFP8_GemmLayernorm()
high_prec_str = dtype2str(precision)
fp8_str = f"_fp8" if use_fp8 else ""
fname = f"te.gemm_layernorm{fp8_str}{high_prec_str}.onnx"
input_names = ['input', 'weight']
do_export(model, (inp, weight), fname, use_fp8=use_fp8, input_names=input_names)
te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
if precision not in (torch.bfloat16, ):
validate_result(
fname, (inp, weight), model, atol=5e-2, is_fp8=use_fp8, allow_cnt_errors=2,
input_names=input_names, te_outputs=te_outputs)
@skip_FP8
@pytest.mark.parametrize("use_fp8", [True, False])
@pytest.mark.parametrize("precision", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("zero_centered_gamma", [True])
def test_export_gpt_generation(
seed_default_rng,
set_max_seq_len,
use_fp8: bool,
precision: torch.dtype,
zero_centered_gamma: bool,
):
"""Test that the ONNX model can correctly handle inputs with different shapes and that
    the attention mask is adjusted on the fly for different sequence lengths.
"""
    # Skip FP8 tests on devices that do not support FP8
if use_fp8 and not fp8_available:
pytest.skip(reason_for_no_fp8)
# Layer configuration
hidden_size = 64
sequence_length = 128
batch_size = 1
ffn_hidden_size = 256
num_attention_heads = 4
attention_mask = None
use_mask = True
attn_mask_type = "causal"
fuse_qkv_params = True
output_layernorm = False
fp8_str = "_fp8" if use_fp8 else ""
fuse_qkv_params_str = "_fused-qkv" if fuse_qkv_params else ""
high_prec_str = dtype2str(precision)
attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
fname = f"te.transformer_layer_generative{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{high_prec_str}.onnx"
model = te.TransformerLayer(
hidden_size,
ffn_hidden_size,
num_attention_heads,
output_layernorm=output_layernorm,
params_dtype=precision,
fuse_qkv_params=fuse_qkv_params,
zero_centered_gamma=zero_centered_gamma).to(device='cuda')
# "Context phase": use full input sequence length
input_names = ["input", "attention_mask", "self_attn_mask_type"]
output_names = ["output"]
input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
inp = (input_tensor, None, attn_mask_type)
do_export(model, inp, fname, use_fp8,
input_names=input_names, output_names=output_names,
dynamic_axes={"input": {0: "seq", 1:"bs"},
"output": {0: "seq", 1:"bs"}, })
te_outputs = te_infer(model, inp, is_fp8=use_fp8)
serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names, output_names=output_names)
if precision not in (torch.bfloat16, ):
validate_result(fname, inp, model, atol=6e-3, is_fp8=use_fp8, input_names=input_names,
te_outputs=te_outputs)
# "Generative phase": use a single input (sequence len=1). For FP8 we need to pad the sequence to mult of 8.
sequence_length = 1 if not use_fp8 else 8
input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
inp = (input_tensor, attention_mask)
te_outputs = te_infer(model, inp, is_fp8=use_fp8)
serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
if precision not in (torch.bfloat16, ):
validate_result(fname, inp, model, atol=6e-3, is_fp8=use_fp8, input_names=input_names,
te_outputs=te_outputs)
@pytest.mark.parametrize("enabled", [True, False])
def test_export_ctx_manager(enabled):
    assert not is_in_onnx_export_mode()
with te.onnx_export(enabled):
assert is_in_onnx_export_mode() == enabled
    assert not is_in_onnx_export_mode()
| TransformerEngine-main | tests/pytorch/test_onnx_export.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""
This file contains tests for saving and loading TransformerEngine torch checkpoints.
The purpose of this test is to validate the TransformerEngine hooks for saving FP8 metadata
in torch checkpoints, which are called as part of torch.save() and torch.load().
The test verifies the values of FP8 metadata object after saving and loading a checkpoint
are identical to the original values.
"""
import tempfile
import pytest
import torch
import transformer_engine.pytorch as te
import transformer_engine_extensions as tex
from transformer_engine.pytorch.cpp_extensions import fp8_gemm, cast_to_fp8, cast_from_fp8
from transformer_engine.pytorch.module.base import get_workspace
from transformer_engine.pytorch.module.base import TransformerEngineBaseModule
def init_meta(size: int=1):
meta = tex.FP8TensorMeta()
meta.scale = torch.ones(size, dtype=torch.float32, device="cuda")
meta.scale_inv = torch.ones(size, dtype=torch.float32, device="cuda")
meta.amax_history = torch.zeros(1, size, dtype=torch.float32, device="cuda")
return meta
@pytest.mark.parametrize("scale_fwd", [224, 112, 66])
@pytest.mark.parametrize("scale_bwd", [448, 33])
@pytest.mark.parametrize("history_fwd", [1.23, 4.56])
@pytest.mark.parametrize("history_bwd", [2.34, 5.67])
def test_export_loaded_checkpoint(scale_fwd, scale_bwd, history_fwd, history_bwd):
# Skip FP8 tests on non-hopper devices
if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 9:
pytest.skip("Device compute capability 9.x required for FP8 execution.")
tmp_filename = tempfile.NamedTemporaryFile().name
precision = torch.float32
class Test_TE_Export(TransformerEngineBaseModule):
def __init__(self, precision, use_bias):
super().__init__()
self.use_bias = use_bias
self.precision = precision
self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT
self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT
nb_inp_scales = nb_weight_scales = 1
self.meta_inp = init_meta(nb_inp_scales)
self.meta_weight = init_meta(nb_weight_scales)
bias_size = nb_weight_scales
self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
self.inp_type = tex.DType.kFloat8E4M3
self.weights_type = tex.DType.kFloat8E4M3
self.outp_type = precision
def forward(self, inp, weight):
inp_fp8 = cast_to_fp8(
inp,
self.meta_inp,
self.fp8_tensor_inp,
self.inp_type)
weight_fp8 = cast_to_fp8(
weight,
self.meta_weight,
self.fp8_tensor_weight,
self.weights_type)
ret = fp8_gemm(
weight_fp8,
self.meta_weight.scale_inv,
self.fp8_tensor_weight,
                self.weights_type,
inp_fp8,
self.meta_inp.scale_inv,
self.fp8_tensor_inp,
                self.inp_type,
self.outp_type,
get_workspace(),
bias=self.bias,
use_bias=self.use_bias,
use_split_accumulator=False)
return ret
model_in = Test_TE_Export(precision, True)
with te.fp8_autocast(enabled=True):
model_in.fp8_init()
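        # Per GEMM, TE tracks three forward FP8 tensors (input, weight, output) and
        # two backward ones (grad output, grad input), hence the sizes 3 and 2 below.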
# scaling fwd
model_in.fp8_meta["scaling_fwd"].scale = torch.ones(3, dtype=torch.float32, device="cuda") * scale_fwd
model_in.fp8_meta["scaling_fwd"].scale_inv = torch.ones(3, dtype=torch.float32, device="cuda") / scale_fwd
model_in.fp8_meta["scaling_fwd"].amax_history = torch.ones(3, dtype=torch.float32, device="cuda") * history_fwd
# scaling bwd
model_in.fp8_meta["scaling_bwd"].scale = torch.ones(2, dtype=torch.float32, device="cuda") * scale_bwd
model_in.fp8_meta["scaling_bwd"].scale_inv = torch.ones(2, dtype=torch.float32, device="cuda") / scale_bwd
model_in.fp8_meta["scaling_bwd"].amax_history = torch.ones(2, dtype=torch.float32, device="cuda") * history_bwd
torch.save(model_in.state_dict(), tmp_filename)
model_out = Test_TE_Export(precision, True)
model_out.load_state_dict(torch.load(tmp_filename))
model_out.eval()
# scaling fwd
assert torch.allclose(model_in.fp8_meta["scaling_fwd"].scale, model_out.fp8_meta["scaling_fwd"].scale)
assert torch.allclose(model_in.fp8_meta["scaling_fwd"].scale_inv, model_out.fp8_meta["scaling_fwd"].scale_inv)
assert torch.allclose(model_in.fp8_meta["scaling_fwd"].amax_history, model_out.fp8_meta["scaling_fwd"].amax_history)
# scaling bwd
assert torch.allclose(model_in.fp8_meta["scaling_bwd"].scale, model_out.fp8_meta["scaling_bwd"].scale)
assert torch.allclose(model_in.fp8_meta["scaling_bwd"].scale_inv, model_out.fp8_meta["scaling_bwd"].scale_inv)
assert torch.allclose(model_in.fp8_meta["scaling_bwd"].amax_history, model_out.fp8_meta["scaling_bwd"].amax_history)
| TransformerEngine-main | tests/pytorch/test_torch_save_load.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
from typing import Tuple
import pytest
import torch
import transformer_engine.pytorch as te
# Model names for test_torch_dynamo
_model_names = ["Linear", "LayerNorm", "LayerNormLinear", "LayerNormMLP"]
@pytest.mark.skipif(torch.__version__ < "2", reason="torch.compile not available")
@pytest.mark.parametrize("model_name", _model_names)
def test_torch_dynamo(model_name: str):
"""Test compatibility with Torch Dynamo
Construct model, optimize with Torch Dynamo, and perform a single
forward and backward pass.
"""
# Helper function to construct tensor with default options
def make_tensor(
dims: Tuple[int],
dtype: torch.dtype = torch.float32,
device: torch.device = "cuda",
requires_grad: bool = True,
**kwargs,
):
return torch.zeros(
dims,
dtype=dtype,
device=device,
requires_grad=requires_grad,
**kwargs,
)
# Construct model and input tensors
model = None
inputs = []
if model_name == "Linear":
model = te.Linear(16, 16)
inputs = [make_tensor([16,16])]
elif model_name == "LayerNorm":
model = te.LayerNorm(16)
inputs = [make_tensor([16,16])]
elif model_name == "LayerNormLinear":
model = te.LayerNormLinear(16,16)
inputs = [make_tensor([16,16])]
elif model_name == "LayerNormMLP":
model = te.LayerNormMLP(16,16)
inputs = [make_tensor([16,16])]
assert model is not None, f"could not construct {model_name}"
# Optimize model with TorchDynamo
    model = torch.compile(model)
# Forward and backward pass
out = model(*inputs)
out.backward(torch.zeros_like(out))
| TransformerEngine-main | tests/pytorch/test_jit.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import torch
import pytest
from transformer_engine.pytorch.fp8 import fp8_autocast, FP8GlobalStateManager
from transformer_engine.pytorch.utils import (
init_method_normal,
scaled_init_method_normal,
)
from transformer_engine.pytorch import (
LayerNormLinear,
Linear,
LayerNormMLP,
TransformerLayer,
RMSNorm,
LayerNorm,
)
from transformer_engine.common import recipe
# Only run FP8 tests on devices that support FP8.
fp8_available, reason_for_no_fp8 = FP8GlobalStateManager.is_fp8_available()
def custom_amax_to_scale(
amax: torch.Tensor,
scale: torch.Tensor,
fp8_max: torch.Tensor,
recipe: recipe.DelayedScaling,
) -> torch.Tensor:
"""Custom func to test recipe."""
sf = fp8_max / amax
sf = torch.where(amax > 0.0, sf, scale)
sf = torch.where(torch.isfinite(amax), sf, scale)
return sf
def custom_amax_compute(amax_history: torch.Tensor) -> torch.Tensor:
"""Custom func to test recipe."""
return torch.min(amax_history, dim=0).values
class ModelConfig:
def __init__(
self, hidden_size, eps, num_attention_heads, embed, num_layers, seq_len
):
self.hidden_size = hidden_size
self.eps = eps
self.num_attention_heads = num_attention_heads
self.embed = embed
self.num_layers = num_layers
self.seq_len = seq_len
model_configs = {
"126m": ModelConfig(768, 1e-5, 12, 64, 12, 2048),
}
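# The leading positional arguments to DelayedScaling below are margin and interval.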
fp8_recipes = [
None, # Handles non-FP8 case
recipe.DelayedScaling(0, 1, recipe.Format.E4M3),
recipe.DelayedScaling(0, 1, recipe.Format.HYBRID),
recipe.DelayedScaling(
0, 1, recipe.Format.E4M3, override_linear_precision=(False, False, True)
),
recipe.DelayedScaling(
0, 1, recipe.Format.E4M3, amax_history_len=16, amax_compute_algo="most_recent"
),
recipe.DelayedScaling(
0, 1, recipe.Format.E4M3, amax_history_len=16, amax_compute_algo="max"
),
recipe.DelayedScaling(
0,
1,
recipe.Format.E4M3,
amax_history_len=16,
amax_compute_algo=custom_amax_compute,
),
recipe.DelayedScaling(
0,
1,
recipe.Format.E4M3,
amax_history_len=16,
scaling_factor_compute_algo=custom_amax_to_scale,
),
]
param_types = [torch.float32, torch.float16]
if torch.cuda.is_bf16_supported():
param_types.append(torch.bfloat16)
batch_sizes = [1, 2]
all_boolean = [True, False]
all_activations = ["gelu", "relu", "reglu", "geglu", "swiglu"]
all_normalizations = ["LayerNorm", "RMSNorm"]
def _disable_wgrads(block):
for p in block.parameters():
p.requires_grad = False
def _test_sanity_e2e_cuda_graph(block, bs, dtype, config, fp8_recipe, skip_wgrad):
# Initialize loss function and optimizer.
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.SGD(block.parameters(), lr=0.1)
# Placeholders used for capture.
static_input = torch.randn(config.seq_len, bs, config.hidden_size, device='cuda', dtype=dtype, requires_grad=True)
static_target = torch.randn(config.seq_len, bs, config.hidden_size, device='cuda', dtype=dtype)
real_input = torch.rand_like(static_input)
real_target = torch.rand_like(static_target)
use_fp8 = fp8_recipe is not None
if skip_wgrad:
_disable_wgrads(block)
# Pre graph capture warmup in a separate stream.
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
for _ in range(3):
optimizer.zero_grad(set_to_none=True)
with fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
out = block(static_input)
loss = loss_fn(out, static_target)
loss.backward()
optimizer.step()
torch.cuda.current_stream().wait_stream(s)
# Capture.
g = torch.cuda.CUDAGraph()
optimizer.zero_grad(set_to_none=True)
with torch.cuda.graph(g):
with fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
static_output = block(static_input)
static_loss = loss_fn(static_output, static_target)
static_loss.backward()
optimizer.step()
# Fills the graph's input memory with new data to compute on
with torch.no_grad():
static_input.copy_(real_input)
static_target.copy_(real_target)
g.replay()
torch.cuda.synchronize()
def _test_sanity_e2e_amp(block, bs, dtype, config, fp8_recipe, skip_wgrad):
te_inp_hidden_states = torch.randn(
config.seq_len, bs, config.hidden_size, dtype=torch.float32, requires_grad=True
).cuda()
te_inp_hidden_states.retain_grad()
te_inp_attn_mask = (
torch.rand(
(
1,
1,
config.seq_len,
config.seq_len,
)
)
.cuda()
.bool()
)
if skip_wgrad:
_disable_wgrads(block)
use_fp8 = fp8_recipe is not None
with torch.autocast(device_type="cuda", enabled=True, dtype=dtype):
with fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
te_out = block(te_inp_hidden_states, attention_mask=te_inp_attn_mask)
loss = te_out.sum()
loss.backward()
torch.cuda.synchronize()
assert te_out.dtype == dtype, "AMP wrong output type."
assert te_inp_hidden_states.grad.dtype == torch.float32, "AMP wrong dgrad type."
for name, p in block.named_parameters():
if p.requires_grad:
assert p.grad.dtype == torch.float32, f"AMP wrong wgrad type for {name}."
def _test_sanity_e2e_gradient_accumulation_fusion(block, bs, dtype, config, fp8_recipe, skip_wgrad):
te_inp_hidden_states = torch.randn(
config.seq_len, bs, config.hidden_size, dtype=dtype, requires_grad=True
).cuda()
te_inp_attn_mask = (
torch.rand(
(
1,
1,
config.seq_len,
config.seq_len,
)
)
.cuda()
.bool()
)
if skip_wgrad:
_disable_wgrads(block)
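    # fuse_wgrad_accumulation expects a pre-allocated main_grad buffer on each weight
    # (as used for Megatron-style gradient accumulation fusion).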
for name, p in block.named_parameters():
if "layer_norm_weight" in name:
continue
elif "weight" in name and p.requires_grad:
p.main_grad = torch.zeros_like(p)
use_fp8 = fp8_recipe is not None
with fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
te_out = block(te_inp_hidden_states, attention_mask=te_inp_attn_mask)
loss = te_out.sum()
loss.backward()
torch.cuda.synchronize()
for name, p in block.named_parameters():
if "layer_norm_weight" in name:
continue
elif "weight" in name and p.requires_grad:
assert (
p.grad is None and torch.count_nonzero(p.main_grad) > 0
), "Gradient not accumulated."
def _test_sanity_e2e(block, bs, dtype, config, fp8_recipe, skip_wgrad):
te_inp_hidden_states = torch.randn(
config.seq_len, bs, config.hidden_size, dtype=dtype, requires_grad=True
).cuda()
te_inp_attn_mask = (
torch.rand(
(
1,
1,
config.seq_len,
config.seq_len,
)
)
.cuda()
.bool()
)
if skip_wgrad:
_disable_wgrads(block)
use_fp8 = fp8_recipe is not None
with fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
te_out = block(te_inp_hidden_states, attention_mask=te_inp_attn_mask)
loss = te_out.sum()
loss.backward()
torch.cuda.synchronize()
def _test_sanity_e2e_T5(block, bs, dtype, config, fp8_recipe, skip_wgrad):
te_inp_hidden_states = torch.randn(
config.seq_len, bs, config.hidden_size, dtype=dtype, requires_grad=True
).cuda()
te_inp_attn_mask = (
torch.rand(
(
1,
1,
config.seq_len,
config.seq_len,
)
)
.cuda()
.bool()
)
if skip_wgrad:
_disable_wgrads(block)
use_fp8 = fp8_recipe is not None
with fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
te_out = block(
te_inp_hidden_states,
attention_mask=te_inp_attn_mask,
encoder_output=te_inp_hidden_states
)
loss = te_out.sum()
loss.backward()
torch.cuda.synchronize()
def _test_sanity_common(block, bs, dtype, config, fp8_recipe, skip_wgrad, skip_dgrad):
if skip_dgrad and skip_wgrad:
pytest.skip("No gradient computation; Skipping to avoid PyTorch RuntimeError.")
te_inp = torch.randn(
config.seq_len, bs, config.hidden_size, dtype=dtype, requires_grad=not skip_dgrad
).cuda()
if skip_wgrad:
_disable_wgrads(block)
use_fp8 = fp8_recipe is not None
with fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
te_out = block(te_inp)
if isinstance(te_out, tuple):
te_out = te_out[0]
loss = te_out.sum()
loss.backward()
torch.cuda.synchronize()
def _test_sanity_normalization_amp(block, bs, dtype, config, skip_wgrad, skip_dgrad):
if skip_dgrad and skip_wgrad:
pytest.skip("No gradient computation; Skipping to avoid PyTorch RuntimeError.")
te_inp = torch.randn(
config.seq_len, bs, config.hidden_size, requires_grad=True
).cuda()
te_inp.retain_grad()
with torch.autocast(device_type="cuda", enabled=True, dtype=dtype):
te_out = block(te_inp)
loss = te_out.sum()
loss.backward()
torch.cuda.synchronize()
assert te_out.dtype == dtype, "AMP wrong output type."
assert te_inp.grad.dtype == torch.float32, "AMP wrong dgrad type."
for name, p in block.named_parameters():
if p.requires_grad:
assert p.grad.dtype == torch.float32, f"AMP wrong wgrad type for {name}."
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("skip_dgrad", all_boolean)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_sanity_normalization_amp(dtype, bs, model, skip_wgrad, skip_dgrad, normalization):
config = model_configs[model]
module = RMSNorm if normalization == "RMSNorm" else LayerNorm
block = (
module(
config.hidden_size,
eps=config.eps,
)
.to(dtype=torch.float32)
.cuda()
)
_test_sanity_normalization_amp(block, bs, dtype, config, skip_wgrad, skip_dgrad)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("zero_centered_gamma", all_boolean)
@pytest.mark.parametrize("skip_dgrad", all_boolean)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_sanity_layernorm_linear(dtype, bs, fp8_recipe, model, skip_wgrad,
zero_centered_gamma, skip_dgrad,
normalization):
if fp8_recipe is not None and not fp8_available:
pytest.skip(reason_for_no_fp8)
if normalization == "RMSNorm" and zero_centered_gamma:
pytest.skip("RMSNorm does not support zero_centered_gamma yet!")
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
block = (
LayerNormLinear(
config.hidden_size,
config.hidden_size * 3,
eps=config.eps,
init_method=init_method,
zero_centered_gamma=zero_centered_gamma,
normalization=normalization,
)
.to(dtype=dtype)
.cuda()
)
_test_sanity_common(block, bs, dtype, config, fp8_recipe, skip_wgrad, skip_dgrad)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("skip_dgrad", all_boolean)
def test_sanity_linear(dtype, bs, fp8_recipe, model, skip_wgrad, skip_dgrad):
if fp8_recipe is not None and not fp8_available:
pytest.skip(reason_for_no_fp8)
config = model_configs[model]
sigma = 0.023
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
Linear(
config.hidden_size, config.hidden_size, init_method=output_layer_init_method
)
.to(dtype=dtype)
.cuda()
)
_test_sanity_common(block, bs, dtype, config, fp8_recipe, skip_wgrad, skip_dgrad)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("zero_centered_gamma", all_boolean)
@pytest.mark.parametrize("skip_dgrad", all_boolean)
@pytest.mark.parametrize("activation", all_activations)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_sanity_layernorm_mlp(dtype, bs, fp8_recipe, model, skip_wgrad,
zero_centered_gamma, skip_dgrad, activation,
normalization):
if fp8_recipe is not None and not fp8_available:
pytest.skip(reason_for_no_fp8)
if normalization == "RMSNorm" and zero_centered_gamma:
pytest.skip("RMSNorm does not support zero_centered_gamma yet!")
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
LayerNormMLP(
config.hidden_size,
4 * config.hidden_size,
eps=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
zero_centered_gamma=zero_centered_gamma,
activation=activation,
normalization=normalization,
)
.to(dtype=dtype)
.cuda()
)
_test_sanity_common(block, bs, dtype, config, fp8_recipe, skip_wgrad, skip_dgrad)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("zero_centered_gamma", all_boolean)
@pytest.mark.parametrize("bias", all_boolean)
@pytest.mark.parametrize("activation", all_activations)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_sanity_gpt(dtype, bs, fp8_recipe, model, skip_wgrad,
zero_centered_gamma, bias, activation,
normalization):
if fp8_recipe is not None and not fp8_available:
pytest.skip(reason_for_no_fp8)
if normalization == "RMSNorm" and zero_centered_gamma:
pytest.skip("RMSNorm does not support zero_centered_gamma yet!")
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=0.1,
attention_dropout=0.1,
kv_channels=config.embed,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
zero_centered_gamma=zero_centered_gamma,
bias=bias,
activation=activation,
normalization=normalization,
)
.to(dtype=dtype)
.cuda()
)
_test_sanity_e2e(block, bs, dtype, config, fp8_recipe, skip_wgrad)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("zero_centered_gamma", all_boolean)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_sanity_bert(dtype, bs, fp8_recipe, model, skip_wgrad, zero_centered_gamma,
normalization):
if fp8_recipe is not None and not fp8_available:
pytest.skip(reason_for_no_fp8)
if normalization == "RMSNorm" and zero_centered_gamma:
pytest.skip("RMSNorm does not support zero_centered_gamma yet!")
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=0.1,
attention_dropout=0.1,
kv_channels=config.embed,
apply_residual_connection_post_layernorm=True,
output_layernorm=True,
zero_centered_gamma=zero_centered_gamma,
normalization=normalization,
)
.to(dtype=dtype)
.cuda()
)
_test_sanity_e2e(block, bs, dtype, config, fp8_recipe, skip_wgrad)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("zero_centered_gamma", all_boolean)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_sanity_T5(dtype, bs, fp8_recipe, model, skip_wgrad, zero_centered_gamma,
normalization):
if fp8_recipe is not None and not fp8_available:
pytest.skip(reason_for_no_fp8)
if normalization == "RMSNorm" and zero_centered_gamma:
pytest.skip("RMSNorm does not support zero_centered_gamma yet!")
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=0.1,
attention_dropout=0.1,
kv_channels=config.embed,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
layer_type="decoder",
zero_centered_gamma=zero_centered_gamma,
normalization=normalization,
)
.to(dtype=dtype)
.cuda()
)
_test_sanity_e2e_T5(block, bs, dtype, config, fp8_recipe, skip_wgrad)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("skip_wgrad", all_boolean)
def test_sanity_amp_and_nvfuser(dtype, bs, fp8_recipe, model, skip_wgrad):
if fp8_recipe is not None and not fp8_available:
pytest.skip(reason_for_no_fp8)
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=0.1,
attention_dropout=0.1,
kv_channels=config.embed,
)
.to(dtype=torch.float32)
.cuda()
)
_test_sanity_e2e_amp(block, bs, dtype, config, fp8_recipe, skip_wgrad)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("skip_wgrad", all_boolean)
def test_sanity_drop_path(dtype, bs, fp8_recipe, model, skip_wgrad):
if fp8_recipe is not None and not fp8_available:
pytest.skip(reason_for_no_fp8)
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=0.1,
attention_dropout=0.1,
kv_channels=config.embed,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
drop_path_rate=1.0,
)
.to(dtype=dtype)
.cuda()
)
_test_sanity_e2e(block, bs, dtype, config, fp8_recipe, skip_wgrad)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("skip_wgrad", all_boolean)
def test_sanity_fused_qkv_params(dtype, bs, fp8_recipe, model, skip_wgrad):
if fp8_recipe is not None and not fp8_available:
pytest.skip(reason_for_no_fp8)
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=0.1,
attention_dropout=0.1,
kv_channels=config.embed,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
fuse_qkv_params=True,
)
.to(dtype=dtype)
.cuda()
)
_test_sanity_e2e(block, bs, dtype, config, fp8_recipe, skip_wgrad)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("zero_centered_gamma", all_boolean)
def test_sanity_gradient_accumulation_fusion(dtype, bs, fp8_recipe, model, skip_wgrad, zero_centered_gamma):
if fp8_recipe is not None and not fp8_available:
pytest.skip(reason_for_no_fp8)
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=0.1,
attention_dropout=0.1,
kv_channels=config.embed,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
zero_centered_gamma=zero_centered_gamma,
fuse_qkv_params=True,
fuse_wgrad_accumulation=True,
)
.to(dtype=dtype)
.cuda()
)
_test_sanity_e2e_gradient_accumulation_fusion(block, bs, dtype, config, fp8_recipe, skip_wgrad)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("skip_wgrad", all_boolean)
@pytest.mark.parametrize("zero_centered_gamma", all_boolean)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_gpt_cuda_graph(dtype, bs, fp8_recipe, model, skip_wgrad, zero_centered_gamma,
normalization):
if fp8_recipe is not None and not fp8_available:
pytest.skip(reason_for_no_fp8)
if normalization == "RMSNorm" and zero_centered_gamma:
pytest.skip("RMSNorm does not support zero_centered_gamma yet!")
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=0.1,
attention_dropout=0.1,
kv_channels=config.embed,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
zero_centered_gamma=zero_centered_gamma,
fuse_qkv_params=True,
normalization=normalization,
)
.to(dtype=dtype)
.cuda()
)
_test_sanity_e2e_cuda_graph(block, bs, dtype, config, fp8_recipe, skip_wgrad)
| TransformerEngine-main | tests/pytorch/test_sanity.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import torch
import pytest
from transformer_engine.pytorch.utils import (
init_method_normal,
scaled_init_method_normal,
get_device_compute_capability,
)
from transformer_engine.pytorch.fp8 import FP8GlobalStateManager
from transformer_engine.pytorch import TransformerLayer
from transformer_engine.pytorch.attention import DotProductAttention
import os
from pkg_resources import packaging
from importlib.metadata import version
from test_numerics import get_dummy_cuda_rng_tracker, reset_rng_states
fp8_available, reason_for_no_fp8 = FP8GlobalStateManager.is_fp8_available()
_flash_attn_version = packaging.version.Version(version("flash-attn"))
_flash_attn_2_available = _flash_attn_version >= packaging.version.Version("2")
class ModelConfig:
def __init__(
self, num_layers, hidden_size, num_attention_heads, head_dim, seq_len,
dropout_p, attn_mask_type,
):
self.num_layers = num_layers
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.head_dim = head_dim
assert (hidden_size == num_attention_heads * head_dim
), """hidden_size must be = num_heads x head_dim."""
self.seq_len = seq_len
self.dropout_p = dropout_p
self.attn_mask_type = attn_mask_type
model_configs = {
"test1": ModelConfig(1, 1024, 16, 64, 128, 0.0, "causal"),
"test2": ModelConfig(1, 1024, 16, 64, 512, 0.0, "causal"),
"test3": ModelConfig(1, 1024, 16, 64, 2048, 0.0, "causal"),
"test4": ModelConfig(1, 2048, 16, 128, 128, 0.0, "causal"),
"test5": ModelConfig(1, 2048, 16, 128, 512, 0.0, "causal"),
"test6": ModelConfig(1, 2048, 16, 128, 2048, 0.0, "causal"),
"test7": ModelConfig(1, 1024, 16, 64, 128, 0.0, "no_mask"),
"test8": ModelConfig(1, 1024, 16, 64, 512, 0.0, "no_mask"),
}
param_types = [torch.float16]
if torch.cuda.is_bf16_supported():
param_types.append(torch.bfloat16)
batch_sizes = [1, 2, 32]
@pytest.mark.skipif(
get_device_compute_capability() < 8.0, reason="Compute capability 8.0+ is required.")
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("ckpt_attn", [True, False])
@pytest.mark.parametrize("bias_type", ["no_bias", "post_scale_bias"])
def test_dot_product_attention(dtype, bs, model, ckpt_attn, bias_type):
"""Test DotProductAttention module with three backends,
FlashAttention, FusedAttention and UnfusedDotProductAttention"""
config = model_configs[model]
if bias_type == "no_bias":
flash_attn_fwd, flash_attn_bwd = _run_dot_product_attention(
dtype, bs, config, "FlashAttention", ckpt_attn, bias_type)
fused_attn_fwd, fused_attn_bwd = _run_dot_product_attention(
dtype, bs, config, "FusedAttention", ckpt_attn, bias_type)
unfused_attn_fwd, unfused_attn_bwd = _run_dot_product_attention(
dtype, bs, config, "UnfusedDotProductAttention", ckpt_attn, bias_type)
atol, rtol = (2.5e-2, 2.5e-2) if dtype == torch.bfloat16 else (5e-3, 5e-3)
if bias_type == "no_bias":
assert torch.allclose(fused_attn_fwd, flash_attn_fwd, atol=atol, rtol=rtol)
assert torch.allclose(fused_attn_bwd, flash_attn_bwd, atol=atol, rtol=rtol)
assert torch.allclose(fused_attn_fwd, unfused_attn_fwd, atol=atol, rtol=rtol)
assert torch.allclose(fused_attn_bwd, unfused_attn_bwd, atol=atol, rtol=rtol)
def _run_dot_product_attention(dtype, bs, config, backend, ckpt_attn, bias_type):
reset_rng_states()
os.environ["NVTE_FLASH_ATTN"] = "0"
os.environ["NVTE_FUSED_ATTN"] = "0"
if backend == "FlashAttention":
os.environ["NVTE_FLASH_ATTN"] = "1"
if backend == "FusedAttention":
os.environ["NVTE_FUSED_ATTN"] = "1"
inp = torch.randn(
config.seq_len, bs, 3, config.num_attention_heads, config.head_dim,
dtype=dtype).cuda()
inp.requires_grad=True
seqlens = torch.empty(bs, dtype=torch.int32).cuda()
seqlens.fill_(config.seq_len)
cu_seqlens = torch.zeros(bs + 1, device=inp.device, dtype=torch.int32)
cu_seqlens[1:] = torch.cumsum(seqlens, dim=0)
op_grad = torch.randn(
config.seq_len, bs, config.num_attention_heads * config.head_dim,
dtype = dtype).cuda()
if bias_type != "no_bias":
bias = torch.randn(1, config.num_attention_heads, config.seq_len, config.seq_len,
dtype=dtype).cuda()
else:
bias = None
block = (
DotProductAttention(
config.num_attention_heads,
config.head_dim,
attention_dropout=config.dropout_p,
sequence_parallel=False,
tp_size=1,
get_rng_state_tracker=get_dummy_cuda_rng_tracker,
tp_group=None,
layer_number=1,
attention_type="self"
).to(dtype=dtype).cuda()
)
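    # Split the packed QKV tensor along its third dimension.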
q = inp[:, :,0,:,:]
k = inp[:, :,1,:,:]
v = inp[:, :,2,:,:]
op = block(q, k, v, attn_mask_type=config.attn_mask_type,
checkpoint_core_attention=ckpt_attn,
core_attention_bias_type=bias_type,
core_attention_bias=bias)
op.backward(op_grad)
return op, inp.grad
@pytest.mark.skipif(
get_device_compute_capability() < 8.0, reason="Compute capability 8.0+ is required.")
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("ckpt_attn", [False])
@pytest.mark.parametrize("bias_type", ["no_bias", "post_scale_bias"])
def test_transformer_layer(dtype, bs, model, ckpt_attn, bias_type):
"""Test TransformerLayer module when its DotProductAttention is enabled with
FlashAttention, FusedAttention, or UnfusedDotProductAttention backend"""
config = model_configs[model]
if bias_type == "no_bias":
flash_attn_fwd, flash_attn_bwd = _run_transformer_layer(
dtype, bs, config, "FlashAttention", ckpt_attn, bias_type)
fused_attn_fwd, fused_attn_bwd = _run_transformer_layer(
dtype, bs, config, "FusedAttention", ckpt_attn, bias_type)
unfused_attn_fwd, unfused_attn_bwd = _run_transformer_layer(
dtype, bs, config, "UnfusedDotProductAttention", ckpt_attn, bias_type)
atol, rtol = (5e-1, 5e-2)
if bias_type == "no_bias":
assert torch.allclose(fused_attn_fwd, flash_attn_fwd, atol=atol, rtol=rtol)
assert torch.allclose(fused_attn_bwd, flash_attn_bwd, atol=atol, rtol=rtol)
assert torch.allclose(fused_attn_fwd, unfused_attn_fwd, atol=atol, rtol=rtol)
assert torch.allclose(fused_attn_bwd, unfused_attn_bwd, atol=atol, rtol=rtol)
def _run_transformer_layer(dtype, bs, config, backend, ckpt_attn, bias_type):
reset_rng_states()
os.environ["NVTE_FLASH_ATTN"] = "0"
os.environ["NVTE_FUSED_ATTN"] = "0"
if backend == "FlashAttention":
os.environ["NVTE_FLASH_ATTN"] = "1"
if backend == "FusedAttention":
os.environ["NVTE_FUSED_ATTN"] = "1"
inp = torch.randn(
config.seq_len, bs, config.num_attention_heads * config.head_dim,
dtype=dtype).cuda()
inp.requires_grad=True
seqlens = torch.empty(bs, dtype=torch.int32).cuda()
seqlens.fill_(config.seq_len)
cu_seqlens = torch.zeros(bs + 1, device=inp.device, dtype=torch.int32)
cu_seqlens[1:] = torch.cumsum(seqlens, dim=0)
sigma = 0.02
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
layer_number = 1
drop_path_rate = 0.0
drop_path_rates = [
rate.item() for rate in torch.linspace(0, drop_path_rate, config.num_layers)]
if bias_type != "no_bias":
bias = torch.randn(1, config.num_attention_heads, config.seq_len, config.seq_len,
dtype=dtype).cuda()
else:
bias = None
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=1e-5,
hidden_dropout=0.0,
attention_dropout=config.dropout_p,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
kv_channels=config.head_dim,
tp_group=None,
tp_size=1,
params_dtype=dtype,
get_rng_state_tracker=None,
fuse_wgrad_accumulation=False,
seq_length=config.seq_len,
micro_batch_size=bs,
sequence_parallel=False,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
layer_type="encoder",
drop_path_rate=drop_path_rates[layer_number - 1],
set_parallel_mode=True,
fuse_qkv_params=True,
zero_centered_gamma=False,
qkv_weight_interleaved=False,
ub_tp_comm_overlap=False,
bias=True,
)
.to(dtype=dtype)
.cuda()
)
num_iters = 10
for i in range(num_iters):
op = block(inp, self_attn_mask_type=config.attn_mask_type,
checkpoint_core_attention=ckpt_attn,
core_attention_bias_type=bias_type,
core_attention_bias=bias)
loss = op.sum()
loss.backward()
return op, inp.grad
@pytest.mark.skipif(not _flash_attn_2_available, reason="FA2.0 is not available")
@pytest.mark.skipif(
get_device_compute_capability() < 8.0, reason="Compute capability 8.0+ is required.")
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
def test_transformer_layer_gqa(dtype, bs, model):
"""Test TransformerLayer module when its DotProductAttention is enabled with
FlashAttention, FusedAttention, or UnfusedDotProductAttention backend"""
config = model_configs[model]
def find_factors(x):
f = []
for i in range(1, x + 1):
if x % i == 0:
f.append(i)
return f
num_querys_per_gqa_group = find_factors(config.num_attention_heads)
for num_q_per_gqa_group in num_querys_per_gqa_group:
flash_attn_fwd, flash_attn_bwd = _run_transformer_layer_gqa(
dtype, bs, config, "FlashAttention", num_q_per_gqa_group)
unfused_attn_fwd, unfused_attn_bwd = _run_transformer_layer_gqa(
dtype, bs, config, "UnfusedDotProductAttention", num_q_per_gqa_group)
atol, rtol = 5e-1, 5e-2
assert torch.allclose(flash_attn_fwd, unfused_attn_fwd, atol=atol, rtol=rtol)
assert torch.allclose(flash_attn_bwd, unfused_attn_bwd, atol=atol, rtol=rtol)
def _run_transformer_layer_gqa(dtype, bs, config, backend, num_querys_per_gqa_group):
reset_rng_states()
os.environ["NVTE_FLASH_ATTN"] = "0"
if backend == "FlashAttention":
os.environ["NVTE_FLASH_ATTN"] = "1"
inp = torch.randn(
config.seq_len, bs, config.num_attention_heads * config.head_dim,
dtype=dtype).cuda()
inp.requires_grad=True
seqlens = torch.empty(bs, dtype=torch.int32).cuda()
seqlens.fill_(config.seq_len)
cu_seqlens = torch.zeros(bs + 1, device=inp.device, dtype=torch.int32)
cu_seqlens[1:] = torch.cumsum(seqlens, dim=0)
op_grad = torch.randn(
config.seq_len, bs, config.num_attention_heads * config.head_dim,
dtype=dtype).cuda()
sigma = 0.02
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
layer_number = 1
drop_path_rate = 0.0
drop_path_rates = [
rate.item() for rate in torch.linspace(0, drop_path_rate, config.num_layers)]
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
            num_gqa_groups=config.num_attention_heads // num_querys_per_gqa_group,
layernorm_epsilon=1e-5,
hidden_dropout=0.0,
attention_dropout=config.dropout_p,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
layer_number=layer_number,
kv_channels=config.head_dim,
tp_group=None,
            tp_size=1,
params_dtype=dtype,
get_rng_state_tracker=None,
fuse_wgrad_accumulation=False,
seq_length=config.seq_len,
micro_batch_size=bs,
sequence_parallel=False,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
layer_type="encoder",
drop_path_rate=drop_path_rates[layer_number - 1],
set_parallel_mode=True,
fuse_qkv_params=True,
zero_centered_gamma=False,
qkv_weight_interleaved=False,
ub_tp_comm_overlap=False,
bias=True,
)
.to(dtype=dtype)
.cuda()
)
op = block(inp, self_attn_mask_type=config.attn_mask_type)
op.backward(op_grad)
return op, inp.grad
model_configs_fp8 = {
"test1": ModelConfig(1, 1024, 16, 64, 512, 0.0, "no_mask"),
}
batch_sizes_fp8 = [1, 4]
param_types_fp8 = [torch.float16]
@pytest.mark.skipif(not fp8_available, reason=reason_for_no_fp8)
@pytest.mark.parametrize("dtype", param_types_fp8)
@pytest.mark.parametrize("bs", batch_sizes_fp8)
@pytest.mark.parametrize("model", model_configs_fp8.keys())
def test_dpa_fp8(dtype, bs, model):
"""Test DotProductAttention module with FP8,
using cpp_extensions import fused_attn_fwd/bwd_qkvpacked and UnfusedDotProductAttention"""
config = model_configs_fp8[model]
fused_attn_fwd, fused_attn_bwd = _run_dpa_fp8(
dtype, bs, config, "FusedAttention")
unfused_attn_fwd, unfused_attn_bwd = _run_dpa_fp8_ref(
dtype, bs, config, "UnfusedDotProductAttention")
atol, rtol = (2.5e-2, 2.5e-2)
assert torch.allclose(fused_attn_fwd, unfused_attn_fwd, atol=atol, rtol=rtol)
assert torch.allclose(fused_attn_bwd, unfused_attn_bwd, atol=atol, rtol=rtol)
def _run_dpa_fp8(dtype, bs, config, backend):
reset_rng_states()
os.environ["NVTE_FLASH_ATTN"] = "0"
os.environ["NVTE_FUSED_ATTN"] = "0"
inp = 0.01 * torch.randn(
bs * config.seq_len, config.num_attention_heads * config.head_dim,
dtype=dtype).cuda()
inp.requires_grad=True
seqlens = torch.empty(bs, dtype=torch.int32).cuda()
seqlens.fill_(config.seq_len)
cu_seqlens = torch.zeros(bs + 1, device=inp.device, dtype=torch.int32)
cu_seqlens[1:] = torch.cumsum(seqlens, dim=0)
op_grad = 0.01 * torch.randn(
bs * config.seq_len, config.num_attention_heads * config.head_dim,
dtype=dtype).cuda()
torch.save(op_grad, 'op_grad.pt')
fp8_recipe = recipe.DelayedScaling(
margin=0,
interval=1,
fp8_format=recipe.Format.HYBRID,
amax_history_len=1,
amax_compute_algo="most_recent",
)
dpa = DPA_FP8(config).to(dtype=torch.float16).cuda()
with fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
op = dpa(inp, cu_seqlens, config.seq_len)
op.backward(op_grad)
context = torch.load("ctx.pt")
dqkv = torch.load('dqkv.pt')
    return (context.view(bs, config.seq_len, -1).transpose(0, 1),
            dqkv.view(bs, config.seq_len, 3, config.num_attention_heads,
                      config.head_dim).transpose(0, 1).contiguous())
def _run_dpa_fp8_ref(dtype, bs, config, backend):
os.environ["NVTE_FLASH_ATTN"] = "0"
os.environ["NVTE_FUSED_ATTN"] = "0"
if backend == "FlashAttention":
os.environ["NVTE_FLASH_ATTN"] = "1"
if backend == "FusedAttention":
os.environ["NVTE_FUSED_ATTN"] = "1"
inp = torch.load('qkv.pt').cuda()
inp.requires_grad=True
seqlens = torch.empty(bs, dtype=torch.int32).cuda()
seqlens.fill_(config.seq_len)
cu_seqlens = torch.zeros(bs + 1, device=inp.device, dtype=torch.int32)
cu_seqlens[1:] = torch.cumsum(seqlens, dim=0)
op_grad = torch.load('op_grad.pt').cuda().view(bs, config.seq_len, -1).transpose(0,1)
block = (
DotProductAttention(
config.num_attention_heads,
config.head_dim,
attention_dropout=config.dropout_p,
sequence_parallel=False,
tp_size=1,
get_rng_state_tracker=None,
tp_group=None,
layer_number=1,
attention_type="self"
).to(dtype=dtype).cuda()
)
    q = inp[:, :, 0, :, :]
    k = inp[:, :, 1, :, :]
    v = inp[:, :, 2, :, :]
op = block(q, k, v, attn_mask_type=config.attn_mask_type)
op.backward(op_grad)
torch.save(op,'ctx_ref.pt')
torch.save(inp.grad,'dqkv_ref.pt')
return op, inp.grad
from torch.nn.parameter import Parameter
import transformer_engine.pytorch.cpp_extensions as ext
import transformer_engine_extensions as tex
import transformer_engine.pytorch.fp8 as fp8
from transformer_engine.pytorch import fp8_autocast
from transformer_engine.pytorch.module.base import TransformerEngineBaseModule, _prepare_backward
from transformer_engine.common import recipe
from typing import Union, Dict, Any, Tuple, List
from transformer_engine.pytorch.cpp_extensions.fused_attn import (
fused_attn_fwd_qkvpacked,
fused_attn_bwd_qkvpacked,
FusedAttnBackend)
_CUBLASLT_WORKSPACE_SIZE_BYTES = 33_554_432 # 32MiB
_2X_ACC_FPROP = False
_2X_ACC_DGRAD = False
_2X_ACC_WGRAD = False
META_QKV = tex.FP8FwdTensors.GEMM1_OUTPUT
META_O = tex.FP8FwdTensors.GEMM2_INPUT
META_DO = tex.FP8BwdTensors.GRAD_INPUT2
META_DQKV = tex.FP8BwdTensors.GRAD_OUTPUT1
META_S = tex.FP8FwdTensors.GEMM3_WEIGHT
META_DS = tex.FP8BwdTensors.GRAD_INPUT3
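# Note on the META_* constants above: this test reuses the generic per-GEMM FP8
# scaling slots of fp8_meta (GEMM inputs, weights and outputs) as storage for the
# attention-specific tensors -- packed QKV, softmax output S, attention output O
# and their gradients. The exact slot assignment is just a bookkeeping convention
# of this test, not something mandated by the library.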
class _dpa_fp8(torch.autograd.Function):
@staticmethod
def forward(
ctx,
inp: torch.Tensor,
qkv_weight: torch.Tensor,
qkv_bias: torch.Tensor,
cu_seqlens: torch.Tensor,
num_attention_heads: int,
p_dropout: float,
max_s: int,
fast_zero_fill: bool,
fp8_meta: Dict[str, Any],
workspace: torch.Tensor,
is_training: bool,
) -> torch.Tensor:
assert inp.dim() == 2
in_features = qkv_weight.shape[-1]
h = num_attention_heads
d = in_features // h
b = cu_seqlens.numel() - 1
is_nl = False
        if 1 < b < 4:
max_s = 512
is_nl = True
fp8_dtype_forward = fp8.get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
inputmat, inputmat_t = ext.fp8_cast_transpose_fused(
inp,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
)
qkv_weight_fp8, qkv_weight_t_fp8 = ext.fp8_cast_transpose_fused(
qkv_weight,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
)
M = None
ZInv = None
philox_unpacked = None
qkv_out = ext.fp8_gemm(
qkv_weight_fp8,
fp8_meta["scaling_fwd"].scale_inv,
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
inputmat,
fp8_meta["scaling_fwd"].scale_inv,
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
torch.uint8,
workspace,
bias=qkv_bias,
use_bias=True,
out_index=META_QKV,
fp8_meta_tensor=fp8_meta["scaling_fwd"],
use_split_accumulator=_2X_ACC_FPROP,
D_dtype=fp8_dtype_forward,
)
qkv_out = qkv_out.view(-1, 3, h, d)
qkv_out_fp16 = ext.cast_from_fp8(qkv_out, fp8_meta["scaling_fwd"],
META_QKV, fp8_dtype_forward,
tex.DType.kFloat16).view(b, max_s, 3, h, d).transpose(0,1).contiguous()
torch.save(qkv_out_fp16, 'qkv.pt')
# FMHA
context_, aux_ctx_tensors, *rest = fused_attn_fwd_qkvpacked(
is_training,
max_s,
cu_seqlens,
qkv_out,
fp8_dtype_forward,
FusedAttnBackend["FP8"],
None,
fp8_meta["scaling_fwd"].scale_inv[META_QKV],
fp8_meta["scaling_fwd"].scale[META_S],
fp8_meta["scaling_fwd"].scale[META_O],
fp8_meta["scaling_fwd"].amax_history[0][META_S],
fp8_meta["scaling_fwd"].amax_history[0][META_O],
attn_scale=None,
dropout=p_dropout,
fast_zero_fill=fast_zero_fill,
qkv_layout="qkv_interleaved",
attn_bias_type="no_bias",
attn_mask_type="padding",
rng_gen=None,
)
M, ZInv, philox_unpacked = aux_ctx_tensors
context = context_.view(-1, in_features)
context_t = tex.fp8_transpose(context, fp8_dtype_forward)
ctx.save_for_backward(
inputmat_t, qkv_weight_t_fp8, workspace,
qkv_out,
context_, context_t,
fp8_meta["scaling_fwd"].scale,
fp8_meta["scaling_fwd"].scale_inv,
)
ctx.aux_ctx_tensors = aux_ctx_tensors
ctx.fp8_meta = fp8_meta
ctx.cu_seqlens = cu_seqlens
ctx.p_dropout = p_dropout
ctx.max_s = max_s
ctx.fast_zero_fill = fast_zero_fill
ctx.is_nl = is_nl
ctx.hidden_size = in_features
ctx.num_attention_heads = num_attention_heads
context_fp16 = ext.cast_from_fp8(context, fp8_meta["scaling_fwd"],
META_O, fp8_dtype_forward, tex.DType.kFloat16)
torch.save(context_fp16, 'ctx.pt')
return context_fp16
@staticmethod
def backward(
ctx, grad_output: torch.Tensor
) -> Tuple[Union[torch.Tensor, None], ...]:
with _prepare_backward(True, ctx.fp8_meta, None, 1, name="_DPA"):
(
inputmat_t,
qkv_weight_t_fp8,
workspace,
qkv_out,
context, context_t,
fwd_scales,
fwd_scale_inverses,
) = ctx.saved_tensors
fp8_dtype_forward = fp8.get_fp8_te_dtype(
ctx.fp8_meta["recipe"], fprop_tensor=True
)
fp8_dtype_backward = fp8.get_fp8_te_dtype(
ctx.fp8_meta["recipe"], fprop_tensor=False
)
proj_dgrad = ext.cast_to_fp8(
grad_output, ctx.fp8_meta["scaling_bwd"], META_DO, fp8_dtype_backward
)
dqkv, *rest = fused_attn_bwd_qkvpacked(
ctx.max_s,
ctx.cu_seqlens,
qkv_out,
context,
proj_dgrad.view_as(context),
fp8_dtype_forward,
ctx.aux_ctx_tensors,
FusedAttnBackend["FP8"],
fwd_scale_inverses[META_QKV], # d_scale_qkv,
fwd_scale_inverses[META_S], # d_scale_s,
fwd_scale_inverses[META_O], # d_scale_o,
ctx.fp8_meta['scaling_bwd'].scale_inv[META_DO], # d_scale_do
fwd_scales[META_S], # q_scale_s
ctx.fp8_meta['scaling_bwd'].scale[META_DS], # q_scale_ds
ctx.fp8_meta['scaling_bwd'].scale[META_DQKV], # q_scale_dqkv
ctx.fp8_meta['scaling_bwd'].amax_history[0][META_DS], # amax_ds
ctx.fp8_meta['scaling_bwd'].amax_history[0][META_DQKV], # amax_dqkv
None,
ctx.p_dropout,
ctx.fast_zero_fill,
"qkv_interleaved",
"no_bias",
"padding",
)
dqkv_grad_output_c = dqkv.view(-1, 3*ctx.hidden_size)
dqkv_grad_output_c_fp16 = ext.cast_from_fp8(dqkv_grad_output_c,
ctx.fp8_meta["scaling_bwd"], META_DQKV,
fp8_dtype_backward, tex.DType.kFloat16)
torch.save(dqkv_grad_output_c_fp16, 'dqkv.pt')
qkv_bgrad, dqkv_grad_output_t = ext.fp8_transpose_bgrad_fused(
dqkv_grad_output_c,
ctx.fp8_meta["scaling_bwd"],
META_DQKV,
fp8_dtype_backward,
torch.float16,
)
# QKV DGRAD
qkv_dgrad = ext.fp8_gemm(
qkv_weight_t_fp8,
fwd_scale_inverses,
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
dqkv_grad_output_c,
ctx.fp8_meta["scaling_bwd"].scale_inv,
META_DQKV,
fp8_dtype_backward,
torch.float16,
workspace,
use_split_accumulator=_2X_ACC_DGRAD,
)
# QKV WGRAD
qkv_wgrad = ext.fp8_gemm(
inputmat_t,
fwd_scale_inverses,
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
dqkv_grad_output_t,
ctx.fp8_meta["scaling_bwd"].scale_inv,
META_DQKV,
fp8_dtype_backward,
torch.float16,
workspace,
use_split_accumulator=_2X_ACC_WGRAD,
)
return (qkv_dgrad,
qkv_wgrad,
qkv_bgrad,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None)
class DPA_FP8(TransformerEngineBaseModule):
def __init__(
self,
config,
params_dtype: torch.dtype = torch.float32):
super().__init__()
self.p_dropout = config.dropout_p
self.h = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_dim = config.head_dim
self.fast_zero_fill = True
self.qkv_weight = Parameter(
torch.empty(
self.hidden_size * 3,
self.hidden_size,
device=torch.cuda.current_device(),
dtype=params_dtype,
)
)
self.fp8_weight_shapes.append(self.qkv_weight.shape)
self.qkv_bias = Parameter(
torch.empty(
self.hidden_size * 3,
device=torch.cuda.current_device(),
dtype=params_dtype,
)
)
with torch.no_grad():
self.qkv_bias.zero_()
self.qkv_weight.fill_(1.0)
self.workspace = torch.empty(
_CUBLASLT_WORKSPACE_SIZE_BYTES, dtype=torch.int8, device="cuda"
)
def forward(
self, inp: torch.Tensor,
cu_seqlens, max_s,
) -> torch.Tensor:
with self.prepare_forward(inp, None, num_gemms=3) as inp:
out = _dpa_fp8.apply(
inp,
self.qkv_weight,
self.qkv_bias,
cu_seqlens,
self.h,
self.p_dropout,
max_s,
self.fast_zero_fill,
self.fp8_meta,
self.workspace,
self.training)
return out
def get_fp8_weights_scratchpad(
self,
is_first_microbatch: Union[bool, None],
) -> List[torch.Tensor]:
"""Needs override."""
| TransformerEngine-main | tests/pytorch/test_fused_attn.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import math
import os
import contextlib
from typing import List, Optional
import pytest
import copy
import torch
import torch.nn as nn
from torch.nn import Parameter
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from transformer_engine.pytorch.utils import (
init_method_normal,
scaled_init_method_normal,
attention_mask_func,
)
from transformer_engine.pytorch import (
DotProductAttention, LayerNormLinear, LayerNormMLP, Linear,
MultiheadAttention, RMSNorm, TransformerLayer
)
from transformer_engine.pytorch.distributed import checkpoint as te_checkpoint
seed = 1234
rng_str = "rng_state"
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# Record initial RNG state from script run.
_cpu_rng_state = torch.get_rng_state()
_cuda_rng_state = torch.cuda.get_rng_state()
class ModelConfig:
def __init__(self, hidden_size, eps, num_attention_heads, embed, num_layers, seq_len):
self.hidden_size = hidden_size
self.eps = eps
self.num_attention_heads = num_attention_heads
self.embed = embed
self.num_layers = num_layers
self.seq_len = seq_len
model_configs = {
"126m": ModelConfig(768, 1e-5, 12, 64, 12, 2048),
}
param_types = [torch.float32, torch.float16]
if torch.cuda.is_bf16_supported():
param_types.append(torch.bfloat16)
batch_sizes = [1, 2]
all_boolean = [True, False]
all_activations = ["gelu", "relu", "reglu", "geglu", "swiglu"]
all_normalizations = ["LayerNorm", "RMSNorm"]
mask_types = ["causal", "no_mask"]
def get_causal_attn_mask(sq: int) -> torch.Tensor:
return torch.triu(torch.ones(sq, sq, device="cuda"), diagonal=1).bool()
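# In the mask returned above, `True` marks positions that must *not* be attended to
# (the strictly upper-triangular part, i.e. future tokens), which is the convention
# used by `attention_mask_func` and by the reference torch attention below.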
def assert_all_equal(l1: List[torch.Tensor], l2: List[torch.Tensor]) -> bool:
"""Ensures two lists are equal."""
assert len(l1) == len(l2), "Unequal number of outputs."
for t1, t2 in zip(l1, l2):
assert torch.equal(t1, t2), "Output mismatch."
def assert_allclose(l1: List[torch.Tensor], l2: List[torch.Tensor], atol: float) -> bool:
"""Ensures two lists are equal."""
assert len(l1) == len(l2), "Unequal number of outputs."
for t1, t2 in zip(l1, l2):
result = torch.allclose(t1, t2, atol=atol)
if not result:
diff = torch.abs(t1 - t2).flatten()
m = torch.argmax(diff)
msg = (f"Outputs not close enough."
f"Location of the maximum difference: {m.item()} "
f"with {t1.flatten()[m].item()} vs {t2.flatten()[m].item()} "
f"(diff {diff[m].item()})."
)
raise AssertionError(msg)
def _set_cuda_rng_state(new_state, device=-1):
"""Sets the random number generator state of the current GPU.
    Arguments:
new_state (torch.ByteTensor): The desired state
This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
with a single change: the input state is not cloned. Cloning caused
major performance issues for +4 GPU cases.
"""
if hasattr(_C, "_cuda_setRNGState") and callable(_C._cuda_setRNGState):
# older PyTorch
def cb():
with device_ctx_manager(device):
_C._cuda_setRNGState(new_state)
else:
# newer PyTorch
if device == -1:
device = torch.device("cuda")
elif isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device("cuda", device)
def cb():
idx = device.index
if idx is None:
idx = torch.cuda.current_device()
default_generator = torch.cuda.default_generators[idx]
default_generator.set_state(new_state)
_lazy_call(cb)
def reset_rng_states() -> None:
    # Revert to the initial RNG state.
torch.set_rng_state(_cpu_rng_state)
_set_cuda_rng_state(_cuda_rng_state)
class CudaRNGStatesTracker:
"""Tracker for the cuda RNG states.
Using the `add` method, a cuda rng state is initialized based on
the input `seed` and is assigned to `name`. Later, by forking the
rng state, we can perform operations and return to our starting
cuda state.
"""
def __init__(self):
# Map from a string name to the cuda rng state.
self.states_ = {}
        # Seeds are just for bookkeeping and ensure no seed is set twice.
self.seeds_ = set()
def reset(self):
"""Set to the initial state (no tracker)."""
self.states_ = {}
self.seeds_ = set()
def get_states(self):
"""Get rng states. Copy the dictionary so we have direct
pointers to the states, not just a pointer to the dictionary."""
states = {}
for name in self.states_:
states[name] = self.states_[name]
return states
def set_states(self, states):
"""Set the rng states. For efficiency purposes, we do not check
the size of seed for compatibility."""
self.states_ = states
def add(self, name, seed):
"""Track the rng state."""
# Check seed is not already used.
if seed in self.seeds_:
raise Exception("seed {} already exists".format(seed))
self.seeds_.add(seed)
# Check that state is not already defined.
if name in self.states_:
raise Exception("cuda rng state {} already exists".format(name))
# Get the current rng state.
orig_rng_state = torch.cuda.get_rng_state()
# Set the new state and store it.
torch.cuda.manual_seed(seed)
self.states_[name] = torch.cuda.get_rng_state()
# Reset rng state to what it was.
_set_cuda_rng_state(orig_rng_state)
@contextlib.contextmanager
def fork(self, name=rng_str):
"""Fork the cuda rng state, perform operations, and exit with
the original state."""
# Check if we have added the state
if name not in self.states_:
raise Exception("cuda rng state {} is not added".format(name))
# Store current rng state.
orig_cuda_rng_state = torch.cuda.get_rng_state()
# Set rng state to the desired one
_set_cuda_rng_state(self.states_[name])
# Do the stuff we wanted to do.
try:
yield
finally:
# Update the current rng state for later use.
self.states_[name] = torch.cuda.get_rng_state()
# And set the state to the original state we started with.
_set_cuda_rng_state(orig_cuda_rng_state)
_DUMMY_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
_DUMMY_CUDA_RNG_STATE_TRACKER.add(rng_str, seed)
def get_dummy_cuda_rng_tracker():
"""Get cuda rng tracker."""
return _DUMMY_CUDA_RNG_STATE_TRACKER
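# A minimal illustrative sketch (not exercised by the tests): drawing random numbers
# inside `fork()` consumes the tracked "rng_state" stream and leaves the global CUDA
# RNG state untouched, which is what keeps dropout patterns reproducible when
# activations are recomputed.
def _example_rng_tracker_usage():
    """Hypothetical helper showing the intended usage of the dummy tracker."""
    tracker = get_dummy_cuda_rng_tracker()
    with tracker.fork():
        # This draw advances only the tracked state, not the default CUDA generator.
        return torch.rand(4, device="cuda")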
class TorchScaledMaskedSoftmax(nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(
self, inp: torch.Tensor, mask: torch.Tensor, scale: Optional[float] = None
) -> torch.Tensor:
dtype = inp.dtype
inp = inp.float()
if scale is not None:
inp = inp * scale
mask_output = attention_mask_func(inp, mask) if mask is not None else inp
probs = torch.nn.Softmax(dim=-1)(mask_output)
probs = probs.to(dtype)
return probs
class TorchDotProductAttention(torch.nn.Module):
def __init__(
self,
kv_channels: int,
attention_dropout: float = 0.0,
) -> None:
super().__init__()
self.norm_factor = math.sqrt(kv_channels)
self.scale_mask_softmax = TorchScaledMaskedSoftmax()
self.attention_dropout = torch.nn.Dropout(attention_dropout)
def forward(
self,
query_layer: torch.Tensor,
key_layer: torch.Tensor,
value_layer: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
batch_size, seqlen = query_layer.shape[1], query_layer.shape[0]
# [b, np, sq, sk]
output_size = (
query_layer.size(1),
query_layer.size(2),
query_layer.size(0),
key_layer.size(0),
)
# [sq, b, np, hn] -> [sq, b * np, hn]
query_layer = query_layer.reshape(
output_size[2], output_size[0] * output_size[1], -1
)
# [sk, b, np, hn] -> [sk, b * np, hn]
key_layer = key_layer.reshape(output_size[3], output_size[0] * output_size[1], -1)
        # preallocating result tensor: [b * np, sq, sk]
matmul_result = torch.empty(
output_size[0] * output_size[1],
output_size[2],
output_size[3],
dtype=query_layer.dtype,
device=torch.cuda.current_device(),
)
# Raw attention scores. [b * np, sq, sk]
matmul_result = torch.baddbmm(
matmul_result,
query_layer.transpose(0, 1), # [b * np, sq, hn]
key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
beta=0.0,
alpha=(1.0 / self.norm_factor),
)
# change view to [b, np, sq, sk]
attention_scores = matmul_result.view(*output_size)
# attention scores and attention mask [b, np, sq, sk]
attention_probs = self.scale_mask_softmax(attention_scores, attention_mask)
attention_probs = self.attention_dropout(attention_probs)
# value_layer -> context layer.
# [sk, b, np, hn] --> [b, np, sq, hn]
output_size = (
value_layer.size(1),
value_layer.size(2),
query_layer.size(0),
value_layer.size(3),
)
# change view [sk, b * np, hn]
value_layer = value_layer.reshape(
value_layer.size(0), output_size[0] * output_size[1], -1
)
# change view [b * np, sq, sk]
attention_probs = attention_probs.view(
output_size[0] * output_size[1], output_size[2], -1
)
# matmul: [b * np, sq, hn]
context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
# change view [b, np, sq, hn]
context_layer = context_layer.view(*output_size)
# [b, np, sq, hn] --> [sq, b, np, hn]
context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
# [sq, b, np, hn] --> [sq, b, hp]
context_layer = context_layer.view(seqlen, batch_size, -1)
return context_layer
# Adapted from https://github.com/bzhangGo/rmsnorm/blob/c6691f20ec0af4128c8159c903071f7575404295/rmsnorm_torch.py
class TorchRMSNorm(nn.Module):
def __init__(self, in_features, eps=1e-5):
super().__init__()
self.eps = eps
self.in_features = in_features
self.weight = nn.Parameter(torch.ones(in_features))
self.register_parameter("weight", self.weight)
def forward(self, x):
norm_x2 = torch.sum(x.float()**2, dim=-1, keepdim=True)
d_x = self.in_features
rms_x2 = norm_x2 / d_x + self.eps
r_rms_x = rms_x2 ** (-1. / 2)
x_normed = x * r_rms_x
return (self.weight.float() * x_normed).to(x.dtype)
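# For reference, the forward pass above implements the usual RMSNorm formula:
#     y = weight * x / sqrt(mean(x ** 2, dim=-1) + eps)
# with the mean taken over the `in_features` dimension.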
class TorchLayerNormLinear(nn.Module):
def __init__(self, in_features: int, out_features: int,
eps: float, bias: bool = True,
normalization: str = "LayerNorm"):
super().__init__()
if normalization == "LayerNorm":
self.layernorm = nn.LayerNorm(in_features, eps=eps)
elif normalization == "RMSNorm":
self.layernorm = TorchRMSNorm(in_features, eps=eps)
else:
raise RuntimeError("Unsupported normalization")
self.linear = nn.Linear(in_features, out_features)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.linear(self.layernorm(x))
class TorchMHA(nn.Module):
def __init__(self, hidden_size: int, num_attention_heads: int):
super().__init__()
self.mhsa = nn.MultiheadAttention(
embed_dim=hidden_size,
num_heads=num_attention_heads,
dropout=0.1,
bias=True,
batch_first=False,
)
def forward(self, x, attention_mask=None):
output = self.mhsa(x, x, x, attn_mask=attention_mask, need_weights=False)
if isinstance(output, tuple):
output = output[0]
return output
_supported_act = {'geglu' : nn.GELU(approximate="tanh"),
'gelu' : nn.GELU(approximate="tanh"),
'reglu' : nn.ReLU(),
'relu' : nn.ReLU(),
'swiglu' : nn.SiLU()}
class TorchGLU(nn.Module):
def __init__(self, activation: str):
super().__init__()
self.act = _supported_act[activation]
def forward(self, x):
shape = x.size(-1)
a = x[..., :shape // 2]
b = x[..., (shape // 2):]
a = self.act(a)
return a * b
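# The GLU variants above split the projected features in half along the last
# dimension and use the second half as a multiplicative gate:
#     out = act(x[..., :d // 2]) * x[..., d // 2:]
# which is the reference behavior the TE geglu/reglu/swiglu paths are compared
# against in these tests.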
class TorchLayerNormMLP(nn.Module):
def __init__(self, hidden_size: int, ffn_hidden_size: int,
eps: float = 1e-5, activation = 'gelu',
normalization: str = "LayerNorm"):
super().__init__()
if normalization == "LayerNorm":
self.ln = nn.LayerNorm(hidden_size, eps=eps)
elif normalization == "RMSNorm":
self.ln = TorchRMSNorm(hidden_size, eps=eps)
else:
raise RuntimeError("Unsupported normalization")
if 'glu' in activation:
fc1_output_features = 2 * ffn_hidden_size
self.gelu = TorchGLU(activation)
else:
fc1_output_features = ffn_hidden_size
self.gelu = _supported_act[activation]
self.fc1 = nn.Linear(hidden_size, fc1_output_features)
self.fc2 = nn.Linear(ffn_hidden_size, hidden_size)
def forward(self, x):
return self.fc2(self.gelu(self.fc1(self.ln(x))))
class TorchGPT(nn.Module):
def __init__(self, hidden_size: int, eps: float, num_attention_heads: int):
super().__init__()
self.ln = nn.LayerNorm(hidden_size, eps=eps)
self.causal_attn = TorchMHA(hidden_size, num_attention_heads)
self.ln_mlp = TorchLayerNormMLP(hidden_size, 4 * hidden_size, eps)
self.resid_attn_dropout = nn.Dropout(0.1)
self.resid_mlp_dropout = nn.Dropout(0.1)
def forward(
self,
x: torch.Tensor,
attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
a = self.ln(x)
b = self.causal_attn(a, attn_mask)
x = x + self.resid_attn_dropout(b)
n = self.ln_mlp(x)
x = x + self.resid_mlp_dropout(n)
return x
def _test_e2e_selective_recompute(block, bs, dtype, config, recompute=False):
reset_rng_states()
te_inp_hidden_states = torch.randn(
config.seq_len, bs, config.hidden_size, dtype=dtype, requires_grad=True
).cuda()
te_inp_hidden_states.retain_grad()
te_inp_attn_mask = get_causal_attn_mask(config.seq_len)
te_out = block(
te_inp_hidden_states,
attention_mask=te_inp_attn_mask,
checkpoint_core_attention=recompute,
)
loss = te_out.sum()
loss.backward()
torch.cuda.synchronize()
outputs = [te_out, te_inp_hidden_states.grad]
for p in block.parameters():
if p.requires_grad:
outputs.append(p.grad)
return outputs
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
def test_gpt_selective_activation_recompute(dtype, bs, model):
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=0.1,
attention_dropout=0.1,
kv_channels=config.embed,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
get_rng_state_tracker=get_dummy_cuda_rng_tracker,
params_dtype=dtype,
)
.cuda()
.eval()
)
outputs = _test_e2e_selective_recompute(block, bs, dtype, config, recompute=False)
outputs_recompute = _test_e2e_selective_recompute(block, bs, dtype, config, recompute=True)
assert_all_equal(outputs, outputs_recompute)
def _test_e2e_full_recompute(block, bs, dtype, config, recompute=False):
reset_rng_states()
te_inp_hidden_states = torch.randn(
config.seq_len, bs, config.hidden_size, dtype=dtype, requires_grad=True
).cuda()
te_inp_hidden_states.retain_grad()
te_inp_attn_mask = get_causal_attn_mask(config.seq_len)
if recompute:
te_out = te_checkpoint(
block,
False, # distribute_saved_activations
get_dummy_cuda_rng_tracker,
None, # tp_group
te_inp_hidden_states,
attention_mask=te_inp_attn_mask,
checkpoint_core_attention=False,
)
else:
te_out = block(
te_inp_hidden_states,
attention_mask=te_inp_attn_mask,
checkpoint_core_attention=False,
)
loss = te_out.sum()
loss.backward()
torch.cuda.synchronize()
outputs = [te_out, te_inp_hidden_states.grad]
for p in block.parameters():
if p.requires_grad:
outputs.append(p.grad)
return outputs
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
def test_gpt_full_activation_recompute(dtype, bs, model):
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=0.1,
attention_dropout=0.1,
kv_channels=config.embed,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
get_rng_state_tracker=get_dummy_cuda_rng_tracker,
params_dtype=dtype,
)
.cuda()
.eval()
)
outputs = _test_e2e_full_recompute(block, bs, dtype, config, recompute=False)
outputs_recompute = _test_e2e_full_recompute(block, bs, dtype, config, recompute=True)
assert_all_equal(outputs, outputs_recompute)
def _test_e2e_checkpointing_get_model(config, dtype):
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
return (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=0.1,
attention_dropout=0.1,
kv_channels=config.embed,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
params_dtype=dtype,
)
.cuda()
.eval()
)
def _test_e2e_checkpointing(bs, dtype, config, checkpoint=False, steps=10, path="checkpoint.pt"):
reset_rng_states()
te_inp_hidden_states = torch.randn(
config.seq_len, bs, config.hidden_size, dtype=dtype, requires_grad=True
).cuda()
te_inp_hidden_states.retain_grad()
te_inp_attn_mask = get_causal_attn_mask(config.seq_len)
block = _test_e2e_checkpointing_get_model(config, dtype)
for _ in range(steps // 2):
te_out = block(
te_inp_hidden_states,
te_inp_attn_mask,
)
loss = te_out.sum()
loss.backward()
if checkpoint:
# This process is necessary so that we can start afresh with
# a new model while erasing all internal state to ensure that
# loading from a checkpoint gives bitwise identical results.
# Since gradients are being accumulated, it is important to
# restore them post loading the checkpoint.
torch.save(block.state_dict(), path)
param_grads = []
for p in block.parameters():
if p.requires_grad:
param_grads.append(p.grad.clone())
del block
block = _test_e2e_checkpointing_get_model(config, dtype)
block.load_state_dict(torch.load(path))
for p in block.parameters():
if p.requires_grad:
p.grad = param_grads.pop(0)
assert not param_grads, "Oops!"
for _ in range(steps // 2):
te_out = block(
te_inp_hidden_states,
te_inp_attn_mask,
)
loss = te_out.sum()
loss.backward()
torch.cuda.synchronize()
if os.path.exists(path):
os.remove(path)
outputs = [te_out, te_inp_hidden_states.grad]
for p in block.parameters():
if p.requires_grad:
outputs.append(p.grad)
return outputs
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
def test_gpt_checkpointing(dtype, bs, model):
config = model_configs[model]
outputs = _test_e2e_checkpointing(bs, dtype, config, checkpoint=False)
outputs_recompute = _test_e2e_checkpointing(bs, dtype, config, checkpoint=True)
assert_all_equal(outputs, outputs_recompute)
def _test_e2e_gpt_accuracy(block, bs, dtype, config):
reset_rng_states()
inp_hidden_states = torch.randn(
config.seq_len, bs, config.hidden_size, dtype=dtype, requires_grad=True
).cuda()
inp_hidden_states.retain_grad()
inp_attn_mask = get_causal_attn_mask(config.seq_len)
out = block(inp_hidden_states, inp_attn_mask)
loss = out.sum()
loss.backward()
torch.cuda.synchronize()
outputs = [out, inp_hidden_states.grad]
for p in block.parameters():
if p.requires_grad:
outputs.append(p.grad)
return outputs
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
def test_gpt_accuracy(dtype, bs, model):
config = model_configs[model]
te_gpt = (
TransformerLayer(
hidden_size=config.hidden_size,
ffn_hidden_size=4 * config.hidden_size,
num_attention_heads=config.num_attention_heads,
layernorm_epsilon=config.eps,
attention_dropout=0.1,
hidden_dropout=0.1,
fuse_qkv_params=True,
qkv_weight_interleaved=False,
)
.to(dtype=dtype)
.cuda()
.eval()
)
torch_gpt = (
TorchGPT(
config.hidden_size,
config.eps,
config.num_attention_heads,
)
.to(dtype=dtype)
.cuda()
.eval()
)
# Share params
with torch.no_grad():
torch_gpt.ln.weight = Parameter(
te_gpt.self_attention.layernorm_qkv.layer_norm_weight.clone()
)
torch_gpt.ln.bias = Parameter(te_gpt.self_attention.layernorm_qkv.layer_norm_bias.clone())
torch_gpt.causal_attn.mhsa.in_proj_weight = Parameter(
te_gpt.self_attention.layernorm_qkv.weight.clone()
)
torch_gpt.causal_attn.mhsa.in_proj_bias = Parameter(
te_gpt.self_attention.layernorm_qkv.bias.clone()
)
torch_gpt.causal_attn.mhsa.out_proj.weight = Parameter(
te_gpt.self_attention.proj.weight.clone()
)
torch_gpt.causal_attn.mhsa.out_proj.bias = Parameter(
te_gpt.self_attention.proj.bias.clone()
)
torch_gpt.ln_mlp.ln.weight = Parameter(te_gpt.layernorm_mlp.layer_norm_weight.clone())
torch_gpt.ln_mlp.ln.bias = Parameter(te_gpt.layernorm_mlp.layer_norm_bias.clone())
torch_gpt.ln_mlp.fc1.weight = Parameter(te_gpt.layernorm_mlp.fc1_weight.clone())
torch_gpt.ln_mlp.fc1.bias = Parameter(te_gpt.layernorm_mlp.fc1_bias.clone())
torch_gpt.ln_mlp.fc2.weight = Parameter(te_gpt.layernorm_mlp.fc2_weight.clone())
torch_gpt.ln_mlp.fc2.bias = Parameter(te_gpt.layernorm_mlp.fc2_bias.clone())
te_outputs = _test_e2e_gpt_accuracy(te_gpt, bs, dtype, config)
torch_outputs = _test_e2e_gpt_accuracy(torch_gpt, bs, dtype, config)
# Check output.
if dtype == torch.float32:
assert_allclose(te_outputs[0], torch_outputs[0], 5e-3)
else:
assert_allclose(te_outputs[0], torch_outputs[0], 5e-2)
def _test_mha_accuracy(block, bs, dtype, config, mask_type, te=True):
reset_rng_states()
inp_hidden_states = torch.randn(
config.seq_len, bs, config.hidden_size, dtype=dtype, requires_grad=True
).cuda()
inp_hidden_states.retain_grad()
inp_attn_mask = get_causal_attn_mask(config.seq_len) if mask_type == "causal" else None
forward_kwargs = {}
if te:
forward_kwargs["attn_mask_type"] = mask_type
forward_kwargs["attention_mask"] = inp_attn_mask
out = block(inp_hidden_states, **forward_kwargs)
loss = out.sum()
loss.backward()
torch.cuda.synchronize()
outputs = [out, inp_hidden_states.grad]
for p in block.parameters():
if p.requires_grad:
outputs.append(p.grad)
return outputs
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("mask_type", mask_types)
def test_mha_accuracy(dtype, bs, model, mask_type):
config = model_configs[model]
te_mha = (
MultiheadAttention(
config.hidden_size,
config.num_attention_heads,
fuse_qkv_params=True,
qkv_weight_interleaved=False,
input_layernorm=False,
)
.to(dtype=dtype)
.cuda()
.eval()
)
torch_mha = (
TorchMHA(
config.hidden_size,
config.num_attention_heads,
)
.to(dtype=dtype)
.cuda()
.eval()
)
# Share params
with torch.no_grad():
torch_mha.mhsa.in_proj_weight = Parameter(te_mha.qkv.weight.clone())
torch_mha.mhsa.in_proj_bias = Parameter(te_mha.qkv.bias.clone())
torch_mha.mhsa.out_proj.weight = Parameter(te_mha.proj.weight.clone())
torch_mha.mhsa.out_proj.bias = Parameter(te_mha.proj.bias.clone())
te_outputs = _test_mha_accuracy(te_mha, bs, dtype, config, mask_type, te=True)
torch_outputs = _test_mha_accuracy(torch_mha, bs, dtype, config, mask_type, te=False)
# Check output.
if dtype == torch.float32:
assert_allclose(te_outputs[0], torch_outputs[0], 5e-3)
else:
assert_allclose(te_outputs[0], torch_outputs[0], 5e-2)
def _test_granular_accuracy(block, bs, dtype, config):
reset_rng_states()
inp_hidden_states = torch.randn(
config.seq_len, bs, config.hidden_size, dtype=dtype, requires_grad=True
).cuda()
inp_hidden_states.retain_grad()
out = block(inp_hidden_states)
loss = out.sum()
loss.backward()
torch.cuda.synchronize()
outputs = [out, inp_hidden_states.grad]
for p in block.parameters():
if p.requires_grad:
outputs.append(p.grad)
return outputs
def _test_dpa_accuracy(block, bs, dtype, config):
reset_rng_states()
mask = torch.triu(torch.ones(config.seq_len, config.seq_len, device="cuda"), diagonal=1).bool()
query, key, value = [
torch.randn(config.seq_len, bs, config.num_attention_heads,
config.embed, dtype=dtype, requires_grad=True).cuda() for _ in range(3)]
query.retain_grad()
key.retain_grad()
value.retain_grad()
out = block(query, key, value, mask)
loss = out.sum()
loss.backward()
torch.cuda.synchronize()
return [out, query.grad, key.grad, value.grad]
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
def test_dpa_accuracy(dtype, bs, model):
config = model_configs[model]
te_dpa = (
DotProductAttention(
config.num_attention_heads,
config.embed,
attention_dropout=0.1, # dropout
)
.to(dtype=dtype)
.cuda()
.eval()
)
torch_dpa = (
TorchDotProductAttention(
config.embed,
0.1, # dropout
)
.to(dtype=dtype)
.cuda()
.eval()
)
te_outputs = _test_dpa_accuracy(te_dpa, bs, dtype, config)
torch_outputs = _test_dpa_accuracy(torch_dpa, bs, dtype, config)
# Check output.
if dtype == torch.float32:
assert_allclose(te_outputs[0], torch_outputs[0], 5e-3)
else:
assert_allclose(te_outputs[0], torch_outputs[0], 5e-2)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
def test_linear_accuracy(dtype, bs, model):
config = model_configs[model]
te_linear = (
Linear(
config.hidden_size,
4 * config.hidden_size,
bias=True,
)
.to(dtype=dtype)
.cuda()
.eval()
)
torch_linear = (
torch.nn.Linear(
config.hidden_size,
4 * config.hidden_size,
bias=True,
)
.to(dtype=dtype)
.cuda()
.eval()
)
# Share params
with torch.no_grad():
torch_linear.weight = Parameter(te_linear.weight.clone())
torch_linear.bias = Parameter(te_linear.bias.clone())
te_outputs = _test_granular_accuracy(te_linear, bs, dtype, config)
torch_outputs = _test_granular_accuracy(torch_linear, bs, dtype, config)
# Check output.
if dtype == torch.float32:
assert_allclose(te_outputs[0], torch_outputs[0], 5e-3)
else:
assert_allclose(te_outputs[0], torch_outputs[0], 5e-2)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("eps", [1e-1, 1e-3, 1e-5, 1e-7])
def test_rmsnorm_accuracy(dtype, bs, model, eps):
config = model_configs[model]
te_rmsnorm = (
RMSNorm(
config.hidden_size,
eps=eps,
)
.to(dtype=dtype)
.cuda()
.eval()
)
torch_rmsnorm = (
TorchRMSNorm(
config.hidden_size,
eps=eps,
)
.to(dtype=dtype)
.cuda()
.eval()
)
# Share params
with torch.no_grad():
torch_rmsnorm.weight = Parameter(te_rmsnorm.weight.clone())
te_outputs = _test_granular_accuracy(te_rmsnorm, bs, dtype, config)
torch_outputs = _test_granular_accuracy(torch_rmsnorm, bs, dtype, config)
# Check output.
if dtype == torch.float32:
assert_allclose(te_outputs[0], torch_outputs[0], 1e-7)
else:
assert_allclose(te_outputs[0], torch_outputs[0], 2e-2)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("normalization", all_normalizations)
def test_layernorm_linear_accuracy(dtype, bs, model, normalization):
config = model_configs[model]
te_ln_linear = (
LayerNormLinear(
config.hidden_size,
4 * config.hidden_size,
config.eps,
bias=True,
normalization=normalization,
)
.to(dtype=dtype)
.cuda()
.eval()
)
torch_ln_linear = (
TorchLayerNormLinear(
config.hidden_size,
4 * config.hidden_size,
config.eps,
bias=True,
normalization=normalization,
)
.to(dtype=dtype)
.cuda()
.eval()
)
# Share params
with torch.no_grad():
torch_ln_linear.layernorm.weight = Parameter(te_ln_linear.layer_norm_weight.clone())
if normalization != "RMSNorm":
torch_ln_linear.layernorm.bias = Parameter(te_ln_linear.layer_norm_bias.clone())
torch_ln_linear.linear.weight = Parameter(te_ln_linear.weight.clone())
torch_ln_linear.linear.bias = Parameter(te_ln_linear.bias.clone())
te_outputs = _test_granular_accuracy(te_ln_linear, bs, dtype, config)
torch_outputs = _test_granular_accuracy(torch_ln_linear, bs, dtype, config)
# Check output.
if dtype == torch.float32:
assert_allclose(te_outputs[0], torch_outputs[0], 5e-3)
else:
assert_allclose(te_outputs[0], torch_outputs[0], 5e-2)
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
@pytest.mark.parametrize("activation", all_activations)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_layernorm_mlp_accuracy(dtype, bs, model, activation, normalization):
config = model_configs[model]
te_ln_mlp = (
LayerNormMLP(
config.hidden_size,
4 * config.hidden_size,
activation=activation,
normalization=normalization,
)
.to(dtype=dtype)
.cuda()
.eval()
)
torch_ln_mlp = (
TorchLayerNormMLP(
config.hidden_size,
4 * config.hidden_size,
activation=activation,
normalization=normalization,
)
.to(dtype=dtype)
.cuda()
.eval()
)
# Share params
with torch.no_grad():
torch_ln_mlp.ln.weight = Parameter(te_ln_mlp.layer_norm_weight.clone())
if normalization != "RMSNorm":
torch_ln_mlp.ln.bias = Parameter(te_ln_mlp.layer_norm_bias.clone())
torch_ln_mlp.fc1.weight = Parameter(te_ln_mlp.fc1_weight.clone())
torch_ln_mlp.fc1.bias = Parameter(te_ln_mlp.fc1_bias.clone())
torch_ln_mlp.fc2.weight = Parameter(te_ln_mlp.fc2_weight.clone())
torch_ln_mlp.fc2.bias = Parameter(te_ln_mlp.fc2_bias.clone())
te_outputs = _test_granular_accuracy(te_ln_mlp, bs, dtype, config)
torch_outputs = _test_granular_accuracy(torch_ln_mlp, bs, dtype, config)
# Check output.
if dtype == torch.float32:
assert_allclose(te_outputs[0], torch_outputs[0], 5e-3)
else:
assert_allclose(te_outputs[0], torch_outputs[0], 5e-2)
def _test_gpt_e2e_cuda_graph(block, bs, dtype, config, graph):
reset_rng_states()
# Initialize loss function and optimizer.
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.SGD(block.parameters(), lr=0.1)
# Placeholders used for graph capture.
    static_input = torch.randn(
        config.seq_len, bs, config.hidden_size, device='cuda', dtype=dtype, requires_grad=True)
    static_target = torch.randn(
        config.seq_len, bs, config.hidden_size, device='cuda', dtype=dtype)
real_input = torch.rand_like(static_input)
real_target = torch.rand_like(static_target)
# Basic training loop.
def train_step():
optimizer.zero_grad(set_to_none=False)
out = block(static_input)
loss = loss_fn(out, static_target)
loss.backward()
optimizer.step()
return out
# Warmup steps in a separate stream.
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
for _ in range(3):
train_step()
torch.cuda.current_stream().wait_stream(s)
# Capture graph.
g = None
static_output = None
if graph:
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
static_output = train_step()
# Run with new data.
with torch.no_grad():
static_input.copy_(real_input)
static_target.copy_(real_target)
if graph:
g.replay()
else:
static_output = train_step()
grads = [static_input.grad]
for p in block.parameters():
if p.requires_grad:
grads.append(p.grad)
with torch.no_grad():
output = static_output.clone()
return output, grads
@pytest.mark.parametrize("dtype", param_types)
@pytest.mark.parametrize("bs", batch_sizes)
@pytest.mark.parametrize("model", model_configs.keys())
def test_gpt_cuda_graph(dtype, bs, model):
config = model_configs[model]
sigma = 0.023
init_method = init_method_normal(sigma)
output_layer_init_method = scaled_init_method_normal(sigma, config.num_layers)
block = (
TransformerLayer(
config.hidden_size,
4 * config.hidden_size,
config.num_attention_heads,
layernorm_epsilon=config.eps,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
hidden_dropout=0.1,
attention_dropout=0.1,
kv_channels=config.embed,
apply_residual_connection_post_layernorm=False,
output_layernorm=False,
)
.to(dtype=dtype)
.cuda()
)
graphed_block = copy.deepcopy(block)
out, grads = _test_gpt_e2e_cuda_graph(block, bs, dtype, config, False)
graphed_out, graphed_grads = _test_gpt_e2e_cuda_graph(graphed_block, bs, dtype, config, True)
params = list(block.parameters())
graphed_params = list(graphed_block.parameters())
# Check that results match
assert_allclose(out, graphed_out, 1e-3)
assert_allclose(params, graphed_params, 1e-3)
assert_allclose(grads, graphed_grads, 1e-3)
| TransformerEngine-main | tests/pytorch/test_numerics.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import transformer_engine.pytorch
print("OK")
| TransformerEngine-main | tests/pytorch/test_sanity_import.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import os
import re
import glob
import datetime
from prettytable import PrettyTable
from matplotlib import pyplot as plt
NUM_MOST_RECENT_RUNS = 100
te_path = os.getenv("TE_PATH", "/opt/transformerengine")
mlm_log_dir = os.path.join(te_path, "ci_logs")
te_ci_log_dir = "/data/transformer_engine_ci_logs"
te_ci_plot_dir = os.path.join(te_ci_log_dir, "plots")
convergence_pattern = (
    r"validation loss at iteration \d* on validation set | lm loss"
    r" value: ([\d.]*)E\+(\d*) | lm loss PPL: ([\d.]*)E\+(\d*)"
)
perf_pattern = r"elapsed time per iteration \(ms\): ([\d.]*)"
def get_output_file():
now = datetime.datetime.now()
default_fname = f"unknown_pipeline_id_{now.month}_{now.day}_{now.year}_{now.hour}_{now.minute}"
fname = f"{os.getenv('CI_PIPELINE_ID', default_fname)}.txt"
return os.path.join(te_ci_log_dir, fname)
def get_run_metrics(filename):
"""Return the loss, perplexity, and step time for a given megatron-LM logfile."""
with open(filename, "r") as f:
data = f.read()
# Loss and PPL
convergence_matches = re.findall(convergence_pattern, data)
loss = round(float(convergence_matches[1][0]) * (10 ** int(convergence_matches[1][1])), 2)
ppl = round(float(convergence_matches[2][2]) * (10 ** int(convergence_matches[2][3])), 2)
step_times_str = re.findall(perf_pattern, data)
step_times = [float(x) for x in step_times_str]
avg_step_time = round(sum(step_times) / len(step_times), 2)
return loss, ppl, avg_step_time
def print_run_logs():
tables = []
raw_logs = []
for model_config in os.listdir(mlm_log_dir):
model_config_dir = os.path.join(mlm_log_dir, model_config)
table = PrettyTable()
table.title = model_config
table.field_names = ["Config", "Loss", "Perplexity", "Avg time per step (ms)"]
for exp in os.listdir(model_config_dir):
filename = os.path.join(model_config_dir, exp)
loss, ppl, time_per_step = get_run_metrics(filename)
exp_name = exp[:-4]
table.add_row([exp_name, loss, ppl, time_per_step])
raw_logs.append(f"{model_config} {exp_name} {loss} {ppl} {time_per_step}\n")
tables.append(table)
with open(get_output_file(), "w") as f:
for raw_log in raw_logs:
f.write(raw_log)
for table in tables:
print(table)
def save_plot(title, legend, data, filename, ylabel):
x = list(range(1, len(data[0]) + 1))
plt.figure()
for label, y in zip(legend, data):
plt.plot(x, y, "-o", label=label)
plt.title(title)
plt.legend()
plt.xlabel(f"Last {NUM_MOST_RECENT_RUNS} runs")
plt.ylabel(ylabel)
plt.savefig(os.path.join(te_ci_plot_dir, filename))
def perf_and_loss_plots():
files = glob.glob(os.path.join(te_ci_log_dir, "*.txt"))
files.sort(key=os.path.getctime)
files = files[-NUM_MOST_RECENT_RUNS:]
data = {}
for filename in files:
with open(filename) as file:
for line in file:
line = line.strip()
model_config, exp_name, loss, _, time_per_step = line.split(" ")
if model_config not in data:
data[model_config] = {}
if exp_name not in data[model_config]:
data[model_config][exp_name] = {"loss": [], "perf": []}
data[model_config][exp_name]["loss"].append(float(loss))
data[model_config][exp_name]["perf"].append(float(time_per_step))
for model_config, experiments in data.items():
lm_loss_data = []
lm_perf_data = []
legend = []
for exp_name, lm_data in experiments.items():
legend.append(exp_name)
lm_loss_data.append(lm_data["loss"])
lm_perf_data.append(lm_data["perf"])
save_plot(
model_config + " loss", legend,
lm_loss_data, model_config + "_loss.png",
"LM-Loss",
)
save_plot(
model_config + " perf",
legend, lm_perf_data, model_config + "_perf.png",
"Time per step (ms)",
)
if __name__ == "__main__":
print_run_logs()
perf_and_loss_plots()
| TransformerEngine-main | tests/pytorch/distributed/print_logs.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
from typing import List, Tuple, Union
import pytest
import subprocess
import os
from dataclasses import dataclass, asdict
from functools import lru_cache
import torch
@dataclass()
class ModelConfigGPT:
NUM_LAYERS: int = 12
HIDDEN_SIZE: int = 768
NHEADS: int = 12
SEQLEN: int = 2048
MAX_POSITION_EMBEDDINGS: int = 2048
LR: float = 6.0e-4
MIN_LR: float = 6.0e-5
SPLIT: str = "98,2,0"
CLIP_GRAD: float = 1.0
WEIGHT_DECAY: float = 0.1
ADAM_BETA1: float = 0.9
ADAM_BETA2: float = 0.95
INIT_METHOD_STD: float = 0.023
model_configs = {
"126m": ModelConfigGPT(),
}
dtypes = ["bf16"]
fp8_recipes = [False, "hybrid"]
all_boolean = [True, False]
te_path = os.getenv("TE_PATH", "/opt/transformerengine")
mlm_log_dir = os.path.join(te_path, "ci_logs")
@lru_cache(maxsize=1)
def get_parallel_configs() -> List[Tuple[int, int]]:
"""Returns valid combinations of (tp, pp)."""
sizes = [1, 2, 4]
num_devices = torch.cuda.device_count()
parallel_configs = []
if num_devices > 1:
for dp in sizes:
for tp in sizes:
for pp in sizes:
if dp * tp * pp == num_devices:
parallel_configs.append((dp, tp, pp))
return parallel_configs
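# For example, on an 8-GPU node the helper above yields (dp, tp, pp) in
# {(1, 2, 4), (1, 4, 2), (2, 1, 4), (2, 2, 2), (2, 4, 1), (4, 1, 2), (4, 2, 1)},
# i.e. every product of sizes from {1, 2, 4} that equals the device count.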
def get_filename(
model: str, dp: int, tp: int, pp: int, sp: bool, use_te: bool, fp8_recipe: Union[bool, str]
) -> str:
sp = tp if sp else 1
config = f"gpt3_{model}_dp{dp}_tp{tp}_pp{pp}_sp{sp}"
config_dir = os.path.join(mlm_log_dir, config)
os.makedirs(config_dir, exist_ok=True)
fname = f"{'te' if use_te else 'megatron'}" + (f"_fp8_{fp8_recipe}" if fp8_recipe else "") + ".txt"
return os.path.join(config_dir, fname)
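# For example (illustrative values only): model="126m", dp=2, tp=2, pp=1, sp=False,
# use_te=True, fp8_recipe="hybrid" resolves to
#   <mlm_log_dir>/gpt3_126m_dp2_tp2_pp1_sp1/te_fp8_hybrid.txt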
def get_bash_arguments(filename: str, **kwargs) -> List[str]:
args = []
script_path = os.path.join(te_path, "tests/pytorch/distributed/run_megatron_lm_gpt.sh")
args.append(script_path)
for k, v in kwargs.items():
args.append(f"{k}={str(v)}")
args.append(f"FILENAME={filename}")
return args
@pytest.mark.parametrize("sp", all_boolean)
@pytest.mark.parametrize("use_te", all_boolean)
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("fp8_recipe", fp8_recipes)
@pytest.mark.parametrize("dp, tp, pp", get_parallel_configs())
@pytest.mark.parametrize("model", model_configs.keys())
def test_distributed(dtype, fp8_recipe, dp, tp, pp, sp, use_te, model):
if sp and tp == 1:
pytest.skip("No tensor parallel.")
if fp8_recipe and not use_te:
pytest.skip("TransformerEngine needed for FP8.")
subprocess.run(
get_bash_arguments(
get_filename(model, dp, tp, pp, sp, use_te, fp8_recipe),
DTYPE=dtype,
FP8=fp8_recipe,
SP=sp,
DP_SIZE=dp,
TP_SIZE=tp,
PP_SIZE=pp,
TRANSFORMER_IMPL="transformer_engine" if use_te else "local",
**asdict(model_configs[model]),
),
check=True)
| TransformerEngine-main | tests/pytorch/distributed/test_convergence.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import unittest
import flax
import jax
import jax.numpy as jnp
import numpy as np
from utils import assert_allclose
from transformer_engine.common.recipe import DelayedScaling
from transformer_engine.common.recipe import Format as FP8Format
from transformer_engine.jax import fp8_autocast, get_delayed_scaling
from transformer_engine.jax.fp8 import FP8Helper, is_fp8_available, AmaxComputeAlgo
from transformer_engine.jax.sharding import infer_major_sharding_type
from transformer_engine.jax.sharding import MajorShardingType
from transformer_engine.jax.sharding import ShardingResource
is_fp8_supported, reason = is_fp8_available()
class TestFP8Helper(unittest.TestCase):
@unittest.skipIf(not is_fp8_supported, reason=reason)
def test_initialize(self):
margin = 5.0
fp8_format = FP8Format.E4M3
update_fp8meta_interval = 10
amax_history_len = 10
FP8Helper.initialize(margin=margin,
fp8_format=fp8_format,
update_fp8meta_interval=update_fp8meta_interval,
amax_history_len=amax_history_len)
self.assertEqual(
FP8Helper.MARGIN, margin, f"FP8Helper.MARGIN initialization failed, should be {margin}"
f" but got {FP8Helper.MARGIN}.")
self.assertEqual(
FP8Helper.FP8_FORMAT, fp8_format,
f"FP8Helper.FP8_FORMAT initialization failed, should be {fp8_format}"
f" but got {FP8Helper.FP8_FORMAT}.")
self.assertEqual(
FP8Helper.UPDATE_FP8META_INTERVAL, update_fp8meta_interval,
"FP8Helper.UPDATE_FP8META_INTERVAL initialization failed, should be"
f"{update_fp8meta_interval} but got {FP8Helper.UPDATE_FP8META_INTERVAL}.")
self.assertEqual(
FP8Helper.AMAX_HISTORY_LEN, amax_history_len,
f"FP8Helper.AMAX_HISTORY_LEN initialization failed, should be {amax_history_len}"
f" but got {FP8Helper.AMAX_HISTORY_LEN}.")
FP8Helper.finalize()
@unittest.skipIf(not is_fp8_supported, reason=reason)
def test_update_fp8_metas(self):
FP8Helper.initialize(margin=3.0, amax_history_len=3)
seed = 0
key1, key2 = jax.random.split(jax.random.PRNGKey(seed))
num_of_gemm = 10
num_of_meta = FP8Helper.NUM_META_PER_GEMM * num_of_gemm
def select_amax(amaxes):
if FP8Helper.AMAX_COMPUTE_ALGO == AmaxComputeAlgo.MAX:
return jnp.max(amaxes, axis=-1, keepdims=True)
return amaxes[:, 0:1]
def get_fp8_scale(fp8_max, amax, scale):
fp8_max = np.array(fp8_max)
amax = np.array(amax)
scale = np.array(scale)
exp = np.floor(np.log2(fp8_max / amax)) - FP8Helper.MARGIN
sf = np.round(np.power(2, np.abs(exp)))
sf = np.where(amax > 0.0, sf, scale)
sf = np.where(np.isfinite(amax), sf, scale)
return np.where(exp < 0, 1 / sf, sf)
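        # Worked example of the reference computation above (margin=3, assuming an
        # E4M3 forward max of 448): for amax=3.5, exp = floor(log2(448 / 3.5)) - 3
        # = 7 - 3 = 4, so scale = 2**4 = 16 and scale_inv = 1 / 16.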
amax_meta_shape = (num_of_meta, FP8Helper.AMAX_HISTORY_LEN)
scale_meta_shape = (num_of_meta, 1)
fp8_max_array = FP8Helper.generate_fp8_max_array(num_of_meta)
fp8_amax_array1 = jax.random.uniform(key1, shape=amax_meta_shape)
fp8_scale_array1 = get_fp8_scale(fp8_max_array, select_amax(fp8_amax_array1),
jnp.ones(scale_meta_shape))
fp8_scale_inv_array1 = 1 / fp8_scale_array1
fp8_amax_array2 = jax.random.uniform(key2, shape=amax_meta_shape)
fp8_scale_array2 = get_fp8_scale(fp8_max_array, select_amax(fp8_amax_array2),
jnp.ones(scale_meta_shape))
fp8_scale_inv_array2 = 1 / fp8_scale_array2
state = flax.core.frozen_dict.FrozenDict({
FP8Helper.FP8_COLLECTION_NAME: {
"test_update_fp8_metas1": {
FP8Helper.FP8_MAX_NAME: fp8_max_array,
FP8Helper.FP8_AMAX_NAME: fp8_amax_array1,
FP8Helper.FP8_SCALE_NAME: jnp.ones(scale_meta_shape),
FP8Helper.FP8_SCALE_INV_NAME: jnp.ones(scale_meta_shape)
},
"test_update_fp8_metas2": {
FP8Helper.FP8_MAX_NAME: fp8_max_array,
FP8Helper.FP8_AMAX_NAME: fp8_amax_array2,
FP8Helper.FP8_SCALE_NAME: jnp.ones(scale_meta_shape),
FP8Helper.FP8_SCALE_INV_NAME: jnp.ones(scale_meta_shape)
}
}
})
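        # update_fp8_metas should recompute scale and scale_inv from the recorded amax
        # history; compare the flattened result against the reference values computed above.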
updated_state = FP8Helper.update_fp8_metas(state)
state_array, _ = jax.tree_util.tree_flatten(updated_state)
meta_per_gemm = FP8Helper.NUM_META_PER_GEMM + 1
scale_shift = 2
scale_inv_shift = 3
assert_allclose(state_array[0 * meta_per_gemm + scale_shift], fp8_scale_array1)
assert_allclose(state_array[0 * meta_per_gemm + scale_inv_shift], fp8_scale_inv_array1)
assert_allclose(state_array[1 * meta_per_gemm + scale_shift], fp8_scale_array2)
assert_allclose(state_array[1 * meta_per_gemm + scale_inv_shift], fp8_scale_inv_array2)
FP8Helper.finalize()
@unittest.skipIf(not is_fp8_supported, reason=reason)
def test_generate_fp8_max_array(self):
num_of_meta = FP8Helper.NUM_META_PER_GEMM * 2
def get_ref(format_for_test):
refer_list = []
for i in range(num_of_meta):
val = format_for_test.value.max_bwd \
if i % FP8Helper.NUM_META_PER_GEMM == FP8Helper.GRAD_META_IDX_PER_GEMM \
else format_for_test.value.max_fwd
refer_list.append([val])
return jnp.asarray(refer_list)
for fp8_format in FP8Format:
FP8Helper.initialize(fp8_format=fp8_format)
assert_allclose(get_ref(fp8_format), FP8Helper.generate_fp8_max_array(num_of_meta))
FP8Helper.finalize()
@unittest.skipIf(not is_fp8_supported, reason=reason)
def test_update_collections(self):
original_val = 0.0
updated_val = 10.0
original_state = {
"test1": original_val,
"test2": original_val,
}
updated_state = FP8Helper.update_collections({"test1": updated_val}, original_state)
self.assertEqual(updated_state["test1"], updated_val)
self.assertEqual(updated_state["test2"], original_val)
original_state = flax.core.frozen_dict.FrozenDict(original_state)
updated_state = FP8Helper.update_collections({"test1": updated_val}, original_state)
self.assertEqual(updated_state["test1"], updated_val)
self.assertEqual(updated_state["test2"], original_val)
class TestFP8Functions(unittest.TestCase):
    def _check_default_state(self):
self.assertFalse(FP8Helper.is_fp8_enabled())
self.assertEqual(infer_major_sharding_type(), MajorShardingType.SINGLE)
def _compare_delay_scaling(self, ref, test):
self.assertTrue(ref.margin == test.margin)
self.assertTrue(ref.interval == test.interval)
self.assertTrue(ref.fp8_format == test.fp8_format)
self.assertTrue(ref.amax_history_len == test.amax_history_len)
self.assertTrue(ref.amax_compute_algo == test.amax_compute_algo)
@unittest.skipIf(not is_fp8_supported, reason=reason)
def test_fp8_autocast(self):
        FP8Helper.finalize()    # Ensure this test is not affected by previous tests.
        self._check_default_state()
with fp8_autocast(enabled=False, fp8_recipe=DelayedScaling()):
self.assertFalse(FP8Helper.is_fp8_enabled())
self._compare_delay_scaling(get_delayed_scaling(), DelayedScaling())
        self._check_default_state()
ds = DelayedScaling(margin=5.0, interval=3, fp8_format=FP8Format.E4M3, amax_history_len=1)
with fp8_autocast(enabled=True, fp8_recipe=ds):
self.assertTrue(FP8Helper.is_fp8_enabled())
self._compare_delay_scaling(get_delayed_scaling(), ds)
        self._check_default_state()
ds = DelayedScaling(margin=3.0, interval=1, fp8_format=FP8Format.HYBRID, amax_history_len=1)
with fp8_autocast(enabled=True, fp8_recipe=ds):
self.assertTrue(FP8Helper.is_fp8_enabled())
self._compare_delay_scaling(get_delayed_scaling(), ds)
        self._check_default_state()
@unittest.skipIf(not is_fp8_supported, reason=reason)
def test_fp8_autocast_with_sharding_resource(self):
        FP8Helper.finalize()    # Ensure this test is not affected by previous tests.
        self._check_default_state()
ds = DelayedScaling(margin=5.0, interval=3, fp8_format=FP8Format.E4M3, amax_history_len=1)
        # TODO (Ming Huang): Support multi-GPU testing. # pylint: disable=fixme
# srs = (
# (ShardingResource(None, None), MajorShardingType.SINGLE),
# (ShardingResource('dp', None), MajorShardingType.DP),
# (ShardingResource(None, 'tp'), MajorShardingType.TP),
# (ShardingResource('dp', 'tp'), MajorShardingType.DPTP),
# )
srs = (
(ShardingResource(None, None), MajorShardingType.SINGLE),
(ShardingResource('dp', None), MajorShardingType.SINGLE),
(ShardingResource(None, 'tp'), MajorShardingType.SINGLE),
(ShardingResource('dp', 'tp'), MajorShardingType.SINGLE),
)
        # TODO (Ming Huang): Support multi-GPU testing. # pylint: disable=fixme
mesh_shape = (1, 1)
devices = np.asarray(jax.devices()[:1]).reshape(*mesh_shape)
with jax.sharding.Mesh(devices, ('dp', 'tp')):
for sr, mst in srs:
with fp8_autocast(enabled=True, fp8_recipe=ds, sharding_resource=sr):
self.assertTrue(FP8Helper.is_fp8_enabled())
self._compare_delay_scaling(get_delayed_scaling(), ds)
self.assertEqual(infer_major_sharding_type(), mst)
                self._check_default_state()
| TransformerEngine-main | tests/jax/test_helper.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
from functools import partial
import flax
import jax
import jax.numpy as jnp
import pytest
from transformer_engine.common.recipe import Format
from transformer_engine.jax.flax import TransformerLayer, TransformerLayerType
from transformer_engine.jax.fp8 import FP8Helper, is_fp8_available
from utils import assert_allclose
from utils import DecoderLayer as RefDecoderLayer
from utils import EncoderLayer as RefEncoderLayer
is_fp8_supported, reason = is_fp8_available()
def loss_fn(diff_xs, no_diff_xs, params, others, model, rngs):
output = model.apply({"params": params, **others}, *diff_xs, *no_diff_xs, rngs=rngs)
return jnp.mean(output)
def generate_test_rngs():
data_rng = jax.random.PRNGKey(0)
init_rng = {'params': jax.random.PRNGKey(1), 'dropout': jax.random.PRNGKey(2)}
apply_rng = {'dropout': jax.random.PRNGKey(3)}
return data_rng, init_rng, apply_rng
def generate_layer(layer_cls, init_rng, diff_inputs, no_diff_inputs):
layer = layer_cls()
variables = layer.init(init_rng, *diff_inputs, *no_diff_inputs)
others, params = flax.core.pop(variables, 'params')
del variables
return layer, params, others
def compare_dict(ref_fd, test_fd, rtol=1e-05, atol=1e-08):
    # Compatible with both Flax >= 0.7.1 and < 0.7.1, since Flax 0.7.1 removed FrozenDict.
ref_fd = flax.core.unfreeze(ref_fd)
test_fd = flax.core.unfreeze(test_fd)
for key in ref_fd:
assert key in test_fd, \
f"{key} not found in test dict {test_fd}"
        assert isinstance(test_fd[key], type(ref_fd[key])), \
            f"The data type does not match between ref and test " \
            f"dict on {key=}"
if isinstance(ref_fd[key], dict):
compare_dict(ref_fd[key], test_fd[key], rtol, atol)
else:
assert_allclose(ref_fd[key],
test_fd[key],
rtol=rtol,
atol=atol,
err_msg=f"{key=} is not close")
DATA_SHAPE = [(32, 128, 1024), (32, 512, 1024)] # (batch, seqlen, emb_dim)
DTYPE = [jnp.float32, jnp.bfloat16]
FP8_FORMATS = [Format.E4M3, Format.HYBRID]
_KEY_OF_RESIDUAL_POST_LAYERNORM = "apply_residual_connection_post_layernorm"
_KEY_OF_OUTPUT_LAYERNORM = "output_layernorm"
_KEY_OF_DROP_PATH = "drop_path"
_KEY_OF_FUSE_QKV_PARAMS = "fuse_qkv_params"
_KEY_OF_DROPOUT_RATE = "dropout_rate"
_KEY_OF_MLP_ACTIVATIONS = "mlp_activations"
_KEY_OF_FUSE_MLP_WI = "fuse_mlp_wi"
_KEY_OF_LAYERNORM_TYPE = 'layernorm_type'
_KEY_OF_ZERO_CENTERED_GAMMA = 'zero_centered_gamma'
_KEY_OF_TRANSPOSE_BS = 'transpose_batch_sequence'
_KEY_OF_SCALE_ATTN_LOGITS = "scale_attn_logits"
BASE_ATTRS = {_KEY_OF_TRANSPOSE_BS: True}
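# Each ATTRS entry below extends BASE_ATTRS to exercise one layer configuration: norm type,
# zero-centered gamma, residual/output layernorm, drop path, unfused QKV, gated MLP and dropout.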
ATTRS = [{
_KEY_OF_LAYERNORM_TYPE: 'rmsnorm',
}, {
_KEY_OF_LAYERNORM_TYPE: 'layernorm',
}, {
_KEY_OF_LAYERNORM_TYPE: 'layernorm',
_KEY_OF_ZERO_CENTERED_GAMMA: True
}, {
_KEY_OF_LAYERNORM_TYPE: 'rmsnorm',
_KEY_OF_RESIDUAL_POST_LAYERNORM: True
}, {
_KEY_OF_LAYERNORM_TYPE: 'rmsnorm',
_KEY_OF_OUTPUT_LAYERNORM: True
}, {
_KEY_OF_LAYERNORM_TYPE: 'rmsnorm',
_KEY_OF_RESIDUAL_POST_LAYERNORM: True,
_KEY_OF_OUTPUT_LAYERNORM: True
}, {
_KEY_OF_LAYERNORM_TYPE: 'rmsnorm',
_KEY_OF_DROP_PATH: 0.1
}, {
_KEY_OF_LAYERNORM_TYPE: 'rmsnorm',
_KEY_OF_FUSE_QKV_PARAMS: False
}, {
_KEY_OF_LAYERNORM_TYPE: 'rmsnorm',
_KEY_OF_DROPOUT_RATE: 0.0,
_KEY_OF_MLP_ACTIVATIONS: (('gelu', 'linear')),
_KEY_OF_FUSE_MLP_WI: True
}, {
_KEY_OF_SCALE_ATTN_LOGITS: True,
_KEY_OF_LAYERNORM_TYPE: 'rmsnorm',
_KEY_OF_DROPOUT_RATE: 0.8,
_KEY_OF_MLP_ACTIVATIONS: (('gelu', 'linear')),
_KEY_OF_FUSE_MLP_WI: True
}, {
_KEY_OF_TRANSPOSE_BS: False,
_KEY_OF_SCALE_ATTN_LOGITS: True,
_KEY_OF_LAYERNORM_TYPE: 'rmsnorm',
_KEY_OF_DROPOUT_RATE: 0.0,
_KEY_OF_MLP_ACTIVATIONS: (('gelu', 'linear')),
_KEY_OF_FUSE_MLP_WI: True
}]
ATTRS = [{**BASE_ATTRS, **attr} for attr in ATTRS]
class TestEncoderLayer:
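    # sync_params copies the reference layer's attention and MLP kernels into the TE layer's
    # parameter tree (reshaping the fused QKV and wi kernels) so both start from the same weights.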
@staticmethod
def sync_params(ref, target, attrs):
fuse_qkv = attrs.get(_KEY_OF_FUSE_QKV_PARAMS, True)
unfreeze_target = flax.core.unfreeze(target)
if fuse_qkv:
unfreeze_target['attention']['qkv']['kernel'] = \
jnp.reshape(ref['attention']['qkv']['kernel'],
unfreeze_target['attention']['qkv']['kernel'].shape)
else:
unfreeze_target['attention']['query']['kernel'] = \
ref['attention']['query']['kernel']
unfreeze_target['attention']['key']['kernel'] = \
ref['attention']['key']['kernel']
unfreeze_target['attention']['value']['kernel'] = \
ref['attention']['value']['kernel']
unfreeze_target['mlp']['wi_kernel'] = \
jnp.reshape(ref['mlp']['wi']['kernel'], unfreeze_target['mlp']['wi_kernel'].shape)
unfreeze_target['mlp']['wo_kernel'] = \
ref['mlp']['wo']['kernel']
return ref, unfreeze_target
def forward_runner(self, data_shape, dtype, attrs, rtol=1e-05, atol=1e-08):
transpose_batch_sequence = _KEY_OF_TRANSPOSE_BS in attrs and attrs[_KEY_OF_TRANSPOSE_BS]
batch, seqlen = data_shape[:2]
if transpose_batch_sequence:
data_shape = (data_shape[1], data_shape[0], *data_shape[2:])
sequence_dim = 0 if transpose_batch_sequence else 1
data_rng, init_rng, apply_rng = generate_test_rngs()
inputs = (jax.random.normal(data_rng, data_shape, dtype),)
padded_mask = jnp.zeros((batch, 1, seqlen, seqlen), dtype=jnp.uint8)
ref_masks = (1 - padded_mask,)
test_masks = (None, padded_mask) # The second arg of Transformer is encoded tokens.
te_layer_attrs = {}
for k, v in attrs.items():
if k == 'dropout_rate':
te_layer_attrs['attention_dropout'] = v
te_layer_attrs['hidden_dropout'] = v
elif k == 'fuse_mlp_wi':
continue
else:
te_layer_attrs[k] = v
ref_layer_cls = partial(RefEncoderLayer, dtype=dtype, **attrs)
layer_cls = partial(TransformerLayer,
hidden_dropout_dims=(sequence_dim,),
layer_type=TransformerLayerType.ENCODER,
self_attn_mask_type='padding',
dtype=dtype,
**te_layer_attrs)
ref_layer, ref_params, ref_others = generate_layer(ref_layer_cls, init_rng, inputs,
ref_masks)
test_layer, test_params, test_others = generate_layer(layer_cls, init_rng, inputs,
test_masks)
ref_params, test_params = TestEncoderLayer.sync_params(ref_params, test_params, attrs)
ref_out = loss_fn(inputs, ref_masks, ref_params, ref_others, ref_layer, apply_rng)
test_out = loss_fn(inputs, test_masks, test_params, test_others, test_layer, apply_rng)
assert_allclose(ref_out, test_out, rtol=rtol, atol=atol)
del data_rng, init_rng, apply_rng
def forward_backward_runner(self, data_shape, dtype, attrs, rtol=1e-05, atol=1e-08):
transpose_batch_sequence = _KEY_OF_TRANSPOSE_BS in attrs and attrs[_KEY_OF_TRANSPOSE_BS]
batch, seqlen = data_shape[:2]
if transpose_batch_sequence:
data_shape = (data_shape[1], data_shape[0], *data_shape[2:])
sequence_dim = 0 if transpose_batch_sequence else 1
data_rng, init_rng, apply_rng = generate_test_rngs()
inputs = (jax.random.normal(data_rng, data_shape, dtype),)
padded_mask = jnp.zeros((batch, 1, seqlen, seqlen), dtype=jnp.uint8)
ref_masks = (1 - padded_mask,)
test_masks = (None, padded_mask) # The second arg of Transformer is encoded tokens.
te_layer_attrs = {}
for k, v in attrs.items():
if k == 'dropout_rate':
te_layer_attrs['attention_dropout'] = v
te_layer_attrs['hidden_dropout'] = v
elif k == 'fuse_mlp_wi':
continue
else:
te_layer_attrs[k] = v
ref_layer_cls = partial(RefEncoderLayer, dtype=dtype, **attrs)
layer_cls = partial(TransformerLayer,
hidden_dropout_dims=(sequence_dim,),
layer_type=TransformerLayerType.ENCODER,
self_attn_mask_type='padding',
dtype=dtype,
**te_layer_attrs)
ref_layer, ref_params, ref_others = generate_layer(ref_layer_cls, init_rng, inputs,
ref_masks)
test_layer, test_params, test_others = generate_layer(layer_cls, init_rng, inputs,
test_masks)
ref_params, test_params = TestEncoderLayer.sync_params(ref_params, test_params, attrs)
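        # Under FP8, run a few warm-up steps first so the amax history and scaling factors
        # are populated before the forward/backward pass that is actually compared.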
if FP8Helper.is_fp8_enabled():
for _ in range(4):
_, tmp_grad = jax.value_and_grad(loss_fn, argnums=(3,),
has_aux=False)(inputs, test_masks, test_params,
test_others, test_layer, apply_rng)
_, fp8_meta_grad = flax.core.pop(tmp_grad[0], FP8Helper.FP8_COLLECTION_NAME)
test_others = FP8Helper.update_collections(
{FP8Helper.FP8_COLLECTION_NAME: fp8_meta_grad}, test_others)
test_others = FP8Helper.update_fp8_metas(test_others)
del tmp_grad, fp8_meta_grad
grad_fn = jax.value_and_grad(loss_fn, argnums=(0, 2), has_aux=False)
ref_out, ref_grads = grad_fn(inputs, ref_masks, ref_params, ref_others, ref_layer,
apply_rng)
test_out, test_grads = grad_fn(inputs, test_masks, test_params, test_others, test_layer,
apply_rng)
assert_allclose(ref_out, test_out, rtol=rtol, atol=atol)
assert_allclose(ref_grads[0][0], test_grads[0][0], rtol=rtol, atol=atol) # dgrad
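        # Rearrange the TE layer's weight gradients to mirror the reference layer's parameter
        # layout (split-out layernorm scales, unfused wi/wo kernels) so compare_dict can match keys.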
def reorganize_test_wgrad(test_wgrad, attrs):
fuse_qkv = attrs.get(_KEY_OF_FUSE_QKV_PARAMS, True)
attn_name = 'attention'
unfreeze_test_wgrad = flax.core.unfreeze(test_wgrad)
if "output_layernorm" not in attrs:
unfreeze_test_wgrad['pre_attention_layer_norm'] = {}
pre_attn_layer_key = 'qkv' if fuse_qkv else 'query'
unfreeze_test_wgrad['pre_attention_layer_norm']['scale'] = \
unfreeze_test_wgrad[attn_name][pre_attn_layer_key]['scale']
del unfreeze_test_wgrad[attn_name][pre_attn_layer_key]['scale']
if 'ln_bias' in unfreeze_test_wgrad[attn_name][pre_attn_layer_key]:
unfreeze_test_wgrad['pre_attention_layer_norm']['ln_bias'] = \
unfreeze_test_wgrad[attn_name][pre_attn_layer_key]['ln_bias']
del unfreeze_test_wgrad[attn_name][pre_attn_layer_key]['ln_bias']
if fuse_qkv:
unfreeze_test_wgrad[attn_name]['qkv']['kernel'] = \
jnp.reshape(unfreeze_test_wgrad[attn_name]['qkv']['kernel'],
(unfreeze_test_wgrad[attn_name]['qkv']['kernel'].shape[0], -1))
unfreeze_test_wgrad['pre_mlp_layer_norm'] = {}
unfreeze_test_wgrad['pre_mlp_layer_norm']['scale'] = \
unfreeze_test_wgrad['mlp']['scale']
del unfreeze_test_wgrad['mlp']['scale']
if 'ln_bias' in unfreeze_test_wgrad['mlp']:
unfreeze_test_wgrad['pre_mlp_layer_norm']['ln_bias'] = \
unfreeze_test_wgrad['mlp']['ln_bias']
del unfreeze_test_wgrad['mlp']['ln_bias']
unfreeze_test_wgrad['mlp']['wi'] = {}
unfreeze_test_wgrad['mlp']['wi']['kernel'] = \
jnp.reshape(unfreeze_test_wgrad['mlp']['wi_kernel'],
(unfreeze_test_wgrad['mlp']['wi_kernel'].shape[0], -1))
del unfreeze_test_wgrad['mlp']['wi_kernel']
unfreeze_test_wgrad['mlp']['wo'] = {}
unfreeze_test_wgrad['mlp']['wo']['kernel'] = \
unfreeze_test_wgrad['mlp']['wo_kernel']
del unfreeze_test_wgrad['mlp']['wo_kernel']
return unfreeze_test_wgrad
compare_dict(ref_grads[1],
reorganize_test_wgrad(test_grads[1], attrs),
rtol=rtol,
atol=atol) # wgrad
del data_rng, init_rng, apply_rng
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', ATTRS)
def test_forward(self, data_shape, dtype, attrs):
FP8Helper.finalize() # Ensure FP8 disabled.
self.forward_runner(data_shape, dtype, attrs, rtol=1e-05, atol=2e-04)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('fp8_format', FP8_FORMATS)
@pytest.mark.parametrize('attrs', ATTRS)
def test_forward_with_fp8(self, data_shape, dtype, fp8_format, attrs):
FP8Helper.initialize(fp8_format=fp8_format)
self.forward_runner(data_shape, dtype, attrs, rtol=1e-04, atol=1e-03)
FP8Helper.finalize()
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', ATTRS)
def test_forward_backward(self, data_shape, dtype, attrs):
FP8Helper.finalize() # Ensure FP8 disabled.
self.forward_backward_runner(data_shape, dtype, attrs, rtol=1e-05, atol=2e-04)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('fp8_format', FP8_FORMATS)
@pytest.mark.parametrize('attrs', ATTRS)
def test_forward_backward_with_fp8(self, data_shape, dtype, fp8_format, attrs):
FP8Helper.initialize(fp8_format=fp8_format)
self.forward_backward_runner(data_shape, dtype, attrs, rtol=1e-04, atol=1e-03)
FP8Helper.finalize()
class TestDecoderLayer:
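    # Same weight syncing as the encoder test, plus the fused KV kernel of the
    # encoder-decoder (cross) attention.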
@staticmethod
def sync_params(ref, target, attrs):
fuse_qkv = attrs.get(_KEY_OF_FUSE_QKV_PARAMS, True)
unfreeze_target = flax.core.unfreeze(target)
if fuse_qkv:
unfreeze_target['self_attention']['qkv']['kernel'] = \
jnp.reshape(ref['self_attention']['qkv']['kernel'],
unfreeze_target['self_attention']['qkv']['kernel'].shape)
unfreeze_target['encoder_decoder_attention']['kv']['kernel'] = \
jnp.reshape(ref['encoder_decoder_attention']['kv']['kernel'],
unfreeze_target['encoder_decoder_attention']['kv']['kernel'].shape)
else:
unfreeze_target['self_attention']['query']['kernel'] = \
ref['self_attention']['query']['kernel']
unfreeze_target['self_attention']['key']['kernel'] = \
ref['self_attention']['key']['kernel']
unfreeze_target['self_attention']['value']['kernel'] = \
ref['self_attention']['value']['kernel']
unfreeze_target['encoder_decoder_attention']['query']['kernel'] = \
ref['encoder_decoder_attention']['query']['kernel']
unfreeze_target['mlp']['wi_kernel'] = \
jnp.reshape(ref['mlp']['wi']['kernel'], unfreeze_target['mlp']['wi_kernel'].shape)
unfreeze_target['mlp']['wo_kernel'] = \
ref['mlp']['wo']['kernel']
return ref, unfreeze_target
def forward_runner(self, data_shape, dtype, attrs, rtol=1e-05, atol=1e-08):
transpose_batch_sequence = _KEY_OF_TRANSPOSE_BS in attrs and attrs[_KEY_OF_TRANSPOSE_BS]
batch, seqlen = data_shape[:2]
if transpose_batch_sequence:
data_shape = (data_shape[1], data_shape[0], *data_shape[2:])
sequence_dim = 0 if transpose_batch_sequence else 1
data_rng, init_rng, apply_rng = generate_test_rngs()
inputs = (jax.random.normal(data_rng, data_shape,
dtype), jax.random.normal(data_rng, data_shape, dtype))
padded_mask = jnp.zeros((batch, 1, seqlen, seqlen), dtype=jnp.uint8)
causal_mask = jnp.triu(jnp.ones((batch, 1, seqlen, seqlen), dtype=jnp.uint8), k=1)
ref_masks = (1 - causal_mask, 1 - padded_mask)
test_masks = (causal_mask, padded_mask)
te_layer_attrs = {}
for k, v in attrs.items():
if k == 'dropout_rate':
te_layer_attrs['attention_dropout'] = v
te_layer_attrs['hidden_dropout'] = v
elif k == 'fuse_mlp_wi':
continue
else:
te_layer_attrs[k] = v
ref_layer_cls = partial(RefDecoderLayer, dtype=dtype, **attrs)
layer_cls = partial(TransformerLayer,
hidden_dropout_dims=(sequence_dim,),
layer_type=TransformerLayerType.DECODER,
dtype=dtype,
**te_layer_attrs)
ref_layer, ref_params, ref_others = generate_layer(ref_layer_cls, init_rng, inputs,
ref_masks)
test_layer, test_params, test_others = generate_layer(layer_cls, init_rng, inputs,
test_masks)
ref_params, test_params = TestDecoderLayer.sync_params(ref_params, test_params, attrs)
ref_out = loss_fn(inputs, ref_masks, ref_params, ref_others, ref_layer, apply_rng)
test_out = loss_fn(inputs, test_masks, test_params, test_others, test_layer, apply_rng)
assert_allclose(ref_out, test_out, rtol=rtol, atol=atol)
del data_rng, init_rng, apply_rng
def forward_backward_runner(self, data_shape, dtype, attrs, rtol=1e-05, atol=1e-08):
transpose_batch_sequence = _KEY_OF_TRANSPOSE_BS in attrs and attrs[_KEY_OF_TRANSPOSE_BS]
batch, seqlen = data_shape[:2]
if transpose_batch_sequence:
data_shape = (data_shape[1], data_shape[0], *data_shape[2:])
sequence_dim = 0 if transpose_batch_sequence else 1
data_rng, init_rng, apply_rng = generate_test_rngs()
inputs = (jax.random.normal(data_rng, data_shape,
dtype), jax.random.normal(data_rng, data_shape, dtype))
padded_mask = jnp.zeros((batch, 1, seqlen, seqlen), dtype=jnp.uint8)
causal_mask = jnp.triu(jnp.ones((batch, 1, seqlen, seqlen), dtype=jnp.uint8), k=1)
ref_masks = (1 - causal_mask, 1 - padded_mask)
test_masks = (causal_mask, padded_mask)
te_layer_attrs = {}
for k, v in attrs.items():
if k == 'dropout_rate':
te_layer_attrs['attention_dropout'] = v
te_layer_attrs['hidden_dropout'] = v
elif k == 'fuse_mlp_wi':
continue
else:
te_layer_attrs[k] = v
ref_layer_cls = partial(RefDecoderLayer, dtype=dtype, **attrs)
layer_cls = partial(TransformerLayer,
hidden_dropout_dims=(sequence_dim,),
layer_type=TransformerLayerType.DECODER,
dtype=dtype,
**te_layer_attrs)
ref_layer, ref_params, ref_others = generate_layer(ref_layer_cls, init_rng, inputs,
ref_masks)
test_layer, test_params, test_others = generate_layer(layer_cls, init_rng, inputs,
test_masks)
ref_params, test_params = TestDecoderLayer.sync_params(ref_params, test_params, attrs)
if FP8Helper.is_fp8_enabled():
for _ in range(4):
_, tmp_grad = jax.value_and_grad(loss_fn, argnums=(3,),
has_aux=False)(inputs, test_masks, test_params,
test_others, test_layer, apply_rng)
_, fp8_meta_grad = flax.core.pop(tmp_grad[0], FP8Helper.FP8_COLLECTION_NAME)
test_others = FP8Helper.update_collections(
{FP8Helper.FP8_COLLECTION_NAME: fp8_meta_grad}, test_others)
test_others = FP8Helper.update_fp8_metas(test_others)
del tmp_grad, fp8_meta_grad
grad_fn = jax.value_and_grad(loss_fn, argnums=(0, 2), has_aux=False)
ref_out, ref_grads = grad_fn(inputs, ref_masks, ref_params, ref_others, ref_layer,
apply_rng)
test_out, test_grads = grad_fn(inputs, test_masks, test_params, test_others, test_layer,
apply_rng)
assert_allclose(ref_out, test_out, rtol=rtol, atol=atol)
assert_allclose(ref_grads[0][0], test_grads[0][0], rtol=rtol, atol=atol) # dgrad
def reorganize_test_wgrad(test_wgrad, attrs):
fuse_qkv = attrs.get(_KEY_OF_FUSE_QKV_PARAMS, True)
attn_name = 'self_attention'
unfreeze_test_wgrad = flax.core.unfreeze(test_wgrad)
if "output_layernorm" not in attrs:
unfreeze_test_wgrad['pre_self_attention_layer_norm'] = {}
pre_attn_layer_key = 'qkv' if fuse_qkv else 'query'
unfreeze_test_wgrad['pre_self_attention_layer_norm']['scale'] = \
unfreeze_test_wgrad[attn_name][pre_attn_layer_key]['scale']
del unfreeze_test_wgrad[attn_name][pre_attn_layer_key]['scale']
if 'ln_bias' in unfreeze_test_wgrad[attn_name][pre_attn_layer_key]:
unfreeze_test_wgrad['pre_self_attention_layer_norm']['ln_bias'] = \
unfreeze_test_wgrad[attn_name][pre_attn_layer_key]['ln_bias']
del unfreeze_test_wgrad[attn_name][pre_attn_layer_key]['ln_bias']
if fuse_qkv:
unfreeze_test_wgrad[attn_name]['qkv']['kernel'] = \
jnp.reshape(unfreeze_test_wgrad[attn_name]['qkv']['kernel'],
(unfreeze_test_wgrad[attn_name]['qkv']['kernel'].shape[0], -1))
attn_name = 'encoder_decoder_attention'
unfreeze_test_wgrad[attn_name]['kv']['kernel'] = \
jnp.reshape(unfreeze_test_wgrad[attn_name]['kv']['kernel'],
(unfreeze_test_wgrad[attn_name]['kv']['kernel'].shape[0], -1))
unfreeze_test_wgrad['pre_cross_attention_layer_norm'] = {}
unfreeze_test_wgrad['pre_cross_attention_layer_norm']['scale'] = \
unfreeze_test_wgrad['encoder_decoder_attention']['query']['scale']
del unfreeze_test_wgrad['encoder_decoder_attention']['query']['scale']
if 'ln_bias' in unfreeze_test_wgrad['encoder_decoder_attention']['query']:
unfreeze_test_wgrad['pre_cross_attention_layer_norm']['ln_bias'] = \
unfreeze_test_wgrad['encoder_decoder_attention']['query']['ln_bias']
del unfreeze_test_wgrad['encoder_decoder_attention']['query']['ln_bias']
unfreeze_test_wgrad['pre_mlp_layer_norm'] = {}
unfreeze_test_wgrad['pre_mlp_layer_norm']['scale'] = \
unfreeze_test_wgrad['mlp']['scale']
del unfreeze_test_wgrad['mlp']['scale']
if 'ln_bias' in unfreeze_test_wgrad['mlp']:
unfreeze_test_wgrad['pre_mlp_layer_norm']['ln_bias'] = \
unfreeze_test_wgrad['mlp']['ln_bias']
del unfreeze_test_wgrad['mlp']['ln_bias']
unfreeze_test_wgrad['mlp']['wi'] = {}
unfreeze_test_wgrad['mlp']['wi']['kernel'] = \
jnp.reshape(unfreeze_test_wgrad['mlp']['wi_kernel'],
(unfreeze_test_wgrad['mlp']['wi_kernel'].shape[0], -1))
del unfreeze_test_wgrad['mlp']['wi_kernel']
unfreeze_test_wgrad['mlp']['wo'] = {}
unfreeze_test_wgrad['mlp']['wo']['kernel'] = \
unfreeze_test_wgrad['mlp']['wo_kernel']
del unfreeze_test_wgrad['mlp']['wo_kernel']
return unfreeze_test_wgrad
compare_dict(ref_grads[1],
reorganize_test_wgrad(test_grads[1], attrs),
rtol=rtol,
atol=atol) # wgrad
del data_rng, init_rng, apply_rng
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', ATTRS)
def test_forward(self, data_shape, dtype, attrs):
FP8Helper.finalize() # Ensure FP8 disabled.
self.forward_runner(data_shape, dtype, attrs, rtol=1e-05, atol=2e-04)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('fp8_format', FP8_FORMATS)
@pytest.mark.parametrize('attrs', ATTRS)
def test_forward_with_fp8(self, data_shape, dtype, fp8_format, attrs):
FP8Helper.initialize(fp8_format=fp8_format)
self.forward_runner(data_shape, dtype, attrs, rtol=1e-04, atol=3e-02)
FP8Helper.finalize()
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', ATTRS)
def test_forward_backward(self, data_shape, dtype, attrs):
FP8Helper.finalize() # Ensure FP8 disabled.
self.forward_backward_runner(data_shape, dtype, attrs, rtol=1e-05, atol=2e-04)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('fp8_format', FP8_FORMATS)
@pytest.mark.parametrize('attrs', ATTRS)
def test_forward_backward_with_fp8(self, data_shape, dtype, fp8_format, attrs):
FP8Helper.initialize(fp8_format=fp8_format)
self.forward_backward_runner(data_shape, dtype, attrs, rtol=1e-04, atol=3e-02)
FP8Helper.finalize()
| TransformerEngine-main | tests/jax/test_layer.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import jax
import numpy as np
import pytest
from utils import is_devices_enough
from transformer_engine.jax.flax import extend_logical_axis_rules
from transformer_engine.jax.sharding import get_dot_sharding_meta
from transformer_engine.jax.sharding import get_elementwise_sharding_meta
from transformer_engine.jax.sharding import get_fp8_meta_sharding_meta
from transformer_engine.jax.sharding import global_shard_guard
from transformer_engine.jax.sharding import infer_major_sharding_type
from transformer_engine.jax.sharding import is_dp_enabled, is_tp_enabled
from transformer_engine.jax.sharding import ShardingMeta, ShardingResource, ShardingType
def _get_sharding_resource(mesh_names, sharding_type):
dp_r = None
tp_r = None
if sharding_type in (ShardingType.DP, ShardingType.DP_TP_COL, ShardingType.DP_TP_ROW):
dp_r = mesh_names[0]
if sharding_type in (ShardingType.TP_COL, ShardingType.TP_ROW):
tp_r = mesh_names[0]
if sharding_type in (ShardingType.DP_TP_COL, ShardingType.DP_TP_ROW):
tp_r = mesh_names[1]
return ShardingResource(dp_r, tp_r)
DEVICE_COUNT = 4
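# Each MESH_CONFIG entry is (mesh_shape, mesh_axis_names, sharding_type): 1-D meshes map all
# devices to a single dp or tp axis, while the (2, 2) meshes split them across both.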
MESH_CONFIG = [((4,), ("dp",), ShardingType.DP), ((4,), ("tp",), ShardingType.TP_COL),
((4,), ("tp",), ShardingType.TP_ROW), ((2, 2), ("dp", "tp"), ShardingType.DP_TP_COL),
((2, 2), ("dp", "tp"), ShardingType.DP_TP_ROW)]
LOGICAL_RULES = [
[(('a1', None), ('a2', 'ma2')), False],
[(('a1', None), ('a2', 'ma2'), ('a3', ('ma31', 'ma32'))), True],
[(('a1', None), ('a2', 'ma2'), ('a3', 'ma31'), ('a3', 'ma32')), False],
[(('a1', None), ('a2', 'ma2'), ('batch', 'batch_1200234')), True],
[(('a1', None), ('a2', 'ma2'), ('a2', 'ma1'), ('batch', 'model'), ('batch', 'data')), True],
]
SRS = [
ShardingResource(),
ShardingResource('data', None),
ShardingResource(None, 'model'),
ShardingResource('data', 'model')
]
class TestShardingSideAPI:
@pytest.mark.parametrize('base_rules,need_assert', LOGICAL_RULES)
@pytest.mark.parametrize('sr', SRS)
def test_extend_logical_axis_rules(self, base_rules, need_assert, sr):
with global_shard_guard(sr):
try:
target_te_rules = extend_logical_axis_rules(tuple())
extended_rules = extend_logical_axis_rules(base_rules)
assert extended_rules == (*base_rules, *target_te_rules)
assert not need_assert
except AssertionError as ae:
assert need_assert, f"{ae.args}"
class TestGeneralFunc:
@pytest.mark.parametrize('mesh_shape,mesh_names,sharding_type', MESH_CONFIG)
@pytest.mark.skipif(not is_devices_enough(DEVICE_COUNT), reason='Num of GPU is not enough')
def test_infer_major_sharding_type(
self,
mesh_shape, # pylint: disable=unused-argument
mesh_names,
sharding_type):
devices = np.asarray(jax.devices()[:DEVICE_COUNT]).reshape(*mesh_shape)
with global_shard_guard(_get_sharding_resource(mesh_names, sharding_type)):
with jax.sharding.Mesh(devices, mesh_names):
assert infer_major_sharding_type() is sharding_type.value[0]
@pytest.mark.parametrize('mesh_shape,mesh_names,sharding_type', MESH_CONFIG)
def test_is_dp_enabled(
self,
mesh_shape, # pylint: disable=unused-argument
mesh_names, # pylint: disable=unused-argument
sharding_type):
if sharding_type in (ShardingType.DP, ShardingType.DP_TP_COL, ShardingType.DP_TP_ROW):
assert is_dp_enabled(sharding_type.value[0])
else:
assert not is_dp_enabled(sharding_type.value[0])
@pytest.mark.parametrize('mesh_shape,mesh_names,sharding_type', MESH_CONFIG)
def test_is_tp_enabled(
self,
mesh_shape, # pylint: disable=unused-argument
mesh_names, # pylint: disable=unused-argument
sharding_type):
if sharding_type is ShardingType.DP:
assert not is_tp_enabled(sharding_type.value[0])
else:
assert is_tp_enabled(sharding_type.value[0])
class TestShardingMetaGenerator:
BATCH_AXIS_NAME = 'batch'
MODEL_AXIS_NAME = 'model'
@pytest.mark.parametrize('mesh_shape,mesh_names,sharding_type', MESH_CONFIG)
@pytest.mark.skipif(not is_devices_enough(DEVICE_COUNT), reason='Num of GPU is not enough')
def test_fp8_meta(self, mesh_shape, mesh_names, sharding_type, num_of_fp8_meta=4):
def stack_axes_meta(mapping):
return tuple(mapping for _ in range(num_of_fp8_meta))
def get_ref_sm():
if sharding_type == ShardingType.DP:
return ShardingMeta(stack_axes_meta({}), stack_axes_meta({}),
{TestShardingMetaGenerator.BATCH_AXIS_NAME: mesh_names[0]}, (),
())
if sharding_type == ShardingType.TP_COL:
return ShardingMeta(stack_axes_meta({}), stack_axes_meta({}),
{TestShardingMetaGenerator.MODEL_AXIS_NAME: mesh_names[0]}, (),
())
if sharding_type == ShardingType.TP_ROW:
return ShardingMeta(stack_axes_meta({}), stack_axes_meta({}),
{TestShardingMetaGenerator.MODEL_AXIS_NAME: mesh_names[0]}, (),
())
if sharding_type == ShardingType.DP_TP_COL:
return ShardingMeta(
stack_axes_meta({}), stack_axes_meta({}), {
TestShardingMetaGenerator.BATCH_AXIS_NAME: mesh_names[0],
TestShardingMetaGenerator.MODEL_AXIS_NAME: mesh_names[1]
}, (), ())
if sharding_type == ShardingType.DP_TP_ROW:
return ShardingMeta(
stack_axes_meta({}), stack_axes_meta({}), {
TestShardingMetaGenerator.BATCH_AXIS_NAME: mesh_names[0],
TestShardingMetaGenerator.MODEL_AXIS_NAME: mesh_names[1]
}, (), ())
return None
devices = np.asarray(jax.devices()[:DEVICE_COUNT]).reshape(*mesh_shape)
with global_shard_guard(_get_sharding_resource(mesh_names, sharding_type)):
with jax.sharding.Mesh(devices, mesh_names):
test_sm = get_fp8_meta_sharding_meta(
sharding_type,
num_of_fp8_meta,
dp_axis_name=TestShardingMetaGenerator.BATCH_AXIS_NAME,
tp_axis_name=TestShardingMetaGenerator.MODEL_AXIS_NAME)
assert test_sm == get_ref_sm()
@pytest.mark.parametrize('mesh_shape,mesh_names,sharding_type', MESH_CONFIG)
@pytest.mark.parametrize('a_shape, b_shape', [((64, 128, 256), (256, 512)),
((128, 64, 512), (512, 256))])
@pytest.mark.parametrize('batch_dim_of_a', [0, 1])
@pytest.mark.skipif(not is_devices_enough(DEVICE_COUNT), reason='Num of GPU is not enough')
def test_dot(self, mesh_shape, mesh_names, sharding_type, a_shape, b_shape, batch_dim_of_a):
model_dim_of_a = len(a_shape) - 1
model_dim_of_b = 0 if sharding_type in (ShardingType.TP_ROW, ShardingType.DP_TP_ROW) else 1
contracting_dims = ((-1,), (0,))
def get_ref_sm():
out_shape = (*a_shape[:min(contracting_dims[0])],
*b_shape[max(contracting_dims[1]) + 1:])
if sharding_type == ShardingType.DP:
a_new_shape = (*a_shape[:batch_dim_of_a], mesh_shape[0], -1,
*a_shape[batch_dim_of_a + 1:])
return ShardingMeta(({
batch_dim_of_a: TestShardingMetaGenerator.BATCH_AXIS_NAME
}, {}), ({
batch_dim_of_a: TestShardingMetaGenerator.BATCH_AXIS_NAME
}), {TestShardingMetaGenerator.BATCH_AXIS_NAME: mesh_names[0]},
[a_new_shape, b_shape], [out_shape])
if sharding_type == ShardingType.TP_COL:
b_new_shape = (b_shape[0], mesh_shape[0], b_shape[1] // mesh_shape[0])
return ShardingMeta(({}, {
1: TestShardingMetaGenerator.MODEL_AXIS_NAME
}), ({
len(out_shape) - 1: TestShardingMetaGenerator.MODEL_AXIS_NAME
}), {TestShardingMetaGenerator.MODEL_AXIS_NAME: mesh_names[0]},
[a_shape, b_new_shape], [out_shape])
if sharding_type == ShardingType.TP_ROW:
a_new_shape = (*a_shape[:-1], mesh_shape[0], a_shape[-1] // mesh_shape[0])
b_new_shape = (mesh_shape[0], b_shape[0] // mesh_shape[0], b_shape[1])
return ShardingMeta(({
len(a_new_shape) - 2: TestShardingMetaGenerator.MODEL_AXIS_NAME
}, {
0: TestShardingMetaGenerator.MODEL_AXIS_NAME
}), ({}), {TestShardingMetaGenerator.MODEL_AXIS_NAME: mesh_names[0]},
[a_new_shape, b_new_shape], [out_shape])
if sharding_type == ShardingType.DP_TP_COL:
a_new_shape = (*a_shape[:batch_dim_of_a], mesh_shape[0],
a_shape[batch_dim_of_a] // mesh_shape[0],
*a_shape[batch_dim_of_a + 1:])
b_new_shape = (b_shape[0], mesh_shape[1], b_shape[1] // mesh_shape[1])
return ShardingMeta(
({
batch_dim_of_a: TestShardingMetaGenerator.BATCH_AXIS_NAME
}, {
1: TestShardingMetaGenerator.MODEL_AXIS_NAME
}), ({
batch_dim_of_a: TestShardingMetaGenerator.BATCH_AXIS_NAME,
len(out_shape): TestShardingMetaGenerator.MODEL_AXIS_NAME
}), {
TestShardingMetaGenerator.BATCH_AXIS_NAME: mesh_names[0],
TestShardingMetaGenerator.MODEL_AXIS_NAME: mesh_names[1]
}, [a_new_shape, b_new_shape], [out_shape])
if sharding_type == ShardingType.DP_TP_ROW:
a_new_shape = (*a_shape[:batch_dim_of_a], mesh_shape[0],
a_shape[batch_dim_of_a] // mesh_shape[0],
*a_shape[batch_dim_of_a + 1:-1], mesh_shape[1],
a_shape[-1] // mesh_shape[1])
b_new_shape = (mesh_shape[1], b_shape[0] // mesh_shape[1], b_shape[1])
return ShardingMeta(
({
batch_dim_of_a: TestShardingMetaGenerator.BATCH_AXIS_NAME,
len(a_new_shape) - 2: TestShardingMetaGenerator.MODEL_AXIS_NAME
}, {
0: TestShardingMetaGenerator.MODEL_AXIS_NAME
}), ({
batch_dim_of_a: TestShardingMetaGenerator.BATCH_AXIS_NAME
}), {
TestShardingMetaGenerator.BATCH_AXIS_NAME: mesh_names[0],
TestShardingMetaGenerator.MODEL_AXIS_NAME: mesh_names[1]
}, [a_new_shape, b_new_shape], [out_shape])
return None
devices = np.asarray(jax.devices()[:DEVICE_COUNT]).reshape(*mesh_shape)
with global_shard_guard(_get_sharding_resource(mesh_names, sharding_type)):
with jax.sharding.Mesh(devices, mesh_names):
test_sm = get_dot_sharding_meta(
sharding_type,
a_shape,
b_shape,
batch_dim_of_a,
model_dim_of_a,
model_dim_of_b,
contracting_dims,
dp_axis_name=TestShardingMetaGenerator.BATCH_AXIS_NAME,
tp_axis_name=TestShardingMetaGenerator.MODEL_AXIS_NAME)
assert test_sm == get_ref_sm()
@pytest.mark.parametrize('mesh_shape,mesh_names,sharding_type', MESH_CONFIG)
@pytest.mark.parametrize('input_shape', [(64, 128, 256), (128, 64, 512)])
@pytest.mark.parametrize('other_shape', [(256,), (512,)])
@pytest.mark.parametrize('batch_dim', [0, 1])
@pytest.mark.skipif(not is_devices_enough(DEVICE_COUNT), reason='Num of GPU is not enough')
def test_elementwise(self, mesh_shape, mesh_names, sharding_type, input_shape, other_shape,
batch_dim):
def get_ref_sm():
need_assert = True
ref_sharding_meta = None
if input_shape[-1] != other_shape[0]:
need_assert = True
ref_sharding_meta = None
            elif sharding_type in (ShardingType.DP_TP_COL, ShardingType.DP):
need_assert = False
input_new_shape = (*input_shape[:batch_dim], mesh_shape[0], -1,
*input_shape[batch_dim + 1:])
ref_sharding_meta = ShardingMeta(({
batch_dim: TestShardingMetaGenerator.BATCH_AXIS_NAME
}, {}), ({
batch_dim: TestShardingMetaGenerator.BATCH_AXIS_NAME
}), {TestShardingMetaGenerator.BATCH_AXIS_NAME: mesh_names[0]},
[input_new_shape, other_shape], [input_shape])
elif sharding_type is ShardingType.TP_COL:
need_assert = False
ref_sharding_meta = ShardingMeta(({}, {}), ({}), {}, [input_shape, other_shape],
[input_shape])
elif sharding_type is ShardingType.TP_ROW:
need_assert = False
input_new_shape = (*input_shape[:-1], mesh_shape[0], -1)
other_new_shape = (mesh_shape[0], -1)
ref_sharding_meta = ShardingMeta(({
len(input_new_shape) - 2: TestShardingMetaGenerator.MODEL_AXIS_NAME
}, {
0: TestShardingMetaGenerator.MODEL_AXIS_NAME
}), ({
len(input_new_shape) - 2: TestShardingMetaGenerator.MODEL_AXIS_NAME
}), {TestShardingMetaGenerator.MODEL_AXIS_NAME: mesh_names[0]},
[input_new_shape, other_new_shape], [input_shape])
elif sharding_type is ShardingType.DP_TP_ROW:
need_assert = False
input_new_shape = (*input_shape[:batch_dim], mesh_shape[0], -1,
*input_shape[batch_dim + 1:-1], mesh_shape[1],
input_shape[-1] // mesh_shape[1])
other_new_shape = (mesh_shape[0], -1)
ref_sharding_meta = ShardingMeta(
({
batch_dim: TestShardingMetaGenerator.BATCH_AXIS_NAME,
len(input_new_shape) - 2: TestShardingMetaGenerator.MODEL_AXIS_NAME
}, {
0: TestShardingMetaGenerator.MODEL_AXIS_NAME
}), ({
batch_dim: TestShardingMetaGenerator.BATCH_AXIS_NAME,
len(input_new_shape) - 2: TestShardingMetaGenerator.MODEL_AXIS_NAME
}), {
TestShardingMetaGenerator.BATCH_AXIS_NAME: mesh_names[0],
TestShardingMetaGenerator.MODEL_AXIS_NAME: mesh_names[1]
}, [input_new_shape, other_new_shape], [input_shape])
return ref_sharding_meta, need_assert
devices = np.asarray(jax.devices()[:DEVICE_COUNT]).reshape(*mesh_shape)
with global_shard_guard(_get_sharding_resource(mesh_names, sharding_type)):
with jax.sharding.Mesh(devices, mesh_names):
ref_sm, need_assert = get_ref_sm()
try:
test_sm = get_elementwise_sharding_meta(
sharding_type,
input_shape,
other_shape,
batch_dim,
dp_axis_name=TestShardingMetaGenerator.BATCH_AXIS_NAME,
tp_axis_name=TestShardingMetaGenerator.MODEL_AXIS_NAME)
assert not need_assert
assert test_sm == ref_sm
except (NotImplementedError, AssertionError) as e:
assert need_assert, f"{e.args}"
| TransformerEngine-main | tests/jax/test_sharding.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
from functools import partial
from typing import Dict
import flax
import jax
import jax.numpy as jnp
from praxis import pax_fiddle
from praxis.base_layer import WeightInit, DEFAULT_INIT_MUTABLE_LIST
import pytest
from transformer_engine.common.recipe import DelayedScaling, Format
from transformer_engine.jax import fp8_autocast, update_fp8_metas, update_collections
from transformer_engine.jax.flax import DenseGeneral, LayerNormDenseGeneral
from transformer_engine.jax.flax import LayerNorm as flax_LayerNorm
from transformer_engine.jax.flax import LayerNormMLP as flax_LayerNormMLP
from transformer_engine.jax.flax import MultiHeadAttention as flax_MultiHeadAttention
from transformer_engine.jax.flax import RelativePositionBiases as flax_RelativePositionBiases
from transformer_engine.jax.flax import TransformerLayer as flax_TransformerLayer
from transformer_engine.jax.flax.module import Softmax
from transformer_engine.jax.fp8 import FP8Helper, is_fp8_available
from transformer_engine.jax.praxis import FusedSoftmax, LayerNorm
from transformer_engine.jax.praxis import LayerNormLinear, LayerNormMLP, Linear
from transformer_engine.jax.praxis import MultiHeadAttention, RelativePositionBiases
from transformer_engine.jax.praxis import TransformerEngineBaseLayer, TransformerLayer, TransformerLayerType
from transformer_engine.jax.softmax import SoftmaxType
from utils import assert_allclose
is_fp8_supported, reason = is_fp8_available()
DATA_SHAPE = [(128, 32, 512), (512, 32, 512)]
DTYPE = [jnp.float32, jnp.bfloat16]
ENABLE_FP8 = [False, True]
FP8_FORMATS = [Format.E4M3, Format.HYBRID]
def compare_dict(ref_fd, test_fd, rtol=1e-05, atol=1e-08):
for key in ref_fd:
assert key in test_fd, \
f"{key} not found in test dict {test_fd}"
        assert isinstance(test_fd[key], type(ref_fd[key])), \
            f"The data type does not match between ref and test " \
            f"dict on {key=}"
if isinstance(ref_fd[key], Dict):
compare_dict(ref_fd[key], test_fd[key], rtol, atol)
else:
assert_allclose(ref_fd[key],
test_fd[key],
rtol=rtol,
atol=atol,
err_msg=f"{key=} is not close")
class TestLayer:
@staticmethod
def loss(inner_variables, *inner_inputs, module, mean_out=True):
outs = module.apply(inner_variables, *inner_inputs)
out = outs
if isinstance(outs, tuple):
# The first place of outs is the real output, others
# are auxiliary values.
out = outs[0]
return jnp.mean(out) if mean_out else out
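    # Differentiate the shared loss w.r.t. both the variables and the first input; under FP8
    # the weight-gradient tree also carries the FP8 meta collection, refreshed via update_fp8_metas.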
@staticmethod
def loss_and_grads(module, variables, *inputs):
grad_fn = jax.value_and_grad(TestLayer.loss, argnums=(0, 1))
loss_val, (wgrads, dgrad) = grad_fn(variables, *inputs, module=module)
if FP8Helper.is_fp8_enabled():
wgrads = update_fp8_metas(wgrads)
return loss_val, wgrads, dgrad
def input_getter(self, shape, dtype):
raise NotImplementedError
def get_layer_name(self):
raise NotImplementedError
def generate_praxis_p_and_flax_cls(self, dtype, attrs):
raise NotImplementedError
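    # Praxis wraps the Flax module under params/<layer_name>/cld, so copying the Flax params
    # into that subtree makes both implementations start from identical weights.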
def sync_variables(self, praxis_variables, flax_variables):
synced_praxis_variables = praxis_variables
lyr_name = self.get_layer_name()
synced_praxis_variables['params'][lyr_name]['cld'] = \
flax.core.unfreeze(flax_variables['params'])
return synced_praxis_variables, flax_variables
def sync_wgrads(self, praxis_wgrads, flax_wgrads):
synced_praxis_grads = praxis_wgrads
lyr_name = self.get_layer_name()
synced_praxis_grads['params'] = \
synced_praxis_grads['params'][lyr_name]['cld']
if FP8Helper.is_fp8_enabled():
synced_praxis_grads[FP8Helper.FP8_COLLECTION_NAME] = \
synced_praxis_grads[FP8Helper.FP8_COLLECTION_NAME][lyr_name]['cld']
return synced_praxis_grads, flax.core.unfreeze(flax_wgrads)
def forward_backward_runner(self,
data_shape,
dtype,
praxis_p,
flax_cls,
rtol=1e-05,
atol=1e-08):
init_key = jax.random.PRNGKey(seed=1234)
test_inputs = self.input_getter(data_shape, dtype)
praxis_layer = praxis_p.Instantiate()
        # This is a workaround to correctly enable FP8 meta generation for Praxis.
        # TODO (Ming Huang): Come up with a better solution.
mutable_list = DEFAULT_INIT_MUTABLE_LIST + [FP8Helper.FP8_COLLECTION_NAME]
praxis_variables = praxis_layer.init(init_key, *test_inputs, mutable=mutable_list)
flax_layer = flax_cls()
flax_variables = flax_layer.init(init_key, *test_inputs)
if "params_axes" in flax_variables:
flax_variables, _ = flax.core.pop(flax_variables, "params_axes")
if FP8Helper.is_fp8_enabled():
flax_variables, _ = flax.core.pop(flax_variables,
FP8Helper.FP8_COLLECTION_NAME + "_axes")
praxis_variables, flax_variables = self.sync_variables(praxis_variables, flax_variables)
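        # With FP8 enabled, iterate several times so the amax history and scales stabilize;
        # gradients from each step are folded back into the variables before the final comparison.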
iter_times = 5 if FP8Helper.is_fp8_enabled() else 1
for _ in range(iter_times):
praxis_loss, praxis_wgrads, praxis_dgrad = \
TestLayer.loss_and_grads(praxis_layer, praxis_variables, *test_inputs)
flax_loss, flax_wgrads, flax_dgrad = \
TestLayer.loss_and_grads(flax_layer, flax_variables, *test_inputs)
if FP8Helper.is_fp8_enabled():
praxis_wgrads.pop('params')
praxis_variables = update_collections(praxis_wgrads, praxis_variables)
flax_wgrads, _ = flax.core.pop(flax_wgrads, 'params')
flax_variables = update_collections(flax_wgrads, flax_variables)
praxis_loss, praxis_wgrads, praxis_dgrad = \
TestLayer.loss_and_grads(praxis_layer, praxis_variables, *test_inputs)
flax_loss, flax_wgrads, flax_dgrad = \
TestLayer.loss_and_grads(flax_layer, flax_variables, *test_inputs)
assert_allclose(praxis_loss, flax_loss, rtol=rtol, atol=atol)
assert_allclose(praxis_dgrad, flax_dgrad, rtol=rtol, atol=atol)
praxis_wgrads, flax_wgrads = self.sync_wgrads(praxis_wgrads, flax_wgrads)
compare_dict(praxis_wgrads, flax_wgrads, rtol=rtol, atol=atol)
class LayerNormAttr:
LN_TYPE = 'layernorm_type'
ZERO_CEN = 'zero_centered_gamma'
ATTRS = [{
LN_TYPE: "layernorm",
ZERO_CEN: False
}, {
LN_TYPE: "layernorm",
ZERO_CEN: True
}, {
LN_TYPE: "rmsnorm",
ZERO_CEN: False
}]
class TestLayerNorm(TestLayer):
def input_getter(self, shape, dtype):
data_key = jax.random.PRNGKey(seed=1234)
return (jax.random.normal(data_key, shape, dtype),)
def get_layer_name(self):
return 'layer_norm'
def generate_praxis_p_and_flax_cls(self, dtype, attrs):
layernorm_type = attrs[LayerNormAttr.LN_TYPE]
zero_centered_gamma = attrs[LayerNormAttr.ZERO_CEN]
scale_init = None
bias_init = WeightInit.Constant(0.0)
transpose_batch_sequence = False
praxis_p = pax_fiddle.Config(LayerNorm,
name='layer_norm',
dtype=dtype,
layernorm_type=layernorm_type,
zero_centered_gamma=zero_centered_gamma,
scale_init=scale_init,
bias_init=bias_init,
transpose_batch_sequence=transpose_batch_sequence)
flax_cls = partial(flax_LayerNorm,
layernorm_type=layernorm_type,
zero_centered_gamma=zero_centered_gamma,
scale_init=scale_init,
bias_init=TransformerEngineBaseLayer.generate_params_init(
"ln_bias", bias_init),
dtype=dtype,
transpose_batch_sequence=transpose_batch_sequence)
return praxis_p, flax_cls
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', LayerNormAttr.ATTRS)
def test_forward_backward(self, data_shape, dtype, attrs, rtol=1e-05, atol=1e-08):
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
self.forward_backward_runner(data_shape, dtype, praxis_p, flax_cls, rtol, atol)
class FusedSoftmaxAttr:
SCALE_FACTOR = 'scale_factor'
ST_TYPE = 'softmax_type'
ATTRS = [{
SCALE_FACTOR: 0.0,
ST_TYPE: SoftmaxType.SCALED
}, {
SCALE_FACTOR: 0.0,
ST_TYPE: SoftmaxType.SCALED_MASKED
}, {
SCALE_FACTOR: 0.0,
ST_TYPE: SoftmaxType.SCALED_UPPER_TRIANG_MASKED
}]
class TestFusedSoftmax(TestLayer):
def input_getter(self, shape, dtype):
data_key = jax.random.PRNGKey(seed=1234)
return jax.random.normal(data_key, shape, dtype), \
jnp.ones(shape, dtype=jnp.uint8) # Masks
def generate_praxis_p_and_flax_cls(self, dtype, attrs):
scale_factor = attrs[FusedSoftmaxAttr.SCALE_FACTOR]
softmax_type = attrs[FusedSoftmaxAttr.ST_TYPE]
praxis_p = pax_fiddle.Config(FusedSoftmax,
name='fused_softmax',
scale_factor=scale_factor,
softmax_type=softmax_type)
flax_cls = partial(Softmax, scale_factor=scale_factor, softmax_type=softmax_type)
return praxis_p, flax_cls
def sync_variables(self, praxis_variables, flax_variables):
return praxis_variables, flax_variables
def sync_wgrads(self, praxis_wgrads, flax_wgrads):
return praxis_wgrads, flax_wgrads
@pytest.mark.parametrize('data_shape', [(32, 1, 128, 128), (32, 1, 512, 128)])
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', FusedSoftmaxAttr.ATTRS)
def test_forward_backward(self, data_shape, dtype, attrs, rtol=1e-05, atol=1e-08):
if (attrs[FusedSoftmaxAttr.ST_TYPE] == SoftmaxType.SCALED_UPPER_TRIANG_MASKED) and \
(data_shape[-2] != data_shape[-1]):
            pass    # Skip: not supported for non-square inputs.
else:
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
self.forward_backward_runner(data_shape, dtype, praxis_p, flax_cls, rtol, atol)
class LinearAttr:
FEATURE = 'features'
USE_BIAS = 'use_bias'
ATTRS = [{
FEATURE: 512,
USE_BIAS: False
}, {
FEATURE: 512,
USE_BIAS: True
}, {
FEATURE: 1024,
USE_BIAS: False
}, {
FEATURE: 1024,
USE_BIAS: True
}]
class TestLinear(TestLayer):
def input_getter(self, shape, dtype):
data_key = jax.random.PRNGKey(seed=1234)
return (jax.random.normal(data_key, shape, dtype),)
def get_layer_name(self):
return 'linear'
def generate_praxis_p_and_flax_cls(self, dtype, attrs):
out_features = attrs[LinearAttr.FEATURE]
kernel_init = WeightInit.Gaussian(1.0)
use_bias = attrs[LinearAttr.USE_BIAS]
bias_init = WeightInit.Constant(0.0)
axis = -1
transpose_batch_sequence = False
praxis_p = pax_fiddle.Config(Linear,
name='linear',
dtype=dtype,
out_features=out_features,
params_init=kernel_init,
use_bias=use_bias,
bias_init=bias_init,
axis=axis,
transpose_batch_sequence=transpose_batch_sequence)
flax_cls = partial(
DenseGeneral,
features=out_features,
kernel_init=TransformerEngineBaseLayer.generate_params_init("kernel", kernel_init),
use_bias=use_bias,
bias_init=TransformerEngineBaseLayer.generate_params_init("bias", bias_init),
axis=axis,
dtype=dtype,
transpose_batch_sequence=transpose_batch_sequence)
return praxis_p, flax_cls
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', LinearAttr.ATTRS)
def test_forward_backward(self, data_shape, dtype, attrs, rtol=1e-05, atol=1e-08):
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
self.forward_backward_runner(data_shape, dtype, praxis_p, flax_cls, rtol, atol)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', LinearAttr.ATTRS)
@pytest.mark.parametrize('fp8_format', FP8_FORMATS)
def test_forward_backward_fp8(self,
data_shape,
dtype,
attrs,
fp8_format,
rtol=1e-05,
atol=1e-08):
ds = DelayedScaling(fp8_format=fp8_format)
with fp8_autocast(enabled=True, fp8_recipe=ds):
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
self.forward_backward_runner(data_shape, dtype, praxis_p, flax_cls, rtol, atol)
class LayerNormLinearAttr:
FEATURE = 'features'
USE_BIAS = 'use_bias'
ENABLE_LN = 'enable_layernorm'
LN_TYPE = 'layernorm_type'
ZERO_CEN = 'zero_centered_gamma'
ATTRS = [{
FEATURE: 512,
USE_BIAS: True,
ENABLE_LN: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False
}, {
FEATURE: 512,
USE_BIAS: True,
ENABLE_LN: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False
}, {
FEATURE: 512,
USE_BIAS: True,
ENABLE_LN: True,
LN_TYPE: 'layernorm',
ZERO_CEN: True
}, {
FEATURE: 512,
USE_BIAS: True,
ENABLE_LN: True,
LN_TYPE: 'layernorm',
ZERO_CEN: True
}, {
FEATURE: 512,
USE_BIAS: True,
ENABLE_LN: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False
}, {
FEATURE: 512,
USE_BIAS: True,
ENABLE_LN: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False
}, {
FEATURE: 512,
USE_BIAS: True,
ENABLE_LN: False,
LN_TYPE: 'layernorm',
ZERO_CEN: False
}]
class TestLayerNormLinear(TestLayer):
def input_getter(self, shape, dtype):
data_key = jax.random.PRNGKey(seed=1234)
return (jax.random.normal(data_key, shape, dtype),)
def get_layer_name(self):
return 'ln_linear'
def generate_praxis_p_and_flax_cls(self, dtype, attrs):
out_features = attrs[LayerNormLinearAttr.FEATURE]
enable_layernorm = attrs[LayerNormLinearAttr.ENABLE_LN]
layernorm_type = attrs[LayerNormLinearAttr.LN_TYPE]
zero_centered_gamma = attrs[LayerNormLinearAttr.ZERO_CEN]
kernel_init = WeightInit.Gaussian(1.0)
use_bias = attrs[LayerNormLinearAttr.USE_BIAS]
bias_init = WeightInit.Constant(0.0)
axis = -1
transpose_batch_sequence = False
praxis_p = pax_fiddle.Config(LayerNormLinear,
name='ln_linear',
dtype=dtype,
out_features=out_features,
enable_layernorm=enable_layernorm,
layernorm_type=layernorm_type,
zero_centered_gamma=zero_centered_gamma,
params_init=kernel_init,
use_bias=use_bias,
bias_init=bias_init,
axis=axis,
transpose_batch_sequence=transpose_batch_sequence)
flax_cls = partial(
LayerNormDenseGeneral,
features=out_features,
enable_layernorm=enable_layernorm,
layernorm_type=layernorm_type,
zero_centered_gamma=zero_centered_gamma,
kernel_init=TransformerEngineBaseLayer.generate_params_init("kernel", kernel_init),
use_bias=use_bias,
bias_init=TransformerEngineBaseLayer.generate_params_init("bias", bias_init),
axis=axis,
dtype=dtype,
transpose_batch_sequence=transpose_batch_sequence)
return praxis_p, flax_cls
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', LayerNormLinearAttr.ATTRS)
def test_forward_backward(self, data_shape, dtype, attrs, rtol=1e-05, atol=1e-08):
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
self.forward_backward_runner(data_shape, dtype, praxis_p, flax_cls, rtol, atol)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', LayerNormLinearAttr.ATTRS)
@pytest.mark.parametrize('fp8_format', FP8_FORMATS)
def test_forward_backward_fp8(self,
data_shape,
dtype,
attrs,
fp8_format,
rtol=1e-05,
atol=1e-08):
ds = DelayedScaling(fp8_format=fp8_format)
with fp8_autocast(enabled=True, fp8_recipe=ds):
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
self.forward_backward_runner(data_shape, dtype, praxis_p, flax_cls, rtol, atol)
class LayerNormMLPAttr:
INTERMEDIATE_DIM = 'intermediate_dim'
USE_BIAS = 'use_bias'
ENABLE_LN = 'enable_layernorm'
LN_TYPE = 'layernorm_type'
ZERO_CEN = 'zero_centered_gamma'
ACTIVATION = 'activations'
ATTRS = [{
INTERMEDIATE_DIM: 2048,
USE_BIAS: True,
ENABLE_LN: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False,
ACTIVATION: ('relu',)
}, {
INTERMEDIATE_DIM: 2048,
USE_BIAS: True,
ENABLE_LN: True,
LN_TYPE: 'layernorm',
ZERO_CEN: True,
ACTIVATION: ('relu',)
}, {
INTERMEDIATE_DIM: 2048,
USE_BIAS: True,
ENABLE_LN: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ACTIVATION: ('relu',)
}, {
INTERMEDIATE_DIM: 2048,
USE_BIAS: True,
ENABLE_LN: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ACTIVATION: ('gelu', 'linear')
}, {
INTERMEDIATE_DIM: 2048,
USE_BIAS: True,
ENABLE_LN: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ACTIVATION: ('gelu', 'linear')
}]
class TestLayerNormMLP(TestLayer):
def input_getter(self, shape, dtype):
data_key = jax.random.PRNGKey(seed=1234)
return (jax.random.normal(data_key, shape, dtype),)
def get_layer_name(self):
return 'ln_mlp'
def generate_praxis_p_and_flax_cls(self, dtype, attrs):
intermediate_dim = attrs[LayerNormMLPAttr.INTERMEDIATE_DIM]
enable_layernorm = attrs[LayerNormMLPAttr.ENABLE_LN]
layernorm_type = attrs[LayerNormMLPAttr.LN_TYPE]
zero_centered_gamma = attrs[LayerNormMLPAttr.ZERO_CEN]
kernel_init = WeightInit.Gaussian(1.0)
use_bias = attrs[LayerNormMLPAttr.USE_BIAS]
bias_init = WeightInit.Constant(0.0)
activations = attrs[LayerNormMLPAttr.ACTIVATION]
axis = -1
transpose_batch_sequence = False
praxis_p = pax_fiddle.Config(LayerNormMLP,
name='ln_mlp',
dtype=dtype,
intermediate_dim=intermediate_dim,
enable_layernorm=enable_layernorm,
layernorm_type=layernorm_type,
zero_centered_gamma=zero_centered_gamma,
params_init=kernel_init,
use_bias=use_bias,
bias_init=bias_init,
activations=activations,
intermediate_dropout_rate=0.0,
axis=axis,
transpose_batch_sequence=transpose_batch_sequence)
flax_cls = partial(
flax_LayerNormMLP,
intermediate_dim=intermediate_dim,
enable_layernorm=enable_layernorm,
layernorm_type=layernorm_type,
zero_centered_gamma=zero_centered_gamma,
kernel_init=TransformerEngineBaseLayer.generate_params_init("kernel", kernel_init),
use_bias=use_bias,
bias_init=TransformerEngineBaseLayer.generate_params_init("bias", bias_init),
activations=activations,
intermediate_dropout_rate=0.0,
axis=axis,
dtype=dtype,
transpose_batch_sequence=transpose_batch_sequence)
return praxis_p, flax_cls
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', LayerNormMLPAttr.ATTRS)
def test_forward_backward(self, data_shape, dtype, attrs, rtol=1e-05, atol=1e-08):
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
self.forward_backward_runner(data_shape, dtype, praxis_p, flax_cls, rtol, atol)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', LayerNormMLPAttr.ATTRS)
@pytest.mark.parametrize('fp8_format', FP8_FORMATS)
def test_forward_backward_fp8(self,
data_shape,
dtype,
attrs,
fp8_format,
rtol=1e-05,
atol=1e-08):
ds = DelayedScaling(fp8_format=fp8_format)
with fp8_autocast(enabled=True, fp8_recipe=ds):
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
self.forward_backward_runner(data_shape, dtype, praxis_p, flax_cls, rtol, atol)
class TestRelativePositionBias(TestLayer):
def get_layer_name(self):
return 'relative_position_bias'
def generate_praxis_p_and_flax_cls(self, dtype, attrs):
num_buckets = 32
max_distance = 128
num_attention_heads = 64
rb_stddev = (num_attention_heads * num_buckets)**-0.5
embedding_init = WeightInit.Gaussian(rb_stddev)
praxis_p = pax_fiddle.Config(RelativePositionBiases,
name='relative_position_bias',
dtype=dtype,
num_buckets=num_buckets,
max_distance=max_distance,
num_attention_heads=num_attention_heads,
embedding_init=embedding_init)
flax_cls = partial(flax_RelativePositionBiases,
num_buckets=num_buckets,
max_distance=max_distance,
num_attention_heads=num_attention_heads,
embedding_init=TransformerEngineBaseLayer.generate_params_init(
"rel_embedding", embedding_init),
dtype=dtype)
return praxis_p, flax_cls
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', [{}])
def test_forward(self, data_shape, dtype, attrs, rtol=1e-05, atol=1e-08):
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
init_key = jax.random.PRNGKey(seed=1234)
test_inputs = [(128, 128, True), (128, 128, False)]
for test_input in test_inputs:
praxis_layer = praxis_p.Instantiate()
praxis_variables = praxis_layer.init(init_key, *test_input)
flax_layer = flax_cls()
flax_variables = flax_layer.init(init_key, *test_input)
if "params_axes" in flax_variables:
flax_variables, _ = flax.core.pop(flax_variables, "params_axes")
if FP8Helper.is_fp8_enabled():
flax_variables, _ = flax.core.pop(flax_variables,
FP8Helper.FP8_COLLECTION_NAME + "_axes")
praxis_variables, flax_variables = self.sync_variables(praxis_variables, flax_variables)
            praxis_loss = \
TestLayer.loss(praxis_variables, *test_input, module=praxis_layer, mean_out=False)
flax_loss = \
TestLayer.loss(flax_variables, *test_input, module=flax_layer, mean_out=False)
assert_allclose(praxis_loss, flax_loss, rtol=rtol, atol=atol)
class MultiHeadAttnAttr:
USE_BIAS = 'use_bias'
LN_TYPE = 'layernorm_type'
ATTN_MASK_TYPE = 'attn_mask_type'
ZERO_CEN = 'zero_centered_gamma'
ATTRS = [{
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False,
ATTN_MASK_TYPE: 'padding'
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: True,
ATTN_MASK_TYPE: 'padding'
}, {
USE_BIAS: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ATTN_MASK_TYPE: 'padding'
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False,
ATTN_MASK_TYPE: 'causal'
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: True,
ATTN_MASK_TYPE: 'causal'
}, {
USE_BIAS: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ATTN_MASK_TYPE: 'causal'
}]
class TestMultiHeadAttn(TestLayer):
def input_getter(self, shape, dtype):
data_key = jax.random.PRNGKey(seed=1234)
return (jax.random.normal(data_key, shape,
dtype), jax.random.normal(data_key, shape, dtype))
def get_layer_name(self):
return 'multi_head_attn'
def generate_praxis_p_and_flax_cls(self, dtype, attrs):
head_dim = 64
num_heads = 16
layernorm_type = attrs[MultiHeadAttnAttr.LN_TYPE]
zero_centered_gamma = attrs[MultiHeadAttnAttr.ZERO_CEN]
kernel_init = WeightInit.Gaussian(1.0)
use_bias = attrs[MultiHeadAttnAttr.USE_BIAS]
bias_init = WeightInit.Constant(0.0)
apply_residual_connection_post_layernorm = False
output_layernorm = False
attn_mask_type = attrs[MultiHeadAttnAttr.ATTN_MASK_TYPE]
fuse_qkv: bool = True
transpose_batch_sequence = True
scale_attn_logits = False
scaled_query_init = True
float32_logits = False
praxis_p = pax_fiddle.Config(
MultiHeadAttention,
name='mha',
dtype=dtype,
head_dim=head_dim,
num_heads=num_heads,
layernorm_type=layernorm_type,
zero_centered_gamma=zero_centered_gamma,
params_init=kernel_init,
use_bias=use_bias,
bias_init=bias_init,
apply_residual_connection_post_layernorm=apply_residual_connection_post_layernorm,
output_layernorm=output_layernorm,
attn_mask_type=attn_mask_type,
fuse_qkv=fuse_qkv,
transpose_batch_sequence=transpose_batch_sequence,
scale_attn_logits=scale_attn_logits,
scaled_query_init=scaled_query_init,
float32_logits=float32_logits)
flax_cls = partial(
flax_MultiHeadAttention,
dtype=dtype,
head_dim=head_dim,
num_heads=num_heads,
layernorm_type=layernorm_type,
zero_centered_gamma=zero_centered_gamma,
kernel_init=TransformerEngineBaseLayer.generate_params_init("kernel", kernel_init),
use_bias=use_bias,
bias_init=TransformerEngineBaseLayer.generate_params_init("bias", bias_init),
apply_residual_connection_post_layernorm=apply_residual_connection_post_layernorm,
output_layernorm=output_layernorm,
attn_mask_type=attn_mask_type,
fuse_qkv=fuse_qkv,
transpose_batch_sequence=transpose_batch_sequence,
scale_attn_logits=scale_attn_logits,
scaled_query_init=scaled_query_init,
float32_logits=float32_logits)
return praxis_p, flax_cls
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', MultiHeadAttnAttr.ATTRS)
def test_forward_backward(self, data_shape, dtype, attrs, rtol=1e-05, atol=1e-08):
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
self.forward_backward_runner(data_shape, dtype, praxis_p, flax_cls, rtol, atol)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', MultiHeadAttnAttr.ATTRS)
@pytest.mark.parametrize('fp8_format', FP8_FORMATS)
def test_forward_backward_fp8(self,
data_shape,
dtype,
attrs,
fp8_format,
rtol=1e-05,
atol=1e-08):
ds = DelayedScaling(fp8_format=fp8_format)
with fp8_autocast(enabled=True, fp8_recipe=ds):
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
self.forward_backward_runner(data_shape, dtype, praxis_p, flax_cls, rtol, atol)
class TransformerLayerAttr:
USE_BIAS = 'use_bias'
LN_TYPE = 'layernorm_type'
ACTIVATION = 'activations'
LYR_TYPE = 'layer_type'
ZERO_CEN = 'zero_centered_gamma'
TRANSPOSE_BS = 'transpose_batch_sequence'
ATTRS = [{
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False,
ACTIVATION: ('relu',),
LYR_TYPE: TransformerLayerType.ENCODER,
TRANSPOSE_BS: True
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False,
ACTIVATION: ('relu',),
LYR_TYPE: TransformerLayerType.ENCODER,
TRANSPOSE_BS: False
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: True,
ACTIVATION: ('relu',),
LYR_TYPE: TransformerLayerType.ENCODER,
TRANSPOSE_BS: True
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: True,
ACTIVATION: ('relu',),
LYR_TYPE: TransformerLayerType.ENCODER,
TRANSPOSE_BS: False
}, {
USE_BIAS: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ACTIVATION: ('relu',),
LYR_TYPE: TransformerLayerType.ENCODER,
TRANSPOSE_BS: True
}, {
USE_BIAS: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ACTIVATION: ('relu',),
LYR_TYPE: TransformerLayerType.ENCODER,
TRANSPOSE_BS: False
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: True,
ACTIVATION: ('relu',),
LYR_TYPE: TransformerLayerType.DECODER,
TRANSPOSE_BS: True
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: True,
ACTIVATION: ('relu',),
LYR_TYPE: TransformerLayerType.DECODER,
TRANSPOSE_BS: False
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False,
ACTIVATION: ('relu',),
LYR_TYPE: TransformerLayerType.DECODER,
TRANSPOSE_BS: True
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False,
ACTIVATION: ('relu',),
LYR_TYPE: TransformerLayerType.DECODER,
TRANSPOSE_BS: False
}, {
USE_BIAS: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ACTIVATION: ('relu',),
LYR_TYPE: TransformerLayerType.DECODER,
TRANSPOSE_BS: True
}, {
USE_BIAS: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ACTIVATION: ('relu',),
LYR_TYPE: TransformerLayerType.DECODER,
TRANSPOSE_BS: False
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False,
ACTIVATION: ('gelu', 'linear'),
LYR_TYPE: TransformerLayerType.ENCODER,
TRANSPOSE_BS: True
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False,
ACTIVATION: ('gelu', 'linear'),
LYR_TYPE: TransformerLayerType.ENCODER,
TRANSPOSE_BS: False
}, {
USE_BIAS: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ACTIVATION: ('gelu', 'linear'),
LYR_TYPE: TransformerLayerType.ENCODER,
TRANSPOSE_BS: True
}, {
USE_BIAS: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ACTIVATION: ('gelu', 'linear'),
LYR_TYPE: TransformerLayerType.ENCODER,
TRANSPOSE_BS: False
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False,
ACTIVATION: ('gelu', 'linear'),
LYR_TYPE: TransformerLayerType.DECODER,
TRANSPOSE_BS: True
}, {
USE_BIAS: True,
LN_TYPE: 'layernorm',
ZERO_CEN: False,
ACTIVATION: ('gelu', 'linear'),
LYR_TYPE: TransformerLayerType.DECODER,
TRANSPOSE_BS: False
}, {
USE_BIAS: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ACTIVATION: ('gelu', 'linear'),
LYR_TYPE: TransformerLayerType.DECODER,
TRANSPOSE_BS: True
}, {
USE_BIAS: True,
LN_TYPE: 'rmsnorm',
ZERO_CEN: False,
ACTIVATION: ('gelu', 'linear'),
LYR_TYPE: TransformerLayerType.DECODER,
TRANSPOSE_BS: False
}]
class TestTransformer(TestLayer):
def input_getter(self, shape, dtype):
data_key = jax.random.PRNGKey(seed=1234)
return (jax.random.normal(data_key, shape,
dtype), jax.random.normal(data_key, shape, dtype))
def get_layer_name(self):
return 'transformerlayer'
def generate_praxis_p_and_flax_cls(self, dtype, attrs):
hidden_size = 512
mlp_hidden_size = 2048
num_attention_heads = 8
layernorm_type = attrs[TransformerLayerAttr.LN_TYPE]
hidden_dropout = 0.0
attention_dropout = 0.0
mlp_activations = attrs[TransformerLayerAttr.ACTIVATION]
kernel_init = WeightInit.Gaussian(1.0)
use_bias = attrs[TransformerLayerAttr.USE_BIAS]
bias_init = WeightInit.Constant(0.0)
layer_type = attrs[TransformerLayerAttr.LYR_TYPE]
enable_relative_embedding = True
relative_embedding = pax_fiddle.Config(RelativePositionBiases,
num_attention_heads=num_attention_heads)
drop_path = 0.0
transpose_batch_sequence = attrs[TransformerLayerAttr.TRANSPOSE_BS]
rel_embedding_init = RelativePositionBiases.generate_embedding_init(
relative_embedding.embedding_init, relative_embedding.num_attention_heads,
relative_embedding.num_buckets)
relative_embedding_flax_module = flax_RelativePositionBiases(
num_buckets=relative_embedding.num_buckets,
max_distance=relative_embedding.max_distance,
num_attention_heads=relative_embedding.num_attention_heads,
embedding_init=TransformerEngineBaseLayer.generate_params_init(
"rel_embedding", rel_embedding_init),
embedding_axes=relative_embedding.embedding_axes,
dtype=relative_embedding.dtype)
praxis_p = pax_fiddle.Config(TransformerLayer,
name='transformer_layer',
params_init=kernel_init,
dtype=dtype,
hidden_size=hidden_size,
mlp_hidden_size=mlp_hidden_size,
num_attention_heads=num_attention_heads,
layernorm_type=layernorm_type,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
mlp_activations=mlp_activations,
use_bias=use_bias,
bias_init=bias_init,
layer_type=layer_type,
enable_relative_embedding=enable_relative_embedding,
relative_embedding=relative_embedding,
drop_path=drop_path,
transpose_batch_sequence=transpose_batch_sequence)
flax_cls = partial(flax_TransformerLayer,
dtype=dtype,
hidden_size=hidden_size,
mlp_hidden_size=mlp_hidden_size,
num_attention_heads=num_attention_heads,
layernorm_type=layernorm_type,
hidden_dropout=hidden_dropout,
attention_dropout=attention_dropout,
mlp_activations=mlp_activations,
mha_kernel_init=TransformerEngineBaseLayer.generate_params_init(
"mha_kernel", kernel_init),
mlp_kernel_init=TransformerEngineBaseLayer.generate_params_init(
"mlp_kernel", kernel_init),
use_bias=use_bias,
bias_init=TransformerEngineBaseLayer.generate_params_init(
"bias", bias_init),
layer_type=layer_type,
enable_relative_embedding=enable_relative_embedding,
relative_embedding=relative_embedding_flax_module,
drop_path=drop_path,
transpose_batch_sequence=transpose_batch_sequence)
return praxis_p, flax_cls
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', TransformerLayerAttr.ATTRS)
def test_forward_backward(self, data_shape, dtype, attrs, rtol=1e-05, atol=1e-08):
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
self.forward_backward_runner(data_shape, dtype, praxis_p, flax_cls, rtol, atol)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('data_shape', DATA_SHAPE)
@pytest.mark.parametrize('dtype', DTYPE)
@pytest.mark.parametrize('attrs', TransformerLayerAttr.ATTRS)
@pytest.mark.parametrize('fp8_format', FP8_FORMATS)
def test_forward_backward_fp8(self,
data_shape,
dtype,
attrs,
fp8_format,
rtol=1e-05,
atol=1e-08):
ds = DelayedScaling(fp8_format=fp8_format)
with fp8_autocast(enabled=True, fp8_recipe=ds):
praxis_p, flax_cls = self.generate_praxis_p_and_flax_cls(dtype, attrs)
self.forward_backward_runner(data_shape, dtype, praxis_p, flax_cls, rtol, atol)
| TransformerEngine-main | tests/jax/test_praxis_layers.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import pytest
import jax.numpy as jnp
from jax.core import ShapedArray
from transformer_engine_jax import DType
from transformer_engine.jax.cpp_extensions import te_dtype_to_jax_dtype
from transformer_engine.jax.cpp_extensions import GemmPrimitive
SHAPES = [(256, 256, 512), (32, 32, 32), (16384, 1024, 2816), (16384, 2816, 1024),
(16384, 1024, 1024)]
NAMED_SHAPES = [{}, {
"data": 4
}, {
"data": 2
}, {
"model": 4
}, {
"model": 2
}, {
"data": 4,
"model": 2
}, {
"model": 4,
"data": 2
}]
DTYPE = [DType.kFloat32, DType.kFloat16, DType.kBFloat16]
TRANSPOSE = [True, False]
class TestGEMMShapeInfer:
@staticmethod
def _joint_named_shape(ns1, ns2):
output_named_shape = {**ns1}
need_assert = False
for key in ns2:
if key in output_named_shape and output_named_shape[key] != ns2[key]:
need_assert = True
else:
output_named_shape[key] = ns2[key]
return output_named_shape, need_assert
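    # Illustrative behaviour of the helper above (a sketch, not executed by the tests):
    #   _joint_named_shape({"data": 4}, {"model": 2}) -> ({"data": 4, "model": 2}, False)
    #   _joint_named_shape({"data": 4}, {"data": 2})  -> ({"data": 4}, True)  # size conflict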
@staticmethod
def _get_shapes(m, n, k, transa, transb):
        # te_gemm only supports TN layouts in column-major order, so the a and b shapes are
        # reordered here so that row-major matrices can be computed with the column-major kernel.
a = (m, k) if transa else (k, m)
b = (k, n) if transb else (n, k)
out = (n, m)
return a, b, out
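    # Example of the reordering above, assuming m, n, k = 16384, 1024, 2816 with
    # transa=False and transb=False:
    #   a -> (2816, 16384), b -> (1024, 2816), expected output -> (1024, 16384)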
@pytest.mark.parametrize('shapes', SHAPES)
@pytest.mark.parametrize('named_shape1', NAMED_SHAPES)
@pytest.mark.parametrize('named_shape2', NAMED_SHAPES)
@pytest.mark.parametrize('te_dtype', DTYPE)
@pytest.mark.parametrize('transa', TRANSPOSE)
@pytest.mark.parametrize('transb', TRANSPOSE)
def test_shape_infer(self, shapes, named_shape1, named_shape2, te_dtype, transa, transb):
a_shape, b_shape, out_shape = TestGEMMShapeInfer._get_shapes(*shapes, transa, transb)
dtype = te_dtype_to_jax_dtype(te_dtype)
mat_a = ShapedArray(a_shape, dtype, named_shape=named_shape1)
mat_b = ShapedArray(b_shape, dtype, named_shape=named_shape2)
scale_inv_a = ShapedArray((3, 1), jnp.float32)
scale_inv_b = ShapedArray((3, 1), jnp.float32)
ref_out_named_shape, need_assert = TestGEMMShapeInfer._joint_named_shape(
named_shape1, named_shape2)
ref_out = ShapedArray(out_shape, dtype, named_shape=ref_out_named_shape)
try:
test_out = GemmPrimitive.abstract(mat_a,
mat_b,
scale_inv_a,
scale_inv_b,
A_dtype=te_dtype,
B_dtype=te_dtype,
D_dtype=te_dtype,
transa=transa,
transb=transb,
use_split_accumulator=False)
assert not need_assert
assert ref_out == test_out
except AssertionError as ae:
assert need_assert, f"{ae.args}"
| TransformerEngine-main | tests/jax/test_custom_call_shape.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import functools
import operator
from typing import Any, Callable, Tuple, Sequence, Union, Iterable, Optional
import jax
import jax.numpy as jnp
import numpy as np
from flax import linen as nn
from flax.linen import partitioning as nn_partitioning
from jax import lax, vmap
from jax import nn as jax_nn
from jax import random as jax_random
PRNGKey = Any
Shape = Tuple[int, ...]
DType = jnp.dtype
Array = Any
PrecisionLike = Union[None, str, lax.Precision, Tuple[str, str], Tuple[lax.Precision,
lax.Precision]]
Initializer = Callable[[PRNGKey, Shape, DType], Array]
def is_devices_enough(required):
return len(jax.devices()) >= required
def _generate_drop_path_shape(shape: Sequence[int], batch_dim: int) -> Sequence[int]:
# Generate broadcast dims for drop_path.
drop_path_shape = list(range(0, len(shape)))
drop_path_shape.pop(batch_dim)
return drop_path_shape
def _normalize_axes(axes: Iterable[int], ndim: int) -> Tuple[int]:
# A tuple by convention. len(axes_tuple) then also gives the rank efficiently.
return tuple(ax if ax >= 0 else ndim + ax for ax in axes)
def _canonicalize_tuple(x):
if isinstance(x, Iterable):
return tuple(x)
return (x,)
def _convert_to_activation_function(fn_or_string: Union[str, Callable]) -> Callable:
"""Convert a string to an activation function."""
if fn_or_string == 'linear':
return lambda x: x
if isinstance(fn_or_string, str):
return getattr(nn, fn_or_string)
if callable(fn_or_string):
return fn_or_string
raise ValueError(f"don't know how to convert {fn_or_string} to an activation function")
def combine_masks(*masks: Optional[Array], dtype: DType = jnp.float32):
"""Combine attention masks.
Args:
*masks: set of attention mask arguments to combine, some can be None.
dtype: final mask dtype
Returns:
Combined mask, reduced by logical and, returns None if no masks given.
"""
masks = [m for m in masks if m is not None]
if not masks:
return None
assert all(map(lambda x: x.ndim == masks[0].ndim,
masks)), (f'masks must have same rank: {tuple(map(lambda x: x.ndim, masks))}')
mask, *other_masks = masks
for other_mask in other_masks:
mask = jnp.logical_and(mask, other_mask)
return mask.astype(dtype)
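# Usage sketch (not part of this module's tests): given two boolean masks of shape
# (batch, 1, q_len, kv_len), combine_masks(None, pad_mask, causal_mask) returns their
# elementwise logical AND cast to float32, while combine_masks(None, None) returns None.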
def combine_biases(*masks: Optional[Array]):
"""Combine attention biases.
Args:
*masks: set of attention bias arguments to combine, some can be None.
Returns:
Combined mask, reduced by summation, returns None if no masks given.
"""
masks = [m for m in masks if m is not None]
if not masks:
return None
assert all(map(lambda x: x.ndim == masks[0].ndim,
masks)), (f'masks must have same rank: {tuple(map(lambda x: x.ndim, masks))}')
mask, *other_masks = masks
for other_mask in other_masks:
mask = mask + other_mask
return mask
def dot_product_attention(query: Array,
key: Array,
value: Array,
transpose_batch_sequence: bool,
bias: Optional[Array] = None,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.,
deterministic: bool = False,
dtype: DType = jnp.float32,
float32_logits: bool = False):
"""Computes dot-product attention given query, key, and value.
This is the core function for applying attention based on
https://arxiv.org/abs/1706.03762. It calculates the attention weights given
query and key and combines the values using the attention weights.
Args:
query: queries for calculating attention with shape of `[batch, q_length,
num_heads, qk_depth_per_head]`.
key: keys for calculating attention with shape of `[batch, kv_length,
num_heads, qk_depth_per_head]`.
value: values to be used in attention with shape of `[batch, kv_length,
num_heads, v_depth_per_head]`.
bias: bias for the attention weights. This should be broadcastable to the
shape `[batch, num_heads, q_length, kv_length]` This can be used for
incorporating causal masks, padding masks, proximity bias, etc.
dropout_rng: JAX PRNGKey: to be used for dropout
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
dtype: the dtype of the computation (default: float32)
float32_logits: bool, if True then compute logits in float32 to avoid
numerical issues with bfloat16.
Returns:
Output of shape `[batch, length, num_heads, v_depth_per_head]`.
"""
assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'
batch_dim = 1 if transpose_batch_sequence else 0
assert query.shape[batch_dim] == key.shape[batch_dim] == value.shape[batch_dim], (
'q, k, v batch dims must match.')
assert query.shape[-2] == key.shape[-2] == value.shape[-2], ('q, k, v num_heads must match.')
sequence_dim = 0 if transpose_batch_sequence else 1
assert key.shape[sequence_dim] == value.shape[sequence_dim], 'k, v lengths must match.'
assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'
# Casting logits and softmax computation for float32 for model stability.
if float32_logits:
query = query.astype(jnp.float32)
key = key.astype(jnp.float32)
# `attn_weights`: [batch, num_heads, q_length, kv_length]
if transpose_batch_sequence:
attn_weights = jnp.einsum('qbhd,kbhd->bhqk', query, key)
else:
attn_weights = jnp.einsum('bqhd,bkhd->bhqk', query, key)
# Apply attention bias: masking, dropout, proximity bias, etc.
if bias is not None:
attn_weights = attn_weights + bias.astype(attn_weights.dtype)
# Normalize the attention weights across `kv_length` dimension.
attn_weights = jax_nn.softmax(attn_weights).astype(dtype)
# Apply attention dropout.
if not deterministic and dropout_rate > 0.:
keep_prob = 1.0 - dropout_rate
# T5 broadcasts along the "length" dim, but unclear which one that
# corresponds to in positional dimensions here, assuming query dim.
dropout_shape = list(attn_weights.shape)
keep = jax_random.bernoulli(dropout_rng, keep_prob, dropout_shape)
multiplier = (keep.astype(attn_weights.dtype) / jnp.asarray(keep_prob, dtype=dtype))
attn_weights = attn_weights * multiplier
# Take the linear combination of `value`.
if transpose_batch_sequence:
return jnp.einsum('bhqk,kbhd->qbhd', attn_weights, value)
return jnp.einsum('bhqk,bkhd->bqhd', attn_weights, value)
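# Shape sketch for the reference attention above, assuming transpose_batch_sequence=False
# and batch=2, q_len=8, kv_len=8, heads=4, head_dim=16:
#   query/key/value: (2, 8, 4, 16) -> attn_weights: (2, 4, 8, 8) -> output: (2, 8, 4, 16)
# Any bias must be broadcastable to the (batch, heads, q_len, kv_len) weight shape.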
class DenseGeneral(nn.Module):
"""A linear transformation with flexible axes and FP8 support.
Attributes:
features: tuple with numbers of output features.
axis: tuple with axes to apply the transformation on.
dtype: the dtype of the computation (default: float32).
kernel_init: initializer function for the weight matrix.
use_bias: whether to add a bias to the output (default: False).
bias_init: initializer function for the bias vector.
"""
features: Union[Iterable[int], int]
axis: Union[Iterable[int], int] = -1
dtype: DType = jnp.float32
kernel_init: Initializer = None
kernel_axes: Tuple[str, ...] = ()
use_bias: bool = False
bias_init: Initializer = nn.initializers.zeros
bias_axes: Tuple[str, ...] = ()
def __post_init__(self):
if self.kernel_init is None:
self.kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
super().__post_init__()
@nn.compact
def __call__(self, inputs: Array) -> Array:
"""Applies a linear transformation to the inputs along multiple dimensions.
Args:
inputs: The nd-array to be transformed.
Returns:
The transformed input.
"""
features = _canonicalize_tuple(self.features)
axis = _canonicalize_tuple(self.axis)
inputs = jnp.asarray(inputs, self.dtype)
axis = _normalize_axes(axis, inputs.ndim)
kernel_shape = tuple(inputs.shape[ax] for ax in axis) + features
kernel_param_shape = (np.prod([inputs.shape[ax] for ax in axis]), np.prod(features))
kernel = nn_partitioning.param_with_axes('kernel',
self.kernel_init,
kernel_param_shape,
jnp.float32,
axes=self.kernel_axes)
kernel = jnp.asarray(kernel, self.dtype)
kernel = jnp.reshape(kernel, kernel_shape)
if self.use_bias:
bias = nn_partitioning.param_with_axes('bias',
self.bias_init, (self.features,),
self.dtype,
axes=self.bias_axes)
else:
bias = None
contract_ind = tuple(range(0, len(axis)))
y = lax.dot_general(inputs, kernel, ((axis, contract_ind), ((), ())))
if bias is not None:
y += jnp.reshape(bias, (1,) * (y.ndim - 1) + (-1,))
return y
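# Worked shape example for DenseGeneral (illustration only): with inputs of shape
# (4, 16, 32), axis=-1 and features=(8, 8), the kernel is stored as a (32, 64) parameter,
# reshaped to (32, 8, 8) for the contraction, and the output has shape (4, 16, 8, 8).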
class MlpBlock(nn.Module):
"""Transformer MLP / feed-forward block.
Attributes:
intermediate_dim: Shared dimension of hidden layers.
activations: Type of activations for each layer. Each element is either
'linear', a string function name in flax.linen, or a function.
kernel_init: Kernel function, passed to the dense layers.
deterministic: Whether the dropout layers should be deterministic.
intermediate_dropout_rate: Dropout rate used after the intermediate layers.
dtype: Type for the dense layer.
"""
transpose_batch_sequence: bool
intermediate_dim: int = 2048
activations: Sequence[Union[str, Callable]] = ('relu',)
kernel_init: Initializer = None
intermediate_dropout_rate: float = 0.1
dtype: Any = jnp.float32
fuse_wi: bool = False
def __post_init__(self):
if self.kernel_init is None:
self.kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
super().__post_init__()
@nn.compact
def __call__(self, inputs, deterministic: bool = False):
"""Applies Transformer MlpBlock module."""
# Iterate over specified MLP input activation functions.
# e.g. ('relu',) or ('gelu', 'linear') for gated-gelu.
activations = []
if self.fuse_wi:
dense_name = 'wi'
num_activations = len(self.activations)
x = DenseGeneral(self.intermediate_dim * num_activations,
dtype=self.dtype,
kernel_init=self.kernel_init,
kernel_axes=('embed', 'mlp'),
name=dense_name)(inputs)
x = jnp.split(x, num_activations, axis=-1)
for idx, act_fn in enumerate(self.activations):
x_i = _convert_to_activation_function(act_fn)(x[idx])
activations.append(x_i)
else:
for idx, act_fn in enumerate(self.activations):
dense_name = 'wi' if len(self.activations) == 1 else f'wi_{idx}'
x = DenseGeneral(self.intermediate_dim,
dtype=self.dtype,
kernel_init=self.kernel_init,
kernel_axes=('embed', 'mlp'),
name=dense_name)(inputs)
x = _convert_to_activation_function(act_fn)(x)
activations.append(x)
# Take elementwise product of above intermediate activations.
x = functools.reduce(operator.mul, activations)
dropout_broadcast_dims = (0,) if self.transpose_batch_sequence else (1,)
# Apply dropout and final dense output projection.
x = nn.Dropout(rate=self.intermediate_dropout_rate, broadcast_dims=dropout_broadcast_dims)(
x, deterministic=deterministic) # Broadcast along length.
if self.transpose_batch_sequence:
x = nn_partitioning.with_sharding_constraint(x, ('length', 'batch', 'mlp'))
else:
x = nn_partitioning.with_sharding_constraint(x, ('batch', 'length', 'mlp'))
output = DenseGeneral(inputs.shape[-1],
dtype=self.dtype,
kernel_init=self.kernel_init,
kernel_axes=('mlp', 'embed'),
name='wo')(x)
return output
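# With activations=('gelu', 'linear') the block above computes a gated GELU:
# gelu(wi_0(x)) * wi_1(x) (or a single fused 'wi' projection split in two when fuse_wi=True),
# followed by dropout and the 'wo' projection back to the input feature size.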
dynamic_vector_slice_in_dim = vmap(lax.dynamic_slice_in_dim, in_axes=(None, 0, None, None))
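# vmap over the start indices only: given an operand of shape (heads, q_len, kv_len),
# per-example indices of shape (b,), slice_size=1 and axis=-2, this returns a slice of
# shape (b, heads, 1, kv_len); it is used below to pick the current decoding step's bias.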
class MultiHeadAttention(nn.Module):
"""Multi-head dot-product attention.
Attributes:
num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
should be divisible by the number of heads.
head_dim: dimension of each head.
dtype: the dtype of the computation.
dropout_rate: dropout rate
kernel_init: initializer for the kernel of the Dense layers.
float32_logits: bool, if True then compute logits in float32 to avoid
numerical issues with bfloat16.
"""
num_heads: int
head_dim: int
transpose_batch_sequence: bool
dtype: DType = jnp.float32
dropout_rate: float = 0.
kernel_init: Initializer = None
float32_logits: bool = False # computes logits in float32 for stability.
scale_attn_logits: bool = False
scaled_query_init: bool = True
fuse_qkv: bool = True
def __post_init__(self):
if self.kernel_init is None:
self.kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'normal')
super().__post_init__()
@nn.compact
def __call__(self,
inputs_q: Array,
inputs_kv: Array,
mask: Optional[Array] = None,
bias: Optional[Array] = None,
*,
decode: bool = False,
deterministic: bool = False) -> Array:
"""Applies multi-head dot product attention on the input data.
Projects the inputs into multi-headed query, key, and value vectors,
applies dot-product attention and project the results to an output vector.
There are two modes: decoding and non-decoding (e.g., training). The mode is
determined by `decode` argument. For decoding, this method is called twice,
first to initialize the cache and then for an actual decoding process. The
two calls are differentiated by the presence of 'cached_key' in the variable
dict. In the cache initialization stage, the cache variables are initialized
as zeros and will be filled in the subsequent decoding process.
In the cache initialization call, `inputs_q` has a shape [batch, length,
q_features] and `inputs_kv`: [batch, length, kv_features]. During the
incremental decoding stage, query, key and value all have the shape [batch,
1, qkv_features] corresponding to a single step.
Args:
inputs_q: input queries of shape `[batch, q_length, q_features]`.
inputs_kv: key/values of shape `[batch, kv_length, kv_features]`.
mask: attention mask of shape `[batch, num_heads, q_length, kv_length]`.
bias: attention bias of shape `[batch, num_heads, q_length, kv_length]`.
decode: Whether to prepare and use an autoregressive cache.
deterministic: Disables dropout if set to True.
Returns:
output of shape `[batch, length, q_features]`.
"""
projection = functools.partial(DenseGeneral,
axis=-1,
features=self.num_heads * self.head_dim,
kernel_axes=('embed', 'joined_kv'),
dtype=self.dtype)
# NOTE: T5 does not explicitly rescale the attention logits by
# 1/sqrt(depth_kq)! This is folded into the initializers of the
# linear transformations, which is equivalent under Adafactor
depth_scaling = jnp.sqrt(self.head_dim).astype(self.dtype)
query_init = lambda *args: self.kernel_init(*args) / ( # pylint: disable=unnecessary-lambda-assignment
depth_scaling if self.scaled_query_init else 1.0)
# Project inputs_q to multi-headed q/k/v
# dimensions are then [batch, length, num_heads, head_dim]
def qkv_init(key, shape, dtype):
assert shape[-1] % 3 == 0
q_shape = (shape[0], shape[1] // 3)
k_shape = (shape[0], shape[1] // 3)
v_shape = (shape[0], shape[1] // 3)
q_kernel = query_init(key, q_shape, dtype)
k_kernel = self.kernel_init(key, k_shape, dtype) # pylint: disable=too-many-function-args
v_kernel = self.kernel_init(key, v_shape, dtype) # pylint: disable=too-many-function-args
return jnp.concatenate([q_kernel, k_kernel, v_kernel], axis=-1, dtype=dtype)
if self.fuse_qkv:
if inputs_q is inputs_kv:
qkv_proj = DenseGeneral(axis=-1,
features=self.num_heads * self.head_dim * 3,
kernel_axes=('embed', 'joined_kv'),
kernel_init=qkv_init,
name='qkv',
dtype=self.dtype)(inputs_kv)
query, key, value = jnp.split(
qkv_proj, [self.num_heads * self.head_dim, self.num_heads * self.head_dim * 2],
axis=-1)
if self.scale_attn_logits:
query = query / depth_scaling
else:
query = projection(kernel_init=query_init, name='query')( \
(inputs_q / depth_scaling) if self.scale_attn_logits else inputs_q)
kv_proj = DenseGeneral(axis=-1,
features=self.num_heads * self.head_dim * 2,
kernel_axes=('embed', 'joined_kv'),
kernel_init=self.kernel_init,
name='kv',
dtype=self.dtype)(inputs_kv)
key, value = jnp.split(kv_proj, [self.num_heads * self.head_dim], axis=-1)
else:
query = projection(kernel_init=query_init, name='query')( \
(inputs_q / depth_scaling) if self.scale_attn_logits else inputs_q)
key = projection(kernel_init=self.kernel_init, name='key')(inputs_kv)
value = projection(kernel_init=self.kernel_init, name='value')(inputs_kv)
query = query.reshape((query.shape[0], query.shape[1], self.num_heads, self.head_dim))
key = key.reshape((key.shape[0], key.shape[1], self.num_heads, self.head_dim))
value = value.reshape((value.shape[0], value.shape[1], self.num_heads, self.head_dim))
if self.transpose_batch_sequence:
query = nn_partitioning.with_sharding_constraint(query,
('length', 'batch', 'heads', 'kv'))
key = nn_partitioning.with_sharding_constraint(key, ('length', 'batch', 'heads', 'kv'))
value = nn_partitioning.with_sharding_constraint(value,
('length', 'batch', 'heads', 'kv'))
else:
query = nn_partitioning.with_sharding_constraint(query,
('batch', 'length', 'heads', 'kv'))
key = nn_partitioning.with_sharding_constraint(key, ('batch', 'length', 'heads', 'kv'))
value = nn_partitioning.with_sharding_constraint(value,
('batch', 'length', 'heads', 'kv'))
if decode:
# Detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable('cache', 'cached_key')
# The key and value have dimension [batch, length, num_heads, head_dim],
# but we cache them as [batch, num_heads, head_dim, length] as a TPU
# fusion optimization. This also enables the "scatter via one-hot
# broadcast" trick, which means we do a one-hot broadcast instead of a
# scatter/gather operations, resulting in a 3-4x speedup in practice.
swap_dims = lambda x: x[:-3] + tuple(x[i] for i in [-2, -1, -3]) # pylint: disable=unnecessary-lambda-assignment
cached_key = self.variable('cache', 'cached_key', jnp.zeros, swap_dims(key.shape),
key.dtype)
cached_value = self.variable('cache', 'cached_value', jnp.zeros, swap_dims(value.shape),
value.dtype)
cache_index = self.variable('cache', 'cache_index',
lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
batch, num_heads, head_dim, length = cached_key.value.shape
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
# Sanity shape check of cached key against input query.
expected_shape = (batch, 1, num_heads, head_dim)
if expected_shape != query.shape:
raise ValueError(
'Autoregressive cache shape error, '
f"expected query shape {expected_shape} instead got {query.shape}.")
# Create a OHE of the current index. NOTE: the index is increased below.
cur_index = cache_index.value
one_hot_indices = jax_nn.one_hot(cur_index, length, dtype=key.dtype)
# In order to update the key, value caches with the current key and
# value, we move the length axis to the back, similar to what we did for
# the cached ones above.
# Note these are currently the key and value of a single position, since
# we feed one position at a time.
one_token_key = jnp.moveaxis(key, -3, -1)
one_token_value = jnp.moveaxis(value, -3, -1)
# Update key, value caches with our new 1d spatial slices.
# We implement an efficient scatter into the cache via one-hot
# broadcast and addition.
key = cached_key.value + one_token_key * one_hot_indices
value = cached_value.value + one_token_value * one_hot_indices
cached_key.value = key
cached_value.value = value
cache_index.value = cache_index.value + 1
# Move the keys and values back to their original shapes.
key = jnp.moveaxis(key, -1, -3)
value = jnp.moveaxis(value, -1, -3)
# Causal mask for cached decoder self-attention: our single query
# position should only attend to those key positions that have already
# been generated and cached, not the remaining zero elements.
mask = combine_masks(
mask,
jnp.broadcast_to(
jnp.arange(length) <= cur_index,
# (1, 1, length) represent (head dim, query length, key length)
# query length is 1 because during decoding we deal with one
# index.
# The same mask is applied to all batch elements and heads.
(batch, 1, 1, length)))
# Grab the correct relative attention bias during decoding. This is
# only required during single step decoding.
if bias is not None:
# The bias is a full attention matrix, but during decoding we only
# have to take a slice of it.
# This is equivalent to bias[..., cur_index:cur_index+1, :].
bias = dynamic_vector_slice_in_dim(jnp.squeeze(bias, axis=0),
jnp.reshape(cur_index, (-1)), 1, -2)
# Convert the boolean attention mask to an attention bias.
if mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(mask > 0,
jnp.full(mask.shape, 0.).astype(self.dtype),
jnp.full(mask.shape, -1e10).astype(self.dtype))
else:
attention_bias = None
# Add provided bias term (e.g. relative position embedding).
if bias is not None:
attention_bias = combine_biases(attention_bias, bias)
dropout_rng = None
if not deterministic and self.dropout_rate > 0.:
dropout_rng = self.make_rng('dropout')
# Apply attention.
x = dot_product_attention(query,
key,
value,
transpose_batch_sequence=self.transpose_batch_sequence,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.dropout_rate,
deterministic=deterministic,
dtype=self.dtype,
float32_logits=self.float32_logits)
x = x.reshape((x.shape[0], x.shape[1], x.shape[2] * x.shape[3]))
if self.transpose_batch_sequence:
x = nn_partitioning.with_sharding_constraint(x, ('length', 'batch', 'joined_kv'))
else:
x = nn_partitioning.with_sharding_constraint(x, ('batch', 'length', 'joined_kv'))
# Back to the original inputs dimensions.
out = DenseGeneral(
features=inputs_q.shape[-1], # output dim is set to the input dim.
axis=-1,
kernel_init=self.kernel_init,
kernel_axes=('joined_kv', 'embed'),
dtype=self.dtype,
name='out')(x)
return out
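# Fused-QKV shape sketch (illustrative numbers): with num_heads=16 and head_dim=64 the
# 'qkv' projection produces 16 * 64 * 3 = 3072 features, split at [1024, 2048] into query,
# key and value, each then reshaped to (..., 16, 64) before attention.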
class LayerNorm(nn.Module):
"""T5 Layer normalization operating on the last axis of the input data."""
epsilon: float = 1e-6
dtype: Any = jnp.float32
layernorm_type: str = 'layernorm'
zero_centered_gamma: bool = False
scale_init: Initializer = None
bias_init: Initializer = nn.initializers.zeros
def __post_init__(self):
if self.scale_init is None:
if not self.zero_centered_gamma:
self.scale_init = nn.initializers.ones
else:
self.scale_init = nn.initializers.zeros
super().__post_init__()
@nn.compact
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
"""Applies layer normalization on the input."""
x = jnp.asarray(x, jnp.float32)
features = x.shape[-1]
scale = nn_partitioning.param_with_axes('scale',
self.scale_init, (features,),
jnp.float32,
axes=('embed',))
scale = jnp.asarray(scale, self.dtype)
if self.layernorm_type == 'layernorm':
mean = jnp.mean(x, axis=-1, keepdims=True)
var = jnp.mean(jnp.square(x - mean), axis=-1, keepdims=True)
y = (x - mean) * lax.rsqrt(var + self.epsilon)
bias = nn_partitioning.param_with_axes('ln_bias',
self.bias_init, (features,),
jnp.float32,
axes=('embed',))
bias = jnp.asarray(bias, self.dtype)
y = jnp.asarray(y, self.dtype)
if not self.zero_centered_gamma:
z = y * scale + bias
else:
z = y * (scale + 1) + bias
else:
assert self.layernorm_type == 'rmsnorm'
assert not self.zero_centered_gamma
mean2 = jnp.mean(lax.square(x), axis=-1, keepdims=True)
y = jnp.asarray(x * lax.rsqrt(mean2 + self.epsilon), self.dtype)
z = y * scale
return z
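# Note on zero_centered_gamma above: it is only supported for layernorm_type='layernorm'.
# The scale parameter is then initialized to zeros and applied as y * (scale + 1) + bias,
# so at initialization the layer matches a standard LayerNorm with unit gamma.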
class RelativePositionBiases(nn.Module):
"""Adds T5-style relative positional embeddings to the attention logits.
Attributes:
num_buckets: Number of buckets to bucket distances between key and query
positions into.
max_distance: Maximum distance before everything is lumped into the last
distance bucket.
num_heads: Number of heads in the attention layer. Each head will get a
different relative position weighting.
dtype: Type of arrays through this module.
embedding_init: initializer for relative embedding table.
"""
num_buckets: int
max_distance: int
num_heads: int
dtype: Any
embedding_init: Callable[..., Array] = nn.linear.default_embed_init
@staticmethod
def _relative_position_bucket(relative_position,
bidirectional=True,
num_buckets=32,
max_distance=128):
"""Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are
invalid.
We use smaller buckets for small absolute relative_position and larger
buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative
positions <=-max_distance map to the same bucket. This should allow for
more graceful generalization to longer sequences than the model has been
trained on.
Args:
relative_position: an int32 array
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += (n < 0).astype(np.int32) * num_buckets
n = np.abs(n)
else:
n = np.maximum(n, 0)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
np.log(n.astype(np.float32) / max_exact + np.finfo(np.float32).eps) /
np.log(max_distance / max_exact) * (num_buckets - max_exact)).astype(np.int32)
val_if_large = np.minimum(val_if_large, num_buckets - 1)
ret += np.where(is_small, n, val_if_large)
return ret
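    # Worked bucket examples (bidirectional=True, num_buckets=32, max_distance=128), so the
    # effective range per direction is 16 buckets with max_exact=8:
    #   relative_position = -3  -> n = 3  -> bucket 3            (exact region, key before query)
    #   relative_position = +3  -> n = -3 -> bucket 16 + 3 = 19  (offset for the other direction)
    #   relative_position = -64 -> n = 64 -> bucket 8 + int(log(8)/log(16) * 8) = 14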
@nn.compact
def __call__(self, qlen, klen, bidirectional=True):
"""Produce relative position embedding attention biases.
Args:
qlen: attention query length.
klen: attention key length.
bidirectional: whether to allow positive memory-query relative position
embeddings.
Returns:
          output: `(1, num_heads, q_len, k_len)` attention bias
"""
context_position = np.arange(qlen, dtype=jnp.int32)[:, None]
memory_position = np.arange(klen, dtype=jnp.int32)[None, :]
relative_position = memory_position - context_position # shape (qlen, klen)
rp_bucket = self._relative_position_bucket(relative_position,
bidirectional=bidirectional,
num_buckets=self.num_buckets,
max_distance=self.max_distance)
relative_attention_bias = nn_partitioning.param_with_axes(
'rel_embedding',
self.embedding_init, (self.num_heads, self.num_buckets),
jnp.float32,
axes=('heads', 'relpos_buckets'))
relative_attention_bias = jnp.asarray(relative_attention_bias, self.dtype)
# Instead of using a slow gather, we create a leading-dimension one-hot
# array from rp_bucket and use it to perform the gather-equivalent via a
# contraction, i.e.:
# (num_head, num_buckets) x (num_buckets one-hot, qlen, klen).
# This is equivalent to relative_attention_bias[:, rp_bucket]
bcast_iota = lax.broadcasted_iota(jnp.int32, (self.num_buckets, 1, 1), 0)
rp_bucket_one_hot = jnp.array(rp_bucket[jnp.newaxis, ...] == bcast_iota, dtype=self.dtype)
# --> shape (qlen, klen, num_heads)
values = lax.dot_general(
relative_attention_bias,
rp_bucket_one_hot,
(
((1,), (0,)), # rhs, lhs contracting dims
((), ()))) # no batched dims
# Add a singleton batch dimension.
# --> shape (1, num_heads, qlen, klen)
return values[jnp.newaxis, ...]
class EncoderLayer(nn.Module):
"""Transformer encoder layer."""
relative_embedding: nn.Module = None
num_heads: int = 8
head_dim: int = 64
dropout_rate: float = 0.1
transpose_batch_sequence: bool = True
float32_attention_logits: bool = False
scale_attn_logits: bool = False
scaled_query_init: bool = True
mlp_dim: int = 2048
mlp_activations: Sequence[str] = ('relu',)
dtype: Any = jnp.float32
apply_residual_connection_post_layernorm: bool = False
layernorm_type: str = 'layernorm'
zero_centered_gamma: bool = False
output_layernorm: bool = False
drop_path: float = 0.0
fuse_qkv_params: bool = True
fuse_mlp_wi: bool = False
@nn.compact
def __call__(self, inputs, encoder_mask=None, deterministic=False):
# Relative position embedding as attention biases.
sequence_dim = 0 if self.transpose_batch_sequence else 1
batch_dim = 1 - sequence_dim
if self.relative_embedding is None:
rel_emb = RelativePositionBiases(num_buckets=32,
max_distance=128,
num_heads=self.num_heads,
dtype=self.dtype,
embedding_init=nn.initializers.variance_scaling(
1.0, 'fan_avg', 'uniform'),
name='relpos_bias')
else:
rel_emb = self.relative_embedding
encoder_bias = rel_emb(inputs.shape[sequence_dim], inputs.shape[sequence_dim], True)
# Attention block.
residual = inputs
if not self.output_layernorm:
# Attention block.
x = LayerNorm(layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
dtype=self.dtype,
name="pre_attention_layer_norm")(inputs)
if self.apply_residual_connection_post_layernorm:
residual = x
else:
x = inputs
# [batch, length, emb_dim] -> [batch, length, emb_dim]
x = MultiHeadAttention(num_heads=self.num_heads,
dtype=self.dtype,
head_dim=self.head_dim,
transpose_batch_sequence=self.transpose_batch_sequence,
dropout_rate=self.dropout_rate,
float32_logits=self.float32_attention_logits,
scale_attn_logits=self.scale_attn_logits,
scaled_query_init=self.scaled_query_init,
fuse_qkv=self.fuse_qkv_params,
name='attention')(x,
x,
encoder_mask,
encoder_bias,
deterministic=deterministic)
x = nn.Dropout(rate=self.dropout_rate,
broadcast_dims=(sequence_dim,))(x, deterministic=deterministic)
if self.drop_path > 0.0:
drop_path_shape = _generate_drop_path_shape(x.shape, batch_dim)
x = nn.Dropout(rate=self.drop_path,
broadcast_dims=drop_path_shape)(x, deterministic=deterministic)
x = x + residual
# MLP block.
residual = x
y = LayerNorm(layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
dtype=self.dtype,
name='pre_mlp_layer_norm')(x)
if self.apply_residual_connection_post_layernorm:
residual = y
# [batch, length, emb_dim] -> [batch, length, emb_dim]
y = MlpBlock(
transpose_batch_sequence=self.transpose_batch_sequence,
intermediate_dim=self.mlp_dim,
activations=self.mlp_activations,
intermediate_dropout_rate=self.dropout_rate,
dtype=self.dtype,
fuse_wi=self.fuse_mlp_wi,
name='mlp',
)(y, deterministic=deterministic)
y = nn.Dropout(rate=self.dropout_rate,
broadcast_dims=(sequence_dim,))(y, deterministic=deterministic)
if self.drop_path > 0.0:
drop_path_shape = _generate_drop_path_shape(y.shape, batch_dim)
y = nn.Dropout(rate=self.drop_path,
broadcast_dims=drop_path_shape)(y, deterministic=deterministic)
y = y + residual
if self.output_layernorm:
y = LayerNorm(layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
dtype=self.dtype,
name="output_layer_norm")(y)
return y
class DecoderLayer(nn.Module):
"""Transformer decoder layer that attends to the encoder."""
relative_embedding: nn.Module = None
num_heads: int = 8
head_dim: int = 64
dropout_rate: float = 0.1
transpose_batch_sequence: bool = True
float32_attention_logits: bool = False
scale_attn_logits: bool = False
scaled_query_init: bool = True
mlp_dim: int = 2048
mlp_activations: Sequence[str] = ('relu',)
dtype: Any = jnp.float32
apply_residual_connection_post_layernorm: bool = False
output_layernorm: bool = False
layernorm_type: str = 'layernorm'
zero_centered_gamma: bool = False
drop_path: float = 0.0
fuse_qkv_params: bool = True
fuse_mlp_wi: bool = False
@nn.compact
def __call__(self,
inputs,
encoded,
decoder_mask=None,
encoder_decoder_mask=None,
deterministic=False,
decode=False,
max_decode_length=None):
# Relative position embedding as attention biases.
sequence_dim = 0 if self.transpose_batch_sequence else 1
batch_dim = 1 - sequence_dim
l = max_decode_length if decode and max_decode_length else inputs.shape[sequence_dim]
if self.relative_embedding is None:
rel_emb = RelativePositionBiases(num_buckets=32,
max_distance=128,
num_heads=self.num_heads,
dtype=self.dtype,
embedding_init=nn.initializers.variance_scaling(
1.0, 'fan_avg', 'uniform'),
name='relpos_bias')
else:
rel_emb = self.relative_embedding
decoder_bias = rel_emb(l, l, False)
# inputs: embedded inputs to the decoder with shape [batch, length, emb_dim]
residual = inputs
if not self.output_layernorm:
# Attention block.
x = LayerNorm(layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
dtype=self.dtype,
name="pre_self_attention_layer_norm")(inputs)
if self.apply_residual_connection_post_layernorm:
residual = x
else:
x = inputs
# Self-attention block
x = MultiHeadAttention(num_heads=self.num_heads,
dtype=self.dtype,
head_dim=self.head_dim,
transpose_batch_sequence=self.transpose_batch_sequence,
dropout_rate=self.dropout_rate,
float32_logits=self.float32_attention_logits,
scale_attn_logits=self.scale_attn_logits,
scaled_query_init=self.scaled_query_init,
fuse_qkv=self.fuse_qkv_params,
name='self_attention')(x,
x,
decoder_mask,
decoder_bias,
deterministic=deterministic,
decode=decode)
x = nn.Dropout(rate=self.dropout_rate,
broadcast_dims=(sequence_dim,))(x, deterministic=deterministic)
if self.drop_path > 0.0:
drop_path_shape = _generate_drop_path_shape(x.shape, batch_dim)
x = nn.Dropout(rate=self.drop_path,
broadcast_dims=drop_path_shape)(x, deterministic=deterministic)
x = x + residual
# Encoder-Decoder block.
residual = x
y = LayerNorm(layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
dtype=self.dtype,
name='pre_cross_attention_layer_norm')(x)
if self.apply_residual_connection_post_layernorm:
residual = y
y = MultiHeadAttention(num_heads=self.num_heads,
dtype=self.dtype,
head_dim=self.head_dim,
transpose_batch_sequence=self.transpose_batch_sequence,
dropout_rate=self.dropout_rate,
float32_logits=self.float32_attention_logits,
scale_attn_logits=self.scale_attn_logits,
scaled_query_init=self.scaled_query_init,
fuse_qkv=self.fuse_qkv_params,
name='encoder_decoder_attention')(y,
encoded,
encoder_decoder_mask,
deterministic=deterministic)
y = nn.Dropout(rate=self.dropout_rate,
broadcast_dims=(sequence_dim,))(y, deterministic=deterministic)
y = y + residual
# MLP block.
residual = y
z = LayerNorm(layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
dtype=self.dtype,
name='pre_mlp_layer_norm')(y)
if self.apply_residual_connection_post_layernorm:
residual = z
z = MlpBlock(
transpose_batch_sequence=self.transpose_batch_sequence,
intermediate_dim=self.mlp_dim,
activations=self.mlp_activations,
intermediate_dropout_rate=self.dropout_rate,
dtype=self.dtype,
fuse_wi=self.fuse_mlp_wi,
name='mlp',
)(z, deterministic=deterministic)
z = nn.Dropout(rate=self.dropout_rate,
broadcast_dims=(sequence_dim,))(z, deterministic=deterministic)
if self.drop_path > 0.0:
drop_path_shape = _generate_drop_path_shape(z.shape, batch_dim)
z = nn.Dropout(rate=self.drop_path,
broadcast_dims=drop_path_shape)(z, deterministic=deterministic)
z = z + residual
if self.output_layernorm:
z = LayerNorm(layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
dtype=self.dtype,
name="output_layer_norm")(z)
return z
def assert_allclose(actual,
desired,
rtol=1e-05,
atol=1e-08,
equal_nan=True,
err_msg='',
verbose=True):
if not isinstance(actual, float):
actual = actual.astype(jnp.float32)
if not isinstance(desired, float):
desired = desired.astype(jnp.float32)
np.testing.assert_allclose(actual, desired, rtol, atol, equal_nan, err_msg, verbose)
| TransformerEngine-main | tests/jax/utils.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import functools
import operator
import jax
import jax.numpy as jnp
import numpy as np
import pytest
from jax import lax
from jax import jit, value_and_grad
from flax import linen as nn
from utils import assert_allclose
from transformer_engine.common.recipe import Format
from transformer_engine.jax.cpp_extensions import dgated_gelu, gated_gelu
from transformer_engine.jax.cpp_extensions import dgated_gelu_cast_transpose, gated_gelu_fp8
from transformer_engine.jax.cpp_extensions import dequantize, quantize
from transformer_engine.jax.dot import fp8_dot
from transformer_engine.jax.fp8 import DType, FP8GemmPackage, FP8Helper, _format2dtypes
from transformer_engine.jax.fp8 import is_fp8_available
from transformer_engine.jax.layernorm import layernorm
from transformer_engine.jax.mlp import fp8_ln_mlp
GEMM_CASES = [(256, 256, 512), (32, 32, 32), (16384, 1024, 2816), (16384, 2816, 1024),
(16384, 1024, 1024)]
FP8_COMPUTE_TYPE = [_format2dtypes(Format.E4M3), _format2dtypes(Format.HYBRID)]
LN_CASES = [(512, 1024)]
DTYPES = [jnp.bfloat16, jnp.float32]
is_fp8_supported, reason = is_fp8_available()
class TestFP8Dot:
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
def test_qdq(self):
FP8_E4M3_MAX = 448
x = jnp.asarray([[-1, 0.1], [2, 3]], jnp.float32)
amax = jnp.max(jnp.abs(x)).reshape(1)
scale = jnp.asarray(FP8_E4M3_MAX / amax, jnp.float32).reshape(1)
scale_inv = (1 / scale).reshape(1)
y, new_amax = quantize(x, amax, scale, scale_inv, out_dtype=DType.kFloat8E4M3)
assert_allclose(new_amax, 3.0)
no_use = jnp.zeros(1, jnp.float32)
z = dequantize(y,
no_use,
no_use,
scale_inv,
fp8_dtype=DType.kFloat8E4M3,
out_dtype=DType.kFloat32)
assert_allclose(z, x, rtol=5e-2, atol=5e-2)
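    # Arithmetic sketch for the check above: amax = max|x| = 3.0, so scale = 448 / 3 (about
    # 149.3) and scale_inv = 3 / 448. Assuming quantize multiplies by scale and dequantize by
    # scale_inv, the round trip recovers x only approximately, hence the loose 5e-2 tolerances.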
def test_compile_bf16(self):
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
a = jax.random.normal(subkeys[0], (256, 512), jnp.bfloat16)
b = jax.random.normal(subkeys[1], (512, 256), jnp.bfloat16)
def func(x, y):
fp8_max = FP8Helper.generate_fp8_max_array(FP8Helper.NUM_META_PER_GEMM)
fp8_metas_amax = jnp.zeros((FP8Helper.NUM_META_PER_GEMM, FP8Helper.AMAX_HISTORY_LEN),
jnp.float32)
fp8_metas_scale = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_metas_scale_inv = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
# x = input, matrix 2d
            # y = weight, matrix 2d
fp8_gemm_pkg = FP8GemmPackage(1, x, [y], fp8_max, fp8_metas_amax, fp8_metas_scale,
fp8_metas_scale_inv)
return jnp.sum(fp8_dot(fp8_gemm_pkg, *_format2dtypes(None)))
value_n_grad_func = value_and_grad(func, (0, 1))
value_n_grad_func_compiled = jit(value_n_grad_func).lower(a, b).compile()
value_n_grad_func_compiled(a, b)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('compute_type', FP8_COMPUTE_TYPE)
def test_compile_fp8(self, compute_type):
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
a = jax.random.normal(subkeys[0], (256, 512), jnp.bfloat16)
b = jax.random.normal(subkeys[1], (512, 256), jnp.bfloat16)
def func(x, y):
fp8_max = FP8Helper.generate_fp8_max_array(FP8Helper.NUM_META_PER_GEMM)
fp8_metas_amax = jnp.zeros((FP8Helper.NUM_META_PER_GEMM, FP8Helper.AMAX_HISTORY_LEN),
jnp.float32)
fp8_metas_scale = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_metas_scale_inv = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_gemm_pkg = FP8GemmPackage(1, x, [y], fp8_max, fp8_metas_amax, fp8_metas_scale,
fp8_metas_scale_inv)
return jnp.sum(fp8_dot(fp8_gemm_pkg, *compute_type))
value_n_grad_func = value_and_grad(func, (0, 1))
value_n_grad_func_compiled = jit(value_n_grad_func).lower(a, b).compile()
value_n_grad_func_compiled(a, b)
@pytest.mark.parametrize('m,n,k', GEMM_CASES)
def test_forward_bf16(self, m, n, k):
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
a = jax.random.normal(subkeys[0], (m, k), jnp.bfloat16)
b = jax.random.normal(subkeys[1], (k, n), jnp.bfloat16)
fp8_max = FP8Helper.generate_fp8_max_array(FP8Helper.NUM_META_PER_GEMM)
fp8_metas_amax = jnp.zeros((FP8Helper.NUM_META_PER_GEMM, FP8Helper.AMAX_HISTORY_LEN),
jnp.float32)
fp8_metas_scale = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_metas_scale_inv = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_gemm_pkg = FP8GemmPackage(1, a, [b], fp8_max, fp8_metas_amax, fp8_metas_scale,
fp8_metas_scale_inv)
primitive_out = fp8_dot(fp8_gemm_pkg, *_format2dtypes(None))
ref_out = jnp.dot(a, b)
assert_allclose(primitive_out, ref_out)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('m,n,k', GEMM_CASES)
@pytest.mark.parametrize('compute_type', FP8_COMPUTE_TYPE)
def test_forward_fp8_randint(self, m, n, k, compute_type):
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
# TODO(rewang): add float random test
min_val, max_val = -8, 8
a = jax.random.randint(subkeys[0], (m, k), min_val, max_val).astype(jnp.bfloat16)
b = jax.random.randint(subkeys[1], (k, n), min_val, max_val).astype(jnp.bfloat16)
fp8_max = FP8Helper.generate_fp8_max_array(FP8Helper.NUM_META_PER_GEMM)
fp8_metas_amax = jnp.zeros((FP8Helper.NUM_META_PER_GEMM, FP8Helper.AMAX_HISTORY_LEN),
jnp.float32)
fp8_metas_scale = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_metas_scale_inv = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_meta = [fp8_max, fp8_metas_amax, fp8_metas_scale, fp8_metas_scale_inv]
# calculate amax
fp8_gemm_pkg = FP8GemmPackage(1, a, [b], *fp8_meta)
primitive_out = fp8_dot(fp8_gemm_pkg, *compute_type)
# calculate scale by amax
fp8_meta = FP8Helper._update_fp8_metas_impl(fp8_meta)
fp8_gemm_pkg = FP8GemmPackage(1, a, [b], *fp8_meta)
primitive_out = fp8_dot(fp8_gemm_pkg, *compute_type)
ref_out = jnp.dot(a, b)
ref_out = ref_out.astype(jnp.float32)
primitive_out = primitive_out.astype(jnp.float32)
assert_allclose(primitive_out, ref_out)
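    # Note on the two-pass pattern used by the FP8 tests above: the first fp8_dot call runs
    # with the initial metadata (scales of 1), FP8Helper._update_fp8_metas_impl then refreshes
    # the scaling factors, and only the second call's output is compared to the bf16 reference.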
@pytest.mark.parametrize('m,n,k', GEMM_CASES)
def test_grad_bf16(self, m, n, k):
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
a = jax.random.normal(subkeys[0], (m, k), jnp.bfloat16)
b = jax.random.normal(subkeys[1], (k, n), jnp.bfloat16)
def primitive_func(x, y):
fp8_max = FP8Helper.generate_fp8_max_array(FP8Helper.NUM_META_PER_GEMM)
fp8_metas_amax = jnp.zeros((FP8Helper.NUM_META_PER_GEMM, FP8Helper.AMAX_HISTORY_LEN),
jnp.float32)
fp8_metas_scale = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_metas_scale_inv = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_gemm_pkg = FP8GemmPackage(1, x, [y], fp8_max, fp8_metas_amax, fp8_metas_scale,
fp8_metas_scale_inv)
return jnp.mean(fp8_dot(fp8_gemm_pkg, *_format2dtypes(None)))
def ref_func(x, y):
return jnp.mean(jnp.dot(x, y))
value_n_grad_primitive_func = value_and_grad(primitive_func, (0, 1))
value_n_grad_ref_func = value_and_grad(ref_func, (0, 1))
primitive_out, (primitive_a_grad, primitive_b_grad) = value_n_grad_primitive_func(a, b)
ref_out, (ref_a_grad, ref_b_grad) = value_n_grad_ref_func(a, b)
assert_allclose(primitive_out, ref_out)
assert_allclose(primitive_a_grad, ref_a_grad)
assert_allclose(primitive_b_grad, ref_b_grad, atol=1e-5)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('m,n,k', GEMM_CASES)
@pytest.mark.parametrize('compute_type', FP8_COMPUTE_TYPE)
def test_grad_fp8_randint(self, m, n, k, compute_type):
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
# TODO(rewang): add float random test
min_val, max_val = -8, 8
a = jax.random.randint(subkeys[0], (m, k), min_val, max_val).astype(jnp.bfloat16)
b = jax.random.randint(subkeys[1], (k, n), min_val, max_val).astype(jnp.bfloat16)
fp8_max = FP8Helper.generate_fp8_max_array(FP8Helper.NUM_META_PER_GEMM)
fp8_metas_amax = jnp.zeros((FP8Helper.NUM_META_PER_GEMM, FP8Helper.AMAX_HISTORY_LEN),
jnp.float32)
fp8_metas_scale = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_metas_scale_inv = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_meta = [fp8_max, fp8_metas_amax, fp8_metas_scale, fp8_metas_scale_inv]
def primitive_func(x, y, metas):
fp8_gemm_pkg = FP8GemmPackage(1, x, [y], *metas)
return jnp.sum(fp8_dot(fp8_gemm_pkg, *compute_type))
def ref_func(x, y):
return jnp.sum(jnp.dot(x, y))
value_n_grad_primitive_func = value_and_grad(primitive_func, (0, 1))
value_n_grad_ref_func = value_and_grad(ref_func, (0, 1))
ref_out, (ref_a_grad, ref_b_grad) = value_n_grad_ref_func(a, b)
# calculate amax
primitive_out, (primitive_a_grad,
primitive_b_grad) = value_n_grad_primitive_func(a, b, fp8_meta)
# calculate scale by amax
fp8_meta = FP8Helper._update_fp8_metas_impl(fp8_meta)
primitive_out, (primitive_a_grad,
primitive_b_grad) = value_n_grad_primitive_func(a, b, fp8_meta)
assert_allclose(primitive_out, ref_out)
assert_allclose(primitive_a_grad, ref_a_grad)
assert_allclose(primitive_b_grad, ref_b_grad)
def test_contracting_dims_bf16(self):
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
a = jax.random.normal(subkeys[0], (32, 8, 16, 64), jnp.bfloat16)
b = jax.random.normal(subkeys[1], (16, 64, 128), jnp.bfloat16)
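        # Contract a's dims (2, 3) with b's dims (0, 1): (32, 8, 16, 64) x (16, 64, 128) -> (32, 8, 128)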
def primitive_func(x, y):
fp8_max = FP8Helper.generate_fp8_max_array(FP8Helper.NUM_META_PER_GEMM)
fp8_metas_amax = jnp.zeros((FP8Helper.NUM_META_PER_GEMM, FP8Helper.AMAX_HISTORY_LEN),
jnp.float32)
fp8_metas_scale = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_metas_scale_inv = jnp.ones((FP8Helper.NUM_META_PER_GEMM, 1), jnp.float32)
fp8_gemm_pkg = FP8GemmPackage(1, x, [y], fp8_max, fp8_metas_amax, fp8_metas_scale,
fp8_metas_scale_inv)
return jnp.sum(fp8_dot(fp8_gemm_pkg, *_format2dtypes(None), ((2, 3), (0, 1))))
def ref_func(x, y):
return jnp.sum(lax.dot_general(x, y, dimension_numbers=(((2, 3), (0, 1)), ((), ()))))
value_n_grad_primitive_func = value_and_grad(primitive_func, (0, 1))
value_n_grad_ref_func = value_and_grad(ref_func, (0, 1))
primitive_out, (primitive_a_grad, primitive_b_grad) = value_n_grad_primitive_func(a, b)
ref_out, (ref_a_grad, ref_b_grad) = value_n_grad_ref_func(a, b)
assert_allclose(primitive_out, ref_out)
assert_allclose(primitive_a_grad, ref_a_grad)
assert_allclose(primitive_b_grad, ref_b_grad)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('m,n,k', [(256, 256, 512), (16384, 1024, 2816), (16384, 2816, 1024),
(16384, 1024, 1024)])
def test_grad_fp8_mlp_randint(self, m, n, k):
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 4)
activations = ('gelu', 'linear')
a = jax.random.uniform(subkeys[0], (m, k), jnp.bfloat16, 5, 8)
k1 = jax.random.uniform(subkeys[1], (k, n * len(activations)), jnp.bfloat16, 5, 8)
k2 = jax.random.uniform(subkeys[2], (n, k), jnp.bfloat16, 5, 8)
s = jax.random.uniform(subkeys[3], (k,), jnp.bfloat16, 5, 8)
fp8_max = FP8Helper.generate_fp8_max_array(FP8Helper.NUM_META_PER_GEMM * 2)
fp8_metas_amax = jnp.zeros((FP8Helper.NUM_META_PER_GEMM * 2, FP8Helper.AMAX_HISTORY_LEN),
jnp.float32)
fp8_metas_scale = jnp.ones((FP8Helper.NUM_META_PER_GEMM * 2, 1), jnp.float32)
fp8_metas_scale_inv = jnp.ones((FP8Helper.NUM_META_PER_GEMM * 2, 1), jnp.float32)
fp8_meta = [fp8_max, fp8_metas_amax, fp8_metas_scale, fp8_metas_scale_inv]
compute_type = _format2dtypes(Format.HYBRID)
def primitive_func(x, ln_s, y, z, metas):
            # x is the input tensor (2D matrix)
            # y, z are the weight matrices (2D)
            # out = (x @ y) @ z
fp8_gemm_pkg = FP8GemmPackage(2, x, [y, z], *metas)
return jnp.mean(
fp8_ln_mlp(fp8_gemm_pkg,
ln_s,
None,
"rmsnorm",
*compute_type,
activations=activations))
def _convert_to_activation_function(fn_or_string):
"""Convert a string to an activation function."""
if fn_or_string == 'linear':
return lambda x: x
if isinstance(fn_or_string, str):
return getattr(nn, fn_or_string)
if callable(fn_or_string):
return fn_or_string
raise ValueError(f"don't know how to convert {fn_or_string} to an activation function")
def fp8_ln_mlp_py(inputs: jnp.ndarray,
ln_scale: jnp.ndarray,
kernel_1: jnp.ndarray,
kernel_2: jnp.ndarray,
fp8_maxs: jnp.ndarray,
amax: jnp.ndarray,
scale: jnp.ndarray,
scale_inv: jnp.ndarray,
fwd_dtype,
bwd_dtype,
epsilon=1e-6,
contracting_dims=((-1,), (0,)),
dp_dim_index=0,
activations=('gelu', 'linear')) -> jnp.ndarray:
x = jnp.asarray(inputs, jnp.float32)
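            # RMSNorm reference: scale the input by the reciprocal root-mean-square, then apply the learned gain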
mean2 = jnp.mean(jax.lax.square(x), axis=-1, keepdims=True)
y = jnp.asarray(x * jax.lax.rsqrt(mean2 + epsilon), jnp.bfloat16)
ln_out = y * ln_scale
ln_out = jnp.asarray(ln_out, jnp.bfloat16)
fp8_gemm_1_pkg = FP8GemmPackage(1, ln_out, [kernel_1],
fp8_maxs[:FP8Helper.NUM_META_PER_GEMM],
amax[:FP8Helper.NUM_META_PER_GEMM],
scale[:FP8Helper.NUM_META_PER_GEMM],
scale_inv[:FP8Helper.NUM_META_PER_GEMM])
linear_1_out = fp8_dot(fp8_gemm_1_pkg,
fwd_dtype,
bwd_dtype,
contracting_dims,
dp_dim_index=dp_dim_index)
x = jnp.split(linear_1_out, len(activations), axis=-1)
acts = []
for idx, act_fn in enumerate(activations):
x_i = _convert_to_activation_function(act_fn)(x[idx])
acts.append(x_i)
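            # Gated activation: elementwise product of the activation branches (GeLU gate times linear branch)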
x = functools.reduce(operator.mul, acts)
x = jnp.asarray(x, jnp.bfloat16)
fp8_gemm_2_pkg = FP8GemmPackage(1, x, [kernel_2],
fp8_maxs[FP8Helper.NUM_META_PER_GEMM:],
amax[FP8Helper.NUM_META_PER_GEMM:],
scale[FP8Helper.NUM_META_PER_GEMM:],
scale_inv[FP8Helper.NUM_META_PER_GEMM:])
output = fp8_dot(fp8_gemm_2_pkg,
fwd_dtype,
bwd_dtype,
contracting_dims,
dp_dim_index=dp_dim_index)
return output
def ref_func(x, ln_s, y, z, metas):
return jnp.mean(
fp8_ln_mlp_py(x, ln_s, y, z, *metas, *compute_type, activations=activations))
value_n_grad_primitive_func = jit(value_and_grad(primitive_func, (0, 1, 2, 3)))
value_n_grad_ref_func = jit(value_and_grad(ref_func, (0, 1, 2, 3)))
ref_out, (ref_a_grad, ref_s_grad, ref_k1_grad,
ref_k2_grad) = value_n_grad_ref_func(a, s, k1, k2, fp8_meta)
# calculate amax
primitive_out, (primitive_a_grad, primitive_s_grad, primitive_k1_grad,
primitive_k2_grad) = value_n_grad_primitive_func(a, s, k1, k2, fp8_meta)
# calculate scale by amax
fp8_meta = FP8Helper._update_fp8_metas_impl(fp8_meta)
primitive_out, (primitive_a_grad, primitive_s_grad, primitive_k1_grad,
primitive_k2_grad) = value_n_grad_primitive_func(a, s, k1, k2, fp8_meta)
assert_allclose(primitive_out, ref_out, rtol=1e-2)
assert_allclose(jnp.asarray(primitive_a_grad, np.float32),
jnp.asarray(ref_a_grad, np.float32),
rtol=1e-2)
assert_allclose(jnp.asarray(primitive_k1_grad, np.float32),
jnp.asarray(ref_k1_grad, np.float32),
rtol=1e-2)
assert_allclose(jnp.asarray(primitive_k2_grad, np.float32),
jnp.asarray(ref_k2_grad, np.float32),
rtol=1e-2)
assert_allclose(jnp.asarray(primitive_s_grad, np.float32),
jnp.asarray(ref_s_grad, np.float32),
rtol=1e-2)
@pytest.fixture(name="random_inputs")
def random_inputs_fixture(shape):
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 4)
out = jax.random.uniform(subkeys[0], shape, jnp.bfloat16, 5, 8)
return out
class TestGatedGeLu:
def ref_func(self, inputs):
def jax_gated_gelu(x):
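            # Reference gated GeLU: split the last dim in half, apply GeLU to one half and multiply by the other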
x = jnp.split(x, 2, axis=-1)
acts = [jax.nn.gelu(x[0]), x[1]]
x = functools.reduce(operator.mul, acts)
x = jnp.asarray(x, jnp.bfloat16)
return x
func = jit(value_and_grad(lambda x: jnp.mean(jax_gated_gelu(x))))
return func(inputs)
def prim_func(self, inputs):
@jax.custom_vjp
def primitive(x):
out, _ = primitive_fwd(x)
return out
def primitive_fwd(x):
out = gated_gelu(x)
ctx = x
return out, ctx
def primitive_bwd(ctx, g):
x = ctx
out = dgated_gelu(g, x)
return (out,)
primitive.defvjp(primitive_fwd, primitive_bwd)
func = jit(value_and_grad(lambda x: jnp.mean(primitive(x))))
return func(inputs)
@pytest.mark.parametrize('shape', [(32, 64), (64, 256)])
def test_gated_gelu(self, random_inputs):
x = random_inputs
prim_out, prim_grad = self.prim_func(x)
ref_out, ref_grad = self.ref_func(x)
assert_allclose(prim_out, ref_out, rtol=1e-2)
assert_allclose(prim_grad, ref_grad, rtol=1e-1, atol=1e-3)
class TestGatedGeLuFP8(TestGatedGeLu):
def prim_func(self, inputs):
amax = self.amax
scale = self.scale
scale_inv = self.scale_inv
no_use = jnp.zeros(1, jnp.float32)
@jax.custom_vjp
def primitive(x, y, z):
out = primitive_fwd(x, y, z)
return out
def primitive_fwd(x, y, z): # pylint: disable=unused-argument
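            # Quantize to FP8 (E5M2) and immediately dequantize to emulate FP8 precision in the output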
out, _ = gated_gelu_fp8(x, amax, scale, scale_inv, DType.kFloat8E5M2)
out = dequantize(out, no_use, no_use, scale_inv, DType.kFloat8E5M2, DType.kBFloat16)
ctx = x
return out, ctx
def primitive_bwd(ctx, g):
x = ctx
dgelu, dgelu_trans, amax_out = dgated_gelu_cast_transpose(g, x, amax, scale, scale_inv,
DType.kFloat8E5M2)
dgelu = dequantize(dgelu, no_use, no_use, scale_inv, DType.kFloat8E5M2, DType.kFloat32)
dgelu_trans = dequantize(dgelu_trans, no_use, no_use, scale_inv, DType.kFloat8E5M2,
DType.kFloat32)
return dgelu, dgelu_trans, amax_out
primitive.defvjp(primitive_fwd, primitive_bwd)
func = jit(value_and_grad(lambda x, y, z: jnp.mean(primitive(x, y, z)), (0, 1, 2)))
return func(inputs, no_use, no_use)
@pytest.mark.skipif(not is_fp8_supported, reason=reason)
@pytest.mark.parametrize('shape', [(32, 64), (64, 256)])
def test_gated_gelu(self, random_inputs):
self.amax = jnp.zeros(1, jnp.float32)
self.scale = jnp.ones(1, jnp.float32)
self.scale_inv = jnp.ones(1, jnp.float32)
x = random_inputs
prim_out, (prim_grad, prim_grad_trans, amax) = self.prim_func(x)
ref_out, ref_grad = self.ref_func(x)
assert_allclose(prim_out, ref_out, rtol=1e-2)
assert_allclose(amax, jnp.amax(jnp.abs(ref_grad)), rtol=1e-2)
assert_allclose(prim_grad, ref_grad, rtol=1e-1, atol=1e-3)
assert_allclose(prim_grad_trans, jnp.transpose(ref_grad), rtol=1e-1, atol=1e-3)
class TestRMSNorm:
@pytest.mark.parametrize('n, hidden', LN_CASES)
@pytest.mark.parametrize('dtype', DTYPES)
def test_forward_backward(self, n, hidden, dtype):
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
x = jax.random.uniform(subkeys[0], (n, hidden), dtype, -2, 1)
scale = jax.random.uniform(subkeys[1], (hidden,), jnp.float32, -2, 1)
scale = jnp.asarray(scale, dtype)
epsilon = 1e-6
def reference_rmsnorm(x, scale):
x = jnp.asarray(x, jnp.float32)
mean2 = jnp.mean(lax.square(x), axis=-1, keepdims=True)
y = jnp.asarray(x * lax.rsqrt(mean2 + epsilon), dtype)
return y * scale
jitted_primitive = jit(
value_and_grad(lambda x, scale: jnp.mean(layernorm(x, scale, None, "rmsnorm")), (0, 1)))
jitted_reference = jit(
value_and_grad(lambda x, scale: jnp.mean(reference_rmsnorm(x, scale)), (0, 1)))
primitive_out, (primitive_dx, primitive_dgamma) = jitted_primitive(x, scale)
reference_out, (reference_dx, reference_dgamma) = jitted_reference(x, scale)
if dtype == jnp.float32:
assert_allclose(primitive_out, reference_out, rtol=1e-7)
assert_allclose(primitive_dx, reference_dx, rtol=1e-7)
assert_allclose(primitive_dgamma, reference_dgamma, rtol=1e-7)
else:
assert_allclose(primitive_out, reference_out, rtol=1e-3)
assert_allclose(primitive_dx, reference_dx, rtol=1e-4, atol=5e-8)
assert_allclose(primitive_dgamma, reference_dgamma, rtol=1e-4, atol=5e-8)
class TestLayerNorm:
@pytest.mark.parametrize('n, hidden', LN_CASES)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('zero_centered_gamma', [False, True])
def test_forward_backward(self, n, hidden, zero_centered_gamma, dtype):
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 3)
x = jax.random.uniform(subkeys[0], (n, hidden), dtype, -1, 1)
scale_range = (-1, 1) if zero_centered_gamma else (0, 2)
scale = jax.random.uniform(subkeys[1], (hidden,), jnp.float32, *scale_range)
scale = jnp.asarray(scale, dtype)
bias = jax.random.uniform(subkeys[2], (hidden,), jnp.float32, -1, 1)
bias = jnp.asarray(bias, dtype)
epsilon = 1e-6
def reference_layernorm(x, scale, bias, zero_centered_gamma, eps):
x_ = jnp.asarray(x, jnp.float32)
mean = jnp.mean(x_, axis=-1, keepdims=True)
var = jnp.mean(jnp.square(x_ - mean), axis=-1, keepdims=True)
normed_input = (x_ - mean) * jax.lax.rsqrt(var + eps)
            # Match TE's zero-centered gamma convention (gamma is stored as an offset from 1)
if zero_centered_gamma:
return jnp.asarray(normed_input * (scale + 1) + bias).astype(x.dtype)
return jnp.asarray(normed_input * scale + bias).astype(x.dtype)
def compute_loss(x):
# Higher precision to compute the loss
x_ = x.astype(jnp.float32)
return jnp.mean(jnp.square(x_)).astype(x.dtype)
jitted_primitive = jit(
value_and_grad(
lambda x, scale, bias: compute_loss(
layernorm(x, scale, bias, "layernorm", zero_centered_gamma, epsilon)),
(0, 1, 2)))
jitted_reference = jit(
value_and_grad(
lambda x, scale, bias: compute_loss(
reference_layernorm(x, scale, bias, zero_centered_gamma, epsilon)), (0, 1, 2)))
primitive_out, (primitive_dx, primitive_dgamma,
primitive_dbeta) = jitted_primitive(x, scale, bias)
reference_out, (reference_dx, reference_dgamma,
reference_dbeta) = jitted_reference(x, scale, bias)
if dtype == jnp.float32:
assert_allclose(primitive_out, reference_out, rtol=1e-7)
assert_allclose(primitive_dx, reference_dx, rtol=1e-7)
assert_allclose(primitive_dgamma, reference_dgamma, rtol=1e-7)
assert_allclose(primitive_dbeta, reference_dbeta, rtol=1e-7)
else:
assert_allclose(primitive_out, reference_out, rtol=1e-7)
assert_allclose(primitive_dx, reference_dx, rtol=1e-5, atol=1e-6)
assert_allclose(primitive_dgamma, reference_dgamma, rtol=1e-5, atol=3e-5)
assert_allclose(primitive_dbeta, reference_dbeta, rtol=1e-5, atol=3e-5)
| TransformerEngine-main | tests/jax/test_custom_call_compute.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Tests for fused attention"""
import os
from enum import Enum
from math import sqrt
import jax
import jax.numpy as jnp
import numpy as np
import pytest
from flax.linen import combine_masks
from flax.linen import dot_product_attention
from flax.linen import make_attention_mask
from flax.linen import make_causal_mask
from jax import value_and_grad, jit
from transformer_engine.jax.fused_attn import AttnBiasType, AttnMaskType
from transformer_engine.jax.fused_attn import self_fused_attn, cross_fused_attn
from transformer_engine.jax.fused_attn import is_fused_attn_kernel_available
from transformer_engine_jax import get_device_compute_capability
# Type annotations
Array = jnp.ndarray
class Backend(Enum):
"""
Fused attn backend.
    For unit tests only; the Transformer layer auto-dispatches to the best backend.
"""
Max512 = "0"
Arbitrary = "1"
@pytest.fixture(name="backend", params=[Backend.Max512, Backend.Arbitrary])
def fixture_backend(request):
"""
Fixture of setting up/tearing down backend
"""
backend = request.param
os.environ["NVTE_FUSED_ATTN_BACKEND"] = backend.value
yield backend
os.environ["NVTE_FUSED_ATTN_BACKEND"] = ""
SELF_CASES = [(32, 512, 16, 64), (32, 128, 16, 64), (4, 2048, 12, 64)]
CROSS_CASES = [(32, 128, 512, 16, 64)]
DTYPES = [jnp.bfloat16, jnp.float16]
def make_decoder_mask(tokens: Array) -> Array:
"""
Create padded causal mask
"""
causal_mask = make_causal_mask(tokens)
padding_mask = make_attention_mask(tokens > 0, tokens > 0)
return combine_masks(causal_mask, padding_mask)
def jax_self_attn(qkv, bias, q_token, kv_token, dropout_rng, **kwargs):
"""
Self attention with JAX native implementation
"""
attn_mask_type = kwargs['attn_mask_type']
if attn_mask_type == AttnMaskType.CAUSAL_MASK:
mask = make_decoder_mask(q_token)
else:
mask = make_attention_mask(q_token > 0, kv_token > 0)
query, key, value = jnp.split(qkv, [1, 2], axis=-3)
query = jnp.squeeze(query)
key = jnp.squeeze(key)
value = jnp.squeeze(value)
output = dot_product_attention(query,
key,
value,
bias=bias,
mask=mask,
deterministic=not kwargs['is_training'],
dropout_rate=kwargs['dropout_probability'],
dropout_rng=dropout_rng,
dtype=qkv.dtype)
return output
def jax_cross_attn(q, kv, q_token, kv_token, dropout_rng, **kwargs):
"""
Cross attention with JAX native implementation
"""
assert q.dtype == kv.dtype
attn_mask_type = kwargs['attn_mask_type']
if attn_mask_type == AttnMaskType.CAUSAL_MASK:
raise NotImplementedError
mask = make_attention_mask(q_token > 0, kv_token > 0)
query = q
key, value = jnp.split(kv, [1], axis=-3)
key = jnp.squeeze(key)
value = jnp.squeeze(value)
output = dot_product_attention(query,
key,
value,
bias=None,
mask=mask,
deterministic=not kwargs['is_training'],
dropout_rate=kwargs['dropout_probability'],
dropout_rng=dropout_rng,
dtype=q.dtype)
return output
def customcall_self_fused_attn(qkv, bias, q_token, kv_token, dropout_rng, **kwargs):
"""
Self fused attention
"""
if kwargs['attn_mask_type'] == AttnMaskType.CAUSAL_MASK:
mask = make_decoder_mask(q_token)
else:
mask = make_attention_mask(q_token > 0, kv_token > 0)
    # Invert the mask: the fused attention kernel expects True for positions to be masked out
mask = (mask == 0)
return self_fused_attn(qkv, bias, mask, dropout_rng, **kwargs)
def customcall_cross_fused_attn(q, kv, q_token, kv_token, dropout_rng, **kwargs):
"""
Cross fused attention
"""
assert q.dtype == kv.dtype
if kwargs['attn_mask_type'] == AttnMaskType.CAUSAL_MASK:
raise NotImplementedError
mask = make_attention_mask(q_token > 0, kv_token > 0)
    # Invert the mask: the fused attention kernel expects True for positions to be masked out
mask = (mask == 0)
return cross_fused_attn(q, kv, mask, dropout_rng, **kwargs)
@pytest.mark.parametrize('b, s, h, d', SELF_CASES)
@pytest.mark.parametrize('attn_bias_type', [AttnBiasType.NO_BIAS, AttnBiasType.POST_SCALE_BIAS])
@pytest.mark.parametrize('attn_mask_type', [AttnMaskType.PADDING_MASK, AttnMaskType.CAUSAL_MASK])
@pytest.mark.parametrize('dropout_probability', [0., 0.1])
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('is_training', [True, False])
@pytest.mark.parametrize('pad_ratio', [0, 0.3])
class TestSelfFusedAttn():
"""Tests for transformer_engine.jax.fused_attn.self_fused_attn"""
@staticmethod
def _check_inputs(s, *, attn_bias_type, attn_mask_type, backend, dropout_probability, dtype,
head_dim, pad_ratio):
if (s > 512 or backend == Backend.Arbitrary) and pad_ratio != 0:
pytest.skip("Arbitrary seqlen backend hasn't support padded input.")
if not is_fused_attn_kernel_available(dtype, dtype, attn_bias_type, attn_mask_type,
dropout_probability, s, s, head_dim):
pytest.skip("Unsupported inputs combination or device compute capability.")
def _set_inputs(self, b, s, h, d, *, attn_bias_type, attn_mask_type, backend,
dropout_probability, dtype, is_training, pad_ratio):
"""Setup the test inputs"""
self.__class__._check_inputs(s,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
backend=backend,
dropout_probability=dropout_probability,
dtype=dtype,
head_dim=d,
pad_ratio=pad_ratio)
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
qkv_shape = (b, s, 3, h, d)
bias_shape = (1, h, s, s)
pad_len = int(s * pad_ratio)
self.valid_len = s - pad_len
min_val, max_val = -1, 1
self.qkv = jax.random.uniform(subkeys[0], qkv_shape, dtype, min_val, max_val)
with_bias = attn_bias_type != AttnBiasType.NO_BIAS
self.bias = jax.random.uniform(subkeys[1], bias_shape, dtype, min_val,
max_val) if with_bias else None
self.q_token = jnp.concatenate((jnp.ones((b, self.valid_len)), jnp.zeros((b, pad_len))),
axis=-1)
self.kv_token = self.q_token
self.scaling_factor = 1. / sqrt(d)
self.dropout_probability = dropout_probability
self.dropout_rng = jax.random.PRNGKey(0) if self.dropout_probability > 0 else None
self.attn_bias_type = attn_bias_type
self.attn_mask_type = attn_mask_type
self.is_training = is_training
def test_forward(self, b, s, h, d, attn_bias_type, attn_mask_type, backend, dropout_probability,
dtype, is_training, pad_ratio):
"""
Test forward without using JIT
"""
self._set_inputs(b,
s,
h,
d,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
backend=backend,
dropout_probability=dropout_probability,
dtype=dtype,
is_training=is_training,
pad_ratio=pad_ratio)
primitive_out = customcall_self_fused_attn(self.qkv,
self.bias,
self.q_token,
self.kv_token,
self.dropout_rng,
attn_bias_type=self.attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=self.scaling_factor,
dropout_probability=self.dropout_probability,
is_training=self.is_training)
reference_out = jax_self_attn(self.qkv,
self.bias,
self.q_token,
self.kv_token,
self.dropout_rng,
attn_mask_type=attn_mask_type,
scaling_factor=self.scaling_factor,
dropout_probability=self.dropout_probability,
is_training=self.is_training)
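        # Split the outputs into the valid (non-padded) and padded parts along the sequence dimension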
ref_valid, _ = jnp.split(reference_out, (self.valid_len,), axis=1)
pri_valid, pri_invalid = jnp.split(primitive_out, (self.valid_len,), axis=1)
        # Dropout results do not match bit-for-bit, so skip the elementwise comparison
if is_training and dropout_probability > 0.:
return
np.testing.assert_allclose(jnp.asarray(pri_valid, np.float32),
jnp.asarray(ref_valid, np.float32),
rtol=1e-4,
atol=1e-2)
np.testing.assert_allclose(jnp.asarray(pri_invalid, jnp.float32),
jnp.zeros_like(pri_invalid, jnp.float32))
def test_forward_backward(self, b, s, h, d, attn_bias_type, attn_mask_type, backend,
dropout_probability, dtype, is_training, pad_ratio):
"""
Test forward, backward, and autodiff by jax.value_and_grad
"""
if not is_training:
pytest.skip(f"Backward doesn't support {is_training=}")
self._set_inputs(b,
s,
h,
d,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
backend=backend,
dropout_probability=dropout_probability,
dtype=dtype,
is_training=is_training,
pad_ratio=pad_ratio)
def grad_func(fused_attn_func, *args, **kwargs):
            # The gradient is small, so use a gradient multiplier to amplify it
gradient_multiplier = 1000 if dtype == jnp.bfloat16 else 10000
if attn_mask_type == AttnMaskType.CAUSAL_MASK:
gradient_multiplier = gradient_multiplier / 10
# Keep only valid result for the gradient
# fused_attn output has shape (b, s, h, d)
valid_fused_attn_ret, _ = jnp.split(fused_attn_func(*args, **kwargs), (self.valid_len,),
axis=1)
return (jnp.mean(valid_fused_attn_ret, dtype=jnp.float32) *
gradient_multiplier).astype(dtype)
kwargs = {
'attn_bias_type': self.attn_bias_type,
'attn_mask_type': attn_mask_type,
'scaling_factor': self.scaling_factor,
'dropout_probability': self.dropout_probability,
'is_training': self.is_training
}
        # Summing the results in FP16/BF16 may overflow, so use FP32 for the summation
jitted_primitive = jit(
value_and_grad(
lambda qkv, bias, q_token, kv_token, dropout_rng: grad_func(
customcall_self_fused_attn, qkv, bias, q_token, kv_token, dropout_rng, **kwargs
), (0, 1)))
jitted_reference = jit(
value_and_grad(
lambda qkv, bias, q_token, kv_token, dropout_rng: grad_func(
jax_self_attn, qkv, bias, q_token, kv_token, dropout_rng, **kwargs), (0, 1)))
primitive_out, (primitive_dqkv,
primitive_dbias) = jitted_primitive(self.qkv, self.bias, self.q_token,
self.kv_token, self.dropout_rng)
reference_out, (reference_dqkv,
reference_dbias) = jitted_reference(self.qkv, self.bias, self.q_token,
self.kv_token, self.dropout_rng)
        # Dropout results do not match bit-for-bit, so skip the elementwise comparison
if dropout_probability > 0.:
return
np.testing.assert_allclose(jnp.asarray(primitive_out, np.float32),
jnp.asarray(reference_out, np.float32),
rtol=1e-4,
atol=1e-5)
valid_primitive_dqkv, invalid_primitive_dqkv = jnp.split(primitive_dqkv, (self.valid_len,),
axis=1)
valid_reference_dqkv, invalid_reference_dqkv = jnp.split(reference_dqkv, (self.valid_len,),
axis=1)
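        # Split the packed QKV gradient (b, s, 3, h, d) into dQ, dK and dV along the QKV dimension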
valid_primitive_dq, valid_primitive_dk, valid_primitive_dv = jnp.split(
valid_primitive_dqkv.astype(jnp.float32), 3, axis=2)
valid_reference_dq, valid_reference_dk, valid_reference_dv = jnp.split(
valid_reference_dqkv.astype(jnp.float32), 3, axis=2)
np.testing.assert_allclose(valid_primitive_dq, valid_reference_dq, rtol=1e-4, atol=1e-5)
np.testing.assert_allclose(valid_primitive_dk, valid_reference_dk, rtol=1e-4, atol=1e-5)
np.testing.assert_allclose(valid_primitive_dv, valid_reference_dv, rtol=1e-4, atol=1e-5)
assert jnp.allclose(invalid_primitive_dqkv, invalid_reference_dqkv)
# Padded part should be 0s
assert jnp.allclose(invalid_primitive_dqkv, jnp.zeros_like(invalid_primitive_dqkv))
if self.attn_bias_type != AttnBiasType.NO_BIAS:
# dbias valid part
np.testing.assert_allclose(
jnp.asarray(primitive_dbias[:, :, :self.valid_len, :self.valid_len], np.float32),
jnp.asarray(reference_dbias[:, :, :self.valid_len, :self.valid_len], np.float32),
rtol=1e-4,
atol=3e-5)
# dbias padded part
np.testing.assert_allclose(
jnp.asarray(primitive_dbias[:, :, self.valid_len:, self.valid_len:], np.float32),
jnp.asarray(reference_dbias[:, :, self.valid_len:, self.valid_len:], np.float32))
assert jnp.allclose(
primitive_dbias[:, :, self.valid_len:, self.valid_len:],
jnp.zeros_like(primitive_dbias[:, :, self.valid_len:, self.valid_len:]))
@pytest.mark.skipif(get_device_compute_capability(0) not in [80, 90],
reason="Fused attention kernel is not supported.")
@pytest.mark.parametrize('b, s_q, s_kv, h, d', CROSS_CASES)
@pytest.mark.parametrize('attn_mask_type', [AttnMaskType.PADDING_MASK])
@pytest.mark.parametrize('dropout_probability', [0., 0.1])
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('is_training', [True, False])
@pytest.mark.parametrize('pad_ratio', [0.3])
class TestCrossFusedAttn():
"""Tests for transformer_engine.jax.fused_attn.cross_fused_attn"""
def _set_inputs(self, b, s_q, s_kv, h, d, *, attn_mask_type, dropout_probability, dtype,
is_training, pad_ratio):
key = jax.random.PRNGKey(0)
subkeys = jax.random.split(key, 2)
q_shape = (b, s_q, h, d)
kv_shape = (b, s_kv, 2, h, d)
q_pad_len = int(s_q * pad_ratio)
kv_pad_len = int(s_kv * pad_ratio)
self.q_valid_len = s_q - q_pad_len
self.kv_valid_len = s_kv - kv_pad_len
min_val, max_val = -1, 1
self.q = jax.random.uniform(subkeys[0], q_shape, dtype, min_val, max_val)
self.kv = jax.random.uniform(subkeys[1], kv_shape, dtype, min_val, max_val)
self.q_token = jnp.concatenate((jnp.ones((b, self.q_valid_len)), jnp.zeros((b, q_pad_len))),
axis=-1)
self.kv_token = jnp.concatenate((jnp.ones((b, self.kv_valid_len)), jnp.zeros(
(b, kv_pad_len))),
axis=-1)
self.scaling_factor = 1. / sqrt(d)
self.dropout_probability = dropout_probability
self.dropout_rng = jax.random.PRNGKey(0) if self.dropout_probability > 0 else None
self.attn_bias_type = AttnBiasType.NO_BIAS
self.attn_mask_type = attn_mask_type
self.is_training = is_training
def test_forward(self, b, s_q, s_kv, h, d, attn_mask_type, dropout_probability, dtype,
is_training, pad_ratio):
"""
Test forward without using JIT
"""
self._set_inputs(b,
s_q,
s_kv,
h,
d,
attn_mask_type=attn_mask_type,
dropout_probability=dropout_probability,
dtype=dtype,
is_training=is_training,
pad_ratio=pad_ratio)
primitive_out = customcall_cross_fused_attn(self.q,
self.kv,
self.q_token,
self.kv_token,
self.dropout_rng,
attn_bias_type=self.attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=self.scaling_factor,
dropout_probability=self.dropout_probability,
is_training=self.is_training)
reference_out = jax_cross_attn(self.q,
self.kv,
self.q_token,
self.kv_token,
self.dropout_rng,
attn_mask_type=attn_mask_type,
scaling_factor=self.scaling_factor,
dropout_probability=self.dropout_probability,
is_training=self.is_training)
        # Dropout results do not match bit-for-bit, so skip the elementwise comparison
if is_training and dropout_probability > 0.:
return
ref_valid, _ = jnp.split(reference_out, (self.q_valid_len,), axis=1)
pri_valid, pri_invalid = jnp.split(primitive_out, (self.q_valid_len,), axis=1)
np.testing.assert_allclose(jnp.asarray(pri_valid, np.float32),
jnp.asarray(ref_valid, np.float32),
rtol=1e-4,
atol=2e-3)
np.testing.assert_allclose(jnp.asarray(pri_invalid, jnp.float32),
jnp.zeros_like(pri_invalid, jnp.float32))
def test_forward_backward(self, b, s_q, s_kv, h, d, attn_mask_type, dropout_probability, dtype,
is_training, pad_ratio):
"""
Test forward, backward, and autodiff by jax.value_and_grad
"""
if not is_training:
pytest.skip(f"Backward doesn't support {is_training=}")
self._set_inputs(b,
s_q,
s_kv,
h,
d,
attn_mask_type=attn_mask_type,
dropout_probability=dropout_probability,
dtype=dtype,
is_training=is_training,
pad_ratio=pad_ratio)
def grad_func(fused_attn_func, *args, **kwargs):
            # The gradient is small, so use a gradient multiplier to amplify it
gradient_multiplier = 10000
if attn_mask_type == AttnMaskType.CAUSAL_MASK:
gradient_multiplier = gradient_multiplier / 10
# Keep only valid result for the gradient
# fused_attn output has shape (b, s_q, h, d)
valid_fused_attn_ret, _ = jnp.split(fused_attn_func(*args, **kwargs),
(self.q_valid_len,),
axis=1)
return (jnp.mean(valid_fused_attn_ret, dtype=jnp.float32) *
gradient_multiplier).astype(dtype)
kwargs = {
'attn_bias_type': self.attn_bias_type,
'attn_mask_type': attn_mask_type,
'scaling_factor': self.scaling_factor,
'dropout_probability': self.dropout_probability,
'is_training': self.is_training
}
        # Summing the results in FP16/BF16 may overflow, so use FP32 for the summation
jitted_primitive = jit(
value_and_grad(
lambda q, kv, q_token, kv_token, dropout_rng: grad_func(
customcall_cross_fused_attn, q, kv, q_token, kv_token, dropout_rng, **kwargs),
(0, 1)))
jitted_reference = jit(
value_and_grad(
lambda q, kv, q_token, kv_token, dropout_rng: grad_func(
jax_cross_attn, q, kv, q_token, kv_token, dropout_rng, **kwargs), (0, 1)))
primitive_out, (primitive_dq,
primitive_dkv) = jitted_primitive(self.q, self.kv, self.q_token,
self.kv_token, self.dropout_rng)
reference_out, (reference_dq,
reference_dkv) = jitted_reference(self.q, self.kv, self.q_token,
self.kv_token, self.dropout_rng)
        # Dropout results do not match bit-for-bit, so skip the elementwise comparison
if dropout_probability > 0.:
return
np.testing.assert_allclose(jnp.asarray(primitive_out, np.float32),
jnp.asarray(reference_out, np.float32),
rtol=1e-4,
atol=1e-5)
valid_primitive_dq, invalid_primitive_dq = jnp.split(primitive_dq, (self.q_valid_len,),
axis=1)
valid_reference_dq, invalid_reference_dq = jnp.split(reference_dq, (self.q_valid_len,),
axis=1)
valid_primitive_dkv, invalid_primitive_dkv = jnp.split(primitive_dkv, (self.kv_valid_len,),
axis=1)
valid_reference_dkv, invalid_reference_dkv = jnp.split(reference_dkv, (self.kv_valid_len,),
axis=1)
# dQ
np.testing.assert_allclose(jnp.asarray(valid_primitive_dq, np.float32),
jnp.asarray(valid_reference_dq, np.float32),
rtol=1e-4,
atol=1e-5)
# dK
np.testing.assert_allclose(jnp.asarray(valid_primitive_dkv[:, :, 0], np.float32),
jnp.asarray(valid_reference_dkv[:, :, 0], np.float32),
rtol=1e-4,
atol=1e-5)
# dV
np.testing.assert_allclose(jnp.asarray(valid_primitive_dkv[:, :, 1], np.float32),
jnp.asarray(valid_reference_dkv[:, :, 1], np.float32),
rtol=1e-4,
atol=1e-5)
assert jnp.allclose(invalid_primitive_dq, invalid_reference_dq)
assert jnp.allclose(invalid_primitive_dkv, invalid_reference_dkv)
# Padded part should be 0s
assert jnp.allclose(invalid_primitive_dq, jnp.zeros_like(invalid_primitive_dq))
assert jnp.allclose(invalid_primitive_dkv, jnp.zeros_like(invalid_primitive_dkv))
| TransformerEngine-main | tests/jax/test_fused_attn.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import transformer_engine.jax
print("OK")
| TransformerEngine-main | tests/jax/test_sanity_import.py |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import os
import sys
import json
import datetime
if len(sys.argv) < 2:
    print("Usage: python copyright_checker.py <path>")
    sys.exit(1)
path = sys.argv[1]
config_path = os.path.dirname(os.path.realpath(__file__)) + "/config.json"
class bcolors:
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def print_ok(msg):
print(f"{bcolors.OKGREEN}{msg}{bcolors.ENDC}")
def print_fail(msg):
print(f"{bcolors.FAIL}{msg}{bcolors.ENDC}")
def print_warn(msg):
print(f"{bcolors.WARNING}{msg}{bcolors.ENDC}")
with open(config_path, "r") as f:
c = json.load(f)
current_year = datetime.date.today().year
if c["initial_year"] == current_year:
year_string = str(current_year)
else:
year_string = str(c["initial_year"]) + "-" + str(current_year)
copyright_string = c["copyright"].replace("<YEAR>", year_string)
license = c["license"].split('\n')
excludes = c["exclude"]
root_path = os.path.abspath(path)
copyright_only = c["copyright_only"]
exclude_copyright = c["exclude_copyright"]
has_gitignore = os.path.exists(root_path + "/.gitignore")
def strip_star_slash(s):
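    # Normalize .gitignore patterns: drop a leading '*' and a trailing '/' so they can be matched with endswith()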
ret = s
if ret.startswith('*'):
ret = ret[1:]
if ret.endswith('/'):
ret = ret[:-1]
return ret
if has_gitignore:
with open(root_path + "/.gitignore", "r") as f:
for line in f.readlines():
excludes.append(strip_star_slash(line.strip()))
def get_file_type(path):
ext = {"c": ["c", "cpp", "cu", "h", "cuh"],
"py": ["py"],
"rst": ["rst"],
"txt": ["txt"],
"cfg": ["cfg"],
"sh": ["sh"],
"md": ["md"],
}
tmp = path.split(".")
for filetype, ext_list in ext.items():
if tmp[-1] in ext_list:
return filetype
return "unknown"
success = True
def check_file(path):
global success
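    # Only the first N lines of a file are scanned for the copyright and license text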
N = 10
ftype = get_file_type(path)
if ftype == "unknown":
print_warn("Unknown filetype")
return
check_copyright = True
for e in exclude_copyright:
if path.endswith(e):
check_copyright = False
with open(path, "r") as f:
copyright_found = False
license_found = True
try:
if check_copyright:
for _ in range(N):
line = f.readline()
if line.find(copyright_string) != -1:
copyright_found = True
break
if not copyright_only:
first_license_line = True
for l in license:
if first_license_line:
                        # the first license line may appear a few lines later, so scan ahead for it
first_license_line = False
for _ in range(N):
line = f.readline()
if line.find(l) != -1:
break
else:
line = f.readline()
if line.find(l) == -1:
license_found = False
break
except:
pass
finally:
if not copyright_found:
print_fail("No copyright found!")
success = False
if not license_found:
print_fail("No license found!")
success = False
if copyright_found and license_found:
print_ok("OK")
for root, dirs, files in os.walk(root_path):
print(f"Entering {root}")
hidden = [d for d in dirs if d.startswith('.')] + [f for f in files if f.startswith('.')]
all_excludes = excludes + hidden
to_remove = []
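    # Collect excluded directories and files so they are pruned from the walk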
for d in dirs:
d_path = root + "/" + d
for e in all_excludes:
if d_path.endswith(e):
to_remove.append(d)
for f in files:
f_path = root + "/" + f
for e in all_excludes:
if f_path.endswith(e):
to_remove.append(f)
for d in to_remove:
if d in dirs:
dirs.remove(d)
if d in files:
files.remove(d)
for filename in files:
print(f"Checking {filename}")
check_file(os.path.abspath(root + "/" + filename))
if not success:
raise Exception("Some copyrights/licenses are missing!")
| TransformerEngine-main | qa/L0_license/copyright_checker.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import os
import sys
import sphinx_rtd_theme
from sphinx.ext.autodoc.mock import mock
from sphinx.ext.autodoc import between, ClassDocumenter, AttributeDocumenter
from sphinx.util import inspect
from builtins import str
from enum import Enum
import re
import subprocess
from pathlib import Path
from datetime import date
te_path = os.path.dirname(os.path.realpath(__file__))
with open(te_path + "/../VERSION", "r") as f:
te_version = f.readline().strip()
release_year = 2022
current_year = date.today().year
if current_year == release_year:
copyright_year = release_year
else:
copyright_year = str(release_year) + "-" + str(current_year)
project = u'Transformer Engine'
copyright = u'{}, NVIDIA CORPORATION & AFFILIATES. All rights reserved.'.format(copyright_year)
author = u'NVIDIA CORPORATION & AFFILIATES'
git_sha = os.getenv("GIT_SHA")
if not git_sha:
try:
git_sha = subprocess.check_output(["git", "log", "--pretty=format:'%h'", "-n1"]).decode('ascii').replace("'","").strip()
except:
git_sha = u'0000000'
git_sha = git_sha[:7] if len(git_sha) > 7 else git_sha
version = str(te_version + u"-" + git_sha)
release = te_version
# hack: version is used for html creation, so put the version picker
# link here as well:
option_on = " selected"
option_off = ""
release_opt = option_on
option_nr = 0
version = version + """<br/>
Version select: <select onChange="window.location.href = this.value" onFocus="this.selectedIndex = {0}">
<option value="https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/index.html"{1}>Current release</option>
<option value="https://docs.nvidia.com/deeplearning/transformer-engine/documentation-archive.html">Older releases</option>
</select>""".format(option_nr, release_opt)
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.ifconfig',
'nbsphinx',
'breathe',
'autoapi.extension',
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
source_suffix = '.rst'
master_doc = 'index'
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['_static']
html_css_files = [
'css/nvidia_font.css',
]
html_theme_options = {
'display_version': True,
'collapse_navigation': False,
'logo_only': False
}
napoleon_custom_sections = [('Parallelism parameters', 'params_style'),
('Optimization parameters', 'params_style'),
('Values', 'params_style')]
breathe_projects = {"TransformerEngine": os.path.abspath("doxygen/xml/")}
breathe_default_project = "TransformerEngine"
autoapi_generate_api_docs = False
autoapi_dirs = ["../transformer_engine"]
| TransformerEngine-main | docs/conf.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import math
from typing import Callable, Optional
import torch
import transformer_engine.pytorch as te
from transformer_engine.pytorch.fp8 import DelayedScaling, dist_group_type
def speedometer(
module: torch.nn.Module,
input: torch.Tensor,
output_grad: torch.Tensor,
forward_kwargs: dict = {},
fp8_autocast_kwargs: Optional[dict] = None,
timing_iters: int = 50,
warmup_iters: int = 50,
) -> None:
"""Measure average run time for a PyTorch module
Performs forward and backward passes.
"""
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
if fp8_autocast_kwargs is None:
fp8_autocast_kwargs = { "enabled": False }
# Warmup runs
torch.cuda.synchronize()
for _ in range(warmup_iters):
with te.fp8_autocast(**fp8_autocast_kwargs):
output = module(input, **forward_kwargs)
output.backward(output_grad)
# Timing runs
start.record()
for _ in range(timing_iters):
with te.fp8_autocast(**fp8_autocast_kwargs):
output = module(input, **forward_kwargs)
output.backward(output_grad)
end.record()
torch.cuda.synchronize()
print(f"Mean time: {start.elapsed_time(end)/timing_iters} ms")
class DotProductAttention(torch.nn.Module):
"""Attention operation in Transformer layer
Built with plain PyTorch modules.
"""
def __init__(
self,
num_attention_heads: int,
kv_channels: int,
attention_dropout: float,
) -> None:
super().__init__()
self.projection_size = kv_channels * num_attention_heads
self.hidden_size_per_attention_head = kv_channels
self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
self.dropout = torch.nn.Dropout(attention_dropout)
def masked_softmax(
self,
inp: torch.Tensor,
mask: Optional[torch.Tensor]
) -> torch.Tensor:
if mask is not None:
inp.masked_fill_(mask, -10000.0)
return torch.nn.Softmax(dim=-1)(inp)
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
b = query.size(1)
np = query.size(2)
sq = query.size(0)
sk = key.size(0)
hn = value.size(3)
# [sq, b, np, hn] -> [sq, b * np, hn]
query = query.view(sq, b * np, -1)
# [sk, b, np, hn] -> [sk, b * np, hn]
key = key.view(sk, b * np, -1)
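        # [b * np, sq, hn] x [b * np, hn, sk] -> [b * np, sq, sk]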
bmm1 = torch.bmm(query.transpose(0, 1), key.transpose(0, 1).transpose(1, 2)) / self.norm_factor
# change view to [b, np, sq, sk]
attention_scores = bmm1.view(b, np, sq, sk)
attention_probs = self.masked_softmax(attention_scores, attention_mask)
attention_probs = self.dropout(attention_probs)
# change view [sk, b * np, hn]
value = value.view(sk, b * np, -1)
# change view [b * np, sq, sk]
attention_probs = attention_probs.view(b * np, sq, -1)
# matmul: [b * np, sq, hn]
context = torch.bmm(attention_probs, value.transpose(0, 1))
# change view [b, np, sq, hn]
context = context.view(b, np, sq, hn)
# [b, np, sq, hn] --> [sq, b, np, hn]
context = context.permute(2, 0, 1, 3).contiguous()
# [sq, b, np, hn] --> [sq, b, hp]
context = context.view(sq, b, self.projection_size)
return context
class BasicMLP(torch.nn.Module):
"""Feed-forward network in Transformer layer
Built with plain PyTorch modules.
"""
def __init__(
self,
hidden_size: int,
ffn_hidden_size: int,
) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(hidden_size, ffn_hidden_size, bias=True)
self.linear2 = torch.nn.Linear(ffn_hidden_size, hidden_size, bias=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.linear1(x)
x = torch.nn.functional.gelu(x, approximate='tanh')
x = self.linear2(x)
return x
def share_parameters_with_basic_te_model(te_model, basic_model):
"""Initialize parameters for TE Transformer layer with basic modules
Parameter values are copied from pure PyTorch implementation.
"""
    te_model.ln1.weight = basic_model.ln1.weight
te_model.ln1.bias = basic_model.ln1.bias
te_model.qkv_projection.weight = basic_model.qkv_projection.weight
te_model.qkv_projection.bias = basic_model.qkv_projection.bias
te_model.projection.weight = basic_model.projection.weight
te_model.projection.bias = basic_model.projection.bias
te_model.ln2.weight = basic_model.ln2.weight
te_model.ln2.bias = basic_model.ln2.bias
te_model.mlp.linear1.weight = basic_model.mlp.linear1.weight
te_model.mlp.linear1.bias = basic_model.mlp.linear1.bias
te_model.mlp.linear2.weight = basic_model.mlp.linear2.weight
te_model.mlp.linear2.bias = basic_model.mlp.linear2.bias
def share_parameters_with_fused_te_model(te_model, basic_model):
"""Initialize parameters for TE Transformer layer with fused modules
Parameter values are copied from pure PyTorch implementation.
"""
te_model.ln_qkv.layer_norm_weight = basic_model.ln1.weight
te_model.ln_qkv.layer_norm_bias = basic_model.ln1.bias
te_model.ln_qkv.weight = basic_model.qkv_projection.weight
te_model.ln_qkv.bias = basic_model.qkv_projection.bias
te_model.projection.weight = basic_model.projection.weight
te_model.projection.bias = basic_model.projection.bias
te_model.ln_mlp.layer_norm_weight = basic_model.ln2.weight
te_model.ln_mlp.layer_norm_bias = basic_model.ln2.bias
te_model.ln_mlp.fc1_weight = basic_model.mlp.linear1.weight
te_model.ln_mlp.fc1_bias = basic_model.mlp.linear1.bias
te_model.ln_mlp.fc2_weight = basic_model.mlp.linear2.weight
te_model.ln_mlp.fc2_bias = basic_model.mlp.linear2.bias
def share_parameters_with_transformerlayer_te_model(te_model, basic_model):
"""Initialize parameters for monolithic TE Transformer layer
Parameter values are copied from pure PyTorch implementation.
"""
te_model.self_attention.layernorm_qkv.layer_norm_weight = basic_model.ln1.weight
te_model.self_attention.layernorm_qkv.layer_norm_bias = basic_model.ln1.bias
te_model.self_attention.layernorm_qkv.weight = basic_model.qkv_projection.weight
te_model.self_attention.layernorm_qkv.bias = basic_model.qkv_projection.bias
te_model.self_attention.proj.weight = basic_model.projection.weight
te_model.self_attention.proj.bias = basic_model.projection.bias
te_model.layernorm_mlp.layer_norm_weight = basic_model.ln2.weight
te_model.layernorm_mlp.layer_norm_bias = basic_model.ln2.bias
te_model.layernorm_mlp.fc1_weight = basic_model.mlp.linear1.weight
te_model.layernorm_mlp.fc1_bias = basic_model.mlp.linear1.bias
te_model.layernorm_mlp.fc2_weight = basic_model.mlp.linear2.weight
te_model.layernorm_mlp.fc2_bias = basic_model.mlp.linear2.bias
def cast_to_representable(inp, scale = 1., fp8_format='e4m3'):
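    """Emulate FP8 precision by casting a tensor to FP8 and back to its original dtype."""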
import transformer_engine.pytorch.cpp_extensions as texcpp
import transformer_engine_extensions as tex
fp8_type = tex.DType.kFloat8E4M3 if fp8_format == 'e4m3' else tex.DType.kFloat8E5M2
input_type = texcpp.TE_DType[inp.dtype]
meta = tex.FP8TensorMeta()
    meta.scale = torch.ones(1, dtype=torch.float32, device="cuda") * scale
meta.scale_inv = torch.ones(1, dtype=torch.float32, device="cuda") / scale
meta.amax_history = torch.zeros(1, 1, dtype=torch.float32, device="cuda")
ret = texcpp.cast_to_fp8(inp, meta, tex.FP8FwdTensors.GEMM1_INPUT, fp8_type)
ret = texcpp.cast_from_fp8(ret, meta, tex.FP8FwdTensors.GEMM1_INPUT, fp8_type, input_type)
return ret
| TransformerEngine-main | docs/examples/quickstart_utils.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""MNIST example of Transformer Engine Paddle"""
import argparse
import os
import unittest
import paddle
from paddle import nn
import paddle.nn.functional as F
from paddle.vision.transforms import Normalize
from paddle.io import DataLoader
from paddle.vision.datasets import MNIST
from paddle.metric import Accuracy
import transformer_engine.paddle as te
from transformer_engine.paddle.fp8 import is_fp8_available
class Net(nn.Layer):
"""Simple network used to train on MNIST"""
def __init__(self, use_te=False):
super().__init__()
self.conv1 = nn.Conv2D(1, 32, 3, 1)
self.conv2 = nn.Conv2D(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
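        # FP8 GEMMs need dimensions divisible by 16, so the TE layers use 9216 -> 128 -> 16
        # and the final 10-way classifier below stays a plain nn.Linear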
if use_te:
self.fc1 = te.Linear(9216, 128)
self.fc2 = te.Linear(128, 16)
else:
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 16)
self.fc3 = nn.Linear(16, 10)
def forward(self, x):
"""FWD"""
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = paddle.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
x = self.fc3(x)
return x
def train(args, model, train_loader, optimizer, epoch, use_fp8):
"""Training function."""
model.train()
losses = []
for batch_id, (data, labels) in enumerate(train_loader):
with paddle.amp.auto_cast(dtype='bfloat16', level='O2'): # pylint: disable=not-context-manager
with te.fp8_autocast(enabled=use_fp8):
outputs = model(data)
loss = F.cross_entropy(outputs, labels)
losses.append(loss.item())
loss.backward()
optimizer.step()
optimizer.clear_gradients()
if batch_id % args.log_interval == 0:
print(f"Train Epoch: {epoch} "
f"[{batch_id * len(data)}/{len(train_loader.dataset)} "
f"({100. * batch_id / len(train_loader):.0f}%)]\t"
f"Loss: {loss.item():.6f}")
if args.dry_run:
return loss.item()
avg_loss = sum(losses) / len(losses)
print(f"Train Epoch: {epoch}, Average Loss: {avg_loss}")
return avg_loss
def evaluate(model, test_loader, epoch, use_fp8):
"""Testing function."""
model.eval()
metric = Accuracy()
metric.reset()
with paddle.no_grad():
for data, labels in test_loader:
with paddle.amp.auto_cast(dtype='bfloat16', level='O2'): # pylint: disable=not-context-manager
with te.fp8_autocast(enabled=use_fp8):
outputs = model(data)
acc = metric.compute(outputs, labels)
metric.update(acc)
print(f"Epoch[{epoch}] - accuracy: {metric.accumulate():.6f}")
return metric.accumulate()
def calibrate(model, test_loader):
"""Calibration function."""
model.eval()
with paddle.no_grad():
for data, _ in test_loader:
with paddle.amp.auto_cast(dtype='bfloat16', level='O2'): # pylint: disable=not-context-manager
with te.fp8_autocast(enabled=False, calibrating=True):
_ = model(data)
def mnist_parser(args):
"""Parse training settings"""
parser = argparse.ArgumentParser(description="Paddle MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=14,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--lr",
type=float,
default=0.001,
metavar="LR",
help="learning rate (default: 0.001)",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)")
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument("--use-fp8",
action="store_true",
default=False,
help="Use FP8 for inference and training without recalibration. " \
"It also enables Transformer Engine implicitly.")
parser.add_argument("--use-fp8-infer",
action="store_true",
default=False,
help="Use FP8 for inference only. If not using FP8 for training, "
"calibration is performed for FP8 infernece.")
parser.add_argument("--use-te",
action="store_true",
default=False,
help="Use Transformer Engine")
args = parser.parse_args(args)
return args
def train_and_evaluate(args):
"""Execute model training and evaluation loop."""
print(args)
paddle.seed(args.seed)
# Load MNIST dataset
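    # Normalize(mean=127.5, std=127.5) scales pixel values from [0, 255] to [-1, 1]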
transform = Normalize(mean=[127.5], std=[127.5], data_format='CHW')
train_dataset = MNIST(mode='train', transform=transform)
val_dataset = MNIST(mode='test', transform=transform)
# Define data loaders
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=args.test_batch_size)
# Define model and optimizer
model = Net(use_te=args.use_te)
optimizer = paddle.optimizer.Adam(learning_rate=args.lr, parameters=model.parameters())
# Cast model to BF16
model = paddle.amp.decorate(models=model, level='O2', dtype='bfloat16')
for epoch in range(1, args.epochs + 1):
loss = train(args, model, train_loader, optimizer, epoch, args.use_fp8)
acc = evaluate(model, val_loader, epoch, args.use_fp8)
if args.use_fp8_infer and not args.use_fp8:
calibrate(model, val_loader)
if args.save_model or args.use_fp8_infer:
paddle.save(model.state_dict(), "mnist_cnn.pdparams")
print('Eval with reloaded checkpoint : fp8=' + str(args.use_fp8))
weights = paddle.load("mnist_cnn.pdparams")
model.set_state_dict(weights)
acc = evaluate(model, val_loader, 0, args.use_fp8)
return loss, acc
class TestMNIST(unittest.TestCase):
"""MNIST unittests"""
gpu_has_fp8, reason = is_fp8_available()
@classmethod
def setUpClass(cls):
"""Run MNIST without Transformer Engine"""
cls.args = mnist_parser(["--epochs", "5"])
@staticmethod
def verify(actual):
"""Check If loss and accuracy match target"""
desired_traing_loss = 0.1
desired_test_accuracy = 0.98
assert actual[0] < desired_traing_loss
assert actual[1] > desired_test_accuracy
@unittest.skipIf(paddle.device.cuda.get_device_capability() < (8, 0),
"BF16 MNIST example requires Ampere+ GPU")
def test_te_bf16(self):
"""Test Transformer Engine with BF16"""
self.args.use_te = True
self.args.use_fp8 = False
self.args.save_model = True
actual = train_and_evaluate(self.args)
if os.path.exists("mnist_cnn.pdparams"):
os.remove("mnist_cnn.pdparams")
self.verify(actual)
@unittest.skipIf(not gpu_has_fp8, reason)
def test_te_fp8(self):
"""Test Transformer Engine with FP8"""
self.args.use_te = True
self.args.use_fp8 = True
self.args.save_model = True
actual = train_and_evaluate(self.args)
if os.path.exists("mnist_cnn.pdparams"):
os.remove("mnist_cnn.pdparams")
self.verify(actual)
@unittest.skipIf(not gpu_has_fp8, reason)
def test_te_fp8_calibration(self):
"""Test Transformer Engine with FP8 calibration"""
self.args.use_te = True
self.args.use_fp8 = False
self.args.use_fp8_infer = True
actual = train_and_evaluate(self.args)
if os.path.exists("mnist_cnn.pdparams"):
os.remove("mnist_cnn.pdparams")
self.verify(actual)
if __name__ == "__main__":
train_and_evaluate(mnist_parser(None))
| TransformerEngine-main | examples/paddle/mnist/test_single_gpu_mnist.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
from transformer_engine.tensorflow import Format, DelayedScaling
import argparse
import tensorflow as tf
import time
import transformer_engine.tensorflow as te
from keras import layers
from keras import Model
from typing import Optional
parser = argparse.ArgumentParser(description="Benchmark TransformerLayer.")
parser.add_argument(
'-t', '--type', type=int, default=0,
help="""Pick TE implementation (0:all|1:TF-fp16|2:TE-fp16|3:TE-fp8)""")
args, _ = parser.parse_known_args()
tl_type = args.type
tf.keras.mixed_precision.set_global_policy('mixed_float16')
dropout_rate = 0.0
class DotProductAttention(tf.keras.Model):
"""Attention operation in Transformer layer
"""
def __init__(
self,
num_attention_heads: int,
kv_channels: int,
attention_dropout: float,
):
super().__init__()
self.projection_size = kv_channels * num_attention_heads
self.hidden_size_per_attention_head = float(kv_channels)
self.norm_factor = tf.math.sqrt(self.hidden_size_per_attention_head)
self.dropout = layers.Dropout(attention_dropout)
if self.dropout.dtype_policy.name == 'mixed_float16':
self.norm_factor = tf.cast(self.norm_factor, dtype=tf.float16)
def masked_softmax(
self,
inp: tf.Tensor,
mask: Optional[tf.Tensor]
) -> tf.Tensor:
if mask is not None:
inp = tf.where(mask, -10000.0, inp)
return tf.nn.softmax(inp, axis=-1)
def call(
self,
query: tf.Tensor,
key: tf.Tensor,
value: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
) -> tf.Tensor:
b = query.shape[1]
np = query.shape[2]
sq = query.shape[0]
sk = key.shape[0]
hn = value.shape[3]
# [sq, b, np, hn] -> [sq, b * np, hn]
query = tf.reshape(query, shape=(sq, b * np, hn))
# [sk, b, np, hn] -> [sk, b * np, hn]
key = tf.reshape(key, shape=(sk, b * np, hn))
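        # [b * np, sq, hn] x [b * np, hn, sk] -> [b * np, sq, sk]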
bmm1 = tf.matmul(tf.transpose(query, perm=(1, 0, 2)),
tf.transpose(key, perm=(1, 2, 0))) / self.norm_factor
# change view to [b, np, sq, sk]
attention_scores = tf.reshape(bmm1, shape=(b, np, sq, sk))
attention_probs = self.masked_softmax(attention_scores, attention_mask)
attention_probs = self.dropout(attention_probs)
# change view [sk, b * np, hn]
value = tf.reshape(value, shape=(sk, b * np, hn))
# change view [b * np, sq, sk]
attention_probs = tf.reshape(attention_probs, shape=(b * np, sq, sk))
# matmul: [b * np, sq, hn]
context = tf.matmul(attention_probs,
tf.transpose(value, perm=(1, 0, 2)))
# change view [b, np, sq, hn]
context = tf.reshape(context, shape=(b, np, sq, hn))
# [b, np, sq, hn] --> [sq, b, np, hn]
context = tf.transpose(context, perm=(2, 0, 1, 3))
# [sq, b, np, hn] --> [sq, b, hp]
context = tf.reshape(context, shape=(sq, b, self.projection_size))
return context
class BasicMLP(tf.keras.Model):
"""Feed-forward network in Transformer layer
"""
def __init__(
self,
hidden_size: int,
ffn_hidden_size: int,
):
super().__init__()
self.linear1 = layers.Dense(ffn_hidden_size, use_bias=True)
self.linear2 = layers.Dense(hidden_size, use_bias=True)
def call(
self,
x: tf.Tensor
) -> tf.Tensor:
x = self.linear1(x)
x = tf.nn.gelu(x, approximate=True)
x = self.linear2(x)
return x
class BasicTransformer(tf.keras.Model):
def __init__(
self,
hidden_size: int,
ffn_hidden_size: int,
num_attention_heads: int,
        layernorm_eps: float = 1e-5,
attention_dropout: float = 0.1,
hidden_dropout: float = 0.1,
):
super().__init__()
self.num_attention_heads = num_attention_heads
self.kv_channels = hidden_size // num_attention_heads
self.ln1 = layers.LayerNormalization(epsilon=layernorm_eps)
self.qkv_projection = layers.Dense(3 * hidden_size, use_bias=True)
self.attention = DotProductAttention(
num_attention_heads=num_attention_heads,
kv_channels=self.kv_channels,
attention_dropout=attention_dropout,
)
self.projection = layers.Dense(hidden_size, use_bias=True)
self.dropout = layers.Dropout(hidden_dropout)
self.ln2 = layers.LayerNormalization(epsilon=layernorm_eps)
self.mlp = BasicMLP(
hidden_size=hidden_size,
ffn_hidden_size=ffn_hidden_size,
)
def call(
self,
x: tf.Tensor,
attention_mask: tf.Tensor,
) -> tf.Tensor:
res = x
x = self.ln1(x)
# Fused QKV projection
qkv = self.qkv_projection(x)
qkv_shape = qkv.shape
qkv = tf.reshape(qkv,
shape=(qkv_shape[0], qkv_shape[1],
self.num_attention_heads, 3 * self.kv_channels))
q, k, v = tf.split(qkv, 3, axis=3)
x = self.attention(q, k, v, attention_mask)
x = self.projection(x)
x = self.dropout(x)
x = res + x
res = x
x = self.ln2(x)
x = self.mlp(x)
return x + res
class FusedTETransformer(tf.keras.Model):
def __init__(
self,
hidden_size: int,
ffn_hidden_size: int,
num_attention_heads: int,
        layernorm_eps: float = 1e-5,
attention_dropout: float = 0.1,
hidden_dropout: float = 0.1,
):
super().__init__()
self.num_attention_heads = num_attention_heads
self.kv_channels = hidden_size // num_attention_heads
self.ln_qkv = te.LayerNormDense(3 * hidden_size, epsilon=layernorm_eps,
use_bias=True)
self.attention = DotProductAttention(
num_attention_heads=num_attention_heads,
kv_channels=self.kv_channels,
attention_dropout=attention_dropout,
)
self.projection = te.Dense(hidden_size, use_bias=True)
self.dropout = layers.Dropout(hidden_dropout)
self.ln_mlp = te.LayerNormMLP(ffn_hidden_size, hidden_size,
epsilon=layernorm_eps, use_bias=True,
return_layernorm_output=False)
def call(
self,
x: tf.Tensor,
attention_mask: tf.Tensor,
) -> tf.Tensor:
res = x
qkv = self.ln_qkv(x)
# Split qkv into query, key and value
qkv_shape = qkv.shape
qkv = tf.reshape(qkv,
shape=(qkv_shape[0], qkv_shape[1],
self.num_attention_heads, 3 * self.kv_channels))
q, k, v = tf.split(qkv, 3, axis=3)
x = self.attention(q, k, v, attention_mask)
x = self.projection(x)
x = self.dropout(x)
x = res + x
res = x
x = self.ln_mlp(x)
return x + res
# Layer configuration
hidden_size = 4096
sequence_length = 2048
batch_size = 4
ffn_hidden_size = 16384
num_attention_heads = 32
dtype = tf.float32
def speedometer(
    model: tf.keras.Model,
    input: tf.Tensor,
    forward_kwargs: Optional[dict] = None,
    fp8_autocast_kwargs: Optional[dict] = None,
    timing_iters: int = 50,
    warmup_iters: int = 50,
) -> None:
    """Measure average run time for a TF model
    Performs forward and backward passes.
    """
    # Avoid mutable default arguments.
    if forward_kwargs is None:
        forward_kwargs = {}
    if fp8_autocast_kwargs is None:
        fp8_autocast_kwargs = {"enabled": False}
p = tf.constant(0.) # Create small tensor to force GPU resync
# Warmup runs
for _ in range(warmup_iters):
with tf.GradientTape(persistent=True) as tape:
tape.watch(input)
with te.fp8_autocast(**fp8_autocast_kwargs):
output = model(input, **forward_kwargs)
loss = tf.reduce_sum(output)
dx, dvars = tape.gradient(loss, [input, model.variables])
(p + 1.).numpy() # Sync the GPU
# Timing runs
start = time.time()
for _ in range(timing_iters):
with tf.GradientTape(persistent=True) as tape:
tape.watch(input)
with te.fp8_autocast(**fp8_autocast_kwargs):
output = model(input, **forward_kwargs)
loss = tf.reduce_sum(output)
dx, dvars = tape.gradient(loss, [input, model.variables])
(p + 1.).numpy() # Sync the GPU
end = time.time()
elapsed_time = (end - start) / timing_iters * 1000
print(f"Mean time: {elapsed_time} ms")
tf.random.set_seed(12)
tf.keras.utils.set_random_seed(1)
# Synthetic data
x = tf.random.normal(shape=(sequence_length, batch_size, hidden_size),
dtype=dtype)
basic_transformer = BasicTransformer(
hidden_size,
ffn_hidden_size,
num_attention_heads,
attention_dropout=dropout_rate,
hidden_dropout=dropout_rate,
)
y = basic_transformer(x, attention_mask=None)
if tl_type in (0, 1):
print("Running in the native TF:")
speedometer(
basic_transformer,
x,
forward_kwargs={"attention_mask": None, "training": True},
)
te_transformer = FusedTETransformer(
hidden_size,
ffn_hidden_size,
num_attention_heads,
attention_dropout=dropout_rate,
hidden_dropout=dropout_rate,
)
fp8_recipe = DelayedScaling(margin=0, interval=1, fp8_format=Format.HYBRID,
amax_compute_algo='max', amax_history_len=16)
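# Rough meaning of the DelayedScaling fields above (see the TE docs for exact
# semantics): Format.HYBRID uses E4M3 for forward tensors and E5M2 for
# gradients; amax_history_len=16 keeps the last 16 amax observations per
# tensor; amax_compute_algo='max' reduces that history with a max; margin and
# interval control extra headroom on the scale and how often it is recomputed.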
# Run once to build the variables.
te_transformer(x, attention_mask=None)
# Sync the variables with the reference.
for v0, v1 in zip(basic_transformer.variables, te_transformer.variables):
v1.assign(v0)
tf.debugging.assert_near(v1, v0)
y_te = te_transformer(x, attention_mask=None)
if tl_type in (0, 2):
print("Running in the TE:")
speedometer(
te_transformer,
x,
forward_kwargs={"attention_mask": None, "training": True},
fp8_autocast_kwargs={"enabled": False, "fp8_recipe": None},
)
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
y_te = te_transformer(x, attention_mask=None)
if tl_type in (0, 3):
print("Running in the TE with fp8:")
speedometer(
te_transformer,
x,
forward_kwargs={"attention_mask": None, "training": True},
fp8_autocast_kwargs={"enabled": True, "fp8_recipe": fp8_recipe},
)
| TransformerEngine-main | examples/tensorflow/transformer_layer.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import argparse
import tensorflow as tf
import tensorflow_datasets as tfds
import transformer_engine.tensorflow as te
class MNIST(tf.keras.Model):
def __init__(self, use_te=False):
super().__init__()
self.flatten = tf.keras.layers.Flatten(input_shape=(28, 28))
if use_te:
self.dense1 = te.Dense(128, kernel_initializer='glorot_uniform',
bias_initializer='zeros')
else:
self.dense1 = tf.keras.layers.Dense(128, activation=None)
self.relu = tf.keras.layers.ReLU()
self.dense2 = tf.keras.layers.Dense(10)
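        # Note: only dense1 is swapped for te.Dense when use_te is set. The
        # 10-way classifier head stays a plain Keras layer, presumably because
        # FP8 GEMMs require dimensions that are multiples of 16, which a
        # 10-unit output would violate.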
def call(self, x):
x = self.flatten(x)
x = self.dense1(x)
x = self.relu(x)
y = self.dense2(x)
return y
loss_func = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
def train_step(inputs, model, optimizer, use_fp8, fp8_recipe=None):
x, labels = inputs
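    # Only the forward pass runs under fp8_autocast; tape.gradient is taken
    # outside the context, following the usual TE pattern: the backward GEMMs
    # reuse the FP8 scaling state recorded during the forward pass.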
with tf.GradientTape(persistent=True) as tape:
with te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe):
y = model(x, training=True)
loss = loss_func(labels, y)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
val_loss = tf.keras.metrics.Mean(name='val_loss', dtype=tf.float32)
val_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
def valid_step(inputs, model):
x, labels = inputs
predictions = model(x, training=False)
loss = loss_func(labels, predictions)
val_loss.update_state(loss)
val_accuracy.update_state(labels, predictions)
def main():
# Training settings
parser = argparse.ArgumentParser(description="Tensorflow MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=128,
metavar="N",
help="input batch size for training (default: 128)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=128,
metavar="N",
help="input batch size for testing (default: 128)",
)
parser.add_argument(
"--epochs",
type=int,
default=10,
metavar="N",
help="number of epochs to train (default: 10)",
)
parser.add_argument(
"--lr",
type=float,
default=0.001,
metavar="LR",
help="learning rate (default: 0.001)",
)
parser.add_argument(
"--seed", type=int, default=12, metavar="S",
help="random seed (default: 12)"
)
parser.add_argument(
"--use-fp8", action="store_true", default=False,
help="Use FP8 for inference and training without recalibration"
)
parser.add_argument(
"--use-te", action="store_true", default=False,
help="Use Transformer Engine"
)
args = parser.parse_args()
batch_size = args.batch_size
test_batch_size = args.test_batch_size
num_epoch = args.epochs
tf.random.set_seed(args.seed)
tf.keras.utils.set_random_seed(args.seed)
(ds_train, ds_test), ds_info = tfds.load(
'mnist',
split=['train', 'test'],
shuffle_files=True,
as_supervised=True,
with_info=True,
)
nstep_per_epoch = len(ds_train) // batch_size
nstep_per_valid = len(ds_test) // test_batch_size
def normalize_img(image, label):
"""Normalizes images: `uint8` -> `float32`."""
return tf.cast(image, tf.float32) / 255., label
ds_train = ds_train.map(
normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)
ds_train = ds_train.batch(batch_size)
ds_train = ds_train.prefetch(tf.data.AUTOTUNE)
ds_test = ds_test.map(
normalize_img, num_parallel_calls=tf.data.AUTOTUNE)
    ds_test = ds_test.batch(test_batch_size)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(tf.data.AUTOTUNE)
model = MNIST(use_te=(args.use_te or args.use_fp8))
optimizer = tf.keras.optimizers.Adam(args.lr)
fp8_recipe = te.DelayedScaling(
margin=0, interval=1, fp8_format=te.Format.HYBRID,
amax_compute_algo='max', amax_history_len=16)
for i in range(num_epoch):
ds_train_iter = iter(ds_train)
for _ in range(nstep_per_epoch):
inputs = next(ds_train_iter)
_ = train_step(inputs, model, optimizer, use_fp8=args.use_fp8,
fp8_recipe=fp8_recipe)
val_loss.reset_states()
val_accuracy.reset_states()
ds_test_iter = iter(ds_test)
for _ in range(nstep_per_valid):
inputs = next(ds_test_iter)
valid_step(inputs, model)
print("epoch-{} loss: {} - accuracy: {}".format(
i, val_loss.result(), val_accuracy.result()))
if __name__ == "__main__":
main()
| TransformerEngine-main | examples/tensorflow/mnist.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from transformer_engine import pytorch as te
class Net(nn.Module):
def __init__(self, use_te=False):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
if use_te:
self.fc1 = te.Linear(9216, 128)
self.fc2 = te.Linear(128, 16)
else:
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 16)
self.fc3 = nn.Linear(16, 10)
def forward(self, x):
"""FWD"""
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
x = self.fc3(x)
output = F.log_softmax(x, dim=1)
return output
def train(args, model, device, train_loader, optimizer, epoch, use_fp8):
"""Training function."""
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
with te.fp8_autocast(enabled=use_fp8):
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print(
f"Train Epoch: {epoch} "
f"[{batch_idx * len(data)}/{len(train_loader.dataset)} "
f"({100. * batch_idx / len(train_loader):.0f}%)]\t"
f"Loss: {loss.item():.6f}"
)
if args.dry_run:
break
def calibrate(model, device, test_loader, fp8):
"""Calibration function."""
model.eval()
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
with te.fp8_autocast(enabled=fp8, calibrating=True):
output = model(data)
def test(model, device, test_loader, use_fp8):
"""Testing function."""
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
with te.fp8_autocast(enabled=use_fp8):
output = model(data)
test_loss += F.nll_loss(
output, target, reduction="sum"
).item() # sum up batch loss
pred = output.argmax(
dim=1, keepdim=True
) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print(
f"\nTest set: Average loss: {test_loss:.4f}, "
f"Accuracy: {correct}/{len(test_loader.dataset)} "
f"({100. * correct / len(test_loader.dataset):.0f}%)\n"
)
def main():
# Training settings
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=1000,
metavar="N",
help="input batch size for testing (default: 1000)",
)
parser.add_argument(
"--epochs",
type=int,
default=14,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--lr",
type=float,
default=1.0,
metavar="LR",
help="learning rate (default: 1.0)",
)
parser.add_argument(
"--gamma",
type=float,
default=0.7,
metavar="M",
help="Learning rate step gamma (default: 0.7)",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument(
"--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
)
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--save-model",
action="store_true",
default=False,
help="For Saving the current Model",
)
parser.add_argument(
"--use-fp8", action="store_true", default=False, help="Use FP8 for inference and training without recalibration"
)
parser.add_argument(
"--use-fp8-infer", action="store_true", default=False, help="Use FP8 inference only"
)
parser.add_argument(
"--use-te", action="store_true", default=False, help="Use Transformer Engine"
)
args = parser.parse_args()
use_cuda = torch.cuda.is_available()
if args.use_fp8 or args.use_fp8_infer:
assert use_cuda, "CUDA needed for FP8 execution."
args.use_te = True
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {"batch_size": args.batch_size}
test_kwargs = {"batch_size": args.test_batch_size}
if use_cuda:
cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
dataset1 = datasets.MNIST("../data", train=True, download=True, transform=transform)
dataset2 = datasets.MNIST("../data", train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net(use_te=args.use_te).to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch, args.use_fp8)
test(model, device, test_loader, args.use_fp8)
scheduler.step()
if args.use_fp8_infer and not args.use_fp8:
calibrate(model, device, test_loader, args.use_fp8)
if args.save_model or args.use_fp8_infer:
torch.save(model.state_dict(), "mnist_cnn.pt")
print('Eval with reloaded checkpoint : fp8='+str(args.use_fp8_infer))
weights = torch.load("mnist_cnn.pt")
model.load_state_dict(weights)
test(model, device, test_loader, args.use_fp8_infer)
if __name__ == "__main__":
main()
| TransformerEngine-main | examples/pytorch/mnist/main.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
""" MNIST training on single GPU"""
import argparse
import unittest
from functools import partial
import jax
import jax.numpy as jnp
import numpy as np
import optax
from datasets import load_dataset
from flax import linen as nn
from flax.training import train_state
import transformer_engine.jax as te
import transformer_engine.jax.flax as te_flax
IMAGE_H = 28
IMAGE_W = 28
IMAGE_C = 1
PARAMS_KEY = 'params'
DROPOUT_KEY = 'dropout'
INPUT_KEY = 'input_rng'
class Net(nn.Module):
"""CNN model for MNIST."""
use_te: bool = False
@nn.compact
def __call__(self, x, disable_dropout=False):
if self.use_te:
nn_Dense = te_flax.DenseGeneral
else:
nn_Dense = nn.Dense
x = nn.Conv(features=32, kernel_size=(3, 3), strides=1, dtype=jnp.bfloat16)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(3, 3), strides=1, dtype=jnp.bfloat16)(x)
x = nn.relu(x)
x = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2))
x = nn.Dropout(rate=0.25)(x, deterministic=disable_dropout)
x = x.reshape(x.shape[0], -1)
x = nn_Dense(features=128, dtype=jnp.bfloat16)(x)
x = nn.relu(x)
x = nn.Dropout(rate=0.5)(x, deterministic=disable_dropout)
x = nn_Dense(features=16, dtype=jnp.bfloat16)(x)
x = nn.Dense(features=10, dtype=jnp.bfloat16)(x)
return x
@jax.jit
def apply_model(state, images, labels, var_collect, rngs=None):
"""Computes gradients, loss and accuracy for a single batch."""
def loss_fn(var_collect, disable_dropout=False):
logits = state.apply_fn(var_collect, images, disable_dropout, rngs=rngs)
one_hot = jax.nn.one_hot(labels, 10)
loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot))
return loss, logits
var_collect = {**var_collect, PARAMS_KEY: state.params}
if rngs is not None:
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss, logits), grads = grad_fn(var_collect)
else:
loss, logits = loss_fn(var_collect, disable_dropout=True)
grads = None
accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
return grads, loss, accuracy
@partial(jax.jit, static_argnums=2)
def update_model(state, grads, use_fp8):
"""Update model params and FP8 meta."""
state = state.apply_gradients(grads=grads[PARAMS_KEY])
if use_fp8:
grads = te.update_fp8_metas(grads)
return state, grads
def train_epoch(state, train_ds, batch_size, rngs, var_collect, use_fp8):
"""Train for a single epoch."""
train_ds_size = len(train_ds['image'])
steps_per_epoch = train_ds_size // batch_size
perms = jax.random.permutation(rngs[INPUT_KEY], train_ds_size)
perms = perms[:steps_per_epoch * batch_size] # skip incomplete batch
perms = perms.reshape((steps_per_epoch, batch_size))
epoch_loss = []
epoch_accuracy = []
for perm in perms:
batch_images = train_ds['image'][perm, ...]
batch_labels = train_ds['label'][perm, ...]
grads, loss, accuracy = apply_model(state, batch_images, batch_labels, var_collect, rngs)
state, var_collect = update_model(state, grads, use_fp8)
epoch_loss.append(loss)
epoch_accuracy.append(accuracy)
avg_loss = np.mean(epoch_loss)
avg_accuracy = np.mean(epoch_accuracy)
return state, avg_loss, avg_accuracy, var_collect
def eval_model(state, test_ds, batch_size, var_collect):
"""Evaluation loop."""
test_ds_size = len(test_ds['image'])
num_steps = test_ds_size // batch_size
valid_size = num_steps * batch_size
all_loss = []
all_accuracy = []
for batch_start in range(0, valid_size, batch_size):
batch_end = batch_start + batch_size
batch_images = test_ds['image'][batch_start:batch_end]
batch_labels = test_ds['label'][batch_start:batch_end]
_, loss, accuracy = apply_model(state, batch_images, batch_labels, var_collect)
all_loss.append(loss)
all_accuracy.append(accuracy)
avg_loss = np.mean(all_loss)
avg_accuracy = np.mean(all_accuracy)
return avg_loss, avg_accuracy
def get_datasets():
"""Load MNIST train and test datasets into memory."""
train_ds = load_dataset('mnist', split='train')
train_ds.set_format(type='np')
batch_size = train_ds['image'].shape[0]
shape = (batch_size, IMAGE_H, IMAGE_W, IMAGE_C)
new_train_ds = {
'image': train_ds['image'].astype(np.float32).reshape(shape) / 255.,
'label': train_ds['label']
}
test_ds = load_dataset('mnist', split='test')
test_ds.set_format(type='np')
batch_size = test_ds['image'].shape[0]
shape = (batch_size, IMAGE_H, IMAGE_W, IMAGE_C)
new_test_ds = {
'image': test_ds['image'].astype(np.float32).reshape(shape) / 255.,
'label': test_ds['label']
}
return new_train_ds, new_test_ds
def check_fp8(state, var_collect, input_shape, label_shape):
"Check if model includes FP8."
assert "Float8" in str(
jax.make_jaxpr(apply_model)(state, jnp.empty(input_shape, dtype=jnp.bfloat16),
jnp.empty(label_shape, dtype=jnp.bfloat16), var_collect))
def train_and_evaluate(args):
"""Execute model training and evaluation loop."""
print(args)
if args.use_fp8:
args.use_te = True
train_ds, test_ds = get_datasets()
rng = jax.random.PRNGKey(args.seed)
rng, params_rng = jax.random.split(rng)
rng, dropout_rng = jax.random.split(rng)
init_rngs = {PARAMS_KEY: params_rng, DROPOUT_KEY: dropout_rng}
input_shape = [args.batch_size, IMAGE_H, IMAGE_W, IMAGE_C]
label_shape = [args.batch_size]
with te.fp8_autocast(enabled=args.use_fp8):
cnn = Net(args.use_te)
var_collect = cnn.init(init_rngs, jnp.empty(input_shape, dtype=jnp.bfloat16))
tx = optax.sgd(args.lr, args.momentum)
state = train_state.TrainState.create(apply_fn=cnn.apply,
params=var_collect[PARAMS_KEY],
tx=tx)
if args.use_fp8:
check_fp8(state, var_collect, input_shape, label_shape)
if args.dry_run:
apply_model(state, jnp.empty(input_shape, dtype=jnp.bfloat16),
jnp.empty(label_shape, dtype=jnp.bfloat16), var_collect,
{DROPOUT_KEY: dropout_rng})
print("PASSED")
return None
for epoch in range(1, args.epochs + 1):
rng, input_rng = jax.random.split(rng)
rng, dropout_rng = jax.random.split(rng)
rngs = {INPUT_KEY: input_rng, DROPOUT_KEY: dropout_rng}
state, train_loss, train_accuracy, var_collect = train_epoch(
state, train_ds, args.batch_size, rngs, var_collect, args.use_fp8)
test_loss, test_accuracy = eval_model(state, test_ds, args.test_batch_size, var_collect)
print(f"Epoch: {epoch:>2} "
f"Train Loss: {train_loss:.6f} "
f"Train Accuracy: {train_accuracy:.6f} "
f"Test Loss: {test_loss:.6f} "
f"Test Accuracy: {test_accuracy:.6f} ")
return [train_loss, train_accuracy, test_loss, test_accuracy]
def mnist_parser(args):
"""Training settings."""
parser = argparse.ArgumentParser(description="JAX MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=800,
metavar="N",
help="input batch size for testing (default: 800)",
)
parser.add_argument(
"--epochs",
type=int,
default=10,
metavar="N",
help="number of epochs to train (default: 10)",
)
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
metavar="M",
help="Momentum (default: 0.9)",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)")
parser.add_argument("--use-fp8",
action="store_true",
default=False,
help="Use FP8 for inference and training without recalibration. " \
"It also enables Transformer Engine implicitly.")
parser.add_argument("--use-te",
action="store_true",
default=False,
help="Use Transformer Engine")
return parser.parse_args(args)
class TestMNIST(unittest.TestCase):
"""MNIST unittests"""
gpu_has_fp8, reason = te.fp8.is_fp8_available()
@classmethod
def setUpClass(cls):
"""Run MNIST without Transformer Engine"""
cls.args = mnist_parser(["--epochs", "5"])
@staticmethod
def verify(actual):
"""Check If loss and accuracy match target"""
desired_traing_loss = 0.055
desired_traing_accuracy = 0.98
desired_test_loss = 0.035
desired_test_accuracy = 0.098
assert actual[0] < desired_traing_loss
assert actual[1] > desired_traing_accuracy
assert actual[2] < desired_test_loss
assert actual[3] > desired_test_accuracy
def test_te_bf16(self):
"""Test Transformer Engine with BF16"""
self.args.use_te = True
self.args.use_fp8 = False
actual = train_and_evaluate(self.args)
self.verify(actual)
@unittest.skipIf(not gpu_has_fp8, reason)
def test_te_fp8(self):
"""Test Transformer Engine with FP8"""
self.args.use_fp8 = True
actual = train_and_evaluate(self.args)
self.verify(actual)
if __name__ == "__main__":
train_and_evaluate(mnist_parser(None))
| TransformerEngine-main | examples/jax/mnist/test_single_gpu_mnist.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Encoder training on multi-GPU with tesnor parallelism"""
import argparse
import unittest
from functools import partial
import flax
import jax
import jax.numpy as jnp
import nltk
import numpy as np
import optax
from datasets import load_dataset
from flax import linen as nn
from flax.linen import partitioning as nn_partitioning
from flax.training import train_state
from jax.experimental import mesh_utils
from jax.experimental.pjit import pjit
import transformer_engine.jax as te
import transformer_engine.jax.flax as te_flax
DEVICE_DP_AXIS = 'data'
DEVICE_TP_AXIS = 'model'
NAMED_BROADCAST_AXIS = 'my_broadcast_axis'
NAMED_TP_AXIS = 'my_tp_axis'
PARAMS_KEY = 'params'
PARAMS_AXES_KEY = PARAMS_KEY + '_axes'
DROPOUT_KEY = 'dropout'
INPUT_KEY = 'input_rng'
class Net(nn.Module):
"""NLP Encoder"""
num_embed: int
@nn.compact
def __call__(self, x, mask, disable_dropout=False):
x = nn.Embed(num_embeddings=self.num_embed, features=256, dtype=jnp.bfloat16)(x)
te_Encoder = partial(te_flax.TransformerLayer,
hidden_size=256,
mlp_hidden_size=1024,
num_attention_heads=8,
hidden_dropout=0.1,
attention_dropout=0.1,
dropout_rng_name=DROPOUT_KEY,
layer_type=te_flax.TransformerLayerType.ENCODER,
self_attn_mask_type='padding',
enable_relative_embedding=False,
dtype=jnp.bfloat16)
x = te_Encoder()(x, attention_mask=mask, deterministic=disable_dropout)
x = x.reshape(x.shape[0], -1)
x = te_flax.DenseGeneral(features=256,
kernel_axes=(NAMED_BROADCAST_AXIS, NAMED_TP_AXIS),
bias_axes=(NAMED_TP_AXIS,),
sharding_type=te.ShardingType.DP_TP_COL,
dtype=jnp.bfloat16)(x)
x = te_flax.DenseGeneral(features=256,
kernel_axes=(NAMED_TP_AXIS, NAMED_BROADCAST_AXIS),
bias_axes=(NAMED_BROADCAST_AXIS,),
sharding_type=te.ShardingType.DP_TP_ROW,
dtype=jnp.bfloat16)(x)
x = nn.Dense(features=2, dtype=jnp.bfloat16)(x)
return x
def train_step(state, inputs, masks, labels, var_collect, rngs, use_fp8):
"""Computes gradients, loss and accuracy for a single batch."""
def loss_fn(var_collect, disable_dropout=False):
logits = state.apply_fn(var_collect, inputs, masks, disable_dropout, rngs=rngs)
one_hot = jax.nn.one_hot(labels, 2)
loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot))
return loss, logits
var_collect = {**var_collect, PARAMS_KEY: state.params}
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss, logits), grads = grad_fn(var_collect)
accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
var_collect, grads = flax.core.pop(grads, PARAMS_KEY)
state = state.apply_gradients(grads=grads)
if use_fp8:
var_collect = te.update_fp8_metas(var_collect)
return state, loss, accuracy, var_collect
def train_epoch(state, train_ds, batch_size, rngs, var_collect, use_fp8, train_fn):
"""Train for a single epoch."""
train_ds_size = len(train_ds['sentence'])
steps_per_epoch = train_ds_size // batch_size
perms = jax.random.permutation(rngs[INPUT_KEY], train_ds_size)
perms = perms[:steps_per_epoch * batch_size] # skip incomplete batch
perms = perms.reshape((steps_per_epoch, batch_size))
epoch_loss = []
epoch_accuracy = []
for perm in perms:
batch_inputs = train_ds['sentence'][perm, ...]
batch_masks = train_ds['mask'][perm, ...]
batch_labels = train_ds['label'][perm, ...]
state, loss, accuracy, var_collect = train_fn(state, batch_inputs, batch_masks,
batch_labels, var_collect, rngs, use_fp8)
epoch_loss.append(loss)
epoch_accuracy.append(accuracy)
avg_loss = np.mean(epoch_loss)
avg_accuracy = np.mean(epoch_accuracy)
return state, avg_loss, avg_accuracy, var_collect
def eval_step(state, inputs, masks, labels, var_collect):
"""Computes loss and accuracy for a single batch."""
def loss_fn(var_collect, disable_dropout=False):
logits = state.apply_fn(var_collect, inputs, masks, disable_dropout)
one_hot = jax.nn.one_hot(labels, 2)
loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot))
return loss, logits
var_collect = {**var_collect, PARAMS_KEY: state.params}
loss, logits = loss_fn(var_collect, disable_dropout=True)
accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
return loss, accuracy
def eval_model(state, test_ds, batch_size, var_collect, eval_fn):
"""Evaluation loop."""
test_ds_size = len(test_ds['sentence'])
num_steps = test_ds_size // batch_size
valid_size = num_steps * batch_size
all_loss = []
all_accuracy = []
for batch_start in range(0, valid_size, batch_size):
batch_end = batch_start + batch_size
batch_inputs = test_ds['sentence'][batch_start:batch_end]
batch_masks = test_ds['mask'][batch_start:batch_end]
batch_labels = test_ds['label'][batch_start:batch_end]
loss, accuracy = eval_fn(state, batch_inputs, batch_masks, batch_labels, var_collect)
all_loss.append(loss)
all_accuracy.append(accuracy)
avg_loss = np.mean(all_loss)
avg_accuracy = np.mean(all_accuracy)
return avg_loss, avg_accuracy
def data_preprocess(dataset, vocab, word_id, max_seq_len):
"""Convert tokens to numbers."""
nltk.download('punkt')
dataset_size = len(dataset['sentence'])
output = np.zeros((dataset_size, max_seq_len), dtype=np.int32)
mask_3d = np.ones((dataset_size, max_seq_len, max_seq_len), dtype=np.uint8)
for j, sentence in enumerate(dataset['sentence']):
tokens = nltk.word_tokenize(sentence)
tensor = output[j]
for i, word in enumerate(tokens):
if i >= max_seq_len:
break
if word not in vocab:
vocab[word] = word_id
tensor[i] = word_id
word_id = word_id + 1
else:
tensor[i] = vocab[word]
seq_len = min(len(tokens), max_seq_len)
mask_2d = mask_3d[j]
mask_2d[:seq_len, :seq_len] = 0
new_dataset = {
'sentence': output,
'label': dataset['label'].astype(np.float32),
'mask': mask_3d.reshape((dataset_size, 1, max_seq_len, max_seq_len))
}
return new_dataset, vocab, word_id
def get_datasets(max_seq_len):
"""Load GLUE train and test datasets into memory."""
vocab = {}
word_id = 0
train_ds = load_dataset('glue', 'cola', split='train')
train_ds.set_format(type='np')
train_ds, vocab, word_id = data_preprocess(train_ds, vocab, word_id, max_seq_len)
test_ds = load_dataset('glue', 'cola', split='validation')
test_ds.set_format(type='np')
test_ds, vocab, word_id = data_preprocess(test_ds, vocab, word_id, max_seq_len)
return train_ds, test_ds, word_id
def check_fp8(state, var_collect, inputs, masks, labels):
"Check if model includes FP8."
rngs = {DROPOUT_KEY: jax.random.PRNGKey(0)}
assert "Float8" in str(
jax.make_jaxpr(train_step, static_argnums=6)(state, inputs, masks, labels, var_collect,
rngs, True))
def get_params_pspec(sharding_rules, abs_var_collect):
"""Refer params to create params partition spec"""
rules_dict = {}
for key, value in sharding_rules:
rules_dict[key] = value
def to_device_axis(logical_axis):
partitions = [rules_dict[key] for key in logical_axis]
return jax.sharding.PartitionSpec(*partitions)
params_axes = abs_var_collect.get(PARAMS_AXES_KEY, {})
params_axes_pspec = jax.tree_map(to_device_axis, nn_partitioning.get_axis_names(params_axes))
params_axes_pspec = flax.core.unfreeze(params_axes_pspec)
params_pspec = jax.tree_map(lambda x: jax.sharding.PartitionSpec(), abs_var_collect[PARAMS_KEY])
params_pspec = {**params_pspec, **params_axes_pspec}
return params_pspec
def get_state_pspec(state, params_pspec):
"""Refer params_pspec to create state partition spec"""
def replace_params(x):
return params_pspec if isinstance(x, dict) else None
state_pspec = jax.tree_map(replace_params, state, is_leaf=lambda x: isinstance(x, dict))
return state_pspec
def train_and_evaluate(args):
"""Execute model training and evaluation loop."""
print(args)
train_ds, test_ds, num_embed = get_datasets(args.max_seq_len)
num_gpu = jax.local_device_count()
num_gpu_tp = 2
if num_gpu % num_gpu_tp == 0:
num_gpu_dp = num_gpu // num_gpu_tp
else:
num_gpu_dp = 1
num_gpu_tp = 1
assert args.batch_size % num_gpu_dp == 0, f"Batch size needs to be multiple of {num_gpu_dp}"
assert args.test_batch_size % num_gpu_dp == 0, \
f"Test batch size needs to be multiple of {num_gpu_dp}"
device_mesh = mesh_utils.create_device_mesh((num_gpu_dp, num_gpu_tp))
with jax.sharding.Mesh(devices=device_mesh, axis_names=(DEVICE_DP_AXIS, DEVICE_TP_AXIS)):
rng = jax.random.PRNGKey(args.seed)
rng, params_rng = jax.random.split(rng)
rng, dropout_rng = jax.random.split(rng)
init_rngs = {PARAMS_KEY: params_rng, DROPOUT_KEY: dropout_rng}
input_shape = [args.batch_size, args.max_seq_len]
mask_shape = [args.batch_size, 1, args.max_seq_len, args.max_seq_len]
label_shape = [args.batch_size]
with te.fp8_autocast(args.use_fp8,
sharding_resource=te.ShardingResource(DEVICE_DP_AXIS, DEVICE_TP_AXIS)):
encoder = Net(num_embed)
inputs = jnp.zeros(input_shape, dtype=jnp.int32)
masks = jnp.zeros(mask_shape, dtype=jnp.uint8)
abs_var_collect = jax.eval_shape(encoder.init, init_rngs, inputs, masks)
customized_rules = ((NAMED_BROADCAST_AXIS, None), (NAMED_TP_AXIS, DEVICE_TP_AXIS))
sharding_rules = te_flax.extend_logical_axis_rules(tuple()) + customized_rules
params_pspec = get_params_pspec(sharding_rules, abs_var_collect)
inputs_pspec = jax.sharding.PartitionSpec(DEVICE_DP_AXIS, None)
masks_pspec = jax.sharding.PartitionSpec(DEVICE_DP_AXIS, None, None, None)
in_shardings = (None, inputs_pspec, masks_pspec)
            out_shardings = {key: params_pspec if key == PARAMS_KEY else None
                             for key in abs_var_collect}
pjit_encoder_init = pjit(encoder.init, in_shardings, out_shardings)
var_collect = pjit_encoder_init(init_rngs, inputs, masks)
optimizer = optax.adamw(args.lr)
var_collect, params = flax.core.pop(var_collect, PARAMS_KEY)
state = train_state.TrainState.create(apply_fn=encoder.apply,
params=params,
tx=optimizer)
state_pspec = get_state_pspec(state, params_pspec)
labels_pspec = jax.sharding.PartitionSpec(DEVICE_DP_AXIS,)
in_shardings = (state_pspec, inputs_pspec, masks_pspec, labels_pspec, None, None)
out_shardings = (state_pspec, None, None, None)
pjit_train_step = pjit(train_step, in_shardings, out_shardings, static_argnums=(6,))
in_shardings = (state_pspec, inputs_pspec, masks_pspec, labels_pspec, None)
out_shardings = (None, None)
pjit_eval_step = pjit(eval_step, in_shardings, out_shardings)
if args.use_fp8:
labels = jnp.zeros(label_shape, dtype=jnp.bfloat16)
check_fp8(state, var_collect, inputs, masks, labels)
if args.dry_run:
labels = jnp.zeros(label_shape, dtype=jnp.bfloat16)
rngs = {DROPOUT_KEY: dropout_rng}
pjit_train_step(state, inputs, masks, labels, var_collect, rngs, args.use_fp8)
print("PASSED")
return None
for epoch in range(1, args.epochs + 1):
rng, input_rng = jax.random.split(rng)
rng, dropout_rng = jax.random.split(rng)
rngs = {INPUT_KEY: input_rng, DROPOUT_KEY: dropout_rng}
state, train_loss, train_accuracy, var_collect = train_epoch(
state, train_ds, args.batch_size, rngs, var_collect, args.use_fp8,
pjit_train_step)
test_loss, test_accuracy = eval_model(state, test_ds, args.test_batch_size,
var_collect, pjit_eval_step)
print(f"Epoch: {epoch:>2} "
f"Train Loss: {train_loss:.6f} "
f"Train Accuracy: {train_accuracy:.6f} "
f"Test Loss: {test_loss:.6f} "
f"Test Accuracy: {test_accuracy:.6f} ")
return [train_loss, train_accuracy, test_loss, test_accuracy]
def encoder_parser(args):
"""Training settings."""
parser = argparse.ArgumentParser(description="JAX Encoder Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for testing (default: 64)",
)
parser.add_argument(
"--max-seq-len",
type=int,
default=32,
metavar="N",
help="maximum sequence length (default: 32)",
)
parser.add_argument(
"--epochs",
type=int,
default=3,
metavar="N",
help="number of epochs to train (default: 3)",
)
parser.add_argument(
"--lr",
type=float,
default=0.0001,
metavar="LR",
help="learning rate (default: 0.0001)",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument("--seed", type=int, default=0, metavar="S", help="random seed (default: 0)")
parser.add_argument("--use-fp8",
action="store_true",
default=False,
help="Use FP8 for inference and training without recalibration")
return parser.parse_args(args)
class TestEncoder(unittest.TestCase):
"""Encoder unittests"""
gpu_has_fp8, reason = te.fp8.is_fp8_available()
@classmethod
def setUpClass(cls):
"""Run 3 epochs for testing"""
cls.args = encoder_parser(["--epochs", "3"])
def test_te_bf16(self):
"""Test Transformer Engine with BF16"""
actual = train_and_evaluate(self.args)
assert actual[0] < 0.45 and actual[1] > 0.79
@unittest.skipIf(not gpu_has_fp8, reason)
def test_te_fp8(self):
"""Test Transformer Engine with FP8"""
self.args.use_fp8 = True
actual = train_and_evaluate(self.args)
assert actual[0] < 0.45 and actual[1] > 0.79
if __name__ == "__main__":
train_and_evaluate(encoder_parser(None))
| TransformerEngine-main | examples/jax/encoder/test_model_parallel_encoder.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Encoder training with multi-GPU, multiprocessing, and tensor parallelism"""
import argparse
import multiprocessing as mp
import os
import unittest
from functools import partial
import flax
import jax
import jax.numpy as jnp
import nltk
import numpy as np
import optax
from datasets import load_dataset
from flax import linen as nn
from flax.linen import partitioning as nn_partitioning
from flax.training import train_state
from jax.experimental import mesh_utils
from jax.experimental.pjit import pjit
import transformer_engine.jax as te
import transformer_engine.jax.flax as te_flax
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
DEVICE_DP_AXIS = 'data'
DEVICE_TP_AXIS = 'model'
NAMED_BROADCAST_AXIS = 'my_broadcast_axis'
NAMED_TP_AXIS = 'my_tp_axis'
PARAMS_KEY = 'params'
PARAMS_AXES_KEY = PARAMS_KEY + '_axes'
DROPOUT_KEY = 'dropout'
INPUT_KEY = 'input_rng'
class Net(nn.Module):
"""NLP Encoder"""
num_embed: int
@nn.compact
def __call__(self, x, mask, disable_dropout=False):
x = nn.Embed(num_embeddings=self.num_embed, features=256, dtype=jnp.bfloat16)(x)
te_Encoder = partial(te_flax.TransformerLayer,
hidden_size=256,
mlp_hidden_size=1024,
num_attention_heads=8,
hidden_dropout=0.1,
attention_dropout=0.1,
dropout_rng_name=DROPOUT_KEY,
layer_type=te_flax.TransformerLayerType.ENCODER,
self_attn_mask_type='padding',
enable_relative_embedding=False,
dtype=jnp.bfloat16)
x = te_Encoder()(x, attention_mask=mask, deterministic=disable_dropout)
x = x.reshape(x.shape[0], -1)
x = te_flax.DenseGeneral(features=256,
kernel_axes=(NAMED_BROADCAST_AXIS, NAMED_TP_AXIS),
bias_axes=(NAMED_TP_AXIS,),
sharding_type=te.ShardingType.DP_TP_COL,
dtype=jnp.bfloat16)(x)
x = te_flax.DenseGeneral(features=256,
kernel_axes=(NAMED_TP_AXIS, NAMED_BROADCAST_AXIS),
bias_axes=(NAMED_BROADCAST_AXIS,),
sharding_type=te.ShardingType.DP_TP_ROW,
dtype=jnp.bfloat16)(x)
x = nn.Dense(features=2, dtype=jnp.bfloat16)(x)
return x
def valid_shard_size(total_size, batch_size, dp_size, tp_size):
"""Get sharded input shape"""
global_batch_size = dp_size * batch_size
num_steps = total_size // global_batch_size
valid_size = num_steps * global_batch_size
gpu_id = jax.local_devices()[0].id
tp_group_id = gpu_id // tp_size
return valid_size, global_batch_size, num_steps, tp_group_id
def shard_array_wrapper(dataset, batch_size, mesh, pspec, enable_partition=False):
"""Generate needed args for jax.make_array_from_single_device_arrays"""
inputs = jnp.asarray(dataset)
total_input_size = len(inputs)
(dp_size, tp_size) = mesh.device_ids.shape
valid_input_size, global_batch_size, num_steps, tp_group_id = valid_shard_size(
total_input_size, batch_size, dp_size, tp_size)
inputs = inputs[:valid_input_size] # skip incomplete batch
single_input_shape = inputs.shape[1:]
global_input_shape = (global_batch_size, *single_input_shape)
named_sharding = jax.sharding.NamedSharding(mesh, pspec)
if enable_partition:
inputs = inputs.reshape(dp_size, num_steps, batch_size, *single_input_shape)
inputs = inputs[tp_group_id]
return global_input_shape, named_sharding, inputs
def train_step(state, inputs, masks, labels, var_collect, rngs, use_fp8):
"""Computes gradients, loss and accuracy for a single batch."""
def loss_fn(var_collect, disable_dropout=False):
logits = state.apply_fn(var_collect, inputs, masks, disable_dropout, rngs=rngs)
one_hot = jax.nn.one_hot(labels, 2)
loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot))
return loss, logits
var_collect = {**var_collect, PARAMS_KEY: state.params}
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss, logits), grads = grad_fn(var_collect)
accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
var_collect, grads = flax.core.pop(grads, PARAMS_KEY)
state = state.apply_gradients(grads=grads)
if use_fp8:
var_collect = te.update_fp8_metas(var_collect)
return state, loss, accuracy, var_collect
def train_epoch(state, train_ds, batch_size, rngs, var_collect, use_fp8, train_fn, mesh,
inputs_pspec, masks_pspec, labels_pspec):
"""Train for a single epoch."""
total_batch_size = len(train_ds['sentence'])
(dp_size, tp_size) = mesh.device_ids.shape
valid_size, _, num_steps, tp_group_id = valid_shard_size(total_batch_size, batch_size, dp_size,
tp_size)
perms = jax.random.permutation(rngs[INPUT_KEY], valid_size)
perms = perms.reshape(dp_size, num_steps, batch_size)
perms = perms[tp_group_id]
global_input_shape, input_named_sharding, sentence = shard_array_wrapper(
train_ds['sentence'], batch_size, mesh, inputs_pspec)
global_mask_shape, mask_named_sharding, mask = shard_array_wrapper(
train_ds['mask'], batch_size, mesh, masks_pspec)
global_label_shape, label_named_sharding, label = shard_array_wrapper(
train_ds['label'], batch_size, mesh, labels_pspec)
epoch_loss = []
epoch_accuracy = []
for perm in perms:
batch_input = sentence[perm, ...]
batch_mask = mask[perm, ...]
batch_label = label[perm, ...]
shard_input = jax.make_array_from_single_device_arrays(global_input_shape,
input_named_sharding, [batch_input])
shard_mask = jax.make_array_from_single_device_arrays(global_mask_shape,
mask_named_sharding, [batch_mask])
shard_label = jax.make_array_from_single_device_arrays(global_label_shape,
label_named_sharding, [batch_label])
state, loss, accuracy, var_collect = train_fn(state, shard_input, shard_mask, shard_label,
var_collect, rngs, use_fp8)
epoch_loss.append(loss)
epoch_accuracy.append(accuracy)
avg_loss = np.mean(epoch_loss)
avg_accuracy = np.mean(epoch_accuracy)
return state, avg_loss, avg_accuracy, var_collect
def eval_step(state, inputs, masks, labels, var_collect):
"""Computes loss and accuracy for a single batch."""
def loss_fn(var_collect, disable_dropout=False):
logits = state.apply_fn(var_collect, inputs, masks, disable_dropout)
one_hot = jax.nn.one_hot(labels, 2)
loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot))
return loss, logits
var_collect = {**var_collect, PARAMS_KEY: state.params}
loss, logits = loss_fn(var_collect, disable_dropout=True)
accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
return loss, accuracy
def eval_model(state, test_ds, batch_size, var_collect, eval_fn, mesh, inputs_pspec, masks_pspec,
labels_pspec):
"""Evaluation loop."""
global_input_shape, input_named_sharding, sentence = shard_array_wrapper(test_ds['sentence'],
batch_size,
mesh,
inputs_pspec,
enable_partition=True)
global_mask_shape, mask_named_sharding, mask = shard_array_wrapper(test_ds['mask'],
batch_size,
mesh,
masks_pspec,
enable_partition=True)
global_label_shape, label_named_sharding, label = shard_array_wrapper(test_ds['label'],
batch_size,
mesh,
labels_pspec,
enable_partition=True)
all_loss = []
all_accuracy = []
for batch_input, batch_mask, batch_label in zip(sentence, mask, label):
shard_input = jax.make_array_from_single_device_arrays(global_input_shape,
input_named_sharding, [batch_input])
shard_mask = jax.make_array_from_single_device_arrays(global_mask_shape,
mask_named_sharding, [batch_mask])
shard_label = jax.make_array_from_single_device_arrays(global_label_shape,
label_named_sharding, [batch_label])
loss, accuracy = eval_fn(state, shard_input, shard_mask, shard_label, var_collect)
all_loss.append(loss)
all_accuracy.append(accuracy)
avg_loss = np.mean(all_loss)
avg_accuracy = np.mean(all_accuracy)
return avg_loss, avg_accuracy
def data_preprocess(dataset, vocab, word_id, max_seq_len):
"""Convert tokens to numbers."""
nltk.download('punkt')
dataset_size = len(dataset['sentence'])
output = np.zeros((dataset_size, max_seq_len), dtype=np.int32)
mask_3d = np.ones((dataset_size, max_seq_len, max_seq_len), dtype=np.uint8)
for j, sentence in enumerate(dataset['sentence']):
tokens = nltk.word_tokenize(sentence)
tensor = output[j]
for i, word in enumerate(tokens):
if i >= max_seq_len:
break
if word not in vocab:
vocab[word] = word_id
tensor[i] = word_id
word_id = word_id + 1
else:
tensor[i] = vocab[word]
seq_len = min(len(tokens), max_seq_len)
mask_2d = mask_3d[j]
mask_2d[:seq_len, :seq_len] = 0
new_dataset = {
'sentence': output,
'label': dataset['label'].astype(np.float32),
'mask': mask_3d.reshape((dataset_size, 1, max_seq_len, max_seq_len))
}
return new_dataset, vocab, word_id
def get_datasets(max_seq_len):
"""Load GLUE train and test datasets into memory."""
vocab = {}
word_id = 0
train_ds = load_dataset('glue', 'cola', split='train')
train_ds.set_format(type='np')
train_ds, vocab, word_id = data_preprocess(train_ds, vocab, word_id, max_seq_len)
test_ds = load_dataset('glue', 'cola', split='validation')
test_ds.set_format(type='np')
test_ds, vocab, word_id = data_preprocess(test_ds, vocab, word_id, max_seq_len)
return train_ds, test_ds, word_id
def check_fp8(state, var_collect, inputs, masks, labels):
"Check if model includes FP8."
rngs = {DROPOUT_KEY: jax.random.PRNGKey(0)}
assert "Float8" in str(
jax.make_jaxpr(train_step, static_argnums=6)(state, inputs, masks, labels, var_collect,
rngs, True))
def get_params_pspec(sharding_rules, abs_var_collect):
"""Refer params to create params partition spec"""
rules_dict = {}
for key, value in sharding_rules:
rules_dict[key] = value
def to_device_axis(logical_axis):
partitions = [rules_dict[key] for key in logical_axis]
return jax.sharding.PartitionSpec(*partitions)
params_axes = abs_var_collect.get(PARAMS_AXES_KEY, {})
params_axes_pspec = jax.tree_map(to_device_axis, nn_partitioning.get_axis_names(params_axes))
params_axes_pspec = flax.core.unfreeze(params_axes_pspec)
params_pspec = jax.tree_map(lambda x: jax.sharding.PartitionSpec(), abs_var_collect[PARAMS_KEY])
params_pspec = {**params_pspec, **params_axes_pspec}
return params_pspec
def get_state_pspec(state, params_pspec):
"""Refer params_pspec to create state partition spec"""
def replace_params(x):
return params_pspec if isinstance(x, dict) else None
state_pspec = jax.tree_map(replace_params, state, is_leaf=lambda x: isinstance(x, dict))
return state_pspec
def train_and_evaluate(args):
"""Execute model training and evaluation loop."""
print(args)
train_ds, test_ds, num_embed = get_datasets(args.max_seq_len)
jax.distributed.initialize(coordinator_address=args.coordinator_address,
num_processes=args.num_process,
process_id=args.process_id,
local_device_ids=args.process_id)
assert jax.local_device_count() == 1, "1 GPU per process"
num_gpu_tp = 2
if args.num_process % num_gpu_tp == 0:
num_gpu_dp = args.num_process // num_gpu_tp
else:
assert args.num_process == 1, "number of processes should be multiple of 2, or 1"
num_gpu_dp = 1
num_gpu_tp = 1
assert args.batch_size % num_gpu_dp == 0, f"Batch size needs to be multiple of {num_gpu_dp}"
assert args.test_batch_size % num_gpu_dp == 0, \
f"Test batch size needs to be multiple of {num_gpu_dp}"
device_mesh = mesh_utils.create_device_mesh((num_gpu_dp, num_gpu_tp))
with jax.sharding.Mesh(devices=device_mesh,
axis_names=(DEVICE_DP_AXIS, DEVICE_TP_AXIS)) as shard_mesh:
rng = jax.random.PRNGKey(args.seed)
rng, params_rng = jax.random.split(rng)
rng, dropout_rng = jax.random.split(rng)
init_rngs = {PARAMS_KEY: params_rng, DROPOUT_KEY: dropout_rng}
input_shape = [args.batch_size, args.max_seq_len]
mask_shape = [args.batch_size, 1, args.max_seq_len, args.max_seq_len]
label_shape = [args.batch_size]
with te.fp8_autocast(args.use_fp8,
sharding_resource=te.ShardingResource(DEVICE_DP_AXIS, DEVICE_TP_AXIS)):
encoder = Net(num_embed)
inputs = jnp.zeros(input_shape, dtype=jnp.int32)
masks = jnp.zeros(mask_shape, dtype=jnp.uint8)
abs_var_collect = jax.eval_shape(encoder.init, init_rngs, inputs, masks)
customized_rules = ((NAMED_BROADCAST_AXIS, None), (NAMED_TP_AXIS, DEVICE_TP_AXIS))
sharding_rules = te_flax.extend_logical_axis_rules(tuple()) + customized_rules
params_pspec = get_params_pspec(sharding_rules, abs_var_collect)
inputs_pspec = jax.sharding.PartitionSpec(DEVICE_DP_AXIS, None)
masks_pspec = jax.sharding.PartitionSpec(DEVICE_DP_AXIS, None, None, None)
in_shardings = (None, inputs_pspec, masks_pspec)
            out_shardings = {key: params_pspec if key == PARAMS_KEY else None
                             for key in abs_var_collect}
pjit_encoder_init = pjit(encoder.init, in_shardings, out_shardings)
var_collect = pjit_encoder_init(init_rngs, inputs, masks)
optimizer = optax.adamw(args.lr)
var_collect, params = flax.core.pop(var_collect, PARAMS_KEY)
state = train_state.TrainState.create(apply_fn=encoder.apply,
params=params,
tx=optimizer)
state_pspec = get_state_pspec(state, params_pspec)
labels_pspec = jax.sharding.PartitionSpec(DEVICE_DP_AXIS,)
in_shardings = (state_pspec, inputs_pspec, masks_pspec, labels_pspec, None, None)
out_shardings = (state_pspec, None, None, None)
pjit_train_step = pjit(train_step, in_shardings, out_shardings, static_argnums=(6,))
in_shardings = (state_pspec, inputs_pspec, masks_pspec, labels_pspec, None)
out_shardings = (None, None)
pjit_eval_step = pjit(eval_step, in_shardings, out_shardings)
if args.use_fp8:
labels = jnp.zeros(label_shape, dtype=jnp.bfloat16)
check_fp8(state, var_collect, inputs, masks, labels)
if args.dry_run:
labels = jnp.zeros(label_shape, dtype=jnp.bfloat16)
rngs = {DROPOUT_KEY: dropout_rng}
pjit_train_step(state, inputs, masks, labels, var_collect, rngs, args.use_fp8)
print("PASSED")
else:
for epoch in range(1, args.epochs + 1):
rng, input_rng = jax.random.split(rng)
rng, dropout_rng = jax.random.split(rng)
rngs = {INPUT_KEY: input_rng, DROPOUT_KEY: dropout_rng}
state, train_loss, train_accuracy, var_collect = train_epoch(
state, train_ds, args.batch_size, rngs, var_collect, args.use_fp8,
pjit_train_step, shard_mesh, inputs_pspec, masks_pspec, labels_pspec)
test_loss, test_accuracy = eval_model(state, test_ds, args.test_batch_size,
var_collect, pjit_eval_step, shard_mesh,
inputs_pspec, masks_pspec, labels_pspec)
if args.process_id == 0:
print(f"Epoch: {epoch:>2} "
f"Train Loss: {train_loss:.6f} "
f"Train Accuracy: {train_accuracy:.6f} "
f"Test Loss: {test_loss:.6f} "
f"Test Accuracy: {test_accuracy:.6f} ")
jax.distributed.shutdown()
return [train_loss, train_accuracy, test_loss, test_accuracy]
def encoder_parser(args):
"""Training settings."""
parser = argparse.ArgumentParser(description="JAX Encoder Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for testing (default: 64)",
)
parser.add_argument(
"--max-seq-len",
type=int,
default=32,
metavar="N",
help="maximum sequence length (default: 32)",
)
parser.add_argument(
"--epochs",
type=int,
default=3,
metavar="N",
help="number of epochs to train (default: 3)",
)
parser.add_argument(
"--lr",
type=float,
default=0.0001,
metavar="LR",
help="learning rate (default: 0.0001)",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument("--seed", type=int, default=0, metavar="S", help="random seed (default: 0)")
parser.add_argument("--use-fp8",
action="store_true",
default=False,
help="Use FP8 for inference and training without recalibration")
parser.add_argument("--coordinator-address",
type=str,
default="127.0.0.1:1234",
help="the IP address of process 0 and a port on \
which that process should launch a coordinator service \
(default: 127.0.0.1:1234)")
parser.add_argument("--num-process",
type=int,
default=1,
help="number of processes (default: 1)")
parser.add_argument("--process-id",
type=int,
default=0,
help="the ID number of the current process (default: 0)")
return parser.parse_args(args)
def query_gpu(q):
"""Query GPU info on the system"""
gpu_has_fp8, reason = te.fp8.is_fp8_available()
num_gpu = len(jax.devices())
q.put([num_gpu, gpu_has_fp8, reason])
def unittest_query_gpu():
r"""
It is only used by TestEncoder.
The `jax.distributed.initialize` must be called before any other JAX or Flax API,
otherwise `jax.local_devices` will be incorrect.
Thus, fork another process to query number of GPUs and FP8 capability.
"""
q = mp.Queue()
p = mp.Process(target=query_gpu, args=(q,))
p.start()
num_gpu, gpu_has_fp8, reason = q.get()
p.join()
return num_gpu, gpu_has_fp8, reason
class TestEncoder(unittest.TestCase):
"""Encoder unittests"""
num_gpu, gpu_has_fp8, reason = unittest_query_gpu()
def exec(self, use_fp8):
"""Run 3 epochs for testing"""
num_gpu = self.num_gpu
tp_size = 2 if num_gpu > 1 and num_gpu % 2 == 0 else 1
dp_size = num_gpu // tp_size
batch_size = 64 // dp_size
arg_list = []
for i in range(num_gpu):
args = encoder_parser([])
args.num_process = num_gpu
args.use_fp8 = use_fp8
args.batch_size = batch_size
args.test_batch_size = batch_size
args.process_id = i
arg_list.append(args)
with mp.Pool(self.num_gpu) as p:
results = p.map(train_and_evaluate, arg_list)
return results
def test_te_bf16(self):
"""Test Transformer Engine with BF16"""
results = self.exec(False)
actual = results[0]
assert actual[0] < 0.45 and actual[1] > 0.79
@unittest.skipIf(not gpu_has_fp8, reason)
def test_te_fp8(self):
"""Test Transformer Engine with FP8"""
results = self.exec(True)
actual = results[0]
assert actual[0] < 0.45 and actual[1] > 0.79
if __name__ == "__main__":
train_and_evaluate(encoder_parser(None))
| TransformerEngine-main | examples/jax/encoder/test_multiprocessing_encoder.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Encoder training on single GPU"""
import argparse
import unittest
from functools import partial
import flax
import jax
import jax.numpy as jnp
import nltk
import numpy as np
import optax
from datasets import load_dataset
from flax import linen as nn
from flax.training import train_state
import transformer_engine.jax as te
import transformer_engine.jax.flax as te_flax
PARAMS_KEY = 'params'
DROPOUT_KEY = 'dropout'
INPUT_KEY = 'input_rng'
class Net(nn.Module):
"""NLP Encoder"""
num_embed: int
@nn.compact
def __call__(self, x, mask, disable_dropout=False):
x = nn.Embed(num_embeddings=self.num_embed, features=256, dtype=jnp.bfloat16)(x)
te_Encoder = partial(te_flax.TransformerLayer,
hidden_size=256,
mlp_hidden_size=1024,
num_attention_heads=8,
hidden_dropout=0.1,
attention_dropout=0.1,
dropout_rng_name=DROPOUT_KEY,
layer_type=te_flax.TransformerLayerType.ENCODER,
self_attn_mask_type='padding',
enable_relative_embedding=False,
dtype=jnp.bfloat16)
x = te_Encoder()(x, attention_mask=mask, deterministic=disable_dropout)
x = x.reshape(x.shape[0], -1)
x = te_flax.DenseGeneral(features=256, dtype=jnp.bfloat16)(x)
x = te_flax.DenseGeneral(features=256, dtype=jnp.bfloat16)(x)
x = nn.Dense(features=2, dtype=jnp.bfloat16)(x)
return x
@partial(jax.jit, static_argnums=6)
def train_step(state, inputs, masks, labels, var_collect, rngs, use_fp8):
"""Computes gradients, loss and accuracy for a single batch."""
def loss_fn(var_collect, disable_dropout=False):
logits = state.apply_fn(var_collect, inputs, masks, disable_dropout, rngs=rngs)
one_hot = jax.nn.one_hot(labels, 2)
loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot))
return loss, logits
var_collect = {**var_collect, PARAMS_KEY: state.params}
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss, logits), grads = grad_fn(var_collect)
accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
var_collect, grads = flax.core.pop(grads, PARAMS_KEY)
state = state.apply_gradients(grads=grads)
if use_fp8:
var_collect = te.update_fp8_metas(var_collect)
return state, loss, accuracy, var_collect
def train_epoch(state, train_ds, batch_size, rngs, var_collect, use_fp8):
"""Train for a single epoch."""
train_ds_size = len(train_ds['sentence'])
steps_per_epoch = train_ds_size // batch_size
perms = jax.random.permutation(rngs[INPUT_KEY], train_ds_size)
perms = perms[:steps_per_epoch * batch_size] # skip incomplete batch
perms = perms.reshape((steps_per_epoch, batch_size))
epoch_loss = []
epoch_accuracy = []
for perm in perms:
batch_inputs = train_ds['sentence'][perm, ...]
batch_masks = train_ds['mask'][perm, ...]
batch_labels = train_ds['label'][perm, ...]
state, loss, accuracy, var_collect = train_step(state, batch_inputs, batch_masks,
batch_labels, var_collect, rngs, use_fp8)
epoch_loss.append(loss)
epoch_accuracy.append(accuracy)
avg_loss = np.mean(epoch_loss)
avg_accuracy = np.mean(epoch_accuracy)
return state, avg_loss, avg_accuracy, var_collect
@jax.jit
def eval_step(state, inputs, masks, labels, var_collect):
"""Computes loss and accuracy for a single batch."""
def loss_fn(var_collect, disable_dropout=False):
logits = state.apply_fn(var_collect, inputs, masks, disable_dropout)
one_hot = jax.nn.one_hot(labels, 2)
loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot))
return loss, logits
var_collect = {**var_collect, PARAMS_KEY: state.params}
loss, logits = loss_fn(var_collect, disable_dropout=True)
accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
return loss, accuracy
def eval_model(state, test_ds, batch_size, var_collect):
"""Evaluation loop."""
test_ds_size = len(test_ds['sentence'])
num_steps = test_ds_size // batch_size
valid_size = num_steps * batch_size
all_loss = []
all_accuracy = []
for batch_start in range(0, valid_size, batch_size):
batch_end = batch_start + batch_size
batch_inputs = test_ds['sentence'][batch_start:batch_end]
batch_masks = test_ds['mask'][batch_start:batch_end]
batch_labels = test_ds['label'][batch_start:batch_end]
loss, accuracy = eval_step(state, batch_inputs, batch_masks, batch_labels, var_collect)
all_loss.append(loss)
all_accuracy.append(accuracy)
avg_loss = np.mean(all_loss)
avg_accuracy = np.mean(all_accuracy)
return avg_loss, avg_accuracy
def data_preprocess(dataset, vocab, word_id, max_seq_len):
"""Convert tokens to numbers."""
nltk.download('punkt')
dataset_size = len(dataset['sentence'])
output = np.zeros((dataset_size, max_seq_len), dtype=np.int32)
mask_3d = np.ones((dataset_size, max_seq_len, max_seq_len), dtype=np.uint8)
for j, sentence in enumerate(dataset['sentence']):
tokens = nltk.word_tokenize(sentence)
tensor = output[j]
for i, word in enumerate(tokens):
if i >= max_seq_len:
break
if word not in vocab:
vocab[word] = word_id
tensor[i] = word_id
word_id = word_id + 1
else:
tensor[i] = vocab[word]
seq_len = min(len(tokens), max_seq_len)
mask_2d = mask_3d[j]
mask_2d[:seq_len, :seq_len] = 0
new_dataset = {
'sentence': output,
'label': dataset['label'].astype(np.float32),
'mask': mask_3d.reshape((dataset_size, 1, max_seq_len, max_seq_len))
}
return new_dataset, vocab, word_id
def get_datasets(max_seq_len):
"""Load GLUE train and test datasets into memory."""
vocab = {}
word_id = 0
train_ds = load_dataset('glue', 'cola', split='train')
train_ds.set_format(type='np')
train_ds, vocab, word_id = data_preprocess(train_ds, vocab, word_id, max_seq_len)
test_ds = load_dataset('glue', 'cola', split='validation')
test_ds.set_format(type='np')
test_ds, vocab, word_id = data_preprocess(test_ds, vocab, word_id, max_seq_len)
return train_ds, test_ds, word_id
def check_fp8(state, var_collect, inputs, masks, labels):
"Check if model includes FP8."
rngs = {DROPOUT_KEY: jax.random.PRNGKey(0)}
assert "Float8" in str(
jax.make_jaxpr(train_step, static_argnums=6)(state, inputs, masks, labels, var_collect,
rngs, True))
def train_and_evaluate(args):
"""Execute model training and evaluation loop."""
print(args)
train_ds, test_ds, num_embed = get_datasets(args.max_seq_len)
rng = jax.random.PRNGKey(args.seed)
rng, params_rng = jax.random.split(rng)
rng, dropout_rng = jax.random.split(rng)
init_rngs = {PARAMS_KEY: params_rng, DROPOUT_KEY: dropout_rng}
input_shape = [args.batch_size, args.max_seq_len]
mask_shape = [args.batch_size, 1, args.max_seq_len, args.max_seq_len]
label_shape = [args.batch_size]
with te.fp8_autocast(enabled=args.use_fp8):
encoder = Net(num_embed)
inputs = jnp.zeros(input_shape, dtype=jnp.int32)
masks = jnp.zeros(mask_shape, dtype=jnp.uint8)
var_collect = encoder.init(init_rngs, inputs, masks)
tx = optax.adamw(args.lr)
state = train_state.TrainState.create(apply_fn=encoder.apply,
params=var_collect[PARAMS_KEY],
tx=tx)
if args.use_fp8:
labels = jnp.zeros(label_shape, dtype=jnp.bfloat16)
check_fp8(state, var_collect, inputs, masks, labels)
if args.dry_run:
labels = jnp.zeros(label_shape, dtype=jnp.bfloat16)
rngs = {DROPOUT_KEY: dropout_rng}
train_step(state, inputs, masks, labels, var_collect, rngs, args.use_fp8)
print("PASSED")
return None
for epoch in range(1, args.epochs + 1):
rng, input_rng = jax.random.split(rng)
rng, dropout_rng = jax.random.split(rng)
rngs = {INPUT_KEY: input_rng, DROPOUT_KEY: dropout_rng}
state, train_loss, train_accuracy, var_collect = train_epoch(
state, train_ds, args.batch_size, rngs, var_collect, args.use_fp8)
test_loss, test_accuracy = eval_model(state, test_ds, args.test_batch_size, var_collect)
print(f"Epoch: {epoch:>2} "
f"Train Loss: {train_loss:.6f} "
f"Train Accuracy: {train_accuracy:.6f} "
f"Test Loss: {test_loss:.6f} "
f"Test Accuracy: {test_accuracy:.6f} ")
return [train_loss, train_accuracy, test_loss, test_accuracy]
def encoder_parser(args):
"""Training settings."""
parser = argparse.ArgumentParser(description="JAX Encoder Example")
parser.add_argument(
"--batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for training (default: 64)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=64,
metavar="N",
help="input batch size for testing (default: 64)",
)
parser.add_argument(
"--max-seq-len",
type=int,
default=32,
metavar="N",
help="maximum sequence length (default: 32)",
)
parser.add_argument(
"--epochs",
type=int,
default=3,
metavar="N",
help="number of epochs to train (default: 3)",
)
parser.add_argument(
"--lr",
type=float,
default=0.0001,
metavar="LR",
help="learning rate (default: 0.0001)",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument("--seed", type=int, default=0, metavar="S", help="random seed (default: 0)")
parser.add_argument("--use-fp8",
action="store_true",
default=False,
help="Use FP8 for inference and training without recalibration")
return parser.parse_args(args)
class TestEncoder(unittest.TestCase):
"""Encoder unittests"""
gpu_has_fp8, reason = te.fp8.is_fp8_available()
@classmethod
def setUpClass(cls):
"""Run 4 epochs for testing"""
cls.args = encoder_parser(["--epochs", "3"])
def test_te_bf16(self):
"""Test Transformer Engine with BF16"""
actual = train_and_evaluate(self.args)
assert actual[0] < 0.45 and actual[1] > 0.79
@unittest.skipIf(not gpu_has_fp8, reason)
def test_te_fp8(self):
"""Test Transformer Engine with FP8"""
self.args.use_fp8 = True
actual = train_and_evaluate(self.args)
assert actual[0] < 0.45 and actual[1] > 0.79
if __name__ == "__main__":
train_and_evaluate(encoder_parser(None))
| TransformerEngine-main | examples/jax/encoder/test_single_gpu_encoder.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Encoder training on multi-GPU with data parallelism"""
import argparse
import unittest
from functools import partial
import flax
import jax
import jax.numpy as jnp
import nltk
import numpy as np
import optax
from datasets import load_dataset
from flax import linen as nn
from flax.linen import partitioning as nn_partitioning
from flax.training import train_state
from jax.experimental import mesh_utils
from jax.experimental.pjit import pjit
import transformer_engine.jax as te
import transformer_engine.jax.flax as te_flax
DEVICE_DP_AXIS = 'data'
PARAMS_KEY = 'params'
PARAMS_AXES_KEY = PARAMS_KEY + '_axes'
DROPOUT_KEY = 'dropout'
INPUT_KEY = 'input_rng'
class Net(nn.Module):
"""NLP Encoder"""
num_embed: int
@nn.compact
def __call__(self, x, mask, disable_dropout=False):
x = nn.Embed(num_embeddings=self.num_embed, features=256, dtype=jnp.bfloat16)(x)
te_Encoder = partial(te_flax.TransformerLayer,
hidden_size=256,
mlp_hidden_size=1024,
num_attention_heads=8,
hidden_dropout=0.1,
attention_dropout=0.1,
dropout_rng_name=DROPOUT_KEY,
layer_type=te_flax.TransformerLayerType.ENCODER,
self_attn_mask_type='padding',
enable_relative_embedding=False,
dtype=jnp.bfloat16)
x = te_Encoder()(x, attention_mask=mask, deterministic=disable_dropout)
x = x.reshape(x.shape[0], -1)
x = te_flax.DenseGeneral(features=256, sharding_type=te.ShardingType.DP,
dtype=jnp.bfloat16)(x)
x = te_flax.DenseGeneral(features=256, sharding_type=te.ShardingType.DP,
dtype=jnp.bfloat16)(x)
x = nn.Dense(features=2, dtype=jnp.bfloat16)(x)
return x
def train_step(state, inputs, masks, labels, var_collect, rngs, use_fp8):
"""Computes gradients, loss and accuracy for a single batch."""
def loss_fn(var_collect, disable_dropout=False):
logits = state.apply_fn(var_collect, inputs, masks, disable_dropout, rngs=rngs)
one_hot = jax.nn.one_hot(labels, 2)
loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot))
return loss, logits
var_collect = {**var_collect, PARAMS_KEY: state.params}
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(loss, logits), grads = grad_fn(var_collect)
accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
var_collect, grads = flax.core.pop(grads, PARAMS_KEY)
state = state.apply_gradients(grads=grads)
if use_fp8:
var_collect = te.update_fp8_metas(var_collect)
return state, loss, accuracy, var_collect
def train_epoch(state, train_ds, batch_size, rngs, var_collect, use_fp8, train_fn):
"""Train for a single epoch."""
train_ds_size = len(train_ds['sentence'])
steps_per_epoch = train_ds_size // batch_size
perms = jax.random.permutation(rngs[INPUT_KEY], train_ds_size)
perms = perms[:steps_per_epoch * batch_size] # skip incomplete batch
perms = perms.reshape((steps_per_epoch, batch_size))
epoch_loss = []
epoch_accuracy = []
for perm in perms:
batch_inputs = train_ds['sentence'][perm, ...]
batch_masks = train_ds['mask'][perm, ...]
batch_labels = train_ds['label'][perm, ...]
state, loss, accuracy, var_collect = train_fn(state, batch_inputs, batch_masks,
batch_labels, var_collect, rngs, use_fp8)
epoch_loss.append(loss)
epoch_accuracy.append(accuracy)
avg_loss = np.mean(epoch_loss)
avg_accuracy = np.mean(epoch_accuracy)
return state, avg_loss, avg_accuracy, var_collect
def eval_step(state, inputs, masks, labels, var_collect):
"""Computes loss and accuracy for a single batch."""
def loss_fn(var_collect, disable_dropout=False):
logits = state.apply_fn(var_collect, inputs, masks, disable_dropout)
one_hot = jax.nn.one_hot(labels, 2)
loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot))
return loss, logits
var_collect = {**var_collect, PARAMS_KEY: state.params}
loss, logits = loss_fn(var_collect, disable_dropout=True)
accuracy = jnp.mean(jnp.argmax(logits, -1) == labels)
return loss, accuracy
def eval_model(state, test_ds, batch_size, var_collect, eval_fn):
"""Evaluation loop."""
test_ds_size = len(test_ds['sentence'])
num_steps = test_ds_size // batch_size
valid_size = num_steps * batch_size
all_loss = []
all_accuracy = []
for batch_start in range(0, valid_size, batch_size):
batch_end = batch_start + batch_size
batch_inputs = test_ds['sentence'][batch_start:batch_end]
batch_masks = test_ds['mask'][batch_start:batch_end]
batch_labels = test_ds['label'][batch_start:batch_end]
loss, accuracy = eval_fn(state, batch_inputs, batch_masks, batch_labels, var_collect)
all_loss.append(loss)
all_accuracy.append(accuracy)
avg_loss = np.mean(all_loss)
avg_accuracy = np.mean(all_accuracy)
return avg_loss, avg_accuracy
def data_preprocess(dataset, vocab, word_id, max_seq_len):
"""Convert tokens to numbers."""
nltk.download('punkt')
dataset_size = len(dataset['sentence'])
output = np.zeros((dataset_size, max_seq_len), dtype=np.int32)
mask_3d = np.ones((dataset_size, max_seq_len, max_seq_len), dtype=np.uint8)
for j, sentence in enumerate(dataset['sentence']):
tokens = nltk.word_tokenize(sentence)
tensor = output[j]
for i, word in enumerate(tokens):
if i >= max_seq_len:
break
if word not in vocab:
vocab[word] = word_id
tensor[i] = word_id
word_id = word_id + 1
else:
tensor[i] = vocab[word]
seq_len = min(len(tokens), max_seq_len)
mask_2d = mask_3d[j]
mask_2d[:seq_len, :seq_len] = 0
new_dataset = {
'sentence': output,
'label': dataset['label'].astype(np.float32),
'mask': mask_3d.reshape((dataset_size, 1, max_seq_len, max_seq_len))
}
return new_dataset, vocab, word_id
def get_datasets(max_seq_len):
"""Load GLUE train and test datasets into memory."""
vocab = {}
word_id = 0
train_ds = load_dataset('glue', 'cola', split='train')
train_ds.set_format(type='np')
train_ds, vocab, word_id = data_preprocess(train_ds, vocab, word_id, max_seq_len)
test_ds = load_dataset('glue', 'cola', split='validation')
test_ds.set_format(type='np')
test_ds, vocab, word_id = data_preprocess(test_ds, vocab, word_id, max_seq_len)
return train_ds, test_ds, word_id
def check_fp8(state, var_collect, inputs, masks, labels):
"Check if model includes FP8."
rngs = {DROPOUT_KEY: jax.random.PRNGKey(0)}
assert "Float8" in str(
jax.make_jaxpr(train_step, static_argnums=6)(state, inputs, masks, labels, var_collect,
rngs, True))
def get_params_pspec(sharding_rules, abs_var_collect):
"""Refer params to create params partition spec"""
rules_dict = {}
for key, value in sharding_rules:
rules_dict[key] = value
def to_device_axis(logical_axis):
partitions = [rules_dict[key] for key in logical_axis]
return jax.sharding.PartitionSpec(*partitions)
params_axes = abs_var_collect.get(PARAMS_AXES_KEY, {})
params_axes_pspec = jax.tree_map(to_device_axis, nn_partitioning.get_axis_names(params_axes))
params_axes_pspec = flax.core.unfreeze(params_axes_pspec)
params_pspec = jax.tree_map(lambda x: jax.sharding.PartitionSpec(), abs_var_collect[PARAMS_KEY])
params_pspec = {**params_pspec, **params_axes_pspec}
return params_pspec
def get_state_pspec(state, params_pspec):
"""Refer params_pspec to create state partition spec"""
def replace_params(x):
return params_pspec if isinstance(x, dict) else None
state_pspec = jax.tree_map(replace_params, state, is_leaf=lambda x: isinstance(x, dict))
return state_pspec
def train_and_evaluate(args):
"""Execute model training and evaluation loop."""
print(args)
train_ds, test_ds, num_embed = get_datasets(args.max_seq_len)
num_gpu = jax.local_device_count()
assert args.batch_size % num_gpu == 0, f"Batch size needs to be multiple of {num_gpu}"
assert args.test_batch_size % num_gpu == 0, \
f"Test batch size needs to be multiple of {num_gpu}"
device_mesh = mesh_utils.create_device_mesh((num_gpu,))
with jax.sharding.Mesh(devices=device_mesh, axis_names=(DEVICE_DP_AXIS,)):
rng = jax.random.PRNGKey(args.seed)
rng, params_rng = jax.random.split(rng)
rng, dropout_rng = jax.random.split(rng)
init_rngs = {PARAMS_KEY: params_rng, DROPOUT_KEY: dropout_rng}
input_shape = [args.batch_size, args.max_seq_len]
mask_shape = [args.batch_size, 1, args.max_seq_len, args.max_seq_len]
label_shape = [args.batch_size]
with te.fp8_autocast(args.use_fp8, sharding_resource=te.ShardingResource(DEVICE_DP_AXIS)):
encoder = Net(num_embed)
inputs = jnp.zeros(input_shape, dtype=jnp.int32)
masks = jnp.zeros(mask_shape, dtype=jnp.uint8)
abs_var_collect = jax.eval_shape(encoder.init, init_rngs, inputs, masks)
sharding_rules = te_flax.extend_logical_axis_rules(tuple())
params_pspec = get_params_pspec(sharding_rules, abs_var_collect)
inputs_pspec = jax.sharding.PartitionSpec(DEVICE_DP_AXIS, None)
masks_pspec = jax.sharding.PartitionSpec(DEVICE_DP_AXIS, None, None, None)
in_shardings = (None, inputs_pspec, masks_pspec)
            out_shardings = {key: params_pspec if key == PARAMS_KEY else None \
for key in abs_var_collect}
pjit_encoder_init = pjit(encoder.init, in_shardings, out_shardings)
var_collect = pjit_encoder_init(init_rngs, inputs, masks)
optimizer = optax.adamw(args.lr)
var_collect, params = flax.core.pop(var_collect, PARAMS_KEY)
state = train_state.TrainState.create(apply_fn=encoder.apply,
params=params,
tx=optimizer)
state_pspec = get_state_pspec(state, params_pspec)
labels_pspec = jax.sharding.PartitionSpec(DEVICE_DP_AXIS,)
in_shardings = (state_pspec, inputs_pspec, masks_pspec, labels_pspec, None, None)
out_shardings = (state_pspec, None, None, None)
pjit_train_step = pjit(train_step, in_shardings, out_shardings, static_argnums=(6,))
in_shardings = (state_pspec, inputs_pspec, masks_pspec, labels_pspec, None)
out_shardings = (None, None)
pjit_eval_step = pjit(eval_step, in_shardings, out_shardings)
if args.use_fp8:
labels = jnp.zeros(label_shape, dtype=jnp.bfloat16)
check_fp8(state, var_collect, inputs, masks, labels)
if args.dry_run:
labels = jnp.zeros(label_shape, dtype=jnp.bfloat16)
rngs = {DROPOUT_KEY: dropout_rng}
pjit_train_step(state, inputs, masks, labels, var_collect, rngs, args.use_fp8)
print("PASSED")
return None
for epoch in range(1, args.epochs + 1):
rng, input_rng = jax.random.split(rng)
rng, dropout_rng = jax.random.split(rng)
rngs = {INPUT_KEY: input_rng, DROPOUT_KEY: dropout_rng}
state, train_loss, train_accuracy, var_collect = train_epoch(
state, train_ds, args.batch_size, rngs, var_collect, args.use_fp8,
pjit_train_step)
test_loss, test_accuracy = eval_model(state, test_ds, args.test_batch_size,
var_collect, pjit_eval_step)
print(f"Epoch: {epoch:>2} "
f"Train Loss: {train_loss:.6f} "
f"Train Accuracy: {train_accuracy:.6f} "
f"Test Loss: {test_loss:.6f} "
f"Test Accuracy: {test_accuracy:.6f} ")
return [train_loss, train_accuracy, test_loss, test_accuracy]
def encoder_parser(args):
"""Training settings."""
parser = argparse.ArgumentParser(description="JAX Encoder Example")
parser.add_argument(
"--batch-size",
type=int,
default=128,
metavar="N",
help="input batch size for training (default: 128)",
)
parser.add_argument(
"--test-batch-size",
type=int,
default=128,
metavar="N",
help="input batch size for testing (default: 128)",
)
parser.add_argument(
"--max-seq-len",
type=int,
default=32,
metavar="N",
help="maximum sequence length (default: 32)",
)
parser.add_argument(
"--epochs",
type=int,
default=3,
metavar="N",
help="number of epochs to train (default: 3)",
)
parser.add_argument(
"--lr",
type=float,
default=0.0001,
metavar="LR",
help="learning rate (default: 0.0001)",
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="quickly check a single pass",
)
parser.add_argument("--seed", type=int, default=0, metavar="S", help="random seed (default: 0)")
parser.add_argument("--use-fp8",
action="store_true",
default=False,
help="Use FP8 for inference and training without recalibration")
return parser.parse_args(args)
class TestEncoder(unittest.TestCase):
"""Encoder unittests"""
gpu_has_fp8, reason = te.fp8.is_fp8_available()
@classmethod
def setUpClass(cls):
"""Run 3 epochs for testing"""
cls.args = encoder_parser(["--epochs", "3"])
def test_te_bf16(self):
"""Test Transformer Engine with BF16"""
actual = train_and_evaluate(self.args)
assert actual[0] < 0.50 and actual[1] > 0.76
@unittest.skipIf(not gpu_has_fp8, reason)
def test_te_fp8(self):
"""Test Transformer Engine with FP8"""
self.args.use_fp8 = True
actual = train_and_evaluate(self.args)
assert actual[0] < 0.50 and actual[1] > 0.76
if __name__ == "__main__":
train_and_evaluate(encoder_parser(None))
| TransformerEngine-main | examples/jax/encoder/test_multigpu_encoder.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Top level package"""
from . import common
try:
    from . import pytorch
except ImportError:
    pass
try:
    from . import jax
except ImportError:
    pass
try:
    from . import tensorflow
except ImportError:
    pass
| TransformerEngine-main | transformer_engine/__init__.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""FP8 utilities for TransformerEngine"""
from contextlib import contextmanager
from typing import Tuple, Optional, Dict, Any, Union
import numpy as np
import paddle
import transformer_engine_paddle as tex
from transformer_engine.common.recipe import DelayedScaling, Format
from .constants import dist_group_type
from .fp8_buffer import FP8MetaFwdBuffer, FP8MetaBwdBuffer, FP8RecomputeBuffer
# FP8 support
_is_fp8_available = None
_reason_for_no_fp8 = ""
def _check_fp8_support() -> Tuple[bool, str]:
"""Return if fp8 support is available"""
# Check GPU arch
arch = paddle.device.cuda.get_device_capability()
if arch >= (9, 0): # hopper and above
return True, ""
if arch < (8, 9): # pre-ada
return False, "Device compute capability 8.9 or higher required for FP8 execution."
# Special handling for Ada
if tex.get_cublasLt_version() < 120103:
return False, "CublasLt version 12.1.3.x or higher required for FP8 execution on Ada."
if not paddle.version.cuda():
return False, "Cuda version 12.1 or higher required for FP8 execution on Ada."
if tuple(int(v) for v in paddle.version.cuda().split(".")) < (12, 1):
return False, "Cuda version 12.1 or higher required for FP8 execution on Ada."
return True, ""
def is_fp8_available() -> Tuple[bool, str]:
"""Return if fp8 support is available"""
global _is_fp8_available, _reason_for_no_fp8
if _is_fp8_available is None:
_is_fp8_available, _reason_for_no_fp8 = _check_fp8_support()
return _is_fp8_available, _reason_for_no_fp8
class FP8State:
"""Stores FP8 state"""
def __init__(self):
self._fp8_enabled = False
self._fp8_calibration = False
self._fp8_recipe = None
self._fp8_distributed_group = None
self._is_first_fp8_module = False
self._fp8_autocast_counter = 0
self._fp8_autocast_depth = 0
self._fp8_recompute_enabled = False
self._fp8_fwd_buffer = FP8MetaFwdBuffer()
self._fp8_bwd_buffer = FP8MetaBwdBuffer()
self._fp8_recompute_buffer = FP8RecomputeBuffer()
def is_fp8_enabled(self) -> bool:
"""Is FP8 enabled"""
return self._fp8_enabled
def is_fp8_calibration(self) -> bool:
"""Is FP8 calibration"""
return self._fp8_calibration
def get_fp8_recipe(self) -> DelayedScaling:
"""Return the fp8 recipe"""
return self._fp8_recipe
@staticmethod
def get_default_fp8_recipe() -> DelayedScaling:
"""FP8 recipe if not provided by user
Margin = 0, interval = 1, E4M3
"""
return DelayedScaling()
def get_autocast_id(self) -> int:
"""Returns the number of times of entering the `fp8_autocast` context.
as a unique ID for different training steps."""
return self._fp8_autocast_counter
def is_first_fp8_module(self):
"""Returns `True` only the first time when called multiple
times from within the same `fp8_autocast` context.
"""
tmp = self._is_first_fp8_module
self._is_first_fp8_module = False
return tmp
def get_fp8_group(self) -> Union[dist_group_type, None]:
"""Return the fp8 group for scale/amax comm"""
return self._fp8_distributed_group
def get_fp8_fwd_buffer(self) -> FP8MetaFwdBuffer:
"""Returns global fp8 forward buffer."""
return self._fp8_fwd_buffer
def get_fp8_bwd_buffer(self) -> FP8MetaBwdBuffer:
"""Returns global fp8 backward buffer."""
return self._fp8_bwd_buffer
def is_fp8_recompute_enabled(self) -> bool:
"""Is FP8 recompute enabled"""
return self._fp8_recompute_enabled
def get_fp8_recompute_buffer(self) -> FP8RecomputeBuffer:
"""Returns global fp8 recompute buffer."""
return self._fp8_recompute_buffer
def enter(
self,
enabled: bool,
calibrating: bool,
fp8_recipe: Optional[DelayedScaling],
fp8_group: Optional[dist_group_type],
) -> None:
"""Called when entering 'fp8_autocast'"""
self.saved_states = (self._fp8_enabled, self._fp8_calibration, self._fp8_recipe,
self._fp8_distributed_group, self._is_first_fp8_module)
self._fp8_enabled = enabled
self._fp8_calibration = calibrating
self._fp8_recipe = self.get_default_fp8_recipe() if fp8_recipe is None else fp8_recipe
self._fp8_distributed_group = fp8_group
if self._fp8_autocast_depth == 0:
self._is_first_fp8_module = True
self._fp8_autocast_counter += 1
self._fp8_autocast_depth += 1
def exit(self):
"""Called when exiting 'fp8_autocast'"""
# Restore saved states
(self._fp8_enabled, self._fp8_calibration, self._fp8_recipe, self._fp8_distributed_group,
self._is_first_fp8_module) = self.saved_states
self._fp8_autocast_depth -= 1
if self._fp8_autocast_depth == 0:
self._fp8_fwd_buffer.finalize()
_global_fp8_state = FP8State()
def get_global_fp8_state() -> FP8State:
"""Get global fp8 state"""
return _global_fp8_state
@contextmanager
def fp8_autocast(
enabled: bool = False,
calibrating: bool = False,
fp8_recipe: Optional[DelayedScaling] = None,
fp8_group: Optional[dist_group_type] = None,
) -> None:
"""
Context manager for FP8 usage.
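
    Example (a minimal sketch; `layer` stands for any Transformer Engine Paddle layer):

        with fp8_autocast(enabled=True):
            out = layer(inp)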
"""
try:
_global_fp8_state.enter(enabled, calibrating, fp8_recipe, fp8_group)
if enabled:
fp8_available, reason_for_no_fp8 = is_fp8_available()
assert fp8_available, reason_for_no_fp8
yield
finally:
_global_fp8_state.exit()
def get_fp8_te_dtype(fp8_recipe: DelayedScaling, fprop_tensor: bool = True) -> tex.DType:
"""Get fp8 data type according to recipe and tensor"""
if fp8_recipe.fp8_format == Format.E4M3 or (fp8_recipe.fp8_format == Format.HYBRID
and fprop_tensor):
return tex.DType.kFloat8E4M3
return tex.DType.kFloat8E5M2
def amax_and_scale_update(
fp8_meta: Dict[str, Any],
fwd_update: bool,
) -> None:
"""Updates fp8 amaxes/scales for fwd | bwd."""
amax_compute = fp8_meta["recipe"].amax_compute_algo
sf_compute = fp8_meta["recipe"].scaling_factor_compute_algo
fp8_meta_tensor_key = "scaling_fwd" if fwd_update else "scaling_bwd"
fp8_max_key = "fp8_max_fwd" if fwd_update else "fp8_max_bwd"
if not callable(amax_compute) and sf_compute is None:
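        # Default delayed-scaling path: consume one amax from the rolling history,
        # advance the history, and refresh scale / scale_inv for the next iteration.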
# Obtain amax from history
amax_history = fp8_meta[fp8_meta_tensor_key].amax_history
if amax_compute == "max":
amax = paddle.max(amax_history, axis=0)
else: # amax_compute_algo == "most_recent"
amax = amax_history[0]
# Update amax history and set next amax to zero
if amax_history.shape[0] > 1:
amax_history = paddle.roll(amax_history, -1, 0)
amax_history[0] = 0.0
fp8_meta[fp8_meta_tensor_key].amax_history = amax_history
# Update scaling factor
fp8_meta[fp8_meta_tensor_key].scale = tex.update_scale(
amax=amax,
scale=fp8_meta[fp8_meta_tensor_key].scale,
fp8_max=fp8_meta[fp8_max_key],
margin=float(fp8_meta["recipe"].margin))
# Update scale_inv
fp8_meta[fp8_meta_tensor_key].scale_inv = \
1.0 / fp8_meta[fp8_meta_tensor_key].scale
else:
raise ValueError("We only support the fp8 recipe with 'max' or 'most_recent' "
"amax_compute_algo and default scaling_factor_compute_algo at this "
"moment.")
class FP8TensorMeta():
"""Holds FP8 scaling and amax history for FP8 layers"""
def __init__(self, is_forward: bool):
self.scale = paddle.Tensor()
self.scale_inv = paddle.Tensor()
self.amax_history = paddle.Tensor()
self.is_initialized = False
self.is_forward = is_forward
    def prepare(self, num_gemms: int, amax_history_len: int) -> None:
"""Prepare scales and amax tensors. It is called during fprop in each iteration.
If the meta tensors are not initialized yet, initialization is performed. If already
initialized, resize the meta tensors if amax_history_len has changed."""
if self.is_initialized:
# Handle changed amax history size.
curr_len = self.amax_history.shape[0]
num_fp8_tensors = self.amax_history.shape[1]
if amax_history_len < curr_len:
self.amax_history = (self.amax_history[:amax_history_len])
elif amax_history_len > curr_len:
extra_rows = amax_history_len - curr_len
self.amax_history = paddle.concat([
self.amax_history,
paddle.zeros((extra_rows, num_fp8_tensors), dtype='float32')
],
axis=0)
return
# Max. number of fp8 tensors per GEMM = 3 (input, weight, output) for fwd and
# 2 (grad_output and grad_input) for bwd
num_fp8_tensors = (num_gemms * 3 if self.is_forward else num_gemms * 2)
self.scale = paddle.ones(num_fp8_tensors, dtype='float32')
self.scale_inv = paddle.ones(num_fp8_tensors, dtype='float32')
self.amax_history = paddle.zeros([amax_history_len, num_fp8_tensors], dtype='float32')
self.is_initialized = True
def to_numpy(self):
"""Convert FP8 meta tensors to numpy."""
assert self.is_initialized, "FP8TensorMeta is not initialized yet."
return {
'scale': self.scale.numpy(),
'scale_inv': self.scale_inv.numpy(),
'amax_history': self.amax_history.numpy(),
}
def from_numpy(self, data: Dict[str, np.array]):
"""Set FP8 meta tensors from numpy"""
self.scale = paddle.to_tensor(data['scale'])
self.scale_inv = paddle.to_tensor(data['scale_inv'])
self.amax_history = paddle.to_tensor(data['amax_history'])
self.is_initialized = True
| TransformerEngine-main | transformer_engine/paddle/fp8.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Utils for profiling"""
from contextlib import contextmanager
from paddle.fluid import core
@contextmanager
def nvtx_range(msg):
"""Context to insert NVTX"""
core.nvprof_nvtx_push(msg)
yield
core.nvprof_nvtx_pop()
| TransformerEngine-main | transformer_engine/paddle/profile.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Constants"""
from enum import Enum
import paddle
import transformer_engine_paddle as tex
class FP8FwdTensors(Enum):
"""Used as named indices on the `scale`, `scale_inv`,
and `amax` tensors in the `FP8TensorMeta` class."""
GEMM1_INPUT = 0
GEMM1_WEIGHT = 1
GEMM1_OUTPUT = 2
GEMM2_INPUT = 3
GEMM2_WEIGHT = 4
GEMM2_OUTPUT = 5
class FP8BwdTensors(Enum):
"""Used as named indices on the `scale`, `scale_inv`,
and `amax` tensors in the `FP8TensorMeta` class."""
GRAD_OUTPUT1 = 0
GRAD_INPUT1 = 1
GRAD_OUTPUT2 = 2
GRAD_INPUT2 = 3
"""
Map from paddle dtype to TE dtype
"""
TE_DType = {
paddle.uint8: tex.DType.kByte,
paddle.int32: tex.DType.kInt32,
paddle.float32: tex.DType.kFloat32,
paddle.float16: tex.DType.kFloat16,
paddle.bfloat16: tex.DType.kBFloat16,
}
AttnMaskTypes = ("causal", "padding", "no_mask")
AttnTypes = ("self", "cross")
LayerTypes = ("encoder", "decoder")
GemmParallelModes = ("row", "column", None)
dist_group_type = paddle.distributed.collective.Group
RecomputeFunctionNames = ('unpack', 'backward')
| TransformerEngine-main | transformer_engine/paddle/constants.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Transformer Engine bindings for Paddle"""
from .fp8 import fp8_autocast
from .layer import (Linear, LayerNorm, LayerNormLinear, LayerNormMLP, FusedScaleMaskSoftmax,
DotProductAttention, MultiHeadAttention, TransformerLayer)
from .recompute import recompute
| TransformerEngine-main | transformer_engine/paddle/__init__.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Methods needed for distributed training."""
from contextlib import contextmanager
from typing import Optional, Union, Tuple
import paddle
import paddle.distributed.fleet.base.topology as tp
from paddle.distributed.fleet.meta_parallel import get_rng_state_tracker
from paddle.distributed.fleet.layers.mpu import mp_ops
from .constants import dist_group_type
_weight_split_axis = {
'transformer_engine': {
'row': 1,
'column': 0
},
'paddle': {
'row': 0,
'column': 1
}
}
def get_tp_group_and_world_size(tp_group: Union[dist_group_type, None],
enable_tp: bool = True) -> Tuple[Union[dist_group_type, None], int]:
"""Get TP group and world size using Fleet API"""
if not (paddle.distributed.is_initialized() and enable_tp):
return None, 1
model_parallel_group = (tp._HYBRID_PARALLEL_GROUP.get_model_parallel_group()
if tp_group is None else tp_group)
world_size = (tp._HYBRID_PARALLEL_GROUP.get_model_parallel_world_size()
if tp_group is None else tp_group.nranks)
return model_parallel_group, world_size
@contextmanager
def track_rng_state(enable: bool, **kwargs) -> None:
"""
Applies get_rng_state_tracker().rng_state() to the context.
If not enabled, it does nothing.
"""
if enable:
with get_rng_state_tracker().rng_state(**kwargs):
yield
else:
yield
def set_tensor_dist_attr(tensor: paddle.Tensor, is_parallel: bool, axis: int) -> None:
"""Set distributed attributes for the input tensor"""
tensor.is_distributed = is_parallel
if is_parallel:
tensor.split_axis = axis
def set_weight_tensor_dist_attr(tensor: paddle.Tensor, is_parallel: bool,
parallel_mode: Optional[str], backend: str) -> None:
"""Set distributed attributes for the weight tensor"""
if not is_parallel or parallel_mode is None:
return
set_tensor_dist_attr(tensor, is_parallel, axis=_weight_split_axis[backend][parallel_mode])
def allreduce(
input_: paddle.Tensor,
tp_group: Optional[dist_group_type] = None,
) -> paddle.Tensor:
"""All-reduce the input tensor across model parallel group."""
# Bypass the function if we are using only 1 GPU.
if tp_group is None or tp_group.nranks == 1:
return input_
# All-reduce.
output = mp_ops._mp_allreduce(
input_,
group=tp_group,
use_calc_stream=True,
use_model_parallel=True,
)
return output
def identity(
input_: paddle.Tensor,
tp_group: Optional[dist_group_type] = None,
) -> paddle.Tensor:
"""
Identity when forward.
Allreduce across model parallel group when backward.
"""
output = mp_ops._c_identity(input_, group=tp_group)
return output
| TransformerEngine-main | transformer_engine/paddle/distributed.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Utility functions for Transformer Engine modules"""
from typing import Optional, Tuple, Union
import paddle
import paddle.nn.functional as F
def cast_if_needed(tensor: Union[paddle.Tensor, None],
dtype: paddle.dtype) -> Union[paddle.Tensor, None]:
"""Cast tensor to dtype"""
return tensor if tensor is None or tensor.dtype == dtype else paddle.cast(tensor, dtype)
def cast_if_needed_inplace(tensor: Union[paddle.Tensor, None],
dtype: paddle.dtype) -> Union[paddle.Tensor, None]:
"""Cast tensor to dtype (inplace), not to be used on layer inputs"""
return tensor if tensor is None or tensor.dtype == dtype else tensor._to(dtype=dtype)
def check_dim_for_fp8_forward_exec(tensor: paddle.Tensor) -> bool:
"""For fp8 fprop (TN layout), inputs and weights must be such
that dim0 is divisible by 8 and dim1 is divisible by 16.
"""
return not tensor.shape[0] % 8 and not tensor.shape[1] % 16
def assert_dim_for_fp8_forward_exec(tensor: paddle.Tensor) -> None:
"""For fp8 fprop (TN layout), inputs and weights must be such
that dim0 is divisible by 8 and dim1 is divisible by 16.
"""
# single tensor check so it's clear which tensor is triggering the assertion
assert check_dim_for_fp8_forward_exec(tensor), (
"Tensor dimensions are not compatible for FP8 execution: "
f"({tensor.shape[0]} % 8 != 0, {tensor.shape[1]} % 16 != 0)")
def get_bias_dtype(activation_dtype: paddle.dtype):
"""Get bias dtype given activation_dtype"""
return paddle.bfloat16 if activation_dtype == paddle.float32 else activation_dtype
def get_paddle_act_func(activation):
"""Get paddle activation function"""
funcs = {
'gelu': F.gelu,
'relu': F.relu,
}
if activation not in funcs:
raise "Activation type " + activation + " is not supported."
return funcs[activation]
def attention_mask_func(attention_scores: paddle.Tensor,
attention_mask: paddle.Tensor) -> paddle.Tensor:
"""Get attention mask"""
def _masked_fill(x, mask, value):
y = paddle.full(x.shape, value, x.dtype)
return paddle.where(mask, y, x)
attention_scores = _masked_fill(attention_scores, attention_mask, -10000.0)
return attention_scores
def mask_to_cu_seqlens(mask: paddle.Tensor, need_kv: bool = False) -> paddle.Tensor:
"""Convert mask to cu_seqlens"""
assert 'bool' in str(mask.dtype), "mask must be bool dtype"
assert len(mask.shape) == 4 and mask.shape[1] == 1, "mask must be [b, 1, s_q, s_kv]"
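    # cu_seqlens are cumulative sequence lengths with a leading 0, e.g. actual
    # lengths [3, 5] -> [0, 3, 8]; a False mask entry marks a valid (unmasked) token.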
q_actual_seqlens = paddle.sum(mask[:, :, :, 0] == False, axis=(-1, -2), dtype='int32') # pylint: disable=singleton-comparison
q_cu_seqlens = paddle.cumsum(q_actual_seqlens)
q_cu_seqlens = paddle.concat([paddle.zeros([1], dtype=paddle.int32), q_cu_seqlens], axis=0)
if not need_kv:
return q_cu_seqlens, None
kv_actual_seqlens = paddle.sum(mask[:, :, 0, :] == False, axis=(-1, -2), dtype='int32') # pylint: disable=singleton-comparison
kv_cu_seqlens = paddle.cumsum(kv_actual_seqlens)
kv_cu_seqlens = paddle.concat([paddle.zeros([1], dtype=paddle.int32), kv_cu_seqlens], axis=0)
return q_cu_seqlens, kv_cu_seqlens
def divide(numerator: int, denominator: int) -> int:
"""Ensure that numerator is divisible by the denominator and return
the division value."""
assert (numerator % denominator == 0), f"{numerator} is not divisible by {denominator}"
return numerator // denominator
def save_for_backward_allow_none(ctx, *args) -> None:
"""Save tensors for backward. Args could be None"""
indices_mapping = []
tensors_to_save = []
for x in args:
if isinstance(x, paddle.Tensor):
indices_mapping.append(len(tensors_to_save))
tensors_to_save.append(x)
elif x is None:
indices_mapping.append(-1)
else:
raise ValueError(f"Type {type(x)} is not allowed.")
ctx._indices_mapping = indices_mapping
ctx.save_for_backward(*tensors_to_save)
def saved_tensor_allow_none(ctx) -> Tuple[Optional[paddle.Tensor]]:
"""Used with `save_for_backward_allow_none` in pair. Get saved tensors from ctx."""
assert hasattr(ctx, '_indices_mapping'), "`saved_tensor_allow_none` must be used " \
"with `save_for_backward_allow_none` in pair."
indices_mapping = ctx._indices_mapping
outputs = []
saved_tensors = ctx.saved_tensor()
for index in indices_mapping:
if index < 0:
outputs.append(None)
else:
outputs.append(saved_tensors[index])
return tuple(outputs)
| TransformerEngine-main | transformer_engine/paddle/utils.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""TE FP8 extensions and GEMMs"""
import math
from typing import Optional, Tuple, Union
import paddle
import transformer_engine_paddle as tex
from .constants import TE_DType, FP8FwdTensors, FP8BwdTensors
from .fp8 import FP8TensorMeta
def gemm(
A: paddle.Tensor,
B: paddle.Tensor,
dtype: paddle.dtype,
workspace: paddle.Tensor,
gelu: bool = False,
gelu_input: Optional[paddle.Tensor] = None,
grad: bool = False,
accumulate: bool = False,
layout: str = "TN",
out: Optional[paddle.Tensor] = None,
bias: Optional[paddle.Tensor] = None,
use_bias: bool = False,
) -> Tuple[Union[paddle.Tensor, None], ...]:
"""Non FP8 GEMM."""
assert layout in ("TN", "NN", "NT"), f"GEMM layout {layout} not supported."
transa = layout[0] == "T"
transb = layout[1] == "T"
return_output = False
if out is None:
out = paddle.empty(
shape=[
B.shape[1] if transb else B.shape[0],
A.shape[0] if transa else A.shape[1],
],
dtype=dtype,
)
return_output = True
if gelu and not grad:
gelu_input = paddle.empty_like(out, dtype=dtype)
elif not gelu:
gelu_input = None
if grad and use_bias:
grad_bias = paddle.empty(shape=[B.shape[1]], dtype=out.dtype)
else:
grad_bias = None
bias = bias if use_bias else None
assert A.dtype == dtype and B.dtype == dtype, \
f'Expected dtype={dtype}, but found A.dtype={A.dtype} and B.dtype={B.dtype}'
input_dtype = TE_DType[dtype]
output_dtype = TE_DType[out.dtype]
if use_bias:
bias_dtype = TE_DType[grad_bias.dtype] if grad else TE_DType[bias.dtype]
else:
bias_dtype = output_dtype
tex.te_gemm(
A,
None,
B,
None,
grad_bias if grad else bias,
out,
None, # out_scale
None, # out_amax
gelu_input,
workspace,
0, # A_index
0, # B_index
0, # D_index
int(input_dtype),
int(input_dtype),
int(output_dtype),
int(bias_dtype),
transa,
transb,
grad,
workspace.shape[0],
accumulate,
False, # use_split_accumulator
0, # math_sm_count
)
if return_output:
return out, grad_bias, gelu_input
return None, grad_bias, gelu_input
def fp8_gemm(
A: paddle.Tensor,
A_scale_inv: paddle.Tensor,
A_fp8_tensor: Union[FP8FwdTensors, FP8BwdTensors],
A_dtype: tex.DType,
B: paddle.Tensor,
B_scale_inv: paddle.Tensor,
B_fp8_tensor: Union[FP8FwdTensors, FP8BwdTensors],
B_dtype: tex.DType,
out_dtype: paddle.dtype,
workspace: paddle.Tensor,
gelu: bool = False,
accumulate: bool = False,
out: Optional[paddle.Tensor] = None,
out_index=None,
fp8_meta_tensor: FP8TensorMeta = None,
bias: Optional[paddle.Tensor] = None,
use_bias: bool = False,
use_split_accumulator: bool = False,
D_dtype: Optional[tex.DType] = None,
) -> paddle.Tensor:
"""TN layout GEMM with fp8 inputs."""
if D_dtype is not None and D_dtype in [tex.DType.kFloat8E4M3, tex.DType.kFloat8E5M2]:
assert fp8_meta_tensor is not None and out_index is not None
return_output = False
if out is None:
out = paddle.empty(
shape=[
B.shape[0],
A.shape[0],
],
dtype=out_dtype,
)
return_output = True
# Use bfloat16 as default bias_dtype
bias_dtype = paddle.bfloat16 if bias is None else bias.dtype
if gelu:
gelu_input = paddle.empty_like(out, dtype=bias_dtype)
else:
gelu_input = None
bias_dtype = TE_DType[bias_dtype]
out_dtype = TE_DType[out.dtype] if D_dtype is None else D_dtype
tex.te_gemm(
A,
A_scale_inv,
B,
B_scale_inv,
bias if use_bias else None,
out,
None if out_index is None else fp8_meta_tensor.scale,
None if out_index is None else fp8_meta_tensor.amax_history,
gelu_input, # this is pre_gelu_out
workspace,
A_fp8_tensor.value,
B_fp8_tensor.value,
0 if out_index is None else out_index,
int(A_dtype),
int(B_dtype),
int(out_dtype),
int(bias_dtype),
True, # transa
False, # transb
False, # grad
workspace.shape[0],
accumulate,
use_split_accumulator,
0, # math_sm_count
)
if return_output:
if gelu:
return out, gelu_input
return out
if gelu:
return gelu_input
return None
def cast_to_fp8(
inp: paddle.Tensor,
fp8_meta_tensor: FP8TensorMeta,
fp8_tensor: Union[FP8FwdTensors, FP8BwdTensors],
otype: tex.DType,
) -> paddle.Tensor:
"""Cast input to FP8"""
out, _, _ = tex.cast_to_fp8(
inp,
fp8_meta_tensor.scale,
fp8_meta_tensor.amax_history,
fp8_meta_tensor.scale_inv,
fp8_tensor.value,
int(otype),
)
return out
def cast_from_fp8(
inp: paddle.Tensor,
fp8_meta_tensor: FP8TensorMeta,
fp8_tensor: Union[FP8FwdTensors, FP8BwdTensors],
itype: tex.DType,
otype: tex.DType,
) -> paddle.Tensor:
"""Cast input from FP8"""
return tex.cast_from_fp8(
inp,
fp8_meta_tensor.scale_inv,
fp8_tensor.value,
int(itype),
int(otype),
)
def transpose(
inp: paddle.Tensor,
otype: tex.DType,
) -> paddle.Tensor:
"""Transpose input"""
return tex.te_transpose(
inp,
int(otype),
)
def cast_transpose(
inp: paddle.Tensor,
fp8_meta_tensor: FP8TensorMeta,
fp8_tensor: Union[FP8FwdTensors, FP8BwdTensors],
otype: tex.DType,
) -> Union[Tuple[paddle.Tensor, paddle.Tensor], None]:
"""Cast + Transpose with FP8 output"""
cast_out, transpose_out, _, _ = tex.te_cast_transpose(
inp,
fp8_meta_tensor.scale,
fp8_meta_tensor.amax_history,
fp8_meta_tensor.scale_inv,
fp8_tensor.value,
int(otype),
)
return cast_out, transpose_out
def cast_transpose_bgrad(
inp: paddle.Tensor,
fp8_meta_tensor: FP8TensorMeta,
fp8_tensor: Union[FP8FwdTensors, FP8BwdTensors],
otype: tex.DType,
) -> Union[Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor], None]:
"""Fused Cast + Transpose + Bias Grad"""
grad_bias, cast_out, transpose_out, _, _ = tex.te_cast_transpose_bgrad(
inp,
fp8_meta_tensor.scale,
fp8_meta_tensor.amax_history,
fp8_meta_tensor.scale_inv,
fp8_tensor.value,
int(otype),
)
return grad_bias, cast_out, transpose_out
def te_gelu(
inp: paddle.Tensor,
otype: tex.DType,
) -> paddle.Tensor:
"""Non FP8 GELU"""
return tex.te_gelu(
inp,
int(otype),
)
def gelu_fp8(
inp: paddle.Tensor,
fp8_meta_tensor: FP8TensorMeta,
fp8_tensor: Union[FP8FwdTensors, FP8BwdTensors],
otype: tex.DType,
) -> paddle.Tensor:
"""GELU + FP8 cast"""
out, _, _ = tex.te_gelu_fp8(
inp,
fp8_meta_tensor.scale,
fp8_meta_tensor.amax_history,
fp8_meta_tensor.scale_inv,
fp8_tensor.value,
int(otype),
)
return out
def dgelu_cast_transpose_bgrad_fp8(
grad_output: paddle.Tensor,
gelu_input: paddle.Tensor,
fp8_meta_tensor: FP8TensorMeta,
fp8_tensor: Union[FP8FwdTensors, FP8BwdTensors],
otype: tex.DType,
) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
"""
Fused dgelu + cast / transpose / reduce the result of
the GELU backward along the first dimension
"""
cast_dgelu, transpose_dgelu, dbias, _, _ = tex.te_cast_transpose_bgrad_dgelu(
grad_output,
gelu_input,
fp8_meta_tensor.scale,
fp8_meta_tensor.amax_history,
fp8_meta_tensor.scale_inv,
fp8_tensor.value,
int(otype),
)
return cast_dgelu, transpose_dgelu, dbias
def layernorm_fwd_fp8(
inp: paddle.Tensor,
weight: paddle.Tensor,
bias: paddle.Tensor,
eps: float,
fp8_meta_tensor: FP8TensorMeta,
fp8_tensor: Union[FP8FwdTensors, FP8BwdTensors],
otype: tex.DType,
sm_margin: int = 0,
zero_centered_gamma: bool = False,
) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
"""LayerNorm with FP8 output"""
out, mu, rsigma, _, _ = tex.te_layernorm_fwd_fp8(inp, weight, bias, fp8_meta_tensor.scale,
fp8_meta_tensor.amax_history,
fp8_meta_tensor.scale_inv, eps,
fp8_tensor.value, int(otype), sm_margin,
zero_centered_gamma)
return out, mu, rsigma
def layernorm_fwd(
inp: paddle.Tensor,
weight: paddle.Tensor,
bias: paddle.Tensor,
eps: float,
otype: tex.DType,
sm_margin: int = 0,
zero_centered_gamma: bool = False,
) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
"""Non-FP8 LayerNorm forward"""
return tex.te_layernorm_fwd(inp, weight, bias, eps, int(otype), sm_margin, zero_centered_gamma)
def layernorm_bwd(
dz: paddle.Tensor,
x: paddle.Tensor,
mu: paddle.Tensor,
rsigma: paddle.Tensor,
gamma: paddle.Tensor,
sm_margin: int = 0,
zero_centered_gamma: bool = False,
) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
"""Non-FP8 LayerNorm backward"""
return tex.te_layernorm_bwd(dz, x, mu, rsigma, gamma, sm_margin, zero_centered_gamma)
def rmsnorm_fwd(
inp: paddle.Tensor,
weight: paddle.Tensor,
eps: float,
otype: tex.DType,
sm_margin: int = 0,
) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
"""Non-FP8 RMSNorm forward"""
return tex.te_rmsnorm_fwd(inp, weight, eps, int(otype), sm_margin)
def rmsnorm_fwd_fp8(
inp: paddle.Tensor,
weight: paddle.Tensor,
eps: float,
fp8_meta_tensor: FP8TensorMeta,
fp8_tensor: Union[FP8FwdTensors, FP8BwdTensors],
otype: tex.DType,
sm_margin: int = 0,
) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
"""RMSNorm with FP8 output"""
out, rsigma, _, _ = tex.te_rmsnorm_fwd_fp8(inp, weight, fp8_meta_tensor.scale,
fp8_meta_tensor.amax_history,
fp8_meta_tensor.scale_inv, eps, fp8_tensor.value,
int(otype), sm_margin)
return out, rsigma
def rmsnorm_bwd(
dz: paddle.Tensor,
x: paddle.Tensor,
rsigma: paddle.Tensor,
gamma: paddle.Tensor,
sm_margin: int = 0,
) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
"""Non-FP8 RMSNorm backward"""
return tex.te_rmsnorm_bwd(dz, x, rsigma, gamma, sm_margin)
def fused_attn_fwd_qkvpacked(
qkv: paddle.Tensor,
cu_seqlens: paddle.Tensor,
rng_state: paddle.Tensor,
is_training: bool,
max_seqlen: int,
qkv_dtype: tex.DType,
Bias: paddle.Tensor = None,
attn_scale: float = None,
dropout: float = 0.0,
set_zero: bool = True,
qkv_layout: str = "qkv_interleaved",
bias_type: str = "no_bias",
attn_mask_type: str = "padding",
) -> Tuple[paddle.Tensor, paddle.Tensor]:
"""Fused Attention FWD for packed QKV input"""
assert (qkv_dtype in (tex.DType.kBFloat16,
tex.DType.kFloat16)), "Only support bf16/fp16 for fused attention."
b = cu_seqlens.shape[0] - 1
total_seqs = qkv.shape[0] * qkv.shape[1]
h = qkv.shape[3]
d = qkv.shape[4]
if attn_scale is None:
attn_scale = 1.0 / math.sqrt(d)
if bias_type != "no_bias":
assert Bias is not None, "bias tensor cannot be None when bias_type is not no_bias."
assert (Bias.shape == [1, h, max_seqlen, max_seqlen
]), "bias tensor must be in [1, h, max_seqlen, max_seqlen] shape."
assert (Bias.dtype == qkv.dtype), "bias tensor must be in the same dtype as qkv."
if set_zero:
out = paddle.full(shape=[b, max_seqlen, h, d], fill_value=0, dtype=qkv.dtype)
else:
out = paddle.empty(shape=[b, max_seqlen, h, d], dtype=qkv.dtype)
if is_training:
softmax_aux = paddle.empty(shape=[b, h, max_seqlen, max_seqlen], dtype=qkv.dtype)
else:
softmax_aux = None
# execute kernel
tex.te_fused_attn_fwd_qkvpacked(
qkv,
cu_seqlens,
Bias,
out,
softmax_aux,
rng_state,
b,
h,
d,
total_seqs,
max_seqlen,
is_training,
attn_scale,
dropout,
qkv_layout,
bias_type,
attn_mask_type,
int(qkv_dtype),
)
return out, softmax_aux
def fused_attn_bwd_qkvpacked(
qkv: paddle.Tensor,
cu_seqlens: paddle.Tensor,
rng_state: paddle.Tensor,
o: paddle.Tensor,
d_o: paddle.Tensor,
softmax_aux: paddle.Tensor,
max_seqlen: int,
qkv_dtype: tex.DType,
attn_scale: float = None,
dropout: float = 0.0,
set_zero: bool = True,
qkv_layout: str = "qkv_interleaved",
bias_type: str = "no_bias",
attn_mask_type: str = "padding",
) -> Tuple[paddle.Tensor, paddle.Tensor]:
"""Fused Attention BWD for packed QKV input"""
assert (qkv_dtype in (tex.DType.kBFloat16,
tex.DType.kFloat16)), "Only support bf16/fp16 for fused attention."
b = cu_seqlens.shape[0] - 1
total_seqs = qkv.shape[0] * qkv.shape[1]
h = qkv.shape[3]
d = qkv.shape[4]
if attn_scale is None:
attn_scale = 1.0 / math.sqrt(d)
if set_zero:
dqkv = paddle.full(shape=qkv.shape, fill_value=0, dtype=qkv.dtype)
else:
dqkv = paddle.empty(shape=qkv.shape, dtype=qkv.dtype)
if bias_type != "no_bias":
dbias = paddle.empty(shape=[1, h, max_seqlen, max_seqlen], dtype=qkv.dtype)
else:
dbias = None
# execute kernel
dqkv, dbias = tex.te_fused_attn_bwd_qkvpacked(
qkv,
cu_seqlens,
o,
d_o,
softmax_aux,
dqkv,
dbias,
rng_state,
b,
h,
d,
total_seqs,
max_seqlen,
attn_scale,
dropout,
qkv_layout,
bias_type,
attn_mask_type,
int(qkv_dtype),
)
return dqkv, dbias
def fused_attn_fwd_kvpacked(
q: paddle.Tensor,
kv: paddle.Tensor,
cu_seqlens_q: paddle.Tensor,
cu_seqlens_kv: paddle.Tensor,
rng_state: paddle.Tensor,
is_training: bool,
max_seqlen_q: int,
max_seqlen_kv: int,
qkv_dtype: tex.DType,
Bias: paddle.Tensor = None,
attn_scale: float = None,
dropout: float = 0.0,
set_zero: bool = True,
qkv_layout: str = "kv_interleaved",
bias_type: str = "no_bias",
attn_mask_type: str = "padding",
) -> Tuple[paddle.Tensor, paddle.Tensor]:
"""Fused Attention FWD for packed KV input"""
assert (qkv_dtype in (tex.DType.kBFloat16,
tex.DType.kFloat16)), "Only support bf16/fp16 for fused attention."
assert (cu_seqlens_q.shape == cu_seqlens_kv.shape
), "cu_seqlens_q and cu_seqlens_kv must have the same shape"
b = cu_seqlens_q.shape[0] - 1
total_seqs_q = q.shape[0] * q.shape[1]
total_seqs_kv = kv.shape[0] * kv.shape[1]
h = q.shape[2]
d = q.shape[3]
if attn_scale is None:
attn_scale = 1.0 / math.sqrt(d)
if bias_type != "no_bias":
assert Bias is not None, "bias tensor cannot be None when bias_type is not no_bias."
        assert (Bias.shape == [1, h, max_seqlen_q, max_seqlen_kv
                              ]), "bias tensor must be in [1, h, max_seqlen_q, max_seqlen_kv] shape."
assert (Bias.dtype == q.dtype), "bias tensor must be in the same dtype as q and kv."
if set_zero:
out = paddle.full(shape=[b, max_seqlen_q, h, d], fill_value=0, dtype=q.dtype)
else:
out = paddle.empty(shape=[b, max_seqlen_q, h, d], dtype=q.dtype)
if is_training:
softmax_aux = paddle.empty(shape=[b, h, max_seqlen_q, max_seqlen_kv], dtype=q.dtype)
else:
softmax_aux = None
# execute kernel
tex.te_fused_attn_fwd_kvpacked(
q,
kv,
cu_seqlens_q,
cu_seqlens_kv,
Bias,
out,
softmax_aux,
rng_state,
b,
h,
d,
total_seqs_q,
total_seqs_kv,
max_seqlen_q,
max_seqlen_kv,
is_training,
attn_scale,
dropout,
qkv_layout,
bias_type,
attn_mask_type,
int(qkv_dtype),
)
return out, softmax_aux
def fused_attn_bwd_kvpacked(
q: paddle.Tensor,
kv: paddle.Tensor,
cu_seqlens_q: paddle.Tensor,
cu_seqlens_kv: paddle.Tensor,
rng_state: paddle.Tensor,
o: paddle.Tensor,
d_o: paddle.Tensor,
softmax_aux: paddle.Tensor,
max_seqlen_q: int,
max_seqlen_kv: int,
qkv_dtype: tex.DType,
attn_scale: float = None,
dropout: float = 0.0,
set_zero: bool = True,
qkv_layout: str = "kv_interleaved",
bias_type: str = "no_bias",
attn_mask_type: str = "padding",
) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]:
"""Fused Attention BWD for packed KV input"""
assert (qkv_dtype in (tex.DType.kBFloat16,
tex.DType.kFloat16)), "Only support bf16/fp16 for fused attention."
assert (cu_seqlens_q.shape == cu_seqlens_kv.shape
), "cu_seqlens_q and cu_seqlens_kv must have the same shape"
b = cu_seqlens_q.shape[0] - 1
total_seqs_q = q.shape[0] * q.shape[1]
total_seqs_kv = kv.shape[0] * kv.shape[1]
h = q.shape[2]
d = q.shape[3]
if attn_scale is None:
attn_scale = 1.0 / math.sqrt(d)
if set_zero:
dq = paddle.full(shape=q.shape, fill_value=0, dtype=q.dtype)
dkv = paddle.full(shape=kv.shape, fill_value=0, dtype=kv.dtype)
else:
dq = paddle.empty(shape=q.shape, dtype=q.dtype)
dkv = paddle.empty(shape=kv.shape, dtype=kv.dtype)
if bias_type != "no_bias":
dbias = paddle.empty(shape=[1, h, max_seqlen_q, max_seqlen_kv], dtype=q.dtype)
else:
dbias = None
# execute kernel
tex.te_fused_attn_bwd_kvpacked(
q,
kv,
cu_seqlens_q,
cu_seqlens_kv,
o,
d_o,
softmax_aux,
dq,
dkv,
dbias,
rng_state,
b,
h,
d,
total_seqs_q,
total_seqs_kv,
max_seqlen_q,
max_seqlen_kv,
attn_scale,
dropout,
qkv_layout,
bias_type,
attn_mask_type,
int(qkv_dtype),
)
return dq, dkv, dbias
def scaled_softmax_forward(
inp: paddle.Tensor,
scale_factor: float,
) -> paddle.Tensor:
""" scaled softmax forward"""
return tex.te_scaled_softmax_forward(inp, scale_factor)
def scaled_softmax_backward(
out_grad: paddle.Tensor,
softmax_results: paddle.Tensor,
scale_factor: float,
) -> paddle.Tensor:
""" scaled softmax backward"""
tex.te_scaled_softmax_backward(out_grad, softmax_results, scale_factor)
return out_grad
def scaled_masked_softmax_forward(
inp: paddle.Tensor,
mask: paddle.Tensor,
scale_factor: float,
) -> paddle.Tensor:
""" scaled masked softmax forward"""
return tex.te_scaled_masked_softmax_forward(inp, mask, scale_factor)
def scaled_masked_softmax_backward(
out_grad: paddle.Tensor,
softmax_results: paddle.Tensor,
scale_factor: float,
) -> paddle.Tensor:
""" scaled masked softmax backward"""
tex.te_scaled_softmax_backward(out_grad, softmax_results, scale_factor)
return out_grad
def scaled_upper_triang_masked_softmax_forward(
inp: paddle.Tensor,
scale_factor: float,
) -> paddle.Tensor:
""" scaled upper triang masked softmax forward"""
return tex.te_scaled_upper_triang_masked_softmax_forward(inp, scale_factor)
def scaled_upper_triang_masked_softmax_backward(
out_grad: paddle.Tensor,
softmax_results: paddle.Tensor,
scale_factor: float,
) -> paddle.Tensor:
""" scaled upper triang masked softmax backward"""
tex.te_scaled_upper_triang_masked_softmax_backward(out_grad, softmax_results, scale_factor)
return out_grad
| TransformerEngine-main | transformer_engine/paddle/cpp_extensions.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""FP8 meta buffer for FP8 amax reduction"""
from abc import ABC, abstractmethod
from collections import deque
from functools import partial
import os
from typing import Dict, Any, List, Union
import numpy as np
import paddle
from .constants import dist_group_type, RecomputeFunctionNames
class FP8MetaBufferBase(ABC):
"""
A global buffer that holds FP8 meta for reduction across trainers.
"""
def __init__(self):
self._data = {}
self._buffer_delete_key = None
        self._amax_reduce_wait_func = None
        self._amax_reduce_handle = None    # backs get_amax_reduce_handle()
self._dp_amax_reduce_interval = None
self._dp_amax_reduce_idx = 0
@staticmethod
@abstractmethod
def _get_meta_tensor_key():
"""Returns scaling key in `fp8_meta`."""
@staticmethod
@abstractmethod
def _get_buffer_position_key():
"""Returns module position key in `fp8_meta`."""
@staticmethod
@abstractmethod
def _get_autocast_key():
"""Returns autocast id key in `fp8_meta`."""
def _get_amax_buffer_key(self, fp8_meta: Dict[str, Any]) -> str:
"""Return a key in `_data` for the AMAX storage."""
return f"AMAX_{fp8_meta[self._get_autocast_key()]}"
def _execute_deletion(self) -> None:
"""Delete the key from global amax buffer."""
if (self._buffer_delete_key is not None and self._buffer_delete_key in self._data):
del self._data[self._buffer_delete_key]
def _wait_handle_and_split(
self,
contiguous_amax: paddle.Tensor,
chunk_sizes: List[int],
amax_buffer_key: str,
wait_handle: Union[bool, None],
) -> None:
"""Wait for amax reduction to finish and then copy reduced amax to buffer"""
if wait_handle is not None:
wait_handle.wait()
self._data[amax_buffer_key] = list(contiguous_amax.split(chunk_sizes))
def _global_amax_reduction(
self,
fp8_meta: Dict[str, Any],
tp_group: dist_group_type,
tp_size: int,
) -> None:
"""Concatenate, reduce, and split amaxes in the global buffer."""
def _reduce_tensor_across_group_op_max(tensor, group, sync_op):
if paddle.distributed.is_initialized():
wait_handle = paddle.distributed.all_reduce(
tensor,
op=paddle.distributed.ReduceOp.MAX,
group=group,
sync_op=sync_op,
)
return wait_handle
return None
amax_buffer_key = self._get_amax_buffer_key(fp8_meta)
# Key already deleted.
if amax_buffer_key not in self._data:
return None
# Reduce AMAX in DP-domain at an interval.
if self._dp_amax_reduce_interval is None:
self._dp_amax_reduce_interval = int(os.getenv("NVTE_DP_AMAX_REDUCE_INTERVAL", "1"))
tp_amax_reduce = False
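        # Reduce over the full fp8 group (DP domain) only every `_dp_amax_reduce_interval`
        # steps; in-between steps fall back to a tensor-parallel-only reduction.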
if self._dp_amax_reduce_idx == 0:
reduce_group = fp8_meta["fp8_group"]
else:
tp_amax_reduce = True
self._dp_amax_reduce_idx = (self._dp_amax_reduce_idx + 1) % self._dp_amax_reduce_interval
if tp_amax_reduce:
if tp_size > 1:
reduce_group = tp_group
else:
return None
chunk_sizes = [x.shape[0] for x in self._data[amax_buffer_key]]
contiguous_amax = paddle.concat(self._data[amax_buffer_key])
wait_handle = _reduce_tensor_across_group_op_max(
contiguous_amax,
reduce_group,
not fp8_meta["async_amax_reduction"],
)
return partial(
self._wait_handle_and_split,
contiguous_amax,
chunk_sizes,
amax_buffer_key,
wait_handle,
)
def add_amax(self, fp8_meta: Dict[str, Any]) -> None:
"""Append `amax_history` to global buffer."""
buffer_key = self._get_amax_buffer_key(fp8_meta)
fp8_meta_tensor_key = self._get_meta_tensor_key()
buffer_position_key = self._get_buffer_position_key()
if buffer_key not in self._data:
self._data[buffer_key] = [fp8_meta[fp8_meta_tensor_key].amax_history[0]]
else:
self._data[buffer_key].append(fp8_meta[fp8_meta_tensor_key].amax_history[0])
if buffer_position_key not in fp8_meta:
fp8_meta[buffer_position_key] = len(self._data[buffer_key]) - 1
# Catch incorrect fp8_autocast usage.
assert fp8_meta[buffer_position_key] == len(self._data[buffer_key]) - 1, \
"Same module is being invoked more than once inside an `fp8_autocast` " \
"region when using FP8 with amax reduction. This behavior is currently " \
"unsupported. For more details and correct usage, please see " \
"https://github.com/NVIDIA/TransformerEngine/pull/93."
def copy_amax_from_buffer(self, fp8_meta: Dict[str, Any]) -> None:
"""Populate current amax with the correct location from buffer."""
fp8_meta_tensor_key = self._get_meta_tensor_key()
buffer_position_key = self._get_buffer_position_key()
if buffer_position_key not in fp8_meta:
return
amax_buffer_key = self._get_amax_buffer_key(fp8_meta)
assert amax_buffer_key in self._data, "TE internal error."
fp8_meta[fp8_meta_tensor_key].amax_history[0] = self._data[amax_buffer_key][
fp8_meta[buffer_position_key]]
def set_for_deletion(self, fp8_meta: Dict[str, Any]) -> None:
"""Delete this amax key from global buffer during autocast end."""
if self._get_autocast_key() not in fp8_meta:
return
self._buffer_delete_key = self._get_amax_buffer_key(fp8_meta)
def get_amax_reduce_handle(self) -> Union[bool, None]:
"""Return AMAX reduction wait handle."""
return self._amax_reduce_handle
def wait(self) -> None:
"""Wait for reduced amax to be available in buffer."""
if self._amax_reduce_wait_func is not None:
self._amax_reduce_wait_func() # pylint: disable=not-callable
self._amax_reduce_wait_func = None
def to_numpy(self) -> Dict[str, List[np.array]]:
"""Convert to numpy arrays"""
out = {}
for k, v in self._data.items():
out[k] = [tensor.numpy() for tensor in v]
return out
def from_numpy(self, buffer: Dict[str, np.array]) -> None:
"""Set buffer values from numpy arrays"""
for k, v in buffer.items():
self._data[k] = [paddle.to_tensor(arr) for arr in v]
class FP8MetaFwdBuffer(FP8MetaBufferBase):
"""FP8Meta Buffer for forward"""
@staticmethod
def _get_meta_tensor_key() -> str:
"""Returns scaling key in `fp8_meta`."""
return "scaling_fwd"
@staticmethod
def _get_buffer_position_key() -> str:
"""Returns module position key in `fp8_meta`."""
return "global_fp8_buffer_pos_fwd"
@staticmethod
def _get_autocast_key() -> str:
"""Returns module position key in `fp8_meta`."""
return "autocast_id_fwd"
def set_for_amax_reduction(
self,
fp8_meta: Dict[str, Any],
tp_group: dist_group_type,
tp_size: int,
) -> None:
"""Sets up the function to call during autocast exit."""
self._amax_global_reduce_func = partial(
self._global_amax_reduction,
fp8_meta,
tp_group,
tp_size,
)
def finalize(self) -> None:
"""
Called at FP8 autocast end.
        Performs AMAX reduction and deletes unused buffer entries.
"""
if hasattr(self, '_amax_global_reduce_func') and callable(self._amax_global_reduce_func):
self._amax_reduce_wait_func = self._amax_global_reduce_func()
self._execute_deletion()
class FP8MetaBwdBuffer(FP8MetaBufferBase):
"""FP8Meta Buffer for backward"""
@staticmethod
def _get_meta_tensor_key() -> str:
"""Returns scaling key in `fp8_meta`."""
return "scaling_bwd"
@staticmethod
def _get_buffer_position_key() -> str:
"""Returns module position key in `fp8_meta`."""
return "global_fp8_buffer_pos_bwd"
@staticmethod
def _get_autocast_key() -> str:
"""Returns module position key in `fp8_meta`."""
return "autocast_id_bwd"
def finalize(
self,
fp8_meta: Dict[str, Any],
tp_group: dist_group_type,
tp_size: int,
) -> None:
"""
Called at FP8 autocast end in backward.
        Performs AMAX reduction and deletes unused buffer entries.
"""
self._amax_reduce_wait_func = self._global_amax_reduction(fp8_meta, tp_group, tp_size)
self._execute_deletion()
class FP8RecomputeBuffer:
"""Buffer used to hold FP8 meta tensors for recompute"""
def __init__(self):
self._data = []
@staticmethod
def get_buffer_position_key():
"""Returns the key (in fp8_meta) for recompute buffer position"""
return 'recompute_buffer_pos'
def stash_fp8_meta_tensors(self, fp8_meta: Dict[str, Any]) -> None:
"""Stash the scaling factors and amaxes for recompute"""
buffer_position_key = self.get_buffer_position_key()
to_copy = [
fp8_meta["scaling_fwd"].amax_history.clone(),
fp8_meta["scaling_fwd"].scale.clone(),
fp8_meta["scaling_fwd"].scale_inv.clone(),
]
if buffer_position_key in fp8_meta:
self._data[fp8_meta[buffer_position_key]].append(to_copy)
else:
self._data.append(deque())
self._data[-1].append(to_copy)
fp8_meta[buffer_position_key] = len(self._data) - 1
def retrieve_fp8_meta_tensors(self, fp8_meta: Dict[str, Any]) -> None:
"""Switch to the previously saved scaling factors and amaxes"""
# Store updated amaxes and scales from phase 1 post forward.
fp8_meta["updated_amax_history_fwd"] = fp8_meta["scaling_fwd"].amax_history
fp8_meta["updated_scale_fwd"] = fp8_meta["scaling_fwd"].scale
fp8_meta["updated_scale_inv_fwd"] = fp8_meta["scaling_fwd"].scale_inv
# Retrieve stashed amaxes and scales from phase 1 pre forward.
buffer_position_key = self.get_buffer_position_key()
stashed_fp8_meta = self._data[fp8_meta[buffer_position_key]].popleft()
# Replace amaxes and scales with stashed values for phase 2 forward
fp8_meta["scaling_fwd"].amax_history = stashed_fp8_meta[0]
fp8_meta["scaling_fwd"].scale = stashed_fp8_meta[1]
fp8_meta["scaling_fwd"].scale_inv = stashed_fp8_meta[2]
@staticmethod
def restore_fp8_meta_tensors(fp8_meta: Dict[str, Any]) -> None:
"""Restore latest scaling factors and amaxes after recompute forward run."""
assert "updated_amax_history_fwd" in fp8_meta, "Recompute internal error." \
" If you are not using recompute, please check if" \
" the forward function is called from one of these functions: " \
f"{RecomputeFunctionNames}. If so, consider change the function name " \
"or set NVTE_DISABLE_RECOMPUTE=1."
fp8_meta["scaling_fwd"].amax_history = fp8_meta["updated_amax_history_fwd"]
fp8_meta["scaling_fwd"].scale = fp8_meta["updated_scale_fwd"]
fp8_meta["scaling_fwd"].scale_inv = fp8_meta["updated_scale_inv_fwd"]
| TransformerEngine-main | transformer_engine/paddle/fp8_buffer.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Methods needed for recompute."""
import os
import inspect
from paddle.distributed import fleet
from .constants import RecomputeFunctionNames
from .fp8 import get_global_fp8_state
__all__ = ['recompute', 'is_in_recompute_phase']
_DISABLE_RECOMPUTE = int(os.getenv("NVTE_DISABLE_RECOMPUTE", "0"))
def is_in_recompute_phase():
"""Inspect call stack to determine if this is called from
backward phase. Paddle has two recompute methods:
(1) Use RecomputeFunction. The recomputed function is called from `RecomputeFunction.backward`;
(2) Use paddle.autograd.saved_tensors_hooks. The recompute function is called from `unpack`."""
if _DISABLE_RECOMPUTE:
return False
frame = inspect.currentframe().f_back
while frame:
if frame.f_code.co_name in RecomputeFunctionNames:
return True
frame = frame.f_back
return False
def recompute(function, *args, **kwargs):
"""
    This is a wrapper around `paddle.distributed.fleet.utils.recompute`. It provides the
    necessary state information for FP8 layers.
"""
assert not _DISABLE_RECOMPUTE, "Recompute is disabled. " \
f"Got NVTE_DISABLE_RECOMPUTE={_DISABLE_RECOMPUTE}."
global_fp8_state = get_global_fp8_state()
try:
global_fp8_state._fp8_recompute_enabled = True
outputs = fleet.utils.recompute(function, *args, **kwargs)
finally:
global_fp8_state._fp8_recompute_enabled = False
return outputs
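# Example usage sketch for `recompute`, kept in comments so it does not execute at import
# time. The layer choice, sizes, and the pass-through `use_reentrant` keyword below are
# assumptions for illustration only.
#
#     import paddle
#     import transformer_engine.paddle as te
#
#     layer = te.LayerNormMLP(hidden_size=1024, ffn_hidden_size=4096)
#     x = paddle.randn([4, 128, 1024])
#     x.stop_gradient = False
#     y = te.recompute(layer, x, use_reentrant=False)   # activations replayed in backward
#     y.mean().backward()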
| TransformerEngine-main | transformer_engine/paddle/recompute.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Attntion API"""
import math
import os
import warnings
from typing import Optional, Tuple, Union
import paddle
import paddle.nn.functional as F
from .layernorm_linear import LayerNormLinear
from .linear import Linear
from .softmax import FusedScaleMaskSoftmax
from ..constants import AttnTypes, TE_DType, dist_group_type
from ..cpp_extensions import (
fused_attn_fwd_qkvpacked,
fused_attn_bwd_qkvpacked,
fused_attn_fwd_kvpacked,
fused_attn_bwd_kvpacked,
)
from ..distributed import get_tp_group_and_world_size, track_rng_state
from ..utils import attention_mask_func, divide, mask_to_cu_seqlens
from ..recompute import recompute
class FusedAttnFuncPackedQKV(paddle.autograd.PyLayer):
"""Function for FusedAttention with packed QKV input"""
@staticmethod
def forward(ctx, qkv, cu_seqlens, attn_bias, rng_state, max_seqlen, attn_scale, qkv_dtype,
dropout_p, set_zero, qkv_layout, attn_bias_type, attn_mask_type, is_training):
"""Forward function for FusedAttention with packed QKV input"""
out, aux_ctx_tensors = fused_attn_fwd_qkvpacked(
qkv,
cu_seqlens,
rng_state,
is_training,
max_seqlen,
qkv_dtype,
attn_bias,
attn_scale,
dropout_p,
set_zero,
qkv_layout,
attn_bias_type,
attn_mask_type,
)
ctx.save_for_backward(qkv, out, cu_seqlens, rng_state, aux_ctx_tensors)
ctx.max_seqlen = max_seqlen
ctx.qkv_dtype = qkv_dtype
ctx.attn_scale = attn_scale
ctx.dropout_p = dropout_p
ctx.set_zero = set_zero
ctx.qkv_layout = qkv_layout
ctx.attn_bias_type = attn_bias_type
ctx.attn_mask_type = attn_mask_type
return out
@staticmethod
def backward(ctx, d_out):
"""Backward function for FusedAttention with packed QKV input"""
qkv, out, cu_seqlens, rng_state, aux_ctx_tensors = ctx.saved_tensor()
dqkv, *rest = fused_attn_bwd_qkvpacked(qkv, cu_seqlens, rng_state, out, d_out,
aux_ctx_tensors, ctx.max_seqlen, ctx.qkv_dtype,
ctx.attn_scale, ctx.dropout_p, ctx.set_zero,
ctx.qkv_layout, ctx.attn_bias_type,
ctx.attn_mask_type)
# if no_bias, return dqkv
if ctx.attn_bias_type == "no_bias":
return (dqkv, None, None)
# else, return (dqkv, dbias)
return (dqkv, None, rest[0], None)
class FusedAttnFuncPackedKV(paddle.autograd.PyLayer):
"""Function for FusedAttention with packed KV input"""
@staticmethod
def forward(ctx, q, kv, cu_seqlens_q, cu_seqlens_kv, attn_bias, rng_state, max_seqlen_q,
max_seqlen_kv, attn_scale, qkv_dtype, dropout_p, set_zero, qkv_layout,
attn_bias_type, attn_mask_type, is_training):
"""Forward function for FusedAttention with packed KV input"""
out, aux_ctx_tensors = fused_attn_fwd_kvpacked(q, kv, cu_seqlens_q, cu_seqlens_kv,
rng_state, is_training, max_seqlen_q,
max_seqlen_kv, qkv_dtype, attn_bias,
attn_scale, dropout_p, set_zero, qkv_layout,
attn_bias_type, attn_mask_type)
ctx.save_for_backward(q, kv, out, cu_seqlens_q, cu_seqlens_kv, rng_state, aux_ctx_tensors)
ctx.max_seqlen_q = max_seqlen_q
ctx.max_seqlen_kv = max_seqlen_kv
ctx.qkv_dtype = qkv_dtype
ctx.attn_scale = attn_scale
ctx.dropout_p = dropout_p
ctx.set_zero = set_zero
ctx.qkv_layout = qkv_layout
ctx.attn_bias_type = attn_bias_type
ctx.attn_mask_type = attn_mask_type
return out
@staticmethod
def backward(ctx, d_out):
"""Backward function for FusedAttention with packed KV input"""
q, kv, out, cu_seqlens_q, cu_seqlens_kv, rng_state, aux_ctx_tensors = ctx.saved_tensor()
dq, dkv, *rest = fused_attn_bwd_kvpacked(q, kv, cu_seqlens_q, cu_seqlens_kv, rng_state, out,
d_out, aux_ctx_tensors, ctx.max_seqlen_q,
ctx.max_seqlen_kv, ctx.qkv_dtype, ctx.attn_scale,
ctx.dropout_p, ctx.set_zero, ctx.qkv_layout,
ctx.attn_bias_type, ctx.attn_mask_type)
# if no_bias, return dq, dkv
if ctx.attn_bias_type == "no_bias":
return (dq, dkv, None, None, None)
# else, return (dq, dkv, dbias)
return (dq, dkv, None, None, rest[0], None)
class DotProductAttention(paddle.nn.Layer):
"""Dot Product Attention Layer
Allows the model to jointly attend to information from different
representation subspaces as described in the paper:
`Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
.. note::
Argument :attr:`attention_mask` will be ignored in the `forward` call when
:attr:`attn_mask_type` is set to `"causal"`.
Parameters
----------
norm_factor : float
normalization factor for the attention scores.
attention_dropout: float, default = 0.1
dropout probability for the dropout op during multi-head attention.
attn_mask_type: {'causal', 'padding', 'no_mask'}, default = `causal`
type of attention mask passed into softmax operation.
attention_type: {'self', 'cross'}, default = `self`
type of attention operation.
backend: {'transformer_engine', 'paddle'}, default = `transformer_engine`
backend to use for attention operation.
"""
def __init__(self,
norm_factor: float,
attention_dropout: float = 0.1,
attn_mask_type: str = "causal",
attention_type: str = "self",
backend: str = 'transformer_engine') -> None:
super().__init__()
self.norm_factor = norm_factor
self.attn_mask_type = attn_mask_type
self.attention_dropout = attention_dropout
self.attention_type = attention_type
self.rng_state = paddle.zeros((2,), dtype='int64')
self.rng_state.persistable = True
self.backend = backend
arch = paddle.device.cuda.get_device_capability()
self.is_fused_attn_supported = arch in ((8, 0), (9, 0))
self.enable_fused_attn = int(os.getenv("NVTE_FUSED_ATTN",
"0")) and self.is_fused_attn_supported
if not self.enable_fused_attn and backend == 'transformer_engine':
# FMHA is not enabled, falling back to Paddle backend
self.backend = 'paddle'
if self.backend != 'transformer_engine':
self.scale_mask_softmax = FusedScaleMaskSoftmax(attn_mask_type,
attention_mask_func,
backend=self.backend)
def forward(
self,
query_layer: paddle.Tensor,
key_value_layer: paddle.Tensor = None,
attention_mask: Optional[paddle.Tensor] = None,
core_attention_bias_type: str = "no_bias",
core_attention_bias: Optional[paddle.Tensor] = None,
set_zero: bool = True,
) -> paddle.Tensor:
"""
Dot Product Attention Layer.
.. note::
Argument :attr:`attention_mask` will be ignored when :attr:`attn_mask_type`
is set to `"causal"`.
.. note::
For self attention, :attr:`query_layer` is the `[query, key, value]` tensor
stacked along the 2nd dimension, which must be of shape (:attr:`batch_size`,
:attr:`seq_length`, 3, :attr:`num_attention_heads`, :attr:`size_per_head`).
And :attr:`key_value_layer` is `None`.
For cross attention, :attr:`query_layer` is the `[query]` tensor, which must
be of shape (:attr:`batch_size`, :attr:`seq_length`, :attr:`num_attention_heads`,
:attr:`size_per_head`). And :attr:`key_value_layer` is the `[key, value]` tensor,
which must be of shape (:attr:`batch_size`, :attr:`seq_length`, 2,
:attr:`num_attention_heads`, :attr:`size_per_head`).
Parameters
----------
query_layer : paddle.Tensor
Query tensor.
key_value_layer : paddle.Tensor
Key tensor.
attention_mask : Optional[paddle.Tensor], default = `None`
            Boolean tensor used to mask out the softmax input.
core_attention_bias_type: str, default = `no_bias`
            only the `no_bias` type is currently supported, {`no_bias`}
core_attention_bias: Optional[paddle.Tensor], default = `None`
Bias tensor for Q * K.T
        set_zero: bool, default = `True`
Whether to use the fast path to set output tensors to 0 or not.
"""
if self.backend == 'transformer_engine':
return self._te_forward(query_layer, key_value_layer, attention_mask,
core_attention_bias_type, core_attention_bias, set_zero)
if self.backend == 'paddle':
if core_attention_bias_type != "no_bias":
warnings.warn("Paddle backend dot product attention does not support bias yet. "
"Bias will be ignored.")
return self._pd_forward(query_layer, key_value_layer, attention_mask)
raise AttributeError(f"Backend {self.backend} is not supported.")
def _te_forward(
self,
query_layer: paddle.Tensor,
key_value_layer: paddle.Tensor = None,
attention_mask: Optional[paddle.Tensor] = None,
core_attention_bias_type: str = "no_bias",
core_attention_bias: Optional[paddle.Tensor] = None,
set_zero: bool = True,
) -> paddle.Tensor:
gen_state = paddle.get_rng_state()[0].__getstate__()
self.rng_state[0], self.rng_state[1] = gen_state[1], gen_state[2] # [seed, offset]
if self.attention_type == "self":
# self attention - q: [b, s, 3, h, d] kv: None
assert (len(query_layer.shape) == 5 and query_layer.shape[2] == 3
and key_value_layer is None
), "query shape must be [b, s, 3, h, d] for dot product self attention"
max_seqlen = query_layer.shape[1]
cu_seqlens, _ = mask_to_cu_seqlens(attention_mask)
qkv_dtype = TE_DType[query_layer.dtype]
qkv_layout = "qkv_interleaved"
output = FusedAttnFuncPackedQKV.apply(
query_layer,
cu_seqlens,
core_attention_bias,
self.rng_state,
max_seqlen,
1.0 / self.norm_factor,
qkv_dtype,
self.attention_dropout if self.training else 0.0,
set_zero,
qkv_layout,
core_attention_bias_type,
self.attn_mask_type,
self.training,
)
elif self.attention_type == "cross":
# cross attention - q: [b, s_q, h, d] kv: [b, s_kv, 2, h, d]
assert (
len(query_layer.shape) == 4 and len(key_value_layer.shape) == 5
and key_value_layer.shape[2] == 2
), "query shape must be [b, s, h, d] and key shape must be [b, s, 2, h, d]" \
"for dot product cross attention"
max_seqlen_q = query_layer.shape[1]
max_seqlen_kv = key_value_layer.shape[1]
cu_seqlens_q, cu_seqlens_kv = mask_to_cu_seqlens(attention_mask, need_kv=True)
qkv_dtype = TE_DType[query_layer.dtype]
qkv_layout = "kv_interleaved"
output = FusedAttnFuncPackedKV.apply(
query_layer,
key_value_layer,
cu_seqlens_q,
cu_seqlens_kv,
core_attention_bias,
self.rng_state,
max_seqlen_q,
max_seqlen_kv,
1.0 / self.norm_factor,
qkv_dtype,
self.attention_dropout if self.training else 0.0,
set_zero,
qkv_layout,
core_attention_bias_type,
self.attn_mask_type,
self.training,
)
else:
raise ValueError("attention_type must be one of ['self', 'cross']")
return output
def _pd_forward(
self,
query_layer: paddle.Tensor,
key_value_layer: paddle.Tensor = None,
attention_mask: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if self.attention_type == "self":
# self attention - q: [b, s, 3, h, d] k: None
assert (len(query_layer.shape) == 5 and query_layer.shape[2] == 3
and key_value_layer is None
), "query shape must be [b, s, 3, h, d] for dot product self attention"
q = query_layer[:, :, 0]
k = query_layer[:, :, 1]
v = query_layer[:, :, 2]
elif self.attention_type == "cross":
# cross attention - q: [b, s, h, d] kv: [b, s, 2, h, d]
assert (
len(query_layer.shape) == 4 and len(key_value_layer.shape) == 5
and key_value_layer.shape[2] == 2
), f"query shape must be [b, s, h, d] and key_value shape must be [b, s, 2, h, d]" \
f"for dot product cross attention. The actual shape is q: {query_layer.shape}" \
f"kv: {key_value_layer.shape}"
q = query_layer
k = key_value_layer[:, :, 0]
v = key_value_layer[:, :, 1]
q = paddle.transpose(x=q, perm=[0, 2, 1, 3])
k = paddle.transpose(x=k, perm=[0, 2, 1, 3])
v = paddle.transpose(x=v, perm=[0, 2, 1, 3])
product = paddle.matmul(x=q * (1.0 / self.norm_factor), y=k, transpose_y=True)
attention_probs = self.scale_mask_softmax(product, attention_mask, scale=None)
if self.attention_dropout > 0:
attention_probs = F.dropout(
attention_probs,
self.attention_dropout,
training=self.training,
)
out = paddle.matmul(attention_probs, v)
out = paddle.transpose(out, perm=[0, 2, 1, 3]) # [b, s, h, d]
# out = paddle.reshape(x=out, shape=[0, 0, out.shape[2] * out.shape[3]])
return out
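# Example usage sketch for DotProductAttention (self-attention), kept in comments so it
# does not execute at import time. The shapes and the top-level `te` import path are
# assumptions for illustration only.
#
#     import math
#     import paddle
#     import transformer_engine.paddle as te
#
#     b, s, h, d = 2, 128, 16, 64
#     attn = te.DotProductAttention(norm_factor=math.sqrt(d),
#                                   attention_dropout=0.1,
#                                   attn_mask_type="causal",
#                                   attention_type="self")
#     qkv = paddle.randn([b, s, 3, h, d])    # packed [query, key, value] along dim 2
#     ctx = attn(qkv)                        # attention_mask is ignored for causal masking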
class MultiHeadAttention(paddle.nn.Layer):
"""Attention w/ QKV and Proj Gemms
Parameters
----------
hidden_size: int
hidden size of the model.
num_attention_heads: int
number of attention heads.
attention_dropout: float, default = 0.1
dropout probability for the dropout op during multi-head attention.
layernorm_epsilon: float, default = 1e-5
epsilon to use in the layer norm operations.
weight_attr: Union[paddle.ParamAttr, None], default = `None`
paddle.ParamAttr object for the weight parameter.
bias_attr: Union[paddle.ParamAttr, None, bool], default = `None`
paddle.ParamAttr object for the bias parameter.
attn_mask_type: {'causal', 'padding', 'no_mask'}, default = `causal`
type of attention mask passed into softmax operation.
params_dtype: Optional[paddle.dtype], default = `None`
data type for the weights and biases.
return_layernorm_output: bool, default = `False`
whether to return the output of the layernorm operation.
input_layernorm: bool, default = `False`
whether to apply layernorm to the input.
attention_type: {'self', 'cross'}, default = `self`
type of attention operation.
zero_centered_gamma: bool, default = `False`
whether to zero initialize the gamma of the layernorm operation.
backend: {'transformer_engine', 'paddle'}, default = `transformer_engine`
backend to use for attention operation.
Parallelism parameters
----------------------
set_parallel_mode : bool, default = `False`
if set to `True`, QKV and FC1 layers are used as Column Parallel
        whereas PROJ and FC2 are used as Row Parallel as described
`here <https://arxiv.org/pdf/1909.08053.pdf>`_.
tp_group : ProcessGroup, default = `None`
tensor parallel process group.
rng_state_name : str, default = `local_seed`
Controls the rng state used for dropout on attention probs. The
        specified rng state should be seeded differently on different TP ranks.
It will be ignored if `set_parallel_mode` is False. The specified
name should be registered through
`paddle.distributed.fleet.meta_parallel.get_rng_state_tracker()
.add(rng_state_name, seed)`.
"""
def __init__(
self,
hidden_size: int,
num_attention_heads: int,
attention_dropout: float = 0.1,
layernorm_epsilon: float = 1e-5,
weight_attr: Union[paddle.ParamAttr, None] = None,
bias_attr: Union[paddle.ParamAttr, None, bool] = None,
attn_mask_type: str = "causal",
params_dtype: Optional[paddle.dtype] = None,
return_layernorm_output: bool = False,
input_layernorm: bool = False,
attention_type: str = "self",
zero_centered_gamma: bool = False,
set_parallel_mode: bool = False,
tp_group: Optional[dist_group_type] = None,
rng_state_name: str = 'local_seed',
backend: str = 'transformer_engine',
) -> None:
super().__init__()
self.input_layernorm = input_layernorm
self.attention_type = attention_type
self.return_layernorm_output = return_layernorm_output
self.params_dtype = paddle.get_default_dtype() if params_dtype is None else params_dtype
self.weight_attr = weight_attr
self.bias_attr = bias_attr
self.attn_mask_type = attn_mask_type
assert attention_type in AttnTypes, f"attention_type {attention_type} not supported"
self.tp_group, self.tp_size = get_tp_group_and_world_size(tp_group,
enable_tp=set_parallel_mode)
self.tensor_parallel = self.tp_size > 1
self.hidden_size_per_attention_head = hidden_size // num_attention_heads
self.num_attention_heads = num_attention_heads
norm_factor = math.sqrt(self.hidden_size_per_attention_head)
self.set_parallel_mode = set_parallel_mode
self.rng_state_name = rng_state_name
self.backend = backend
self.num_attention_heads_per_partition = divide(self.num_attention_heads, self.tp_size)
qkv_parallel_mode = "column" if set_parallel_mode else None
if self.attention_type == "self":
if self.input_layernorm:
self.layernorm_qkv = LayerNormLinear(
hidden_size,
3 * hidden_size,
eps=layernorm_epsilon,
weight_attr=self.weight_attr,
bias_attr=self.bias_attr,
return_layernorm_output=return_layernorm_output,
zero_centered_gamma=zero_centered_gamma,
parallel_mode=qkv_parallel_mode,
tp_group=self.tp_group,
backend=self.backend,
)
else:
self.qkv = Linear(
hidden_size,
3 * hidden_size,
self.weight_attr,
self.bias_attr,
parallel_mode=qkv_parallel_mode,
tp_group=self.tp_group,
backend=self.backend,
)
else: # cross attention
if self.input_layernorm:
self.layernorm_query = LayerNormLinear(
hidden_size,
hidden_size,
eps=layernorm_epsilon,
weight_attr=self.weight_attr,
bias_attr=self.bias_attr,
return_layernorm_output=return_layernorm_output,
zero_centered_gamma=zero_centered_gamma,
parallel_mode=qkv_parallel_mode,
tp_group=self.tp_group,
backend=self.backend,
)
else:
self.query_layer = Linear(
hidden_size,
hidden_size,
self.weight_attr,
self.bias_attr,
parallel_mode=qkv_parallel_mode,
tp_group=self.tp_group,
backend=self.backend,
)
self.key_value = Linear(
hidden_size,
2 * hidden_size,
self.weight_attr,
self.bias_attr,
parallel_mode=qkv_parallel_mode,
tp_group=self.tp_group,
backend=self.backend,
)
# Attention.
self.core_attention = DotProductAttention(
norm_factor,
attention_dropout,
attn_mask_type=attn_mask_type,
attention_type=self.attention_type,
backend=self.backend,
)
# Linear
self.proj = Linear(
hidden_size,
hidden_size,
self.weight_attr,
self.bias_attr,
parallel_mode="row" if set_parallel_mode else None,
tp_group=self.tp_group,
backend=self.backend,
)
def forward(
self,
hidden_states: paddle.Tensor,
attention_mask: Optional[paddle.Tensor] = None,
encoder_output: Optional[paddle.Tensor] = None,
core_attention_bias_type: str = "no_bias",
core_attention_bias: Optional[paddle.Tensor] = None,
set_zero: bool = True,
recompute_core_attention: bool = False,
) -> Tuple[Union[paddle.Tensor, None], ...]:
"""
MultiHeadAttention Layer.
Parameters
----------
hidden_states : paddle.Tensor
Input tensor.
attention_mask : Optional[paddle.Tensor], default = `None`
            Boolean tensor used to mask out the softmax input.
encoder_output : Optional[paddle.Tensor], default = `None`
Output of the encoder layer.
core_attention_bias_type: str, default = `no_bias`
            only the `no_bias` type is currently supported, {`no_bias`}
core_attention_bias: Optional[paddle.Tensor], default = `None`
Bias tensor for Q * K.T
        set_zero: bool, default = `True`
Whether to use the fast path to set output tensors to 0 or not.
recompute_core_attention: bool, default = `False`
If true, forward activations for core attention are recomputed
during the backward pass in order to save memory that would
otherwise be occupied to store the forward activations until
backprop.
"""
# hidden_states: [b, s_q, hidden_size]
if self.attn_mask_type != "causal" and attention_mask is not None:
assert (attention_mask.dtype == paddle.bool), "Attention mask must be a boolean tensor"
if self.attention_type == "self":
if self.input_layernorm:
layernorm_qkv_outputs = self.layernorm_qkv(hidden_states)
if self.return_layernorm_output:
mixed_qkv_layer, layernorm_output = layernorm_qkv_outputs
else:
mixed_qkv_layer = layernorm_qkv_outputs
else:
mixed_qkv_layer = self.qkv(hidden_states)
# [b, s_q, 3 * hidden_size] --> [b, s_q, 3, num_heads, head_size]
mixed_qkv_layer = mixed_qkv_layer.reshape(shape=[
0, 0, 3, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head
])
with track_rng_state(enable=self.tensor_parallel, name=self.rng_state_name):
if recompute_core_attention:
context_layer = recompute(
self.core_attention,
mixed_qkv_layer,
None,
attention_mask,
core_attention_bias_type,
core_attention_bias,
set_zero,
use_reentrant=False,
)
else:
context_layer = self.core_attention(
query_layer=mixed_qkv_layer,
key_value_layer=None,
attention_mask=attention_mask,
core_attention_bias_type=core_attention_bias_type,
core_attention_bias=core_attention_bias,
set_zero=set_zero,
)
else: # cross attention
mixed_kv_layer = self.key_value(encoder_output)
# [b, s_kv, 2 * hidden_size] --> [b, s_kv, 2, num_heads, head_size]
mixed_kv_layer = mixed_kv_layer.reshape(shape=[
0, 0, 2, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head
])
if self.input_layernorm:
layernorm_query_outputs = self.layernorm_query(hidden_states)
if self.return_layernorm_output:
query_layer, layernorm_output = layernorm_query_outputs
else:
query_layer = layernorm_query_outputs
else:
query_layer = self.query_layer(hidden_states)
query_layer = query_layer.reshape(shape=[
0, 0, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head
])
with track_rng_state(enable=self.tensor_parallel, name=self.rng_state_name):
if recompute_core_attention:
context_layer = recompute(
self.core_attention,
query_layer,
mixed_kv_layer,
attention_mask,
core_attention_bias_type,
core_attention_bias,
set_zero,
use_reentrant=False,
)
else:
context_layer = self.core_attention(
query_layer=query_layer,
key_value_layer=mixed_kv_layer,
attention_mask=attention_mask,
core_attention_bias_type=core_attention_bias_type,
core_attention_bias=core_attention_bias,
set_zero=set_zero,
)
context_layer = paddle.reshape(context_layer,
[0, 0, context_layer.shape[2] * context_layer.shape[3]])
# Output. [b, s, hidden]
attention_output = self.proj(context_layer)
if self.input_layernorm and self.return_layernorm_output:
return attention_output, layernorm_output
return attention_output
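# Example usage sketch for MultiHeadAttention (self-attention), kept in comments so it does
# not execute at import time. The sizes and the top-level `te` import path are assumptions
# for illustration only.
#
#     import paddle
#     import transformer_engine.paddle as te
#
#     mha = te.MultiHeadAttention(hidden_size=1024, num_attention_heads=16,
#                                 attention_dropout=0.1, attn_mask_type="causal",
#                                 input_layernorm=True, attention_type="self")
#     hidden = paddle.randn([2, 128, 1024])   # [batch, seq, hidden]
#     out = mha(hidden)                       # [batch, seq, hidden]
#     # Passing recompute_core_attention=True replays the core attention forward during
#     # backward to save activation memory.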
| TransformerEngine-main | transformer_engine/paddle/layer/attention.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""LayerNormMLP API"""
import os
from typing import Union, Tuple, Dict, Any, Optional
import paddle
import paddle.nn.functional as F
from paddle.nn.initializer import Constant
from .base import TransformerEngineBaseLayer
from .layernorm_linear import _layernorm_fwd_fp8_cast, _layernorm_bwd
from .linear import _linear_fwd_fp8, _linear_fwd_non_fp8, _linear_bwd_fp8, _linear_bwd_non_fp8
from ..constants import TE_DType, FP8FwdTensors, FP8BwdTensors, dist_group_type
from ..cpp_extensions import (
cast_from_fp8,
dgelu_cast_transpose_bgrad_fp8,
gelu_fp8,
transpose,
)
from ..distributed import (
allreduce,
get_tp_group_and_world_size,
identity,
track_rng_state,
set_tensor_dist_attr,
set_weight_tensor_dist_attr,
)
from ..fp8 import get_fp8_te_dtype
from ..utils import (
assert_dim_for_fp8_forward_exec,
cast_if_needed,
cast_if_needed_inplace,
divide,
get_paddle_act_func,
save_for_backward_allow_none,
saved_tensor_allow_none,
)
__all__ = ["LayerNormMLP"]
def _mlp_forward(
inputmat: paddle.Tensor,
inputmat_fp8_index: FP8FwdTensors,
fc1_weight: paddle.Tensor,
fc1_weight_fp8_index: FP8FwdTensors,
fc1_bias: Union[paddle.Tensor, None],
use_fc1_bias: bool,
fc2_input_fp8_index: FP8FwdTensors, # FP8FwdTensors.GEMM2_INPUT
fc2_weight: paddle.Tensor,
fc2_weight_fp8_index: FP8FwdTensors,
fc2_bias: Union[paddle.Tensor, None],
use_fc2_bias: bool,
fp8_enabled: bool,
fp8_calibration: bool,
fp8_meta: Dict[str, Any],
activation_dtype: paddle.dtype,
activation: str,
is_grad_enabled: bool,
set_parallel_mode: bool,
tensor_parallel: bool,
tp_group: Union[dist_group_type, None],
):
if fp8_enabled:
fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
fc1_out, fc1_weight_t_fp8 = _linear_fwd_fp8(
inputmat,
inputmat_fp8_index,
fc1_weight,
fc1_weight_fp8_index,
fc1_bias,
use_fc1_bias,
fp8_meta,
activation_dtype,
'column' if set_parallel_mode else None,
tensor_parallel,
tp_group,
is_grad_enabled,
)
gelu_out = gelu_fp8(
fc1_out,
fp8_meta["scaling_fwd"],
fc2_input_fp8_index,
fp8_dtype_forward,
)
fc2_out, fc2_weight_t_fp8 = _linear_fwd_fp8(
gelu_out,
fc2_input_fp8_index,
fc2_weight,
fc2_weight_fp8_index,
fc2_bias,
use_fc2_bias,
fp8_meta,
activation_dtype,
'row' if set_parallel_mode else None,
tensor_parallel,
tp_group,
is_grad_enabled,
)
else:
fc1_out, gelu_out = _linear_fwd_non_fp8(
inputmat,
inputmat_fp8_index,
fc1_weight,
fc1_weight_fp8_index,
fc1_bias,
use_fc1_bias,
fp8_calibration,
fp8_meta,
activation_dtype,
'column' if set_parallel_mode else None,
tensor_parallel,
tp_group,
activation=activation,
)
fc2_out = _linear_fwd_non_fp8(
gelu_out,
fc2_input_fp8_index,
fc2_weight,
fc2_weight_fp8_index,
fc2_bias,
use_fc2_bias,
fp8_calibration,
fp8_meta,
activation_dtype,
'row' if set_parallel_mode else None,
tensor_parallel,
tp_group,
)
return (
fc1_out,
gelu_out,
fc2_out,
fc1_weight_t_fp8 if fp8_enabled else None,
fc2_weight_t_fp8 if fp8_enabled else None,
)
def _mlp_backward(
fc1_input: paddle.Tensor, # ln_out, BF16 / FP8
fc1_input_fp8_index: FP8FwdTensors,
fc1_weight: paddle.Tensor,
fc1_weight_t_fp8: paddle.Tensor,
fc1_weight_fp8_index: FP8FwdTensors,
fc1_grad_output_fp8_index: FP8BwdTensors, # FP8BwdTensors.GRAD_OUTPUT2
requires_fc1_wgrad: bool,
requires_fc1_bgrad: bool,
fc1_out: paddle.Tensor,
fc2_input: paddle.Tensor, # gelu_out
fc2_input_fp8_index: FP8FwdTensors, # FP8FwdTensors.GEMM2_INPUT
fc2_weight: paddle.Tensor,
fc2_weight_t_fp8: paddle.Tensor,
fc2_weight_fp8_index: FP8FwdTensors,
requires_fc2_wgrad: bool,
requires_fc2_bgrad: bool,
grad_output: paddle.Tensor,
grad_output_c: paddle.Tensor,
grad_output_t: paddle.Tensor,
grad_output_fp8_index: FP8BwdTensors, # FP8BwdTensors.GRAD_OUTPUT1
fwd_scale_inverses: paddle.Tensor,
fp8_enabled: bool,
fp8_meta: Dict[str, Any],
requires_dgrad: bool,
activation_dtype: paddle.dtype,
activation: str,
set_parallel_mode: bool,
tensor_parallel: bool,
tp_group: Union[dist_group_type, None],
):
(
fc1_dgrad,
fc1_wgrad,
fc1_bgrad,
fc2_wgrad,
fc2_bgrad,
) = None, None, None, None, None
if fp8_enabled:
fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
fp8_dtype_backward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=False)
# FC2 Bwd
fc2_input_no_fp8, fc2_input_t = None, None
fp8_wgrad = not fp8_meta["recipe"].override_linear_precision.wgrad
if requires_fc2_wgrad:
if fp8_wgrad:
fc2_input_t = transpose(fc2_input, fp8_dtype_forward)
else:
fc2_input_no_fp8 = cast_from_fp8(
fc2_input,
fp8_meta["scaling_fwd"],
fc2_input_fp8_index,
fp8_dtype_forward,
TE_DType[activation_dtype],
)
fc2_dgrad, fc2_wgrad = _linear_bwd_fp8(
fc2_input_no_fp8,
fc2_input_t,
fc2_input_fp8_index,
fc2_weight_t_fp8,
fc2_weight_fp8_index,
grad_output,
grad_output_c,
grad_output_t,
grad_output_fp8_index,
fwd_scale_inverses,
fp8_meta,
True,
requires_fc2_wgrad,
activation_dtype,
'row' if set_parallel_mode else None,
tensor_parallel,
tp_group,
)
# GELU Bwd
dgelu, dgelu_t, fc1_bgrad_ = dgelu_cast_transpose_bgrad_fp8(
fc2_dgrad,
fc1_out,
fp8_meta["scaling_bwd"],
fc1_grad_output_fp8_index,
fp8_dtype_backward,
)
if requires_fc1_bgrad:
fc1_bgrad = fc1_bgrad_
# FC1 Bwd
dgelu_no_fp8, fc1_input_no_fp8, fc1_input_t = None, None, None
if requires_fc1_wgrad:
if fp8_wgrad:
fc1_input_t = transpose(fc1_input, fp8_dtype_forward)
else:
# TODO(tizheng) Paddle lacks fused dgelu_bgrad OP. Cast from dgrad(fp8) instead.
dgelu_no_fp8 = cast_from_fp8(
dgelu,
fp8_meta["scaling_bwd"],
fc1_grad_output_fp8_index,
fp8_dtype_backward,
TE_DType[activation_dtype],
)
fc1_input_no_fp8 = cast_from_fp8(
fc1_input,
fp8_meta["scaling_fwd"],
fc1_input_fp8_index,
fp8_dtype_forward,
TE_DType[activation_dtype],
)
fc1_dgrad, fc1_wgrad = _linear_bwd_fp8(
fc1_input_no_fp8,
fc1_input_t,
fc1_input_fp8_index,
fc1_weight_t_fp8,
fc1_weight_fp8_index,
dgelu_no_fp8,
dgelu,
dgelu_t,
fc1_grad_output_fp8_index,
fwd_scale_inverses,
fp8_meta,
requires_dgrad,
requires_fc1_wgrad,
activation_dtype,
'column' if set_parallel_mode else None,
tensor_parallel,
tp_group,
)
else:
dgelu, fc2_wgrad, fc2_bgrad = _linear_bwd_non_fp8(
fc2_input,
fc2_weight,
grad_output,
requires_fc2_bgrad,
True,
requires_fc2_wgrad,
activation_dtype,
'row' if set_parallel_mode else None,
tensor_parallel,
tp_group,
gelu_input=fc1_out,
activation=activation,
)
fc1_dgrad, fc1_wgrad, fc1_bgrad = _linear_bwd_non_fp8(
fc1_input,
fc1_weight,
dgelu,
requires_fc1_bgrad,
requires_dgrad,
requires_fc1_wgrad,
activation_dtype,
'column' if set_parallel_mode else None,
tensor_parallel,
tp_group,
)
return (
fc1_dgrad,
fc1_wgrad,
fc1_bgrad,
fc2_wgrad,
fc2_bgrad,
)
class _LayerNormMLP(paddle.autograd.PyLayer):
"""TE implementation of LayerNormMLP"""
@staticmethod
def forward(
ctx,
inp: paddle.Tensor,
ln_weight: paddle.Tensor,
ln_bias: paddle.Tensor,
fc1_weight: paddle.Tensor,
fc1_bias: Union[paddle.Tensor, None],
use_fc1_bias: bool,
fc2_weight: paddle.Tensor,
fc2_bias: Union[paddle.Tensor, None],
use_fc2_bias: bool,
eps: float,
fp8_enabled: bool,
fp8_calibration: bool,
fp8_meta: Dict[str, Any],
activation_dtype: paddle.dtype,
return_layernorm_output: bool,
is_grad_enabled: bool,
fwd_ln_sm_margin: int,
bwd_ln_sm_margin: int,
zero_centered_gamma: bool,
activation: str,
set_parallel_mode: bool,
tensor_parallel: bool,
tp_group: Union[dist_group_type, None],
tp_size: int,
) -> Union[Tuple[paddle.Tensor, ...], paddle.Tensor]:
# Make sure input dimensions are compatible
in_features = ln_weight.shape[0]
assert inp.shape[-1] == in_features, "GEMM not possible"
inputmat = inp.reshape((-1, in_features))
if fp8_enabled:
assert_dim_for_fp8_forward_exec(inputmat)
assert_dim_for_fp8_forward_exec(fc1_weight)
assert_dim_for_fp8_forward_exec(fc2_weight)
# only support gelu for now
assert activation == 'gelu'
# LayerNorm Fwd + FP8 Cast
(
ln_out_return,
ln_out,
mu,
rsigma,
) = _layernorm_fwd_fp8_cast(
inputmat,
ln_weight,
ln_bias,
FP8FwdTensors.GEMM1_INPUT,
eps,
fp8_enabled,
fp8_meta,
activation_dtype,
return_layernorm_output,
fwd_ln_sm_margin,
zero_centered_gamma,
)
(
fc1_out,
gelu_out,
fc2_out,
fc1_weight_t_fp8,
fc2_weight_t_fp8,
) = _mlp_forward(
ln_out,
FP8FwdTensors.GEMM1_INPUT,
fc1_weight,
FP8FwdTensors.GEMM1_WEIGHT,
fc1_bias,
use_fc1_bias,
FP8FwdTensors.GEMM2_INPUT,
fc2_weight,
FP8FwdTensors.GEMM2_WEIGHT,
fc2_bias,
use_fc2_bias,
fp8_enabled,
fp8_calibration,
fp8_meta,
activation_dtype,
activation,
is_grad_enabled,
set_parallel_mode,
tensor_parallel,
tp_group,
)
if is_grad_enabled:
save_for_backward_allow_none(
ctx,
inputmat,
ln_weight,
mu,
rsigma,
ln_out,
fc1_out,
gelu_out,
fc1_weight,
fc1_weight_t_fp8,
fc2_weight,
fc2_weight_t_fp8,
fp8_meta["scaling_fwd"].scale_inv.clone() if fp8_enabled else None,
)
ctx.activation_dtype = activation_dtype
ctx.activation = activation
ctx.fp8_enabled = fp8_enabled
ctx.fp8_meta = fp8_meta
ctx.use_fc1_bias = use_fc1_bias
ctx.use_fc2_bias = use_fc2_bias
ctx.inp_shape = inp.shape
ctx.return_layernorm_output = return_layernorm_output
ctx.bwd_ln_sm_margin = bwd_ln_sm_margin
ctx.zero_centered_gamma = zero_centered_gamma
ctx.set_parallel_mode = set_parallel_mode
ctx.tensor_parallel = tensor_parallel
ctx.tp_group = tp_group
ctx.tp_size = tp_size
ctx.requires_dgrad = not inp.stop_gradient
ctx.requires_fc1_wgrad = not fc1_weight.stop_gradient
ctx.requires_fc2_wgrad = not fc2_weight.stop_gradient
ctx.requires_fc1_bgrad = use_fc1_bias and not fc1_bias.stop_gradient
ctx.requires_fc2_bgrad = use_fc2_bias and not fc2_bias.stop_gradient
ctx.requires_ln_bgrad = not ln_bias.stop_gradient
ctx.requires_ln_wgrad = not ln_weight.stop_gradient
# [*, in_features] -> [*, out_features] except first dimension changes for SP
fc2_out = fc2_out.reshape((-1, *inp.shape[1:-1], fc2_out.shape[-1]))
if return_layernorm_output:
return fc2_out, ln_out_return.reshape(inp.shape)
return fc2_out
@staticmethod
def backward(
ctx, *grad_outputs: Tuple[paddle.Tensor,
...]) -> Tuple[Union[paddle.Tensor, None], ...]:
with TransformerEngineBaseLayer.prepare_backward(ctx.fp8_enabled,
ctx.fp8_meta,
ctx.tp_group,
ctx.tp_size,
name="_LayerNormMLP"):
( # pylint: disable=unbalanced-tuple-unpacking
inputmat,
ln_weight,
mu,
rsigma,
ln_out,
fc1_out,
gelu_out,
fc1_weight,
fc1_weight_t_fp8,
fc2_weight,
fc2_weight_t_fp8,
fwd_scale_inverses,
) = saved_tensor_allow_none(ctx)
ctx.use_bias = ctx.use_fc2_bias # For grad_output_preprocess
(
grad_output,
grad_output_c,
grad_output_t,
fc2_bgrad,
) = TransformerEngineBaseLayer.grad_output_preprocess(ctx, grad_outputs[0])
(
fc1_dgrad,
fc1_wgrad,
fc1_bgrad,
fc2_wgrad,
fc2_bgrad_,
) = _mlp_backward(
ln_out,
FP8FwdTensors.GEMM1_INPUT,
fc1_weight,
fc1_weight_t_fp8,
FP8FwdTensors.GEMM1_WEIGHT,
FP8BwdTensors.GRAD_OUTPUT2,
ctx.requires_fc1_wgrad,
ctx.requires_fc1_bgrad,
fc1_out,
gelu_out,
FP8FwdTensors.GEMM2_INPUT,
fc2_weight,
fc2_weight_t_fp8,
FP8FwdTensors.GEMM2_WEIGHT,
ctx.requires_fc2_wgrad,
ctx.requires_fc2_bgrad,
grad_output,
grad_output_c,
grad_output_t,
FP8BwdTensors.GRAD_OUTPUT1,
fwd_scale_inverses,
ctx.fp8_enabled,
ctx.fp8_meta,
True,
ctx.activation_dtype,
ctx.activation,
ctx.set_parallel_mode,
ctx.tensor_parallel,
ctx.tp_group,
)
if not ctx.fp8_enabled:
                # In the non-FP8 path, the fc2 bias gradient is produced by the fused GEMM
fc2_bgrad = fc2_bgrad_
# LayerNorm Bwd
dxmat, dgamma, dbeta = _layernorm_bwd(
inputmat,
fc1_dgrad,
ln_weight,
mu,
rsigma,
grad_outputs[1] if ctx.return_layernorm_output else None,
ctx.return_layernorm_output,
ctx.bwd_ln_sm_margin,
ctx.zero_centered_gamma,
)
fc1_bgrad = fc1_bgrad if ctx.requires_fc1_bgrad else None
fc2_bgrad = fc2_bgrad if ctx.requires_fc2_bgrad else None
fc1_bgrad_out = (fc1_bgrad,) if ctx.use_fc1_bias else ()
fc2_bgrad_out = (fc2_bgrad,) if ctx.use_fc2_bias else ()
return (
dxmat.reshape(ctx.inp_shape) if ctx.requires_dgrad else None,
dgamma if ctx.requires_ln_wgrad else None,
dbeta if ctx.requires_ln_bgrad else None,
fc1_wgrad if ctx.requires_fc1_wgrad else None,
*fc1_bgrad_out,
fc2_wgrad if ctx.requires_fc2_wgrad else None,
*fc2_bgrad_out,
)
class LayerNormMLP(TransformerEngineBaseLayer):
r"""
    Applies layer normalization on the input, followed by an MLP consisting of two
    successive linear transformations separated by an activation (GeLU by default).
"""
def __init__(
self,
hidden_size: int,
ffn_hidden_size: int,
eps: float = 1e-5,
weight_attr: Union[paddle.ParamAttr, None] = None,
bias_attr: Union[paddle.ParamAttr, None, bool] = None,
activation: str = "gelu",
return_layernorm_output: bool = False,
zero_centered_gamma: bool = False,
set_parallel_mode: bool = False,
tp_group: Optional[dist_group_type] = None,
backend: str = 'transformer_engine',
) -> None:
super().__init__()
self.hidden_size = hidden_size
self.ffn_hidden_size = ffn_hidden_size
self.eps = eps
self.activation = activation
self.return_layernorm_output = return_layernorm_output
self.zero_centered_gamma = zero_centered_gamma
self.backend = backend
self._weight_attr = weight_attr
self._bias_attr = bias_attr
self._dtype = self._helper.get_default_dtype()
# Set parallel configs
self.tp_group, self.tp_size = get_tp_group_and_world_size(tp_group,
enable_tp=set_parallel_mode)
self.tensor_parallel = self.tp_size > 1
self.set_parallel_mode = set_parallel_mode
if self.set_parallel_mode:
self.size_per_partition = divide(self.ffn_hidden_size, self.tp_size)
else:
self.size_per_partition = self.ffn_hidden_size
# LayerNorm weights
self.ln_weight = self.create_parameter(
shape=[self.hidden_size],
attr=paddle.ParamAttr(initializer=Constant(
value=0.0 if self.zero_centered_gamma else 1.0)),
dtype=self._dtype,
is_bias=False,
)
self.ln_bias = self.create_parameter(
shape=[self.hidden_size],
attr=paddle.ParamAttr(initializer=Constant(value=0.0)),
dtype=self._dtype,
is_bias=True,
)
# FC1 weights
with track_rng_state(enable=self.tensor_parallel):
self.fc1_weight = self.create_parameter(
shape=[self.size_per_partition, self.hidden_size] if self.backend
== 'transformer_engine' else [self.hidden_size, self.size_per_partition],
attr=self._weight_attr,
dtype=self._dtype,
is_bias=False,
)
set_weight_tensor_dist_attr(self.fc1_weight,
self.tensor_parallel,
parallel_mode='column',
backend=self.backend)
self.has_bias = self._bias_attr is not False
use_default_bias = self._bias_attr is None or self._bias_attr is True
if use_default_bias:
self._bias_attr = paddle.ParamAttr(initializer=Constant(value=0.0))
if self.has_bias:
self.fc1_bias = self.create_parameter(
shape=[self.size_per_partition],
attr=self._bias_attr,
dtype=self._dtype,
is_bias=True,
)
set_tensor_dist_attr(self.fc1_bias, self.tensor_parallel, axis=0)
else:
self.fc1_bias = None
# FC2 weights
self.fc2_weight = self.create_parameter(
shape=[self.hidden_size, self.size_per_partition] if self.backend
== 'transformer_engine' else [self.size_per_partition, self.hidden_size],
attr=self._weight_attr,
dtype=self._dtype,
is_bias=False,
)
set_weight_tensor_dist_attr(self.fc2_weight,
self.tensor_parallel,
parallel_mode='row',
backend=self.backend)
if self.has_bias:
self.fc2_bias = self.create_parameter(
shape=[self.hidden_size],
attr=self._bias_attr,
dtype=self._dtype,
is_bias=True,
)
else:
self.fc2_bias = None
# For RPL, bias has to be added after TP collectives
# So it cannot be fused with the GEMM
if self.set_parallel_mode and self.tensor_parallel and self.has_bias:
self.gemm_bias_fused_add = False
else:
self.gemm_bias_fused_add = True
        # This many SMs are subtracted from the total SM count when calling forward
# and backward LayerNorm C APIs. These envvars can be used to prevent the LN
# kernels from using all SMs in the device. This is useful for cases such as
# communication overlap with LN.
self.fwd_ln_sm_margin = int(os.getenv("NVTE_FWD_LAYERNORM_SM_MARGIN", "0"))
self.bwd_ln_sm_margin = int(os.getenv("NVTE_BWD_LAYERNORM_SM_MARGIN", "0"))
def _te_forward(
self,
inp: paddle.Tensor,
) -> Union[paddle.Tensor, Tuple[paddle.Tensor, ...]]:
"""
Apply layer normalization to the input followed by a linear transformation.
"""
with self.prepare_forward(inp, num_gemms=2) as inp:
            # The layer input should be cast outside the PyLayer, as performing an
            # inplace cast on the input tensors may cause problems when used
            # together with Paddle native layers.
inp = cast_if_needed(inp, self.activation_dtype)
out = _LayerNormMLP.apply(
inp,
self.ln_weight,
self.ln_bias,
self.fc1_weight,
self.fc1_bias,
self.has_bias,
self.fc2_weight,
self.fc2_bias,
self.has_bias,
self.eps,
self.fp8_enabled,
self.fp8_calibration,
self.fp8_meta,
self.activation_dtype,
self.return_layernorm_output,
paddle.is_grad_enabled(),
self.fwd_ln_sm_margin,
self.bwd_ln_sm_margin,
self.zero_centered_gamma,
self.activation,
self.set_parallel_mode,
self.tensor_parallel,
self.tp_group,
self.tp_size,
)
if self.return_layernorm_output:
out, ln_out = out
if not self.gemm_bias_fused_add:
out = out + cast_if_needed_inplace(self.fc2_bias, self.activation_dtype)
if self.return_layernorm_output:
return out, ln_out
return out
def _pd_forward(
self,
inp: paddle.Tensor,
) -> paddle.Tensor:
"""Calls Paddle OP"""
if self.zero_centered_gamma:
raise NotImplementedError(
"Paddle backend does not support LayerNorm with zero-centered scale.")
ln_out = F.layer_norm(x=inp,
normalized_shape=inp.shape[-1],
weight=self.ln_weight,
bias=self.ln_bias,
epsilon=self.eps)
if self.set_parallel_mode and self.tensor_parallel:
ln_out = identity(ln_out, self.tp_group)
fc1_out = F.linear(ln_out, self.fc1_weight, self.fc1_bias)
act_func = get_paddle_act_func(self.activation)
act_out = act_func(fc1_out)
out = F.linear(act_out, self.fc2_weight,
self.fc2_bias if self.gemm_bias_fused_add else None)
if self.set_parallel_mode and self.tensor_parallel:
out = allreduce(out, self.tp_group)
out = out + self.fc2_bias if self.fc2_bias is not None else out
if self.return_layernorm_output:
return out, ln_out
return out
def forward(self, *args, **kwargs):
"""forward"""
if self.backend == 'transformer_engine':
return self._te_forward(*args, **kwargs)
if self.backend == 'paddle':
return self._pd_forward(*args, **kwargs)
raise AttributeError(f"Backend {self.backend} is not supported.")
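# Example usage sketch for LayerNormMLP, kept in comments so it does not execute at import
# time. The sizes, the top-level `te` import path, and FP8 hardware availability are
# assumptions for illustration only.
#
#     import paddle
#     import transformer_engine.paddle as te
#
#     mlp = te.LayerNormMLP(hidden_size=1024, ffn_hidden_size=4096, activation="gelu")
#     x = paddle.randn([16, 128, 1024])
#     x.stop_gradient = False
#     with te.fp8_autocast(enabled=True):   # needs FP8-capable hardware (e.g. Hopper)
#         y = mlp(x)                        # same leading shape, last dim = hidden_size
#     y.mean().backward()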
| TransformerEngine-main | transformer_engine/paddle/layer/layernorm_mlp.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Linear API"""
import os
from typing import Union, Tuple
import paddle
import paddle.nn.functional as F
from paddle.nn.initializer import Constant
from ..constants import TE_DType
from ..cpp_extensions import layernorm_fwd, layernorm_bwd
__all__ = ["LayerNorm"]
class _LayerNorm(paddle.autograd.PyLayer):
"""TE Non-FP8 LayerNorm"""
@staticmethod
def forward(
ctx,
inp: paddle.Tensor,
ln_weight: paddle.Tensor,
ln_bias: paddle.Tensor,
eps: float,
fwd_ln_sm_margin: int,
bwd_ln_sm_margin: int,
zero_centered_gamma: bool,
) -> paddle.Tensor:
# Make sure input dimensions are compatible
in_features = ln_weight.shape[0]
assert inp.shape[-1] == in_features, "LayerNorm not possible"
inputmat = inp.reshape((-1, in_features))
ln_out, mu, rsigma = layernorm_fwd(inputmat, ln_weight, ln_bias, eps, TE_DType[inp.dtype],
fwd_ln_sm_margin, zero_centered_gamma)
ctx.save_for_backward(inputmat, ln_weight, mu, rsigma)
ctx.inp_shape = inp.shape
ctx.bwd_ln_sm_margin = bwd_ln_sm_margin
ctx.zero_centered_gamma = zero_centered_gamma
ctx.requires_dx = not inp.stop_gradient
ctx.requires_dw = not ln_weight.stop_gradient
ctx.requires_dbias = not ln_bias.stop_gradient
return ln_out.reshape(inp.shape)
@staticmethod
def backward(ctx, grad_output: paddle.Tensor) -> Tuple[Union[paddle.Tensor, None], ...]:
inputmat, ln_weight, mu, rsigma = ctx.saved_tensor()
d_ln_out = grad_output.reshape(inputmat.shape)
dxmat, dgamma, dbeta = layernorm_bwd(d_ln_out, inputmat, mu, rsigma, ln_weight,
ctx.bwd_ln_sm_margin, ctx.zero_centered_gamma)
return (
dxmat.reshape(ctx.inp_shape) if ctx.requires_dx else None,
dgamma if ctx.requires_dw else None,
dbeta if ctx.requires_dbias else None,
)
class LayerNorm(paddle.nn.Layer):
r"""
Applies Layer Normalization over a mini-batch of inputs as described in
    the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
"""
def __init__(
self,
hidden_size: int,
eps: float = 1e-5,
weight_attr: Union[paddle.ParamAttr, None] = None,
bias_attr: Union[paddle.ParamAttr, None, bool] = None,
zero_centered_gamma: bool = False,
backend: str = 'transformer_engine',
) -> None:
super().__init__()
self.eps = eps
self.zero_centered_gamma = zero_centered_gamma
self.backend = backend
self._dtype = self._helper.get_default_dtype()
self._weight_attr = weight_attr
if not self._weight_attr:
self._weight_attr = paddle.ParamAttr(initializer=Constant(
value=0.0 if self.zero_centered_gamma else 1.0))
self._bias_attr = bias_attr
if self._bias_attr is False:
self._bias_attr = paddle.ParamAttr(initializer=Constant(value=0.0), trainable=False)
self.weight = self.create_parameter(
shape=[hidden_size],
attr=self._weight_attr,
dtype=self._dtype,
is_bias=False,
)
self.bias = self.create_parameter(
shape=[hidden_size],
attr=self._bias_attr,
dtype=self._dtype,
is_bias=True,
)
        # This many SMs are subtracted from the total SM count when calling forward
# and backward LayerNorm C APIs. These envvars can be used to prevent the LN
# kernels from using all SMs in the device. This is useful for cases such as
# communication overlap with LN.
self.fwd_ln_sm_margin = int(os.getenv("NVTE_FWD_LAYERNORM_SM_MARGIN", "0"))
self.bwd_ln_sm_margin = int(os.getenv("NVTE_BWD_LAYERNORM_SM_MARGIN", "0"))
def _te_forward(self, inp: paddle.Tensor) -> paddle.Tensor:
"""LayerNorm FWD"""
return _LayerNorm.apply(inp, self.weight, self.bias, self.eps, self.fwd_ln_sm_margin,
self.bwd_ln_sm_margin, self.zero_centered_gamma)
def _pd_forward(
self,
inp: paddle.Tensor,
) -> paddle.Tensor:
"""Calls Paddle OP"""
if self.zero_centered_gamma:
raise NotImplementedError(
"Paddle backend does not support LayerNorm with zero-centered scale.")
return F.layer_norm(x=inp,
normalized_shape=inp.shape[-1],
weight=self.weight,
bias=self.bias,
epsilon=self.eps)
def forward(self, *args, **kwargs):
"""forward"""
if self.backend == 'transformer_engine':
return self._te_forward(*args, **kwargs)
if self.backend == 'paddle':
return self._pd_forward(*args, **kwargs)
raise AttributeError(f"Backend {self.backend} is not supported.")
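# Example usage sketch for LayerNorm, kept in comments so it does not execute at import
# time. The sizes and the top-level `te` import path are assumptions for illustration only.
#
#     import paddle
#     import transformer_engine.paddle as te
#
#     ln = te.LayerNorm(hidden_size=1024, eps=1e-5)
#     x = paddle.randn([8, 128, 1024])
#     y = ln(x)    # same shape as x, normalized over the last dimension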
| TransformerEngine-main | transformer_engine/paddle/layer/layernorm.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Linear API"""
from typing import Union, Tuple, Dict, Any, Optional
import paddle
import paddle.nn.functional as F
from paddle.nn.initializer import Constant
from .base import (
TransformerEngineBaseLayer,
get_workspace,
_2X_ACC_FPROP,
_2X_ACC_DGRAD,
_2X_ACC_WGRAD,
)
from ..constants import FP8FwdTensors, FP8BwdTensors, GemmParallelModes, dist_group_type
from ..cpp_extensions import gemm, fp8_gemm, cast_to_fp8, cast_transpose
from ..distributed import (
allreduce,
get_tp_group_and_world_size,
identity,
track_rng_state,
set_tensor_dist_attr,
set_weight_tensor_dist_attr,
)
from ..fp8 import get_fp8_te_dtype
from ..utils import (
assert_dim_for_fp8_forward_exec,
cast_if_needed,
cast_if_needed_inplace,
divide,
get_bias_dtype,
save_for_backward_allow_none,
saved_tensor_allow_none,
)
__all__ = ["Linear", "_linear_fwd", "_linear_fwd_fp8", "_linear_bwd", "_linear_fwd_non_fp8"]
def _linear_fwd_fp8(
inputmat: paddle.Tensor,
inputmat_fp8_index: FP8FwdTensors,
weight: paddle.Tensor,
weight_fp8_index: FP8FwdTensors,
bias: paddle.Tensor,
use_bias: bool,
fp8_meta: Dict[str, Any],
activation_dtype: paddle.dtype,
parallel_mode: Union[str, None],
tensor_parallel: bool,
tp_group: Union[dist_group_type, None],
is_grad_enabled: bool,
):
"""FP8 path of Linear Fwd"""
fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
bias_dtype = get_bias_dtype(activation_dtype)
bias = cast_if_needed(bias, bias_dtype)
if is_grad_enabled:
weight_fp8, weight_t_fp8 = cast_transpose(
weight,
fp8_meta["scaling_fwd"],
weight_fp8_index,
fp8_dtype_forward,
)
else:
weight_t_fp8 = None
weight_fp8 = cast_to_fp8(
weight,
fp8_meta["scaling_fwd"],
weight_fp8_index,
fp8_dtype_forward,
)
out = fp8_gemm(
weight_fp8,
fp8_meta["scaling_fwd"].scale_inv,
weight_fp8_index,
fp8_dtype_forward,
inputmat,
fp8_meta["scaling_fwd"].scale_inv,
inputmat_fp8_index,
fp8_dtype_forward,
activation_dtype,
get_workspace(),
bias=bias,
use_bias=use_bias,
use_split_accumulator=_2X_ACC_FPROP,
)
# Row Parallel Linear
if parallel_mode == "row" and tensor_parallel:
out = allreduce(out, tp_group)
return out, weight_t_fp8
def _linear_fwd_non_fp8(
inputmat: paddle.Tensor,
inputmat_fp8_index: FP8FwdTensors,
weight: paddle.Tensor,
weight_fp8_index: FP8FwdTensors,
bias: paddle.Tensor,
use_bias: bool,
fp8_calibration: bool,
fp8_meta: Dict[str, Any],
activation_dtype: paddle.dtype,
parallel_mode: Union[str, None],
tensor_parallel: bool,
tp_group: Union[dist_group_type, None],
activation: str = "",
):
"""Non-FP8 path of Linear Fwd"""
# Layer parameters are initialized as float32 dtype by default.
# Cast the parameters to activation_dtype if the current dtype
# does not match activation_dtype. The casting is inplace, so it
    # only needs to be performed once throughout the training process.
weight = cast_if_needed_inplace(weight, activation_dtype)
bias = cast_if_needed_inplace(bias, activation_dtype)
if fp8_calibration:
# amax of input
fp8_meta["scaling_fwd"].amax_history[0, inputmat_fp8_index.value] = \
paddle.max(paddle.abs(inputmat)).item()
# amax of weight
fp8_meta["scaling_fwd"].amax_history[0, weight_fp8_index.value] = \
paddle.max(paddle.abs(weight)).item()
outputs = gemm(weight,
inputmat,
activation_dtype,
get_workspace(),
bias=bias,
use_bias=use_bias,
gelu=(activation == 'gelu'))
if activation == 'gelu':
gelu_out, _, out = outputs
return out, gelu_out
out, _, _ = outputs
# Row Parallel Linear
if parallel_mode == "row" and tensor_parallel:
out = allreduce(out, tp_group)
return out
def _linear_fwd(
inputmat: paddle.Tensor,
inputmat_fp8_index: FP8FwdTensors,
weight: paddle.Tensor,
weight_fp8_index: FP8FwdTensors,
bias: paddle.Tensor,
use_bias: bool,
fp8_enabled: bool,
fp8_calibration: bool,
fp8_meta: Dict[str, Any],
activation_dtype: paddle.dtype,
parallel_mode: Union[str, None],
tensor_parallel: bool,
tp_group: Union[dist_group_type, None],
is_grad_enabled: bool,
):
if fp8_enabled:
out, weight_t_fp8 = _linear_fwd_fp8(
inputmat,
inputmat_fp8_index,
weight,
weight_fp8_index,
bias,
use_bias,
fp8_meta,
activation_dtype,
parallel_mode,
tensor_parallel,
tp_group,
is_grad_enabled,
)
else:
out = _linear_fwd_non_fp8(
inputmat,
inputmat_fp8_index,
weight,
weight_fp8_index,
bias,
use_bias,
fp8_calibration,
fp8_meta,
activation_dtype,
parallel_mode,
tensor_parallel,
tp_group,
)
return (
out,
weight_t_fp8 if fp8_enabled else None,
)
def _linear_bwd_fp8(
inputmat: paddle.Tensor,
inputmat_t: paddle.Tensor,
inputmat_fp8_index: FP8FwdTensors,
weight_t_fp8: paddle.Tensor,
weight_fp8_index: FP8FwdTensors,
grad_output: paddle.Tensor,
grad_output_c: paddle.Tensor,
grad_output_t: paddle.Tensor,
grad_output_fp8_index: FP8BwdTensors,
fwd_scale_inverses: paddle.Tensor,
fp8_meta: Dict[str, Any],
requires_dgrad: bool,
requires_wgrad: bool,
activation_dtype: paddle.dtype,
parallel_mode: Union[str, None],
tensor_parallel: bool,
tp_group: Union[dist_group_type, None],
):
dgrad, wgrad = None, None
fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
fp8_dtype_backward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=False)
if requires_dgrad:
dgrad = fp8_gemm(
weight_t_fp8,
fwd_scale_inverses,
weight_fp8_index,
fp8_dtype_forward,
grad_output_c,
fp8_meta["scaling_bwd"].scale_inv,
grad_output_fp8_index,
fp8_dtype_backward,
activation_dtype,
get_workspace(),
use_split_accumulator=_2X_ACC_DGRAD,
)
if parallel_mode == "column" and tensor_parallel:
dgrad = allreduce(dgrad, tp_group)
if requires_wgrad:
if not fp8_meta["recipe"].override_linear_precision.wgrad:
wgrad = fp8_gemm(
inputmat_t,
fwd_scale_inverses,
inputmat_fp8_index,
fp8_dtype_forward,
grad_output_t,
fp8_meta["scaling_bwd"].scale_inv,
grad_output_fp8_index,
fp8_dtype_backward,
activation_dtype,
get_workspace(),
use_split_accumulator=_2X_ACC_WGRAD,
)
else:
wgrad, _, _ = gemm(
inputmat,
grad_output,
activation_dtype,
get_workspace(),
layout="NT",
grad=True,
)
return dgrad, wgrad
def _linear_bwd_non_fp8(
inputmat: paddle.Tensor,
weight: paddle.Tensor,
grad_output: paddle.Tensor,
requires_bgrad: bool,
requires_dgrad: bool,
requires_wgrad: bool,
activation_dtype: paddle.dtype,
parallel_mode: Union[str, None],
tensor_parallel: bool,
tp_group: Union[dist_group_type, None],
gelu_input: Union[paddle.Tensor, None] = None,
activation: str = "",
):
"""
Performs Linear Backward. Optionally, fuses GELU backward and dbias.
"""
dgrad, wgrad, bgrad = None, None, None
if requires_dgrad:
dgrad, _, _ = gemm(
weight,
grad_output,
activation_dtype,
get_workspace(),
layout="NN",
gelu=(activation == 'gelu'),
gelu_input=gelu_input,
grad=True,
)
if parallel_mode == "column" and tensor_parallel:
dgrad = allreduce(dgrad, tp_group)
if requires_wgrad:
wgrad, bgrad, _ = gemm(
inputmat,
grad_output,
activation_dtype,
get_workspace(),
layout="NT",
grad=True,
use_bias=requires_bgrad,
)
elif requires_bgrad:
bgrad = grad_output.sum(axis=0)
return dgrad, wgrad, bgrad
def _linear_bwd(
inputmat: paddle.Tensor,
inputmat_t: paddle.Tensor,
inputmat_fp8_index: FP8FwdTensors,
weight: paddle.Tensor,
weight_t_fp8: paddle.Tensor,
weight_fp8_index: FP8FwdTensors,
grad_output: paddle.Tensor,
grad_output_c: paddle.Tensor,
grad_output_t: paddle.Tensor,
grad_output_fp8_index: FP8BwdTensors,
fwd_scale_inverses: paddle.Tensor,
requires_bgrad: bool,
fp8_enabled: bool,
fp8_meta: Dict[str, Any],
requires_dgrad: bool,
requires_wgrad: bool,
activation_dtype: paddle.dtype,
parallel_mode: Union[str, None],
tensor_parallel: bool,
tp_group: Union[dist_group_type, None],
):
dgrad, wgrad, bgrad = None, None, None
if fp8_enabled:
dgrad, wgrad = _linear_bwd_fp8(
inputmat,
inputmat_t,
inputmat_fp8_index,
weight_t_fp8,
weight_fp8_index,
grad_output,
grad_output_c,
grad_output_t,
grad_output_fp8_index,
fwd_scale_inverses,
fp8_meta,
requires_dgrad,
requires_wgrad,
activation_dtype,
parallel_mode,
tensor_parallel,
tp_group,
)
else:
dgrad, wgrad, bgrad = _linear_bwd_non_fp8(
inputmat,
weight,
grad_output,
requires_bgrad,
requires_dgrad,
requires_wgrad,
activation_dtype,
parallel_mode,
tensor_parallel,
tp_group,
)
return dgrad, wgrad, bgrad
class _Linear(paddle.autograd.PyLayer):
"""TE implementation of Linear"""
@staticmethod
def forward(
ctx,
weight: paddle.Tensor,
inp: paddle.Tensor,
bias: paddle.Tensor,
use_bias: bool,
fp8_enabled: bool,
fp8_calibration: bool,
fp8_meta: Dict[str, Any],
activation_dtype: paddle.dtype,
is_grad_enabled: bool,
parallel_mode: Union[str, None],
tensor_parallel: bool,
tp_group: Union[dist_group_type, None],
tp_size: int,
) -> paddle.Tensor:
# Make sure input dimensions are compatible
in_features = weight.shape[-1]
assert inp.shape[-1] == in_features, "GEMM not possible"
inputmat = inp.reshape((-1, in_features))
if fp8_enabled:
assert_dim_for_fp8_forward_exec(inputmat)
assert_dim_for_fp8_forward_exec(weight)
inputmat_no_fp8 = inputmat
# FP8 casting
if fp8_enabled:
fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
if not fp8_meta["recipe"].override_linear_precision.wgrad:
if is_grad_enabled:
inputmat, inputmat_t = cast_transpose(
inputmat,
fp8_meta["scaling_fwd"],
FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
)
else:
inputmat = cast_to_fp8(
inputmat,
fp8_meta["scaling_fwd"],
FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
)
else:
inputmat, inputmat_t = cast_to_fp8(
inputmat,
fp8_meta["scaling_fwd"],
FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
), None
# GEMM Fwd
out, weight_t_fp8 = _linear_fwd(
inputmat,
FP8FwdTensors.GEMM1_INPUT,
weight,
FP8FwdTensors.GEMM1_WEIGHT,
bias,
use_bias,
fp8_enabled,
fp8_calibration,
fp8_meta,
activation_dtype,
parallel_mode,
tensor_parallel,
tp_group,
is_grad_enabled,
)
if is_grad_enabled:
fp8_wgrad = fp8_enabled and not fp8_meta["recipe"].override_linear_precision.wgrad
save_for_backward_allow_none(
ctx,
inputmat_no_fp8 if not weight.stop_gradient and not fp8_wgrad else None,
inputmat_t if not weight.stop_gradient and fp8_wgrad else None,
weight,
weight_t_fp8 if fp8_enabled else None,
fp8_meta["scaling_fwd"].scale_inv.clone() if fp8_enabled else None,
)
ctx.activation_dtype = activation_dtype
ctx.fp8_enabled = fp8_enabled
ctx.fp8_meta = fp8_meta
ctx.use_bias = use_bias
ctx.inp_shape = inp.shape
ctx.parallel_mode = parallel_mode
ctx.tensor_parallel = tensor_parallel
ctx.tp_group = tp_group
ctx.tp_size = tp_size
ctx.requires_dgrad = not inp.stop_gradient
ctx.requires_wgrad = not weight.stop_gradient
ctx.requires_bgrad = use_bias and not bias.stop_gradient
return out.reshape((-1, *inp.shape[1:-1], out.shape[-1]))
@staticmethod
def backward(ctx, grad_output: paddle.Tensor) -> Tuple[Union[paddle.Tensor, None], ...]:
with TransformerEngineBaseLayer.prepare_backward(ctx.fp8_enabled,
ctx.fp8_meta,
ctx.tp_group,
ctx.tp_size,
name="_Linear"):
( # pylint: disable=unbalanced-tuple-unpacking
inputmat,
inputmat_t,
weight,
weight_t_fp8,
fwd_scale_inverses,
) = saved_tensor_allow_none(ctx)
(
grad_output,
grad_output_c,
grad_output_t,
bgrad,
) = TransformerEngineBaseLayer.grad_output_preprocess(ctx, grad_output)
dgrad, wgrad, bgrad_ = _linear_bwd(
inputmat,
inputmat_t,
FP8FwdTensors.GEMM1_INPUT,
weight,
weight_t_fp8,
FP8FwdTensors.GEMM1_WEIGHT,
grad_output,
grad_output_c,
grad_output_t,
FP8BwdTensors.GRAD_OUTPUT1,
fwd_scale_inverses,
ctx.requires_bgrad,
ctx.fp8_enabled,
ctx.fp8_meta,
ctx.requires_dgrad,
ctx.requires_wgrad,
ctx.activation_dtype,
ctx.parallel_mode,
ctx.tensor_parallel,
ctx.tp_group,
)
if not ctx.fp8_enabled:
# bgrad is fused with gemm for non-FP8 path
bgrad = bgrad_
if not ctx.use_bias:
return (
wgrad if ctx.requires_wgrad else None,
dgrad.reshape(ctx.inp_shape) if ctx.requires_dgrad else None,
)
return (
wgrad if ctx.requires_wgrad else None,
dgrad.reshape(ctx.inp_shape) if ctx.requires_dgrad else None,
bgrad if ctx.requires_bgrad else None,
)
class Linear(TransformerEngineBaseLayer):
"""
Applies a linear transformation to the incoming data :math:`y = xA^T + b`
"""
def __init__(
self,
in_features: int,
out_features: int,
weight_attr: Union[paddle.ParamAttr, None] = None,
bias_attr: Union[paddle.ParamAttr, None, bool] = None,
parallel_mode: Optional[str] = None,
tp_group: Union[dist_group_type, None] = None,
backend: str = 'transformer_engine',
) -> None:
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.backend = backend
self._weight_attr = weight_attr
self._bias_attr = bias_attr
self._dtype = self._helper.get_default_dtype()
# Set parallel configs
self.tp_group, self.tp_size = get_tp_group_and_world_size(tp_group,
enable_tp=parallel_mode
is not None)
self.tensor_parallel = self.tp_size > 1
self.parallel_mode = parallel_mode
assert (self.parallel_mode
in GemmParallelModes), f"parallel_mode {parallel_mode} not supported"
if self.parallel_mode == "column":
self.out_features = divide(self.out_features, self.tp_size)
elif self.parallel_mode == "row":
self.in_features = divide(self.in_features, self.tp_size)
# Initialize weight parameter
with track_rng_state(enable=self.tensor_parallel):
# TE linear weight is in column major
self.weight = self.create_parameter(
shape=[self.out_features, self.in_features]
if self.backend == 'transformer_engine' else [self.in_features, self.out_features],
attr=self._weight_attr,
dtype=self._dtype,
is_bias=False,
)
set_weight_tensor_dist_attr(self.weight, self.tensor_parallel, self.parallel_mode,
self.backend)
# Initialize bias parameter
self.has_bias = self._bias_attr is not False
use_default_bias = self._bias_attr is None or self._bias_attr is True
if self.has_bias:
self.bias = self.create_parameter(
shape=[self.out_features],
attr=self._bias_attr if not use_default_bias else paddle.ParamAttr(
initializer=Constant(value=0.0)),
dtype=self._dtype,
is_bias=True,
)
if parallel_mode == "column":
set_tensor_dist_attr(self.bias, self.tensor_parallel, axis=0)
else:
self.bias = None
# For RPL, bias has to be added after TP collectives
# So it cannot be fused with the GEMM
if self.parallel_mode == "row" and self.tensor_parallel and self.has_bias:
self.gemm_bias_fused_add = False
else:
self.gemm_bias_fused_add = True
def _te_forward(
self,
inp: paddle.Tensor,
) -> paddle.Tensor:
"""
Apply the linear transformation to the input.
"""
with self.prepare_forward(inp) as inp:
            # Layer input should be cast outside the PyLayer, since performing
            # an in-place cast on input tensors may cause problems when used
            # together with Paddle native layers.
inp = cast_if_needed(inp, self.activation_dtype)
out = _Linear.apply(
self.weight,
inp,
self.bias if self.gemm_bias_fused_add else None,
self.has_bias and self.gemm_bias_fused_add,
self.fp8_enabled,
self.fp8_calibration,
self.fp8_meta,
self.activation_dtype,
paddle.is_grad_enabled(),
self.parallel_mode,
self.tensor_parallel,
self.tp_group,
self.tp_size,
)
if not self.gemm_bias_fused_add:
out = out + cast_if_needed_inplace(self.bias, self.activation_dtype)
return out
def _pd_forward(
self,
inp: paddle.Tensor,
) -> paddle.Tensor:
"""Calls Paddle OP"""
if self.parallel_mode == 'column' and self.tensor_parallel:
inp = identity(inp, self.tp_group)
out = F.linear(inp, self.weight, self.bias if self.gemm_bias_fused_add else None)
if self.parallel_mode == 'row' and self.tensor_parallel:
out = allreduce(out, self.tp_group)
out = out + self.bias if self.bias is not None else out
return out
def forward(self, *args, **kwargs):
"""forward"""
if self.backend == 'transformer_engine':
return self._te_forward(*args, **kwargs)
if self.backend == 'paddle':
return self._pd_forward(*args, **kwargs)
raise AttributeError(f"Backend {self.backend} is not supported.")
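# Illustrative usage sketch (not part of the upstream file): exercise the
# Linear layer defined above on random data. Assumes a CUDA device (the TE
# base layer asserts on one); the pure-Paddle backend is used here so no FP8
# or fused TE kernels are required.
if __name__ == "__main__":
    _layer = Linear(in_features=64, out_features=128, backend='paddle')
    _x = paddle.randn([16, 64])
    _y = _layer(_x)
    print(_y.shape)  # expected: [16, 128]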
| TransformerEngine-main | transformer_engine/paddle/layer/linear.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Layer level Paddle APIs"""
from .attention import DotProductAttention, MultiHeadAttention
from .layernorm import LayerNorm
from .layernorm_linear import LayerNormLinear
from .layernorm_mlp import LayerNormMLP
from .linear import Linear
from .softmax import FusedScaleMaskSoftmax
from .transformer import TransformerLayer
| TransformerEngine-main | transformer_engine/paddle/layer/__init__.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""LayerNormLinear API"""
import os
from typing import Union, Tuple, Dict, Any, Optional
import paddle
import paddle.nn.functional as F
from paddle.nn.initializer import Constant
from ..cpp_extensions import (
cast_to_fp8,
cast_from_fp8,
layernorm_fwd,
layernorm_fwd_fp8,
layernorm_bwd,
transpose,
)
from .base import TransformerEngineBaseLayer
from .linear import _linear_fwd, _linear_bwd
from ..constants import TE_DType, FP8FwdTensors, FP8BwdTensors, GemmParallelModes, dist_group_type
from ..distributed import (
allreduce,
get_tp_group_and_world_size,
identity,
track_rng_state,
set_tensor_dist_attr,
set_weight_tensor_dist_attr,
)
from ..fp8 import get_fp8_te_dtype
from ..utils import (
assert_dim_for_fp8_forward_exec,
cast_if_needed,
cast_if_needed_inplace,
divide,
save_for_backward_allow_none,
saved_tensor_allow_none,
)
__all__ = ["LayerNormLinear", "_layernorm_fwd_fp8_cast", "_layernorm_bwd"]
def _layernorm_fwd_fp8_cast(
inputmat: paddle.Tensor,
ln_weight: paddle.Tensor,
ln_bias: paddle.Tensor,
out_fp8_index: FP8FwdTensors,
eps: float,
fp8_enabled: bool,
fp8_meta: Dict[str, Any],
activation_dtype: paddle.dtype,
return_layernorm_output: bool,
fwd_ln_sm_margin: int,
zero_centered_gamma: bool,
):
"""Performs LayerNorm + FP8_Cast for FP8 path. LayerNorm only for BF16 path"""
ln_weight = cast_if_needed_inplace(ln_weight, activation_dtype)
ln_bias = cast_if_needed_inplace(ln_bias, activation_dtype)
if fp8_enabled:
fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
if not return_layernorm_output:
ln_out, mu, rsigma = layernorm_fwd_fp8(
inputmat,
ln_weight,
ln_bias,
eps,
fp8_meta["scaling_fwd"],
out_fp8_index,
fp8_dtype_forward,
fwd_ln_sm_margin,
zero_centered_gamma,
)
ln_out_return = ln_out
else:
ln_out_return, mu, rsigma = layernorm_fwd(inputmat, ln_weight, ln_bias, eps,
TE_DType[activation_dtype], fwd_ln_sm_margin,
zero_centered_gamma)
ln_out = cast_to_fp8(
ln_out_return,
fp8_meta["scaling_fwd"],
out_fp8_index,
fp8_dtype_forward,
)
else:
ln_out, mu, rsigma = layernorm_fwd(inputmat, ln_weight, ln_bias, eps,
TE_DType[activation_dtype], fwd_ln_sm_margin,
zero_centered_gamma)
ln_out_return = ln_out
return (
ln_out_return,
ln_out,
mu,
rsigma,
)
def _layernorm_bwd(
inputmat: paddle.Tensor,
dgrad: paddle.Tensor,
ln_weight: paddle.Tensor,
mu: paddle.Tensor,
rsigma: paddle.Tensor,
grad_ln_out_return: paddle.Tensor,
return_layernorm_output: bool,
bwd_ln_sm_margin: int,
zero_centered_gamma: bool,
):
# LayerNorm gradient
d_ln_out = dgrad.reshape(inputmat.shape)
# Residual gradient
if return_layernorm_output:
d_ln_out = d_ln_out + grad_ln_out_return.reshape(d_ln_out.shape)
return layernorm_bwd(d_ln_out, inputmat, mu, rsigma, ln_weight, bwd_ln_sm_margin,
zero_centered_gamma)
class _LayerNormLinear(paddle.autograd.PyLayer):
"""TE implementation of LayerNormLinear"""
@staticmethod
def forward(
ctx,
inp: paddle.Tensor,
ln_weight: paddle.Tensor,
ln_bias: paddle.Tensor,
weight: paddle.Tensor,
bias: Union[paddle.Tensor, None],
use_bias: bool,
eps: float,
fp8_enabled: bool,
fp8_calibration: bool,
fp8_meta: Dict[str, Any],
activation_dtype: paddle.dtype,
return_layernorm_output: bool,
is_grad_enabled: bool,
fwd_ln_sm_margin: int,
bwd_ln_sm_margin: int,
zero_centered_gamma: bool,
parallel_mode: Union[str, None],
tensor_parallel: bool,
tp_group: Union[dist_group_type, None],
tp_size: int,
) -> Union[Tuple[paddle.Tensor, ...], paddle.Tensor]:
# Make sure input dimensions are compatible
in_features = ln_weight.shape[0]
assert inp.shape[-1] == in_features, "GEMM not possible"
inputmat = inp.reshape((-1, in_features))
if fp8_enabled:
assert_dim_for_fp8_forward_exec(inputmat)
assert_dim_for_fp8_forward_exec(weight)
# LayerNorm Fwd + FP8 Cast
(
ln_out_return,
ln_out,
mu,
rsigma,
) = _layernorm_fwd_fp8_cast(
inputmat,
ln_weight,
ln_bias,
FP8FwdTensors.GEMM1_INPUT,
eps,
fp8_enabled,
fp8_meta,
activation_dtype,
return_layernorm_output,
fwd_ln_sm_margin,
zero_centered_gamma,
)
# Linear Fwd
out, weight_t_fp8 = _linear_fwd(
ln_out,
FP8FwdTensors.GEMM1_INPUT,
weight,
FP8FwdTensors.GEMM1_WEIGHT,
bias,
use_bias,
fp8_enabled,
fp8_calibration,
fp8_meta,
activation_dtype,
parallel_mode,
tensor_parallel,
tp_group,
is_grad_enabled,
)
if is_grad_enabled:
save_for_backward_allow_none(
ctx,
inputmat,
ln_weight,
mu,
rsigma,
weight,
weight_t_fp8 if fp8_enabled else None,
ln_out,
fp8_meta["scaling_fwd"].scale_inv.clone() if fp8_enabled else None,
)
ctx.activation_dtype = activation_dtype
ctx.fp8_enabled = fp8_enabled
ctx.fp8_meta = fp8_meta
ctx.use_bias = use_bias
ctx.inp_shape = inp.shape
ctx.return_layernorm_output = return_layernorm_output
ctx.bwd_ln_sm_margin = bwd_ln_sm_margin
ctx.zero_centered_gamma = zero_centered_gamma
ctx.parallel_mode = parallel_mode
ctx.tensor_parallel = tensor_parallel
ctx.tp_group = tp_group
ctx.tp_size = tp_size
ctx.requires_dgrad = not inp.stop_gradient
ctx.requires_wgrad = not weight.stop_gradient
ctx.requires_bgrad = use_bias and not bias.stop_gradient
ctx.requires_ln_bgrad = not ln_bias.stop_gradient
ctx.requires_ln_wgrad = not ln_weight.stop_gradient
# [*, in_features] -> [*, out_features] except first dimension changes for SP
out = out.reshape((-1, *inp.shape[1:-1], out.shape[-1]))
if return_layernorm_output:
return out, ln_out_return.reshape(inp.shape)
return out
@staticmethod
def backward(
ctx, *grad_outputs: Tuple[paddle.Tensor,
...]) -> Tuple[Union[paddle.Tensor, None], ...]:
with TransformerEngineBaseLayer.prepare_backward(ctx.fp8_enabled,
ctx.fp8_meta,
ctx.tp_group,
ctx.tp_size,
name="_LayerNormLinear"):
( # pylint: disable=unbalanced-tuple-unpacking
inputmat,
ln_weight,
mu,
rsigma,
weight,
weight_t_fp8,
ln_out,
fwd_scale_inverses,
) = saved_tensor_allow_none(ctx)
(
grad_output,
grad_output_c,
grad_output_t,
bgrad,
) = TransformerEngineBaseLayer.grad_output_preprocess(ctx, grad_outputs[0])
# Prepare ln_out for Linear bwd
ln_out_no_fp8, ln_out_t = None, None
if ctx.fp8_enabled:
fp8_dtype_forward = get_fp8_te_dtype(ctx.fp8_meta["recipe"], fprop_tensor=True)
fp8_wgrad = not ctx.fp8_meta["recipe"].override_linear_precision.wgrad
if ctx.requires_wgrad:
if fp8_wgrad:
ln_out_t = transpose(ln_out, fp8_dtype_forward)
else:
ln_out_no_fp8 = cast_from_fp8(
ln_out,
ctx.fp8_meta["scaling_fwd"],
FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
TE_DType[ctx.activation_dtype],
)
# Linear Bwd
dgrad, wgrad, bgrad_ = _linear_bwd(
ln_out_no_fp8 if ctx.fp8_enabled else ln_out,
ln_out_t,
FP8FwdTensors.GEMM1_INPUT,
weight,
weight_t_fp8,
FP8FwdTensors.GEMM1_WEIGHT,
grad_output,
grad_output_c,
grad_output_t,
FP8BwdTensors.GRAD_OUTPUT1,
fwd_scale_inverses,
ctx.requires_bgrad,
ctx.fp8_enabled,
ctx.fp8_meta,
True, # Always compute dgrad to feed into LayerNorm bwd
ctx.requires_wgrad,
ctx.activation_dtype,
ctx.parallel_mode,
ctx.tensor_parallel,
ctx.tp_group,
)
if not ctx.fp8_enabled:
# bgrad is fused with gemm for non-FP8 path
bgrad = bgrad_
# LayerNorm Bwd
dxmat, dgamma, dbeta = _layernorm_bwd(
inputmat,
dgrad,
ln_weight,
mu,
rsigma,
grad_outputs[1] if ctx.return_layernorm_output else None,
ctx.return_layernorm_output,
ctx.bwd_ln_sm_margin,
ctx.zero_centered_gamma,
)
bgrad = bgrad if ctx.requires_bgrad else None
bgrad_out = (bgrad,) if ctx.use_bias else ()
return (
dxmat.reshape(ctx.inp_shape) if ctx.requires_dgrad else None,
dgamma if ctx.requires_ln_wgrad else None,
dbeta if ctx.requires_ln_bgrad else None,
wgrad if ctx.requires_wgrad else None,
*bgrad_out,
)
class LayerNormLinear(TransformerEngineBaseLayer):
r"""
Applies layer normalization followed by linear transformation to the incoming data.
"""
def __init__(
self,
in_features: int,
out_features: int,
eps: float = 1e-5,
weight_attr: Union[paddle.ParamAttr, None] = None,
bias_attr: Union[paddle.ParamAttr, None, bool] = None,
return_layernorm_output: bool = False,
zero_centered_gamma: bool = False,
parallel_mode: Optional[str] = None,
tp_group: Union[dist_group_type, None] = None,
backend: str = 'transformer_engine',
) -> None:
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.eps = eps
self.return_layernorm_output = return_layernorm_output
self.zero_centered_gamma = zero_centered_gamma
self.backend = backend
self._weight_attr = weight_attr
self._bias_attr = bias_attr
self._dtype = self._helper.get_default_dtype()
# Set parallel configs
self.tp_group, self.tp_size = get_tp_group_and_world_size(tp_group,
enable_tp=parallel_mode
is not None)
self.tensor_parallel = self.tp_size > 1
self.parallel_mode = parallel_mode
assert (self.parallel_mode
in GemmParallelModes), f"parallel_mode {parallel_mode} not supported"
if self.parallel_mode == "column":
self.out_features = divide(self.out_features, self.tp_size)
elif self.parallel_mode == "row":
self.in_features = divide(self.in_features, self.tp_size)
# LayerNorm weights
self.ln_weight = self.create_parameter(
shape=[self.in_features],
attr=paddle.ParamAttr(initializer=Constant(
value=0.0 if self.zero_centered_gamma else 1.0)),
dtype=self._dtype,
is_bias=False,
)
self.ln_bias = self.create_parameter(
shape=[self.in_features],
attr=paddle.ParamAttr(initializer=Constant(value=0.0)),
dtype=self._dtype,
is_bias=True,
)
# Initialize Linear weight parameter
with track_rng_state(enable=self.tensor_parallel):
# TE linear weight is in column major
self.weight = self.create_parameter(
shape=[self.out_features, self.in_features]
if self.backend == 'transformer_engine' else [self.in_features, self.out_features],
attr=self._weight_attr,
dtype=self._dtype,
is_bias=False,
)
set_weight_tensor_dist_attr(self.weight, self.tensor_parallel, self.parallel_mode,
self.backend)
# Initialize Linear bias parameter
self.has_bias = self._bias_attr is not False
use_default_bias = self._bias_attr is None or self._bias_attr is True
if self.has_bias:
self.bias = self.create_parameter(
shape=[self.out_features],
attr=self._bias_attr if not use_default_bias else paddle.ParamAttr(
initializer=Constant(value=0.0)),
dtype=self._dtype,
is_bias=True,
)
if parallel_mode == "column":
set_tensor_dist_attr(self.bias, self.tensor_parallel, axis=0)
else:
self.bias = None
# For RPL, bias has to be added after TP collectives
# So it cannot be fused with the GEMM
if self.parallel_mode == "row" and self.tensor_parallel and self.has_bias:
self.gemm_bias_fused_add = False
else:
self.gemm_bias_fused_add = True
# These many SMs are subtracted from the total SM count when calling forward
# and backward LayerNorm C APIs. These envvars can be used to prevent the LN
# kernels from using all SMs in the device. This is useful for cases such as
# communication overlap with LN.
self.fwd_ln_sm_margin = int(os.getenv("NVTE_FWD_LAYERNORM_SM_MARGIN", "0"))
self.bwd_ln_sm_margin = int(os.getenv("NVTE_BWD_LAYERNORM_SM_MARGIN", "0"))
def _te_forward(
self,
inp: paddle.Tensor,
) -> Union[paddle.Tensor, Tuple[paddle.Tensor, ...]]:
"""
Apply layer normalization to the input followed by a linear transformation.
"""
with self.prepare_forward(inp) as inp:
            # Layer input should be cast outside the PyLayer, since performing
            # an in-place cast on input tensors may cause problems when used
            # together with Paddle native layers.
inp = cast_if_needed(inp, self.activation_dtype)
out = _LayerNormLinear.apply(
inp,
self.ln_weight,
self.ln_bias,
self.weight,
self.bias if self.gemm_bias_fused_add else None,
self.has_bias and self.gemm_bias_fused_add,
self.eps,
self.fp8_enabled,
self.fp8_calibration,
self.fp8_meta,
self.activation_dtype,
self.return_layernorm_output,
paddle.is_grad_enabled(),
self.fwd_ln_sm_margin,
self.bwd_ln_sm_margin,
self.zero_centered_gamma,
self.parallel_mode,
self.tensor_parallel,
self.tp_group,
self.tp_size,
)
if self.return_layernorm_output:
out, ln_out = out
if not self.gemm_bias_fused_add:
out = out + cast_if_needed_inplace(self.bias, self.activation_dtype)
if self.return_layernorm_output:
return out, ln_out
return out
def _pd_forward(
self,
inp: paddle.Tensor,
) -> paddle.Tensor:
"""Calls Paddle OP"""
if self.zero_centered_gamma:
raise NotImplementedError(
"Paddle backend does not support LayerNorm with zero-centered scale.")
ln_out = F.layer_norm(x=inp,
normalized_shape=inp.shape[-1],
weight=self.ln_weight,
bias=self.ln_bias,
epsilon=self.eps)
if self.parallel_mode == 'column' and self.tensor_parallel:
ln_out = identity(ln_out, self.tp_group)
out = F.linear(ln_out, self.weight, self.bias if self.gemm_bias_fused_add else None)
if self.parallel_mode == 'row' and self.tensor_parallel:
out = allreduce(out, self.tp_group)
out = out + self.bias if self.bias is not None else out
if self.return_layernorm_output:
return out, ln_out
return out
def forward(self, *args, **kwargs):
"""forward"""
if self.backend == 'transformer_engine':
return self._te_forward(*args, **kwargs)
if self.backend == 'paddle':
return self._pd_forward(*args, **kwargs)
raise AttributeError(f"Backend {self.backend} is not supported.")
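# Illustrative usage sketch (not part of the upstream file): run the
# LayerNormLinear layer defined above with the pure-Paddle backend. Assumes
# a CUDA device; FP8 autocast is not involved, so only the LayerNorm + GEMM
# reference path is exercised.
if __name__ == "__main__":
    _layer = LayerNormLinear(in_features=64, out_features=128, backend='paddle')
    _x = paddle.randn([16, 64])
    _y = _layer(_x)
    print(_y.shape)  # expected: [16, 128]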
| TransformerEngine-main | transformer_engine/paddle/layer/layernorm_linear.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Transformer"""
from typing import Optional, Union
import paddle
from . import LayerNormMLP, LayerNorm, MultiHeadAttention
from ..constants import AttnMaskTypes, LayerTypes, dist_group_type
from ..distributed import get_tp_group_and_world_size, track_rng_state
class TransformerLayer(paddle.nn.Layer):
r"""
TransformerLayer is made up of an attention block and a feedforward network (MLP).
This standard layer is based on the paper "Attention Is All You Need".
Parameters
----------
hidden_size : int
size of each input sample.
ffn_hidden_size : int
intermediate size to which input samples are projected.
num_attention_heads : int
number of attention heads in the transformer layer.
layernorm_epsilon : float, default = 1e-5
a value added to the denominator of layer normalization
for numerical stability.
hidden_dropout: float, default = 0.1
dropout probability for the dropout op after FC2 layer.
attention_dropout: float, default = 0.1
dropout probability for the dropout op during multi-head attention.
self_attn_mask_type: {'causal', 'padding'}, default = `causal`
type of attention mask passed into softmax operation.
apply_residual_connection_post_layernorm : bool, default = `False`
if set to `True`, residual connections are taken
from the output of layer norm (default is taken
from input of layer norm)
output_layernorm: bool, default = `False`
if set to `True`, layer normalization is applied on the output side,
after the final dropout-add. default behavior is to apply layer
normalization on the input side, before the QKV transformation.
layer_type: {'encoder', 'decoder'}, default = `encoder`
if set to `decoder`, an additional cross-attn block is added after self-attn.
This can be used for structures like `T5` Transformer in conjunction with the
`encoder` option.
zero_centered_gamma : bool, default = 'False'
if set to 'True', gamma parameter in LayerNorm is initialized to 0 and
the LayerNorm formula changes to
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \varepsilon}} *
(1 + \gamma) + \beta
activation : str, default = 'gelu'
Type of activation used in MLP block.
Options are: 'gelu', 'relu', 'reglu', 'geglu' and 'swiglu'.
params_dtype : paddle.dtype, default = `paddle.get_default_dtype()`
it controls the type used to allocate the initial parameters. Useful when
the model is trained with lower precision and the original FP32 parameters
would not fit in GPU memory.
Parallelism parameters
----------------------
set_parallel_mode : bool, default = `False`
if set to `True`, QKV and FC1 layers are used as Column Parallel
whereas PROJ and FC2 is used as Row Parallel as described
`here <https://arxiv.org/pdf/1909.08053.pdf>`_.
tp_group : ProcessGroup, default = `None`
tensor parallel process group.
attention_dropout_rng_state_name : str, default = `local_seed`
Controls the rng state used for dropout on attention probs. The
specified rng should be set different seeds for different TP ranks.
It will be ignored if `set_parallel_mode` is False.
hidden_dropout_rng_state_name : str, default = `global_seed`
Controls the rng state used for dropout on hidden states. The
specified rng should be given the same seeds for different TP
ranks. It will be ignored if `set_parallel_mode` is False. The
specified name should be registered through
`paddle.distributed.fleet.meta_parallel.get_rng_state_tracker()
.add(rng_state_name, seed)`.
"""
def __init__(self,
hidden_size: int,
ffn_hidden_size: int,
num_attention_heads: int,
layernorm_epsilon: float = 1e-5,
hidden_dropout: float = 0.1,
attention_dropout: float = 0.1,
weight_attr: Union[paddle.ParamAttr, None] = None,
bias_attr: Union[paddle.ParamAttr, None, bool] = None,
self_attn_mask_type: str = "causal",
params_dtype: Optional[paddle.dtype] = None,
apply_residual_connection_post_layernorm: bool = False,
output_layernorm: bool = False,
layer_type: str = "encoder",
zero_centered_gamma: bool = False,
activation: str = 'gelu',
set_parallel_mode: bool = False,
tp_group: Optional[dist_group_type] = None,
attention_dropout_rng_state_name: str = 'local_seed',
hidden_dropout_rng_state_name: str = 'global_seed',
backend: str = 'transformer_engine') -> None:
super().__init__()
params_dtype = paddle.get_default_dtype() if params_dtype is None else params_dtype
self.output_layernorm = output_layernorm
self.layer_type = layer_type
self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
self.self_attn_mask_type = self_attn_mask_type
self.set_parallel_mode = set_parallel_mode
self.tp_group, self.tp_size = get_tp_group_and_world_size(tp_group,
enable_tp=set_parallel_mode)
self.tensor_parallel = self.tp_size > 1
self.hidden_dropout_rng_state_name = hidden_dropout_rng_state_name
assert (self_attn_mask_type
in AttnMaskTypes), f"self_attn_mask_type {self_attn_mask_type} not supported"
assert layer_type in LayerTypes, f"layer_type {layer_type} not supported"
attention_args = (
hidden_size,
num_attention_heads,
attention_dropout,
layernorm_epsilon,
weight_attr,
bias_attr,
)
common_attention_kwargs = {
"params_dtype": params_dtype,
"return_layernorm_output": apply_residual_connection_post_layernorm,
"zero_centered_gamma": zero_centered_gamma,
"set_parallel_mode": set_parallel_mode,
"tp_group": tp_group,
"rng_state_name": attention_dropout_rng_state_name,
"backend": backend,
}
self.self_attention = MultiHeadAttention(
*attention_args,
**common_attention_kwargs,
attn_mask_type=self_attn_mask_type,
input_layernorm=not output_layernorm,
attention_type="self",
)
if layer_type == "decoder":
self.inter_attention = MultiHeadAttention(
*attention_args,
**common_attention_kwargs,
attn_mask_type="padding",
input_layernorm=True,
attention_type="cross",
)
self.layernorm_mlp = LayerNormMLP(
hidden_size,
ffn_hidden_size,
eps=layernorm_epsilon,
weight_attr=weight_attr,
bias_attr=bias_attr,
activation=activation,
return_layernorm_output=apply_residual_connection_post_layernorm,
zero_centered_gamma=zero_centered_gamma,
set_parallel_mode=set_parallel_mode,
tp_group=tp_group,
backend=backend,
)
self.hidden_dropout = hidden_dropout
if self.output_layernorm:
self.layernorm = LayerNorm(
hidden_size,
layernorm_epsilon,
weight_attr,
bias_attr,
zero_centered_gamma=zero_centered_gamma,
backend=backend,
)
def forward(
self,
hidden_states: paddle.Tensor,
attention_mask: Optional[paddle.Tensor] = None,
encoder_output: Optional[paddle.Tensor] = None,
enc_dec_attn_mask: Optional[paddle.Tensor] = None,
core_attention_bias_type: str = "no_bias",
core_attention_bias: Optional[paddle.Tensor] = None,
set_zero: bool = True,
recompute_core_attention: bool = False,
) -> paddle.Tensor:
"""
Transformer Layer: attention block and a feedforward network (MLP)
.. note::
Argument :attr:`attention_mask` will be ignored when :attr:`self_attn_mask_type`
is set to `"causal"`.
Parameters
----------
hidden_states : paddle.Tensor
Input tensor.
attention_mask : Optional[paddle.Tensor], default = `None`
Boolean tensor used to mask out self-attention softmax input.
encoder_output : Optional[paddle.Tensor], default = `None`
Output of the encoder block to be fed into the decoder block if using
`layer_type="decoder"`.
enc_dec_attn_mask : Optional[paddle.Tensor], default = `None`
Boolean tensor used to mask out inter-attention softmax input if using
`layer_type="decoder"`.
core_attention_bias_type: str, default = `no_bias`
core_attention_bias: Optional[paddle.Tensor], default = `None`
Bias tensor for Q * K.T
set_zero: bool, default = `True`
Whether to set output tensors to 0 or not before use.
recompute_core_attention: bool, default = `False`
If true, forward activations for core attention are recomputed
during the backward pass in order to save memory that would
otherwise be occupied to store the forward activations until
backprop.
"""
if self.self_attn_mask_type != "causal" and attention_mask is not None:
assert (attention_mask.dtype == paddle.bool), "Attention mask must be a boolean tensor"
assert core_attention_bias_type in ['no_bias'], f"Only no_bias is supported currently, " \
            f"but received core_attention_bias_type = {core_attention_bias_type}"
# Self attention.
self_attention_outputs = self.self_attention(
hidden_states,
attention_mask,
core_attention_bias_type=core_attention_bias_type,
core_attention_bias=core_attention_bias,
set_zero=set_zero,
recompute_core_attention=recompute_core_attention,
)
if self.apply_residual_connection_post_layernorm and not self.output_layernorm:
attention_output, residual = self_attention_outputs
else:
attention_output = self_attention_outputs
residual = hidden_states
        # dropout add.
with track_rng_state(enable=self.tensor_parallel, name=self.hidden_dropout_rng_state_name):
out = paddle.nn.functional.dropout(
attention_output,
p=self.hidden_dropout,
training=True,
)
bda_output = residual + out
# Cross attention.
if self.layer_type == "decoder":
inter_attention_outputs = self.inter_attention(
bda_output,
enc_dec_attn_mask,
encoder_output=encoder_output,
core_attention_bias_type=core_attention_bias_type,
core_attention_bias=core_attention_bias,
set_zero=set_zero,
recompute_core_attention=recompute_core_attention,
)
if self.apply_residual_connection_post_layernorm:
attention_output, residual = inter_attention_outputs
else:
attention_output = inter_attention_outputs
residual = bda_output
with track_rng_state(enable=self.tensor_parallel,
name=self.hidden_dropout_rng_state_name):
out = paddle.nn.functional.dropout(
attention_output,
p=self.hidden_dropout,
training=True,
)
bda_output = residual + out
# MLP.
mlp_outputs = self.layernorm_mlp(bda_output)
if self.apply_residual_connection_post_layernorm:
mlp_output, residual = mlp_outputs
else:
mlp_output = mlp_outputs
residual = bda_output
        # dropout add.
with track_rng_state(enable=self.tensor_parallel, name=self.hidden_dropout_rng_state_name):
out = paddle.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=True)
output = residual + out
# For BERT like architectures.
if self.output_layernorm:
output = self.layernorm(output)
# output: [b, s, hidden]
return output
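# Illustrative usage sketch (not part of the upstream file): a single encoder
# layer applied to random hidden states. Assumes a CUDA device and a working
# TransformerEngine build; the input layout is taken to be [batch, seq, hidden]
# and the sizes are kept FP8-friendly (multiples of 16) even though FP8 is not
# enabled here.
if __name__ == "__main__":
    _layer = TransformerLayer(hidden_size=64, ffn_hidden_size=256, num_attention_heads=4)
    _hidden = paddle.randn([2, 32, 64])
    _out = _layer(_hidden)
    print(_out.shape)  # expected: [2, 32, 64]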
| TransformerEngine-main | transformer_engine/paddle/layer/transformer.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Fused scaled masked softmax functions"""
import os
import warnings
from typing import Callable, Tuple, Union, Optional
import paddle
from transformer_engine.paddle.cpp_extensions import (
scaled_upper_triang_masked_softmax_forward,
scaled_upper_triang_masked_softmax_backward,
scaled_masked_softmax_forward,
scaled_masked_softmax_backward,
scaled_softmax_forward,
scaled_softmax_backward,
)
THREADS_PER_WARP = 32
THREADS_PER_BLOCK = 128
_default_causal_mask = {}
def _get_default_causal_mask(seqlen: int) -> paddle.Tensor:
"""Return the causal upper triangular mask for softmax input"""
if seqlen not in _default_causal_mask:
_default_causal_mask[seqlen] = paddle.triu(paddle.ones((seqlen, seqlen)),
diagonal=1).cast('bool')
return _default_causal_mask[seqlen]
class ScaledUpperTriangMaskedSoftmax(paddle.autograd.PyLayer):
"""
Fused operation which performs following three operations in sequence
1. Scale the tensor.
2. Apply upper triangular mask (typically used in gpt models).
3. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs: paddle.Tensor, scale: float) -> paddle.Tensor:
"""ScaledUpperTriangMaskedSoftmax fwd"""
scale_t = paddle.Tensor([scale])
softmax_results = scaled_upper_triang_masked_softmax_forward(inputs, scale_t[0])
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads: paddle.Tensor) -> Tuple[Union[paddle.Tensor, None], ...]:
"""ScaledUpperTriangMaskedSoftmax bwd"""
softmax_results, scale_t = ctx.saved_tensor()
input_grads = scaled_upper_triang_masked_softmax_backward(output_grads, softmax_results,
scale_t[0])
return input_grads, None
class ScaledMaskedSoftmax(paddle.autograd.PyLayer):
"""
Fused operation which performs following three operations in sequence
1. Scale the tensor.
2. Apply the mask.
3. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs: paddle.Tensor, mask: paddle.Tensor, scale: float) -> paddle.Tensor:
"""ScaledMaskedSoftmax fwd"""
scale_t = paddle.Tensor([scale])
softmax_results = scaled_masked_softmax_forward(inputs, mask, scale_t[0])
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads: paddle.Tensor) -> Tuple[Union[paddle.Tensor, None], ...]:
"""ScaledMaskedSoftmax bwd"""
softmax_results, scale_t = ctx.saved_tensor()
input_grads = scaled_masked_softmax_backward(output_grads, softmax_results, scale_t[0])
return input_grads, None, None
class ScaledSoftmax(paddle.autograd.PyLayer):
"""
Fused operation which performs following two operations in sequence
1. Scale the tensor.
2. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs: paddle.Tensor, scale: float) -> paddle.Tensor:
"""ScaledSoftmax fwd"""
scale_t = paddle.Tensor([scale])
softmax_results = scaled_softmax_forward(inputs, scale_t[0])
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads: paddle.Tensor) -> Tuple[Union[paddle.Tensor, None], ...]:
"""ScaledSoftmax bwd"""
softmax_results, scale_t = ctx.saved_tensor()
input_grads = scaled_softmax_backward(output_grads, softmax_results, scale_t[0])
return input_grads, None, None
class FusedScaleMaskSoftmax(paddle.nn.Layer):
"""
fused operation: scaling + mask + softmax
Arguments:
attn_mask_type: attention mask type (pad or causal)
mask_func: mask function to be applied.
        softmax_in_fp32: if true, softmax is performed in fp32 precision.
"""
def __init__(
self,
attn_mask_type: str,
mask_func: Callable,
softmax_in_fp32: bool = True,
backend: str = 'transformer_engine',
) -> None:
super().__init__()
self.attn_mask_type = attn_mask_type
self.scaled_masked_softmax_fusion = bool(int(os.getenv("NVTE_MASKED_SOFTMAX_FUSION", "1")))
self.mask_func = mask_func
self.softmax_in_fp32 = softmax_in_fp32
self.backend = backend
def forward(
self,
inp: paddle.Tensor,
mask: paddle.Tensor,
scale: Optional[float] = None,
) -> paddle.Tensor:
"""FusedScaleMaskSoftmax fprop"""
# [batch_size, num_heads, s_q, s_kv]
assert inp.dim() == 4
self.input_is_fp16 = inp.dtype == paddle.float16
self.input_is_bf16 = inp.dtype == paddle.bfloat16
self.input_in_16bit_float = self.input_is_fp16 or self.input_is_bf16
assert (scale is None or self.softmax_in_fp32), "softmax should be in fp32 when scaled"
if self.backend == 'transformer_engine' and not self.is_kernel_available(*inp.shape):
warnings.warn(
"fused kernel is not available for this input shape, fall back to paddle backend")
self.backend = 'paddle'
if self.backend == 'transformer_engine':
return self._te_forward(inp, mask, scale)
if self.backend == 'paddle':
return self._pd_forward(inp, mask, scale)
raise AttributeError(f"Backend {self.backend} is not supported.")
def is_kernel_available(self, b: int, h: int, s_q: int, s_kv: int) -> bool:
"""Check FusedScaleMaskSoftmax kernel availability based on size"""
attn_batches = b * h
        if (self.scaled_masked_softmax_fusion    # user wants to fuse
                and self.input_in_16bit_float    # input must be fp16 or bf16
                and 16 < s_kv <= 4096    # s_kv must be in (16, 4096]
and s_q % 4 == 0 # s_q must be a multiple of 4
and attn_batches % 4 == 0 # b * h must be a multiple of 4
):
if 0 <= s_kv <= 4096:
batch_per_block = self.get_batch_per_block(int(s_kv))
if self.attn_mask_type == "causal":
if attn_batches % batch_per_block == 0:
return True
else:
if s_q % batch_per_block == 0:
return True
return False
def _te_forward(self,
inp: paddle.Tensor,
mask: paddle.Tensor,
scale: Optional[float] = None) -> paddle.Tensor:
"""Fused masked softmax kernel"""
        b, h, s_q, s_kv = inp.shape
scale = 1.0 if scale is None else scale
if self.attn_mask_type == "causal":
assert s_q == s_kv, "causal mask is only for self attention"
# input is 3D tensor (attn_batches, s_q, s_kv)
inp = inp.reshape((-1, s_q, s_kv))
probs = ScaledUpperTriangMaskedSoftmax.apply(inp, scale)
return probs.reshape((b, h, s_q, s_kv))
# input is 4D tensor (b, h, s_q, s_kv)
if mask is not None:
return ScaledMaskedSoftmax.apply(inp, mask, scale)
return ScaledSoftmax.apply(inp, scale)
def _pd_forward(self,
inp: paddle.Tensor,
mask: paddle.Tensor,
scale: Optional[float] = None) -> paddle.Tensor:
"""Call Paddle OP"""
if self.input_in_16bit_float and self.softmax_in_fp32:
inp = paddle.cast(inp, 'float32')
if scale is not None:
inp = inp * scale
if self.attn_mask_type == "causal":
mask = _get_default_causal_mask(inp.shape[2])
mask_output = self.mask_func(inp, mask) if mask is not None else inp
probs = paddle.nn.functional.softmax(mask_output, axis=-1)
if self.input_in_16bit_float and self.softmax_in_fp32:
if self.input_is_fp16:
probs = paddle.cast(probs, 'float16')
else:
probs = paddle.cast(probs, 'bfloat16')
return probs
@staticmethod
def get_batch_per_block(key_seq_len: int) -> int:
"""Softmax utility"""
pow2 = 1 << (key_seq_len - 1).bit_length()
warp_size = pow2 if pow2 < THREADS_PER_WARP else THREADS_PER_WARP
batches_per_warp = 2 if pow2 <= 128 else 1
warps_per_block = THREADS_PER_BLOCK // warp_size
batches_per_block = warps_per_block * batches_per_warp
return batches_per_block
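# Illustrative sketch (not part of the upstream file): print the batch-per-block
# heuristic used by `is_kernel_available` for a few key sequence lengths. Only
# the static helper is exercised, so no GPU tensors are needed, although
# importing this module still requires the built TE paddle extensions.
if __name__ == "__main__":
    for _s_kv in (64, 128, 512, 2048, 4096):
        print(_s_kv, FusedScaleMaskSoftmax.get_batch_per_block(_s_kv))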
| TransformerEngine-main | transformer_engine/paddle/layer/softmax.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Base modules and utilities for TransformerEngine Paddle API"""
from abc import ABC, abstractmethod
from contextlib import contextmanager
import os
import pickle
from typing import Generator, Dict, Tuple, Union, Any
import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.framework import _dygraph_tracer
from ..constants import FP8BwdTensors, dist_group_type
from ..cpp_extensions import cast_transpose, cast_transpose_bgrad, cast_to_fp8
from ..fp8 import (
FP8State,
FP8TensorMeta,
amax_and_scale_update,
get_global_fp8_state,
get_fp8_te_dtype,
)
from ..profile import nvtx_range
from ..recompute import is_in_recompute_phase
from ..fp8_buffer import FP8RecomputeBuffer
_2X_ACC_FPROP = False
_2X_ACC_DGRAD = True
_2X_ACC_WGRAD = True
_cublas_workspace = None
def get_cublas_workspace_size_bytes() -> int:
    """Return 32 MiB if using Hopper, 4 MiB for all other architectures."""
if paddle.device.cuda.get_device_capability()[0] >= 9:
return 33_554_432
return 4_194_304
def get_workspace() -> paddle.Tensor:
"""Returns workspace for cublas."""
global _cublas_workspace
if _cublas_workspace is None:
_cublas_workspace = paddle.empty(
[get_cublas_workspace_size_bytes()],
dtype='uint8',
)
return _cublas_workspace
class TransformerEngineBaseLayer(paddle.nn.Layer, ABC):
"""Base TE Layer."""
def __init__(self) -> None:
super().__init__()
assert 'gpu' in paddle.device.get_device(), "TransformerEngine needs CUDA."
self.fp8_initialized = False
self.fp8_enabled = False
self.fp8_calibration = False
self.fp8_meta = {}
self.fp8_meta["fp8_checkpoint"] = False
self.fp8_meta["fp8_group"] = None
self.fp8_meta["recipe"] = FP8State.get_default_fp8_recipe()
self.fp8_meta["scaling_fwd"] = FP8TensorMeta(is_forward=True)
self.fp8_meta["scaling_bwd"] = FP8TensorMeta(is_forward=False)
self.tp_group = None
self.tp_size = 1
self.fp8_meta["autocast_id_fwd_stack"] = []
self.fp8_meta["async_amax_reduction"] = bool(
int(os.getenv("NVTE_ASYNC_AMAX_REDUCTION", "0")))
def set_activation_dtype(self, inp: paddle.Tensor) -> None:
"""Get activation data type for AMP."""
tracer = _dygraph_tracer()
if tracer and tracer._amp_level != core.AmpLevel.O0:
# Set activation_dtype to the Paddle AMP dtype if under 'paddle.amp.auto_cast' context
if tracer._amp_dtype == 'float32':
self.activation_dtype = paddle.float32
elif tracer._amp_dtype == 'bfloat16':
self.activation_dtype = paddle.bfloat16
elif tracer._amp_dtype == 'float16':
self.activation_dtype = paddle.float16
else:
raise RuntimeError(f"AMP format {tracer._amp_dtype} is not supported.")
else:
# If not under paddle.amp.auto_cast, set activation_dtype to the input dtype.
# Also, make sure the parameters match the input dtype.
# Skip the check if activation_dtype is already set and if activation_dtype
# matches input dtype. If they do not match, e.g, when user switch from AMP
# training to normal training, activation_dtype will still be updated.
if hasattr(self, "activation_dtype") and self.activation_dtype == inp.dtype:
return
dtype = inp.dtype
for name, param in self.named_parameters():
if param is not None:
assert dtype == param.dtype, (
"Data types for parameters must match when outside of autocasted region. "
f" Found input dtype: {dtype} and {name!r} dtype: {param.dtype}")
self.activation_dtype = dtype
# This routine is shared across FP8 and FP8_calibration paths so should not actually
# assume FP8 execution.
def fp8_init(self, num_gemms: int = 1) -> None:
"""Initialize fp8 related metadata and tensors during fprop."""
global_fp8_state = get_global_fp8_state()
self.fp8_enabled = global_fp8_state.is_fp8_enabled()
self.fp8_calibration = global_fp8_state.is_fp8_calibration()
self.fp8_meta["fp8_checkpoint"] = self.fp8_enabled or self.fp8_calibration
if self.fp8_enabled or self.fp8_calibration:
# FP8 init has already been run and recipe is the same, don't do anything.
if self.fp8_initialized and global_fp8_state.get_fp8_recipe(
) == self.fp8_meta["recipe"]:
return
# Set FP8, recipe, and other FP8 metadata
self.fp8_meta["recipe"] = global_fp8_state.get_fp8_recipe()
self.fp8_meta["fp8_group"] = global_fp8_state.get_fp8_group()
# Set FP8_MAX per tensor according to recipe
self.fp8_meta["fp8_max_fwd"] = self.fp8_meta["recipe"].fp8_format.value.max_fwd
self.fp8_meta["fp8_max_bwd"] = self.fp8_meta["recipe"].fp8_format.value.max_bwd
# Allocate scales and amaxes
amax_history_len = self.fp8_meta["recipe"].amax_history_len
self.fp8_meta["scaling_fwd"].prepare(num_gemms, amax_history_len)
self.fp8_meta["scaling_bwd"].prepare(num_gemms, amax_history_len)
self.fp8_initialized = True
else:
# If fp8 isn't enabled, turn off and return.
self.fp8_initialized = False
return
def _get_fp8_state(self) -> paddle.Tensor:
"""Dump FP8 state to paddle.Tensor."""
state = None
if self.fp8_meta["fp8_checkpoint"]:
state = {}
state["scaling_fwd"] = self.fp8_meta["scaling_fwd"].to_numpy()
state["scaling_bwd"] = self.fp8_meta["scaling_bwd"].to_numpy()
state["global_fp8_fwd_buffer"] = get_global_fp8_state().get_fp8_fwd_buffer().to_numpy()
state["global_fp8_bwd_buffer"] = get_global_fp8_state().get_fp8_bwd_buffer().to_numpy()
            # Store other picklable values.
extra = {}
for k, v in self.fp8_meta.items():
if isinstance(v, (bool, int, float, str)):
extra[k] = v
state["extra_fp8_variables"] = extra
state_serialized = pickle.dumps(state)
state_tensor = paddle.to_tensor(np.frombuffer(state_serialized, dtype=np.uint8))
return state_tensor
@paddle.no_grad()
def state_dict(
self,
destination=None,
include_sublayers=True,
structured_name_prefix="",
use_hook=True,
):
"""Save FP8 State when checkpointing."""
st = super().state_dict(
destination=destination,
include_sublayers=include_sublayers,
structured_name_prefix=structured_name_prefix,
use_hook=use_hook,
)
st["fp8_state"] = self._get_fp8_state()
return st
def _set_fp8_state(self, state: paddle.Tensor) -> None:
"""Load previous state."""
if state is None:
return
state = pickle.loads(state.numpy().tobytes())
if state is None:
return
# Load fp8 meta tensors.
self.fp8_meta["scaling_fwd"].from_numpy(state["scaling_fwd"])
self.fp8_meta["scaling_bwd"].from_numpy(state["scaling_bwd"])
# Restore global FP8 buffer states.
global_fp8_fwd_buffer = get_global_fp8_state().get_fp8_fwd_buffer()
global_fp8_bwd_buffer = get_global_fp8_state().get_fp8_bwd_buffer()
global_fp8_fwd_buffer.from_numpy(state["global_fp8_fwd_buffer"])
global_fp8_bwd_buffer.from_numpy(state["global_fp8_bwd_buffer"])
# Load extra items.
self.fp8_meta.update(state["extra_fp8_variables"])
self.fp8_meta["recipe"].amax_history_len = self.fp8_meta["scaling_fwd"].amax_history.shape[
0]
recompute_buffer_pos_key = FP8RecomputeBuffer.get_buffer_position_key()
if recompute_buffer_pos_key in self.fp8_meta:
del self.fp8_meta[recompute_buffer_pos_key]
@paddle.no_grad()
def set_state_dict(self, state_dict, use_structured_name=True):
"""Restore FP8 State from checkpoint."""
fp8_state_tensor = state_dict.pop("fp8_state")
self._set_fp8_state(fp8_state_tensor)
return super().set_state_dict(state_dict)
@contextmanager
def prepare_forward(
self,
inp: paddle.Tensor,
num_gemms: int = 1,
) -> Generator[paddle.Tensor, None, None]:
"""Checks and prep for FWD.
The context manager is needed because there isn't a way for a module to know
if it's the last FP8 module in the forward autocast. It is useful
        to set up the forward aggregated amax reduction for every module
just in case. The autocast exit will pick up the most recent one.
"""
if self.fp8_enabled and is_in_recompute_phase():
global_recompute_buffer = get_global_fp8_state().get_fp8_recompute_buffer()
global_recompute_buffer.retrieve_fp8_meta_tensors(self.fp8_meta)
else:
self.set_activation_dtype(inp)
self.fp8_init(num_gemms=num_gemms)
# Previous iteration was grad_enabled
if self.fp8_meta.get("update_amax_and_scale_fwd", False):
global_fp8_fwd_buffer = get_global_fp8_state().get_fp8_fwd_buffer()
global_fp8_fwd_buffer.wait()
if self.fp8_meta["recipe"].reduce_amax:
global_fp8_fwd_buffer.copy_amax_from_buffer(self.fp8_meta)
amax_and_scale_update(self.fp8_meta, True)
global_fp8_fwd_buffer.set_for_deletion(self.fp8_meta)
else:
amax_and_scale_update(self.fp8_meta, True)
if self.fp8_enabled and self.training:
# Setup for amax reduction
if self.fp8_meta["recipe"].reduce_amax:
global_fp8_state = get_global_fp8_state()
self.fp8_meta["first_module"] = global_fp8_state.is_first_fp8_module()
self.fp8_meta["autocast_id_fwd"] = global_fp8_state.get_autocast_id()
self.fp8_meta["autocast_id_fwd_stack"].append(self.fp8_meta["autocast_id_fwd"])
self.fp8_meta["update_amax_and_scale_fwd"] = True
else:
self.fp8_meta["update_amax_and_scale_fwd"] = False
# Activation recomputation is used and this is the first forward phase.
if (self.fp8_enabled and self.training
and get_global_fp8_state().is_fp8_recompute_enabled()):
global_recompute_buffer = get_global_fp8_state().get_fp8_recompute_buffer()
global_recompute_buffer.stash_fp8_meta_tensors(self.fp8_meta)
with nvtx_range(self.__class__.__name__ + " forward"):
yield inp
if self.fp8_enabled and is_in_recompute_phase():
FP8RecomputeBuffer.restore_fp8_meta_tensors(self.fp8_meta)
return
if self.fp8_enabled and self.training and self.fp8_meta["recipe"].reduce_amax:
global_fp8_state = get_global_fp8_state()
global_fp8_fwd_buffer = global_fp8_state.get_fp8_fwd_buffer()
global_fp8_fwd_buffer.add_amax(self.fp8_meta)
global_fp8_fwd_buffer.set_for_amax_reduction(
self.fp8_meta,
self.tp_group,
self.tp_size,
)
@staticmethod
@contextmanager
def prepare_backward(fp8_enabled: bool,
fp8_meta: Dict[str, Any],
tp_group: dist_group_type,
tp_size: int,
name: str = "") -> Generator[None, None, None]:
"""Checks and prep for BWD."""
if fp8_enabled:
global_fp8_state = get_global_fp8_state()
global_fp8_bwd_buffer = global_fp8_state.get_fp8_bwd_buffer()
global_fp8_bwd_buffer.wait()
if fp8_meta["recipe"].reduce_amax:
global_fp8_bwd_buffer.copy_amax_from_buffer(fp8_meta)
amax_and_scale_update(fp8_meta, False)
global_fp8_bwd_buffer.set_for_deletion(fp8_meta)
# Get new backward key.
fp8_meta["autocast_id_bwd"] = fp8_meta["autocast_id_fwd_stack"].pop(0)
else:
amax_and_scale_update(fp8_meta, False)
with nvtx_range(name + " backward"):
yield
if fp8_enabled and fp8_meta["recipe"].reduce_amax:
global_fp8_bwd_buffer.add_amax(fp8_meta)
if fp8_meta["first_module"]:
global_fp8_bwd_buffer.finalize(fp8_meta, tp_group, tp_size)
@staticmethod
def grad_output_preprocess(
ctx, grad_output: paddle.Tensor) -> Tuple[Union[paddle.Tensor, None], ...]:
"""Utility function for backward.
        Returns a tuple in order (all optional/None based on training precision/recipe):
R1: gathered `grad_output` in higher precision.
R2: gathered `grad_output` in FP8.
R3: R2 transposed.
R4: bias gradient on R1.
"""
grad_output_mat = grad_output.reshape((-1, grad_output.shape[-1]))
# No-FP8 case: bgrad is fused with wgrad for this case.
if not ctx.fp8_enabled:
return grad_output_mat, None, None, None
fp8_dtype_backward = get_fp8_te_dtype(ctx.fp8_meta["recipe"], fprop_tensor=False)
# FP8 case without gather: cast, transpose, bgrad fused
if ctx.use_bias:
bgrad, grad_output_c, grad_output_t = cast_transpose_bgrad(
grad_output_mat,
ctx.fp8_meta["scaling_bwd"],
FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
)
else:
if not ctx.fp8_meta["recipe"].override_linear_precision.wgrad:
grad_output_c, grad_output_t = cast_transpose(
grad_output_mat,
ctx.fp8_meta["scaling_bwd"],
FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
)
else:
grad_output_t = None
grad_output_c = cast_to_fp8(
grad_output_mat,
ctx.fp8_meta["scaling_bwd"],
FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
)
bgrad = None
return grad_output_mat, grad_output_c, grad_output_t, bgrad
@abstractmethod
def forward(self):
"""Needs override."""
| TransformerEngine-main | transformer_engine/paddle/layer/base.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""FW agnostic user-end APIs"""
import ctypes
import os
import platform
import subprocess
import sys
def get_te_path():
"""Find Transformer Engine install path using pip"""
command = [sys.executable, "-m", "pip", "show", "transformer_engine"]
result = subprocess.run(command, capture_output=True, check=True, text=True)
result = result.stdout.replace("\n", ":").split(":")
return result[result.index("Location") + 1].strip()
def _load_library():
"""Load shared library with Transformer Engine C extensions"""
system = platform.system()
if system == "Linux":
extension = "so"
elif system == "Darwin":
extension = "dylib"
elif system == "Windows":
extension = "dll"
else:
raise RuntimeError(f"Unsupported operating system ({system})")
lib_name = "libtransformer_engine." + extension
dll_path = get_te_path()
dll_path = os.path.join(dll_path, lib_name)
return ctypes.CDLL(dll_path, mode=ctypes.RTLD_GLOBAL)
def _load_userbuffers():
"""Load shared library with userbuffers"""
system = platform.system()
if system == "Linux":
extension = "so"
elif system == "Darwin":
extension = "dylib"
elif system == "Windows":
extension = "dll"
else:
raise RuntimeError(f"Unsupported operating system ({system})")
lib_name = "libtransformer_engine_userbuffers." + extension
dll_path = get_te_path()
dll_path = os.path.join(dll_path, lib_name)
if os.path.exists(dll_path):
return ctypes.CDLL(dll_path, mode=ctypes.RTLD_GLOBAL)
return None
_TE_LIB_CTYPES = _load_library()
_UB_LIB_CTYPES = _load_userbuffers()
| TransformerEngine-main | transformer_engine/common/__init__.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""The utilities for Transformer Engine"""
import inspect
import warnings
from enum import Enum
warnings.simplefilter('default')
class DeprecatedEnum: # pylint: disable=too-few-public-methods
"""DeprecatedEnum"""
def __init__(self, enum_cls, msg):
self.enum_cls = enum_cls
self.msg = msg
def __iter__(self):
return iter(list(self.enum_cls.__members__.values()))
def __getattr__(self, name):
if name in self.enum_cls.__members__:
warnings.warn(self.msg, DeprecationWarning)
return self.enum_cls.__members__[name]
raise AttributeError(f"{self.enum_cls} does not contain {name}")
def deprecate_wrapper(obj, msg):
"""Deprecate wrapper"""
if inspect.isclass(obj):
if issubclass(obj, Enum):
return DeprecatedEnum(obj, msg)
class DeprecatedCls(obj): # pylint: disable=too-few-public-methods
"""DeprecatedCls"""
def __init__(self, *args, **kwargs):
warnings.warn(msg, DeprecationWarning)
super().__init__(*args, **kwargs)
return DeprecatedCls
if inspect.isfunction(obj):
def deprecated(*args, **kwargs):
warnings.warn(msg, DeprecationWarning)
return obj(*args, **kwargs)
return deprecated
    raise NotImplementedError(
        f"deprecate_wrapper only supports classes and functions, but got {type(obj)}.")
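# Illustrative sketch (not part of the upstream file): the names below are made
# up purely for this demo. Wrapping a function or an Enum re-emits the supplied
# message as a DeprecationWarning whenever the wrapped object is used.
if __name__ == "__main__":
    def _old_helper(x):
        return 2 * x
    class _OldFormat(Enum):
        E4M3 = 0
    _helper = deprecate_wrapper(_old_helper, "_old_helper is deprecated; use the new API.")
    _Format = deprecate_wrapper(_OldFormat, "_OldFormat is deprecated; use the new Format enum.")
    print(_helper(3))      # emits DeprecationWarning, prints 6
    print(_Format.E4M3)    # emits DeprecationWarning, prints _OldFormat.E4M3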
| TransformerEngine-main | transformer_engine/common/utils.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""This module provides predefined FP8 recipes."""
from __future__ import annotations
from enum import Enum
from typing import Literal, Optional, Union, Callable, NamedTuple
from pydantic.dataclasses import dataclass
class _FormatHelper(NamedTuple):
"""
    Stores the max FP8 values for fprop and bprop of a `Format`.
"""
max_fwd: float
max_bwd: float
class Format(Enum):
"""
Supported FP8 formats.
Values
------
E4M3 :
All FP8 tensors are in e4m3 format
E5M2 :
All FP8 tensors are in e5m2 format
HYBRID :
FP8 tensors in the forward pass are in e4m3 format,
FP8 tensors in the backward pass are in e5m2 format
"""
E4M3 = _FormatHelper(max_fwd=448, max_bwd=448)
E5M2 = _FormatHelper(max_fwd=57344, max_bwd=57344)
HYBRID = _FormatHelper(max_fwd=E4M3.max_fwd, max_bwd=E5M2.max_bwd)
class _OverrideLinearPrecision(NamedTuple):
"""
    Whether or not to execute the `fprop`, `dgrad`, and `wgrad`
GEMMs in higher precision when using FP8.
"""
fprop: bool = False
dgrad: bool = False
wgrad: bool = False
@dataclass()
class DelayedScaling:
"""
Use the delayed scaling factor strategy.
Use scale factor from previous iteration,
recompute once every `interval`, and record
amax history of `amax_history_len` steps.
Parameters
----------
margin : int, default = 0
Margin for the scaling factor computation.
interval : int, default = 1
Controls how often the scaling factor is recomputed.
fp8_format : {Format.E4M3, Format.HYBRID}, default = Format.HYBRID
Controls the FP8 data format used during forward and backward
pass.
amax_history_len : int, default = 1024
The length of the amax history window used for
scaling factor computation.
amax_compute_algo : {'max', 'most_recent', Callable}, default = 'max'
Algorithm used for choosing the `amax` value for the
scaling factor computation. There are 2 predefined
choices: `max` chooses the largest `amax` in the history
window, while `most_recent` always chooses the most recently
seen value. Alternatively, one may pass a function of the
signature:
.. code-block:: python
def amax_compute(amax_history: Tensor) -> Tensor
where `Tensor` is a framework tensor type.
scaling_factor_compute_algo : Callable, default = None
Algorithm used for computing the new scaling
factor based on the value of `amax`. It should
be a function of the signature:
.. code-block:: python
def scaling_factor_compute(amax: Tensor,
old_scaling_factor: Tensor,
fp8_max: Tensor,
recipe: DelayedScaling) -> Tensor
where `Tensor` is a framework tensor type.
override_linear_precision: Tuple(bool, bool, bool), default=(False, False, False)
                              Whether or not to execute the `fprop`, `dgrad`, and `wgrad`
GEMMs (respectively) in higher precision when using FP8.
reduce_amax: bool, default = `True`
By default, if `torch.distributed` is initialized, the `amax` value for FP8
tensors is reduced across the `fp8_group` (specified in the `fp8_autocast`
call). This keeps the amaxes and scaling factors synced across the given
distributed group. If set to `False`, this reduction is skipped and every
GPU maintains local amaxes and scaling factors. To ensure results are
numerically identical across checkpointing boundaries in this case, all
ranks must checkpoint in order to store the local tensors.
Notes
-----
* By default (when `scaling_factor_compute_algo` is left as `None`) the scaling
factor is computed from the final `amax` value using the formula:
.. code-block:: python
FP8_MAX = maximum_representable_value(fp8_format)
exp = get_exponent(FP8_MAX / amax) - margin
new_scaling_factor = 2.0 ^ exp
* The scaling factor should always be a power of 2 to not introduce numerical
error during the conversion from FP8 to higher precision format.
"""
margin: int = 0
interval: int = 1
fp8_format: Format = Format.HYBRID
amax_history_len: int = 1024
amax_compute_algo: Union[Literal["max", "most_recent"], Callable] = "max"
override_linear_precision: _OverrideLinearPrecision = _OverrideLinearPrecision()
scaling_factor_compute_algo: Optional[Callable] = None
reduce_amax: bool = True
def __post_init__(self) -> None:
assert self.fp8_format != Format.E5M2, "Pure E5M2 training is not supported."
assert self.override_linear_precision in (
(False, False, False),
(False, False, True),
), "Only wgrad GEMM override is currently supported."
| TransformerEngine-main | transformer_engine/common/recipe.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""FP8 utilies for TransformerEngine"""
from contextlib import contextmanager
from typing import Generator, Optional, Dict, Any
import tensorflow as tf
import transformer_engine_tensorflow as tex
from transformer_engine.common.recipe import DelayedScaling, Format
_FP8_ENABLED = False
_FP8_RECIPE = None
_FP8_DISTRIBUTED_GROUP = None
_IS_FIRST_FP8_MODULE = False
_FP8_AUTOCAST_COUNTER = 0
_FP8_CURRENT_CONTEXT_ID = 0
_FP8_AUTOCAST_DEPTH = 0
_global_fp8_buffer = {}
_amax_forward_global_reduce_func = lambda: None
_buffer_delete_key_fwd = None
_buffer_delete_key_bwd = None
def get_meta_tensor_key(forward: bool = True) -> str:
"""Returns scaling key in `fp8_meta`."""
if forward:
return "scaling_fwd"
return "scaling_bwd"
def get_autocast_key(forward: bool = True) -> str:
"""Returns module position key in `fp8_meta`."""
if forward:
return "autocast_id_fwd"
return "autocast_id_bwd"
def get_amax_buffer_key(fp8_meta: Dict[str, Any], forward: bool = True) -> str:
"""Return a key in `_global_fp8_buffer` for the AMAX storage."""
if forward:
return f"FWD_AMAX_{fp8_meta['autocast_id_fwd']}"
return f"BWD_AMAX_{fp8_meta['autocast_id_bwd']}"
def set_amax_buffer_key_deletion(
fp8_meta: Dict[str, Any], forward: bool = True
) -> None:
"""Delete this amax key from global buffer during autocast end."""
if get_autocast_key(forward=forward) not in fp8_meta:
return
global _buffer_delete_key_fwd, _buffer_delete_key_bwd
if forward:
_buffer_delete_key_fwd = get_amax_buffer_key(fp8_meta, forward=forward)
else:
_buffer_delete_key_bwd = get_amax_buffer_key(fp8_meta, forward=forward)
def get_default_fp8_recipe():
"""FP8 recipe if not provided by user
    Margin = 0, interval = 1, HYBRID format (E4M3 fwd, E5M2 bwd)
"""
return DelayedScaling()
@contextmanager
def fp8_autocast(
enabled: bool = False,
fp8_recipe: Optional[DelayedScaling] = None,
) -> Generator[None, None, None]:
"""
Context manager for FP8 usage.
.. code-block:: python
with fp8_autocast(enabled=True):
out = model(inp)
.. note::
Support for FP8 in the Dense layer of Transformer Engine is currently
limited to tensors with shapes where both dimensions are divisible by 16.
In terms of the input to the full Transformer network, this typically
    requires padding the sequence length to a multiple of 16.
Parameters
----------
enabled: bool, default = `False`
whether or not to enable fp8
fp8_recipe: recipe.DelayedScaling, default = `None`
recipe used for FP8 training.
"""
global _FP8_ENABLED, _FP8_RECIPE, _FP8_DISTRIBUTED_GROUP, _FP8_AUTOCAST_DEPTH
global _IS_FIRST_FP8_MODULE, _FP8_AUTOCAST_COUNTER
global _global_fp8_buffer, _buffer_delete_key_fwd
fp8_state = (_FP8_ENABLED, _FP8_RECIPE, _FP8_DISTRIBUTED_GROUP)
try:
_FP8_ENABLED = enabled
_FP8_RECIPE = get_default_fp8_recipe() if fp8_recipe is None else fp8_recipe
if _FP8_AUTOCAST_DEPTH == 0:
_IS_FIRST_FP8_MODULE = True
_FP8_AUTOCAST_COUNTER += 1
_FP8_AUTOCAST_DEPTH += 1
yield
finally:
_FP8_ENABLED, _FP8_RECIPE, _FP8_DISTRIBUTED_GROUP = fp8_state
_IS_FIRST_FP8_MODULE = False
_FP8_AUTOCAST_DEPTH -= 1
if _FP8_AUTOCAST_DEPTH == 0:
if callable(_amax_forward_global_reduce_func):
_amax_forward_global_reduce_func()
delete_key_from_amax_buffer(forward=True)
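# Illustrative usage sketch (comments only, not part of the library): running a
# model under the context manager above with a custom recipe. `my_model` and
# `inp` are placeholders for user-defined objects.
#
#     from transformer_engine.common.recipe import DelayedScaling, Format
#
#     recipe = DelayedScaling(fp8_format=Format.HYBRID, amax_history_len=16)
#     with fp8_autocast(enabled=True, fp8_recipe=recipe):
#         out = my_model(inp)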
def get_fp8_context_id() -> int:
"""Returns an ID for the current FP8 context."""
return _FP8_CURRENT_CONTEXT_ID
def set_fp8_context_id(ctx_id: int) -> None:
"""Sets the current FP8 context."""
global _FP8_CURRENT_CONTEXT_ID
_FP8_CURRENT_CONTEXT_ID = ctx_id
def new_fp8_context_id() -> int:
"""Returns global autocast counter as a proxy to be used
as the autocast ID for FP8 modules.
"""
return _FP8_AUTOCAST_COUNTER
def is_fp8_enabled():
"""Is FP8 enabled"""
return _FP8_ENABLED
def is_first_fp8_module():
"""Returns `True` only the first time when called multiple
times from within the same `fp8_autocast` context.
"""
global _IS_FIRST_FP8_MODULE
tmp = _IS_FIRST_FP8_MODULE
_IS_FIRST_FP8_MODULE = False
return tmp
def get_fp8_recipe():
"""Return the fp8 recipe"""
return _FP8_RECIPE
def _default_sf_compute(amax, scale, fp8_max, margin):
"""Default function to convert amax to scaling factor."""
exp = tf.math.floor(tf.experimental.numpy.log2(fp8_max / amax)) - margin
sf = tf.math.round(tf.math.pow(2.0, tf.math.abs(exp)))
sf = tf.where(amax > 0.0, sf, scale)
sf = tf.where(tf.math.is_finite(amax), sf, scale)
sf = tf.where(exp < 0, 1.0 / sf, sf)
return sf
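# Worked example for the conversion above (comments only), assuming E4M3 with
# fp8_max = 448 and margin = 0:
#   amax = 3.5   -> exp = floor(log2(448 / 3.5))  =  7 -> scale = 2.0**7  = 128.0
#   amax = 1000  -> exp = floor(log2(448 / 1000)) = -2 -> scale = 2.0**-2 = 0.25
# In both cases amax * scale stays within the representable FP8 range, and the
# scale is a power of two, matching the note in `DelayedScaling`.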
def _roll_and_zero_out(amax_history):
"""Update amax history and set next amax to zero."""
amax_history = tf.roll(amax_history, -1, 0)
zeros = tf.zeros(shape=amax_history[0].shape)
updated = tf.tensor_scatter_nd_update(amax_history, [[0]], [zeros])
return updated
@tf.function(jit_compile=True)
def _reduce_max_and_default_sf_compute(amax_history, scale, fp8_max, margin):
"""Get amax using max algorithm and compute scaling factor."""
amax = tf.reduce_max(amax_history, axis=0)
sf = _default_sf_compute(amax, scale, fp8_max, margin)
updated = _roll_and_zero_out(amax_history)
return updated, sf
@tf.function(jit_compile=True)
def _most_recent_and_default_sf_compute(amax_history, scale, fp8_max, margin):
"""Get amax using most-recent algorithm and compute scaling factor."""
amax = amax_history[0]
sf = _default_sf_compute(amax, scale, fp8_max, margin)
updated = _roll_and_zero_out(amax_history)
return updated, sf
def fused_amax_and_scale_update(
amax_history: tf.Variable,
scale: tf.Variable,
scale_inv: tf.Variable,
fp8_max: float,
margin: int,
amax_compute_algo: str,
):
"""Amax to scale conversion."""
if amax_compute_algo == "max":
updated, sf = _reduce_max_and_default_sf_compute(
amax_history, scale, fp8_max, margin
)
else:
assert amax_compute_algo == "most_recent"
updated, sf = _most_recent_and_default_sf_compute(
amax_history, scale, fp8_max, margin
)
amax_history.assign(updated)
scale.assign(sf)
scale_inv.assign(1.0 / sf)
def amax_and_scale_update(
fp8_meta: Dict[str, Any],
fwd_update: bool,
) -> None:
"""Updates fp8 amaxes/scales for fwd | bwd."""
amax_compute = fp8_meta["recipe"].amax_compute_algo
sf_compute = fp8_meta["recipe"].scaling_factor_compute_algo
fp8_meta_tensor_key = "scaling_fwd" if fwd_update else "scaling_bwd"
fp8_max_key = "fp8_max_fwd" if fwd_update else "fp8_max_bwd"
if not callable(amax_compute) and sf_compute is None:
fused_amax_and_scale_update(
fp8_meta[fp8_meta_tensor_key]["amax_history"],
fp8_meta[fp8_meta_tensor_key]["scale"],
fp8_meta[fp8_meta_tensor_key]["scale_inv"],
fp8_meta[fp8_max_key],
fp8_meta["recipe"].margin,
fp8_meta["recipe"].amax_compute_algo,
)
else:
raise ValueError(
"We only support the fp8 recipe with 'max' or 'most_recent' "
"amax_compute_algo and default scaling_factor_compute_algo at this "
"moment."
)
def get_fp8_te_dtype(fp8_recipe: DelayedScaling, fprop_tensor: bool = True):
"""Get fp8 data type according to recipe and tensor"""
if fp8_recipe.fp8_format == Format.E4M3 or (
fp8_recipe.fp8_format == Format.HYBRID and fprop_tensor
):
return tex.DType.kFloat8E4M3
return tex.DType.kFloat8E5M2
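# Illustrative mapping (comments only): with the default HYBRID recipe,
#   get_fp8_te_dtype(recipe, fprop_tensor=True)  -> tex.DType.kFloat8E4M3
#   get_fp8_te_dtype(recipe, fprop_tensor=False) -> tex.DType.kFloat8E5M2
# while a pure E4M3 recipe returns kFloat8E4M3 for both passes.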
def delete_key_from_amax_buffer(forward: bool = True) -> None:
"""Delete the key from global amax buffer."""
global _global_fp8_buffer, _buffer_delete_key_fwd, _buffer_delete_key_bwd
if forward:
if (
_buffer_delete_key_fwd is not None
and _buffer_delete_key_fwd in _global_fp8_buffer
):
del _global_fp8_buffer[_buffer_delete_key_fwd]
else:
if (
_buffer_delete_key_bwd is not None
and _buffer_delete_key_bwd in _global_fp8_buffer
):
del _global_fp8_buffer[_buffer_delete_key_bwd]
| TransformerEngine-main | transformer_engine/tensorflow/fp8.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""XLA functions and JIT utilities"""
from typing import Callable
import tensorflow as tf
@tf.function(jit_compile=True)
def _bgrad_dgelu_fused(grad_output, inp):
"""Bgrad-Dgelu fused"""
x = inp
tanh_out = tf.math.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
ff = 0.5 * x * (
(1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)
) + 0.5 * (1 + tanh_out)
dgelu = ff * grad_output
bgrad = tf.math.reduce_sum(dgelu, axis=0)
return bgrad, dgelu
def bgrad_dgelu_fused(grad_output, inp):
"""Bgrad-Dgelu fused"""
return _bgrad_dgelu_fused(grad_output, inp)
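# Background for the fused kernel above (comments only): it differentiates the
# tanh approximation of GELU,
#     gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))),
# where 0.79788456 ~= sqrt(2/pi). The derivative with respect to x is
#     0.5 * (1 + tanh(u)) + 0.5 * x * (1 - tanh(u)**2) * (0.79788456 + 0.1070322243 * x**2),
# which is the `ff` term; `dgelu` scales it by the upstream gradient and
# `bgrad` reduces over the leading (token) axis to form the bias gradient.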
def bias_dropout_add(
x: tf.Tensor,
bias: tf.Variable,
residual: tf.Tensor,
prob: float,
training: bool,
) -> tf.Tensor:
"""dropout(inp + bias) + residual"""
# TODO(kaixih): Use stateless_dropout and specify the seed mainly for
# debugging purpose. Should allow random seed.
out = (
tf.nn.experimental.stateless_dropout(
x + bias,
rate=prob,
seed=[1, 0],
)
if training
else x + bias
)
out = residual + out
return out
def get_bias_dropout_add(training: bool) -> Callable:
"""bias_dropout_add based on training or not"""
def _bias_dropout_add(x, bias, residual, prob):
return bias_dropout_add(x, bias, residual, prob, training)
return _bias_dropout_add
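# Illustrative usage sketch (comments only, not part of the library): the
# non-fused path returned above is called the same way as the jitted variants
# defined below. `attention_output`, `attention_bias`, and `residual` are
# placeholder tensors.
#
#     bda = get_bias_dropout_add(training=True)
#     out = bda(attention_output, attention_bias, residual, 0.1)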
@tf.function(jit_compile=True)
def bias_dropout_add_fused_train_(
x: tf.Tensor,
bias: tf.Variable,
residual: tf.Tensor,
prob: float,
) -> tf.Tensor:
"""Jit fused bias_dropout_add for training"""
return bias_dropout_add(x, bias, residual, prob, True)
def bias_dropout_add_fused_train(
x: tf.Tensor,
bias: tf.Variable,
residual: tf.Tensor,
prob: float,
) -> tf.Tensor:
"""Jit fused bias_dropout_add for training"""
return bias_dropout_add_fused_train_(x, bias, residual, prob)
@tf.function(jit_compile=True)
def bias_dropout_add_fused_inference_(
x: tf.Tensor,
bias: tf.Variable,
residual: tf.Tensor,
prob: float,
) -> tf.Tensor:
"""Jit fused bias_dropout_add for inference"""
return bias_dropout_add(x, bias, residual, prob, False)
def bias_dropout_add_fused_inference(
x: tf.Tensor,
bias: tf.Variable,
residual: tf.Tensor,
prob: float,
) -> tf.Tensor:
"""Jit fused bias_dropout_add for inference"""
return bias_dropout_add_fused_inference_(x, bias, residual, prob)
| TransformerEngine-main | transformer_engine/tensorflow/jit.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Enums for e2e transformer"""
import tensorflow as tf
import transformer_engine_tensorflow as tex
"""
This is a map: tf.dtype -> int
Used for passing dtypes into cuda
extension. Has one to one mapping
with enum in transformer_engine.h
"""
TE_DType = {
tf.int8: tex.DType.kByte,
tf.int32: tex.DType.kInt32,
tf.float32: tex.DType.kFloat32,
tf.half: tex.DType.kFloat16,
tf.bfloat16: tex.DType.kBFloat16,
}
AttnMaskTypes = ("causal", "padding")
AttnTypes = ("self", "cross")
LayerTypes = ("encoder", "decoder")
| TransformerEngine-main | transformer_engine/tensorflow/constants.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Transformer Engine bindings for Tensorflow"""
from transformer_engine.common.recipe import DelayedScaling
from transformer_engine.common.recipe import Format
from .constants import TE_DType
from .fp8 import fp8_autocast
from .module import Dense
from .module import LayerNorm
from .module import LayerNormDense
from .module import LayerNormMLP
from .module import get_stream_id
from .transformer import MultiHeadAttention
from .transformer import TransformerLayer
| TransformerEngine-main | transformer_engine/tensorflow/__init__.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Utility functions for Transformer Engine modules"""
import tensorflow as tf
def attention_mask_func(
attention_scores: tf.Tensor, attention_mask: tf.Tensor
) -> tf.Tensor:
"""Get attention mask"""
return tf.where(attention_mask, attention_scores, -10000.0)
def ensure_divisibility(numerator: int, denominator: int) -> None:
"""Ensure that numerator is divisible by the denominator."""
assert (
numerator % denominator == 0
), f"{numerator} is not divisible by {denominator}"
def divide(numerator: int, denominator: int) -> int:
"""Ensure that numerator is divisible by the denominator and return
the division value."""
ensure_divisibility(numerator, denominator)
return numerator // denominator
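# Illustrative examples (comments only):
#     divide(1024, 16)   ->  64
#     divide(1024, 384)  ->  AssertionError: 1024 is not divisible by 384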
| TransformerEngine-main | transformer_engine/tensorflow/utils.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Transformer."""
from contextlib import nullcontext
from typing import Callable, Optional, Tuple, Union
import os
from keras import backend, layers, initializers
import tensorflow as tf
from transformer_engine.tensorflow.module import (
LayerNorm,
LayerNormDense,
LayerNormMLP,
Dense,
)
from transformer_engine.tensorflow.softmax import FusedScaleMaskSoftmax
from transformer_engine.tensorflow.constants import (
AttnMaskTypes,
AttnTypes,
LayerTypes,
)
from transformer_engine.tensorflow.utils import (
divide,
attention_mask_func,
)
from transformer_engine.tensorflow.jit import (
get_bias_dropout_add,
bias_dropout_add_fused_train,
bias_dropout_add_fused_inference,
)
class CoreAttention(tf.keras.Model): # pylint: disable=too-few-public-methods
"""Parallel attention w/o QKV and Proj Gemms
BMM1 -> softmax + dropout -> BMM2
"""
def __init__(
self,
num_attention_heads: int,
kv_channels: int,
attention_dropout: float,
layer_number: Optional[int] = None,
apply_query_key_layer_scaling: bool = True,
attention_softmax_in_fp32: bool = False,
attn_mask_type: str = "causal",
) -> None:
super().__init__()
self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
self.attention_softmax_in_fp32 = attention_softmax_in_fp32
if layer_number is None:
self.apply_query_key_layer_scaling = False
else:
self.layer_number = max(1, layer_number)
if self.apply_query_key_layer_scaling:
self.attention_softmax_in_fp32 = True
self.attn_mask_type = attn_mask_type
projection_size = kv_channels * num_attention_heads
assert (
attn_mask_type in AttnMaskTypes
), f"attn_mask_type {attn_mask_type} not supported"
# Per attention head and per partition values.
self.hidden_size_per_partition = divide(projection_size, 1)
self.hidden_size_per_attention_head = divide(
projection_size, num_attention_heads
)
self.attention_dropout_ctx = nullcontext
coeff = None
self.norm_factor = tf.math.sqrt(
float(self.hidden_size_per_attention_head))
if self.apply_query_key_layer_scaling:
coeff = self.layer_number
self.norm_factor *= coeff
self.scale_mask_softmax = FusedScaleMaskSoftmax(
self.attn_mask_type,
attention_mask_func,
self.attention_softmax_in_fp32,
coeff,
)
# Dropout. Note that for a single iteration, this layer will generate
# different outputs on different number of parallel partitions but
# on average it should not be partition dependent.
self.attention_dropout = layers.Dropout(attention_dropout)
def __call__(
self,
query_layer: tf.Tensor,
key_layer: tf.Tensor,
value_layer: tf.Tensor,
attention_mask: tf.Tensor,
) -> tf.Tensor:
"""core attention fprop"""
# [b, np, sq, sk]
output_size = (
query_layer.shape[1],
query_layer.shape[2],
query_layer.shape[0],
key_layer.shape[0],
)
# [sq, b, np, hn] -> [sq, b * np, hn]
new_q_shape = (output_size[2], output_size[0] * output_size[1], -1)
query_layer = tf.reshape(query_layer, new_q_shape)
# [sk, b, np, hn] -> [sk, b * np, hn]
new_k_shape = (output_size[3], output_size[0] * output_size[1], -1)
key_layer = tf.reshape(key_layer, new_k_shape)
norm_factor = self._maybe_cast_inputs(self.norm_factor)
# Raw attention scores. [b * np, sq, sk]
matmul_result = (
tf.matmul(
tf.transpose(query_layer, perm=(1, 0, 2)), # [b * np, sq, hn]
tf.transpose(key_layer, perm=(1, 2, 0)), # [b * np, hn, sk]
)
/ norm_factor
)
# change view to [b, np, sq, sk]
attention_scores = tf.reshape(matmul_result, output_size)
# attention scores and attention mask [b, np, sq, sk]
attention_probs = self.scale_mask_softmax(attention_scores,
attention_mask)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
with self.attention_dropout_ctx():
attention_probs = self.attention_dropout(attention_probs)
# value_layer -> context layer.
# [sk, b, np, hn] --> [b, np, sq, hn]
output_size = (
value_layer.shape[1],
value_layer.shape[2],
query_layer.shape[0],
value_layer.shape[3],
)
# change view [sk, b * np, hn]
new_v_shape = (value_layer.shape[0], output_size[0] * output_size[1],
-1)
value_layer = tf.reshape(value_layer, new_v_shape)
# change view [b * np, sq, sk]
new_attn_shape = (output_size[0] * output_size[1], output_size[2], -1)
attention_probs = tf.reshape(attention_probs, new_attn_shape)
# matmul: [b * np, sq, hn]
context_layer = tf.matmul(
attention_probs, # [b * np, sq, sk]
tf.transpose(value_layer, perm=(1, 0, 2)), # [b * np, sk, hn]
)
# change view [b, np, sq, hn]
context_layer = tf.reshape(context_layer, output_size)
# [b, np, sq, hn] --> [sq, b, np, hn]
context_layer = tf.transpose(context_layer, perm=(2, 0, 1, 3))
# [sq, b, np, hn] --> [sq, b, hp]
new_context_layer_shape = (
*context_layer.shape[:-2],
self.hidden_size_per_partition,
)
context_layer = tf.reshape(context_layer, new_context_layer_shape)
return context_layer
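# Shape summary for CoreAttention above (comments only), with sq/sk = query/key
# sequence length, b = batch, np = number of heads, hn = head size, and
# hp = np * hn:
#   query/key/value enter as [sq|sk, b, np, hn]
#   BMM1 (query x key^T) produces scores of shape [b, np, sq, sk]
#   masked softmax and dropout keep that shape
#   BMM2 (probs x value) produces [b, np, sq, hn]
#   the result is transposed and reshaped back to [sq, b, hp]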
class MultiHeadAttention(layers.Layer):
"""Parallel attention w/ QKV and Proj Gemms
BMM1 -> softmax + dropout -> BMM2
"""
def __init__(
self,
hidden_size: int,
num_attention_heads: int,
kv_channels: int,
attention_dropout: float,
layernorm_epsilon: float = 1e-3,
init_method: Optional[Callable] = None,
output_layer_init_method: Optional[Callable] = None,
layer_number: Optional[int] = None,
apply_query_key_layer_scaling: bool = True,
attention_softmax_in_fp32: bool = False,
attn_mask_type: str = "causal",
return_layernorm_output: bool = False,
input_layernorm: bool = False,
attention_type: str = "self",
fuse_qkv_params: bool = False,
) -> None:
super().__init__()
self.hidden_size = hidden_size
self.layer_number = (layer_number,)
self.input_layernorm = input_layernorm
self.attention_type = attention_type
self.return_layernorm_output = return_layernorm_output
self.init_method = init_method
self.fuse_qkv_params = fuse_qkv_params
# We only support zero-initializer for bias weights.
self.bias_initializer = initializers.get("zeros")
assert (
attention_type in AttnTypes
), f"attention_type {attention_type} not supported"
self.hidden_size_per_attention_head = kv_channels
self.num_attention_heads_per_partition = divide(num_attention_heads, 1)
if self.attention_type == "self":
if self.input_layernorm:
self.layernorm_qkv = LayerNormDense(
3 * hidden_size,
epsilon=layernorm_epsilon,
kernel_initializer=init_method,
use_bias=True,
return_bias=False,
return_layernorm_output=return_layernorm_output,
skip_weight_param_allocation=not fuse_qkv_params,
)
else:
self.qkv = Dense(
3 * hidden_size,
kernel_initializer=init_method,
use_bias=True,
return_bias=False,
skip_weight_param_allocation=not fuse_qkv_params,
)
else:
if self.input_layernorm:
self.layernorm_query = LayerNormDense(
hidden_size,
epsilon=layernorm_epsilon,
kernel_initializer=init_method,
use_bias=True,
return_bias=False,
return_layernorm_output=return_layernorm_output,
skip_weight_param_allocation=not fuse_qkv_params,
)
else:
self.query_layer = Dense(
hidden_size,
kernel_initializer=init_method,
use_bias=True,
return_bias=False,
skip_weight_param_allocation=not fuse_qkv_params,
)
self.key_value = Dense(
2 * hidden_size,
kernel_initializer=init_method,
use_bias=True,
return_bias=False,
skip_weight_param_allocation=not fuse_qkv_params,
)
# Core Self attention.
self.core_attention = CoreAttention(
num_attention_heads,
kv_channels,
attention_dropout,
layer_number=layer_number,
apply_query_key_layer_scaling=apply_query_key_layer_scaling,
attention_softmax_in_fp32=attention_softmax_in_fp32,
attn_mask_type=attn_mask_type,
)
# Linear
self.proj = Dense(
hidden_size,
kernel_initializer=output_layer_init_method,
use_bias=False,
return_bias=True,
)
def build(self, input_shape):
"""One-time allocation of the variables."""
input_shape = tf.TensorShape(input_shape)
last_dim = tf.compat.dimension_value(input_shape[-1])
if last_dim is None:
raise ValueError(
"The last dimension of the inputs to a Dense layer should be "
f"defined. Found None. Full input shape received: {input_shape}"
)
if not self.fuse_qkv_params:
self.set_qkv_params(
last_dim,
3 * self.hidden_size,
use_bias=True,
)
def set_qkv_params(
self,
in_features,
out_features,
use_bias: bool = False,
) -> None:
"""Initialize separate Parameters for query, key, and value tensors."""
assert (
out_features % 3 == 0
), f"3 way QKV split with dimension {out_features} not possible."
qkv_dim = out_features // 3
if self.attention_type == "self":
self.qkv_weight = self.add_weight(
name="qkv_kernel",
shape=(in_features, out_features),
initializer=self.init_method,
trainable=True,
)
self.qkv_bias = None
if use_bias:
self.qkv_bias = self.add_weight(
name="qkv_bias",
shape=(out_features,),
initializer=self.bias_initializer,
trainable=True,
)
else:
self.q_weight = self.add_weight(
name="q_kernel",
shape=(in_features, qkv_dim),
initializer=self.init_method,
trainable=True,
)
self.kv_weight = self.add_weight(
name="kv_kernel",
shape=(in_features, 2 * qkv_dim),
initializer=self.init_method,
trainable=True,
)
self.q_bias = None
self.kv_bias = None
if use_bias:
self.q_bias = self.add_weight(
name="q_bias",
shape=(qkv_dim,),
initializer=self.bias_initializer,
trainable=True,
)
self.kv_bias = self.add_weight(
name="kv_bias",
shape=(2 * qkv_dim,),
initializer=self.bias_initializer,
trainable=True,
)
def _get_training_value(self, training=None):
if training is None:
training = backend.learning_phase()
if isinstance(training, int):
training = bool(training)
if not self.trainable:
# When the layer is not trainable, it overrides the value passed
# from model.
training = False
return training
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
encoder_output: Optional[tf.Tensor] = None,
training: bool = None,
) -> Tuple[Union[tf.Tensor, None], ...]:
"""MultiHeadAttention FWD"""
training = self._get_training_value(training)
# hidden_states: [sq, b, h]
if attention_mask is not None:
assert (
attention_mask.dtype == tf.bool
), "Attention mask must be a boolean tensor"
# =====================
# Query, Key, and Value
# =====================
if self.attention_type == "self":
qkv_weight = self.qkv_weight if not self.fuse_qkv_params else None
qkv_bias = self.qkv_bias if not self.fuse_qkv_params else None
# Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
if self.input_layernorm:
layernorm_qkv_outputs = self.layernorm_qkv(
hidden_states,
kernel=qkv_weight,
bias=qkv_bias,
training=training,
)
if self.return_layernorm_output:
mixed_x_layer, layernorm_output = layernorm_qkv_outputs
else:
mixed_x_layer = layernorm_qkv_outputs
else:
mixed_x_layer = self.qkv(
hidden_states,
kernel=qkv_weight,
bias=qkv_bias,
training=training,
)
# [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
new_tensor_shape = (
*mixed_x_layer.shape[:-1],
self.num_attention_heads_per_partition,
3 * self.hidden_size_per_attention_head,
)
mixed_x_layer = tf.reshape(mixed_x_layer, new_tensor_shape)
# [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
query_layer, key_layer, value_layer = tf.split(
mixed_x_layer, num_or_size_splits=3, axis=-1
)
else:
kv_weight = self.kv_weight if not self.fuse_qkv_params else None
kv_bias = self.kv_bias if not self.fuse_qkv_params else None
# Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)]
mixed_kv_layer = self.key_value(
encoder_output,
kernel=kv_weight,
bias=kv_bias,
training=training,
)
# [sk, b, (np * 2 * hn)] --> [sk, b, np, 2 * hn]
new_tensor_shape = (
*mixed_kv_layer.shape[:-1],
self.num_attention_heads_per_partition,
2 * self.hidden_size_per_attention_head,
)
mixed_kv_layer = tf.reshape(mixed_kv_layer, new_tensor_shape)
# [sk, b, np, 2 * hn] --> 2 [sk, b, np, hn]
key_layer, value_layer = tf.split(
mixed_kv_layer, num_or_size_splits=2, axis=-1
)
# Attention head [sq, b, h] --> [sq, b, hp]
if self.input_layernorm:
layernorm_query_outputs = self.layernorm_query(
hidden_states,
kernel=self.q_weight,
bias=self.q_bias,
training=training,
)
if self.return_layernorm_output:
query_layer, layernorm_output = layernorm_query_outputs
else:
query_layer = layernorm_query_outputs
else:
query_layer = self.query_layer(
hidden_states,
kernel=self.q_weight,
bias=self.q_bias,
training=training,
)
# [sq, b, hp] --> [sq, b, np, hn]
new_tensor_shape = (
*query_layer.shape[:-1],
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
)
query_layer = tf.reshape(query_layer, new_tensor_shape)
# ==================================
# core attention computation
# ==================================
context_layer = self.core_attention(
query_layer, key_layer, value_layer, attention_mask
)
# =================
# Output. [sq, b, h]
# =================
attention_output, attention_bias = self.proj(
context_layer,
training=training,
)
if self.input_layernorm and self.return_layernorm_output:
return attention_output, attention_bias, layernorm_output
return attention_output, attention_bias
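# Illustrative usage sketch (comments only, not part of the library): a minimal
# self-attention call. The sizes below are placeholders and follow the
# [sequence, batch, hidden] layout used throughout this file.
#
#     mha = MultiHeadAttention(hidden_size=1024, num_attention_heads=16,
#                              kv_channels=64, attention_dropout=0.1)
#     # hidden_states: [sq, b, h]; attention_mask: boolean tensor broadcastable
#     # to the [b, np, sq, sk] attention scores.
#     attn_out, attn_bias = mha(hidden_states, attention_mask, training=True)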
class DropPath(tf.keras.Model): # pylint: disable=too-few-public-methods
"""Drop paths (Stochastic Depth) per sample (when applied in main path of
residual blocks).
"""
def __init__(self, drop_prob: float = 0.0) -> None:
super().__init__()
self.drop_prob = drop_prob
def __call__(self, hidden_state: tf.Tensor, training: bool) -> tf.Tensor:
"""DropPath FWD"""
if self.drop_prob == 0.0 or not training:
return hidden_state
keep_prob = 1 - self.drop_prob
# work with diff dim tensors, not just 2D ConvNets
shape = (hidden_state.shape[0],) + (1,) * (len(hidden_state.shape) - 1)
# TODO(kaixih): We set the seed mainly for debugging purpose. Should
# allow users to turn it off.
random_tensor = tf.random.stateless_uniform(shape, seed=[1, 0])
random_mask = tf.cast(random_tensor <= keep_prob,
dtype=hidden_state.dtype)
output = (hidden_state / keep_prob) * random_mask
return output
class TransformerLayer(tf.keras.Model): # pylint: disable=too-few-public-methods
"""
TransformerLayer is made up of an attention block and a feedforward network
(MLP). This standard layer is based on the paper
"Attention Is All You Need".
Parameters
----------
hidden_size : int
size of each input sample.
ffn_hidden_size : int
intermediate size to which input samples are projected.
num_attention_heads : int
number of attention heads in the transformer layer.
layernorm_epsilon : float, default = 1e-5
a value added to the denominator of layer normalization for numerical
stability.
hidden_dropout: float, default = 0.1
dropout probability for the dropout op after FC2 layer.
attention_dropout: float, default = 0.1
dropout probability for the dropout op during multi-head attention.
init_method : Callable, default = `None`
used for initializing weights of QKV and FC1 weights in the following way:
`init_method(weight)`. When set to `None`, defaults to
                 `tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.023)`.
output_layer_init_method : Callable, default = `None`
used for initializing weights of PROJ and FC2 in the following way:
`output_layer_init_method(weight)`. When set to `None`, defaults to
                              `tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.023)`.
apply_residual_connection_post_layernorm : bool, default = `False`
if set to `True`, residual connections are taken from the output of layer
norm (default is taken from input of layer norm)
layer_number: int, default = `None`
layer number of the current `TransformerLayer` when multiple such modules
are concatenated to form a transformer block.
apply_query_key_layer_scaling: bool, default = `True`
apply query-key layer scaling during BMM1 by a factor of `layer_number`
output_layernorm: bool, default = `False`
if set to `True`, layer normalization is applied on the output side, after
the final dropout-add. default behavior is to apply layer normalization on
the input side, before the QKV transformation.
attention_softmax_in_fp32: bool, default = `False`
if set to `True`, softmax is executed in tf.float32 dtype (single
precision)
layer_type: {'encoder', 'decoder'}, default = `encoder`
if set to `decoder`, an additional cross-attn block is added after
self-attn. This can be used for structures like `T5` Transformer in
conjunction with the `encoder` option.
kv_channels: int, default = `None`
number of key-value channels. defaults to
`hidden_size / num_attention_heads` if `None`.
self_attn_mask_type: {'causal', 'padding'}, default = `causal`
type of attention mask passed into softmax operation.
Optimization parameters
-----------------------
drop_path_rate: float, default = 0.0
when > 0.0, applies stochastic depth per sample in the main path of the
residual block.
fuse_qkv_params: bool, default = 'False'
if set to `True`, `TransformerLayer` module exposes a single fused
parameter for query-key-value. This enables optimizations such as QKV
                     fusion without concatenations/splits.
"""
def __init__(
self,
hidden_size: int,
ffn_hidden_size: int,
num_attention_heads: int,
layernorm_epsilon: float = 1e-5,
hidden_dropout: float = 0.1,
attention_dropout: float = 0.1,
init_method: Optional[Callable] = None,
output_layer_init_method: Optional[Callable] = None,
layer_number: Optional[int] = None,
kv_channels: Optional[int] = None,
self_attn_mask_type: str = "causal",
apply_query_key_layer_scaling: bool = True,
attention_softmax_in_fp32: bool = False,
apply_residual_connection_post_layernorm: bool = False,
output_layernorm: bool = False,
layer_type: str = "encoder",
drop_path_rate: float = 0.0,
fuse_qkv_params: bool = False,
) -> None:
super().__init__()
bias_dropout_fusion = \
bool(int(os.getenv("NVTE_BIAS_DROPOUT_FUSION", "1")))
self.layer_number = layer_number
self.output_layernorm = output_layernorm
self.layer_type = layer_type
self.apply_residual_connection_post_layernorm = (
apply_residual_connection_post_layernorm
)
assert (
self_attn_mask_type in AttnMaskTypes
), f"self_attn_mask_type {self_attn_mask_type} not supported"
assert layer_type in LayerTypes, \
f"layer_type {layer_type} not supported"
self.kv_channels = (
kv_channels if kv_channels else (hidden_size // num_attention_heads)
)
if init_method is None:
init_method = initializers.RandomNormal(mean=0.0, stddev=0.023)
if output_layer_init_method is None:
output_layer_init_method = initializers.RandomNormal(mean=0.0,
stddev=0.023)
attention_args = (
hidden_size,
num_attention_heads,
self.kv_channels,
attention_dropout,
layernorm_epsilon,
init_method,
output_layer_init_method,
)
common_attention_kwargs = {
"layer_number": layer_number,
"apply_query_key_layer_scaling": apply_query_key_layer_scaling,
"attention_softmax_in_fp32": attention_softmax_in_fp32,
"return_layernorm_output": apply_residual_connection_post_layernorm,
"fuse_qkv_params": fuse_qkv_params,
}
self.self_attention = MultiHeadAttention(
*attention_args,
**common_attention_kwargs,
attn_mask_type=self_attn_mask_type,
input_layernorm=not output_layernorm,
attention_type="self",
)
if layer_type == "decoder":
self.inter_attention = MultiHeadAttention(
*attention_args,
**common_attention_kwargs,
attn_mask_type="padding",
input_layernorm=True,
attention_type="cross",
)
# LayerNorm -> gelu(Linear + Bias) -> Linear
self.layernorm_mlp = LayerNormMLP(
hidden_size,
ffn_hidden_size,
epsilon=layernorm_epsilon,
kernel_initializer=init_method,
ffn_kernel_initializer=output_layer_init_method,
use_bias=False,
return_bias=True,
return_layernorm_output=apply_residual_connection_post_layernorm,
)
self.hidden_dropout = hidden_dropout
self.bias_dropout_fusion = bias_dropout_fusion
self.drop_path = (DropPath(drop_path_rate) if drop_path_rate > 0.0 else
None)
if self.output_layernorm:
self.layernorm = LayerNorm(
epsilon=layernorm_epsilon,
)
def _get_training_value(self, training=None):
if training is None:
training = backend.learning_phase()
if isinstance(training, int):
training = bool(training)
if not self.trainable:
# When the layer is not trainable, it overrides the value passed
# from model.
training = False
return training
def __call__(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
encoder_output: Optional[tf.Tensor] = None,
enc_dec_attn_mask: Optional[tf.Tensor] = None,
training: bool = None,
) -> tf.Tensor:
"""
Transformer Layer: attention block and a feedforward network (MLP)
Parameters
----------
hidden_states : tf.Tensor
Input tensor.
attention_mask : tf.Tensor
Boolean tensor used to mask out self-attention softmax input.
encoder_output : tf.Tensor
Output of the encoder block to be fed into the decoder block if using
`layer_type="decoder"`.
enc_dec_attn_mask : tf.Tensor
Boolean tensor used to mask out inter-attention softmax input if using
`layer_type="decoder"`.
"""
if attention_mask is not None:
assert (
attention_mask.dtype == tf.bool
), "Attention mask must be a boolean tensor"
# Theoretically, the input dtype can be handled by the autocast during
# the layer call. However, we may use the input (hidden_states) in the
# residual connection before the layer is called. So, we convert it
# ahead of time. As for the other input (encoder_output), we can leave
# the conversion to the inter_attention layer, since it won't be used in
# the residual connection.
hidden_states = self._maybe_cast_inputs(hidden_states)
# Self attention.
self_attention_outputs = self.self_attention(
hidden_states,
attention_mask,
training=training,
)
if (self.apply_residual_connection_post_layernorm and
not self.output_layernorm):
attention_output, attention_bias, residual = self_attention_outputs
else:
attention_output, attention_bias = self_attention_outputs
residual = hidden_states
# Set BDA func.
if self.bias_dropout_fusion:
if training:
bias_dropout_add_func = bias_dropout_add_fused_train
else:
bias_dropout_add_func = bias_dropout_add_fused_inference
else:
bias_dropout_add_func = get_bias_dropout_add(training)
# Bias dropout add.
attention_bias = tf.cast(attention_bias, dtype=self.compute_dtype)
if self.drop_path is None:
bda_output = bias_dropout_add_func(
attention_output,
attention_bias,
residual,
self.hidden_dropout,
)
else:
# TODO(kaixih): Use stateless_dropout and specify the seed
# mainly for debugging purpose. Should allow random seed.
out = (
tf.nn.experimental.stateless_dropout(
attention_output + attention_bias,
rate=self.hidden_dropout,
seed=[1, 0],
)
if training
else attention_output + attention_bias
)
bda_output = residual + self.drop_path(out, training)
# Cross attention.
if self.layer_type == "decoder":
inter_attention_outputs = self.inter_attention(
bda_output,
enc_dec_attn_mask,
encoder_output=encoder_output,
training=training,
)
if self.apply_residual_connection_post_layernorm:
attention_output, attention_bias, residual = \
inter_attention_outputs
else:
attention_output, attention_bias = inter_attention_outputs
residual = bda_output
attention_bias = tf.cast(attention_bias, dtype=self.compute_dtype)
bda_output = bias_dropout_add_func(
attention_output,
attention_bias,
residual,
self.hidden_dropout,
)
# MLP.
mlp_outputs = self.layernorm_mlp(
bda_output,
training=training,
)
if self.apply_residual_connection_post_layernorm:
mlp_output, mlp_bias, residual = mlp_outputs
else:
mlp_output, mlp_bias = mlp_outputs
residual = bda_output
# Bias dropout add.
mlp_bias = tf.cast(mlp_bias, dtype=self.compute_dtype)
if self.drop_path is None:
output = bias_dropout_add_func(
mlp_output,
mlp_bias,
residual,
self.hidden_dropout,
)
else:
# TODO(kaixih): Use stateless_dropout and specify the seed
# mainly for debugging purpose. Should allow random seed.
output = (
tf.nn.experimental.stateless_dropout(
mlp_output + mlp_bias,
rate=self.hidden_dropout,
seed=[1, 0],
)
if training
else mlp_output + mlp_bias
)
output = residual + self.drop_path(output, training)
# For BERT like architectures.
if self.output_layernorm:
output = self.layernorm(output)
# output: [b, s, h]
return output
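# Illustrative usage sketch (comments only, not part of the library): building
# a single encoder layer and running it under FP8 autocast. All sizes are
# placeholders chosen to satisfy the divisible-by-16 FP8 requirement.
#
#     import tensorflow as tf
#     from transformer_engine.tensorflow import fp8_autocast
#
#     layer = TransformerLayer(hidden_size=1024, ffn_hidden_size=4096,
#                              num_attention_heads=16)
#     x = tf.random.normal([128, 2, 1024])   # [sq, b, h]
#     with fp8_autocast(enabled=True):
#         y = layer(x, attention_mask=None, training=True)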
| TransformerEngine-main | transformer_engine/tensorflow/transformer.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Top level Transformer Engine PyTorch modules"""
from typing import Union, Callable
from keras import backend, layers, initializers
import tensorflow as tf
import transformer_engine_tensorflow as tex
from .constants import TE_DType
from .fp8 import (
is_fp8_enabled,
get_fp8_recipe,
get_default_fp8_recipe,
get_fp8_te_dtype,
is_first_fp8_module,
new_fp8_context_id,
get_fp8_context_id,
set_fp8_context_id,
amax_and_scale_update,
set_amax_buffer_key_deletion,
get_meta_tensor_key,
)
from .jit import (
bgrad_dgelu_fused,
)
stream_lib = tf.load_op_library(
tf.compat.v1.resource_loader.get_path_to_datafile(
tf.sysconfig.get_lib() + "/../lib_get_stream.so"
)
)
def get_stream_id():
"""Get stream index for GPU tasks."""
return stream_lib.get_stream().numpy()[0]
_2X_ACC_FPROP = False
_2X_ACC_DGRAD = True
_2X_ACC_WGRAD = True
_cublas_workspace = None
def get_workspace():
"""Returns workspace for cublas."""
global _cublas_workspace
if _cublas_workspace is None:
_cublas_workspace = tf.zeros([33_554_432], dtype=tf.int8)
return _cublas_workspace
def get_autocast_bias(dtype, bias_var, use_bias, use_fp8):
"""Get casted bias for fp8 gemm."""
if not use_bias:
return None
# We need to pass the EagerTensor instead of Variable when calling into the
    # pybind functions. So, we use value() for the explicit conversion.
bias = bias_var.value()
if dtype == "float16":
bias = tf.cast(bias, dtype)
if use_fp8 and bias.dtype == tf.float32:
bias = tf.cast(bias, dtype=tf.bfloat16)
return bias
def get_init_method(user_input, default_init_method):
"""Get initializer method for variables."""
if user_input is None:
return default_init_method
if callable(user_input):
return user_input
assert isinstance(user_input, str)
return initializers.get(user_input)
def cast_to_fp8_wrapper(x, fp8_meta, amax_index, fwd, output_dtype, stream_id):
"""Wrapper to call the tex.cast_to_fp8."""
scaling_key = get_meta_tensor_key(fwd)
scale = fp8_meta[scaling_key]["scale"].value()
amax = fp8_meta[scaling_key]["amax_history"].value()
scale_inv = fp8_meta[scaling_key]["scale_inv"].value()
x_fp8 = tex.cast_to_fp8(
x, scale, output_dtype, amax, scale_inv, amax_index, stream_id
)
return x_fp8
def cast_from_fp8_wrapper(x, fp8_meta, amax_index, fwd, idtype, odtype, sid):
"""Wrapper to call the tex.cast_from_fp8."""
scaling_key = "scaling_fwd" if fwd else "scaling_bwd"
scale_inv = fp8_meta[scaling_key]["scale_inv"].value()
x_fp8 = tex.cast_from_fp8(x, scale_inv, idtype, odtype, amax_index, sid)
return x_fp8
def fp8_cast_transpose_fused_wrapper(x, fp8_meta, amax_index, fwd, output_dtype,
sid):
"""Wrapper to call the tex.fp8_cast_transpose_fused."""
scaling_key = get_meta_tensor_key(fwd)
scale = fp8_meta[scaling_key]["scale"].value()
amax = fp8_meta[scaling_key]["amax_history"].value()
scale_inv = fp8_meta[scaling_key]["scale_inv"].value()
x_fp8, x_t_fp8 = tex.fp8_cast_transpose_fused(
x, scale, output_dtype, amax, scale_inv, amax_index, sid
)
return x_fp8, x_t_fp8
def fp8_cast_transpose_bgrad_fused_wrapper(
x, fp8_meta, amax_index, fwd, output_dtype, sid
):
"""Wrapper to call the tex.fp8_cast_transpose_bgrad_fused."""
scaling_key = get_meta_tensor_key(fwd)
scale = fp8_meta[scaling_key]["scale"].value()
amax = fp8_meta[scaling_key]["amax_history"].value()
scale_inv = fp8_meta[scaling_key]["scale_inv"].value()
grad_bias, grad_fp8, grad_t_fp8 = tex.fp8_cast_transpose_bgrad_fused(
x, scale, output_dtype, amax, scale_inv, amax_index, sid
)
return grad_bias, grad_fp8, grad_t_fp8
def fp8_cast_transpose_bgrad_dgelu_fused_wrapper(
dy, x, fp8_meta, amax_index, fwd, output_dtype, sid
):
"""Wrapper to call the tex.fp8_fused_cast_transpose_bgrad_dgelu."""
scaling_key = get_meta_tensor_key(fwd)
scale = fp8_meta[scaling_key]["scale"].value()
amax = fp8_meta[scaling_key]["amax_history"].value()
scale_inv = fp8_meta[scaling_key]["scale_inv"].value()
dbias, dgelu_c, dgelu_t = tex.fp8_fused_cast_transpose_bgrad_dgelu(
dy, x, scale, output_dtype, amax, scale_inv, amax_index, sid
)
return dbias, dgelu_c, dgelu_t
def fp8_gelu_wrapper(x, fp8_meta, amax_index, fwd, output_dtype, sid):
"""Wrapper to call the tex.te_gelu."""
scaling_key = get_meta_tensor_key(fwd)
scale = fp8_meta[scaling_key]["scale"].value()
amax = fp8_meta[scaling_key]["amax_history"].value()
scale_inv = fp8_meta[scaling_key]["scale_inv"].value()
y_fp8 = tex.te_gelu(x, scale, output_dtype, amax, scale_inv, amax_index,
sid)
return y_fp8
def matmul_wrapper(
inp,
weight,
mode,
output_dtype,
sid,
use_bias=False,
bias=None,
grad=False,
gelu=False,
gelu_input=None,
):
"""Wrapper to call the tex.te_gemm for the non-fp8 gemm."""
A = inp
B = weight
A_dtype, B_dtype = TE_DType[A.dtype], TE_DType[B.dtype]
A_offset, B_offset = -1, -1
if mode in ("fwd", "fc1_fwd", "fc2_fwd"):
transA, transB = False, False
elif mode in ("bwd_input", "fc1_bwd_input", "fc2_bwd_input"):
transA, transB = False, True
elif mode in ("bwd_weight", "fc1_bwd_weight", "fc2_bwd_weight"):
transA, transB = True, False
return tex.te_gemm(
B,
None,
B_dtype,
B_offset,
A,
None,
A_dtype,
A_offset,
get_workspace(),
use_bias,
bias,
gelu,
gelu_input,
transB,
transA,
grad,
False, # accumulate
        False,  # use_split_accumulate
TE_DType[output_dtype],
sid,
)
def fp8_matmul_wrapper(
inp,
weight,
fp8_meta,
mode,
A_dtype,
B_dtype,
output_dtype,
use_split_accumulate,
sid,
use_bias=False,
bias=None,
):
"""Wrapper to call the tex.te_gemm for the fp8 gemm."""
A = inp
B = weight
if mode in ("fwd", "fc1_fwd"):
A_scale_inv = fp8_meta["scaling_fwd"]["scale_inv"].value()
A_offset = tex.FP8FwdTensors.GEMM1_INPUT
B_scale_inv = fp8_meta["scaling_fwd"]["scale_inv"].value()
B_offset = tex.FP8FwdTensors.GEMM1_WEIGHT
elif mode == "fc2_fwd":
A_scale_inv = fp8_meta["scaling_fwd"]["scale_inv"].value()
A_offset = tex.FP8FwdTensors.GEMM2_INPUT
B_scale_inv = fp8_meta["scaling_fwd"]["scale_inv"].value()
B_offset = tex.FP8FwdTensors.GEMM2_WEIGHT
elif mode == "bwd_input":
A_scale_inv = fp8_meta["scaling_bwd"]["scale_inv"].value()
A_offset = tex.FP8BwdTensors.GRAD_OUTPUT1
B_scale_inv = fp8_meta["scaling_fwd"]["scale_inv"].value()
B_offset = tex.FP8FwdTensors.GEMM1_WEIGHT
elif mode == "fc1_bwd_input":
A_scale_inv = fp8_meta["scaling_bwd"]["scale_inv"].value()
A_offset = tex.FP8BwdTensors.GRAD_OUTPUT2
B_scale_inv = fp8_meta["scaling_fwd"]["scale_inv"].value()
B_offset = tex.FP8FwdTensors.GEMM1_WEIGHT
elif mode == "fc2_bwd_input":
A_scale_inv = fp8_meta["scaling_bwd"]["scale_inv"].value()
A_offset = tex.FP8BwdTensors.GRAD_OUTPUT1
B_scale_inv = fp8_meta["scaling_fwd"]["scale_inv"].value()
B_offset = tex.FP8FwdTensors.GEMM2_WEIGHT
elif mode == "bwd_weight":
A_scale_inv = fp8_meta["scaling_fwd"]["scale_inv"].value()
A_offset = tex.FP8FwdTensors.GEMM1_INPUT
B_scale_inv = fp8_meta["scaling_bwd"]["scale_inv"].value()
B_offset = tex.FP8BwdTensors.GRAD_OUTPUT1
elif mode == "fc2_bwd_weight":
A_scale_inv = fp8_meta["scaling_fwd"]["scale_inv"].value()
A_offset = tex.FP8FwdTensors.GEMM2_INPUT
B_scale_inv = fp8_meta["scaling_bwd"]["scale_inv"].value()
B_offset = tex.FP8BwdTensors.GRAD_OUTPUT1
elif mode == "fc1_bwd_weight":
A_scale_inv = fp8_meta["scaling_fwd"]["scale_inv"].value()
A_offset = tex.FP8FwdTensors.GEMM1_INPUT
B_scale_inv = fp8_meta["scaling_bwd"]["scale_inv"].value()
B_offset = tex.FP8BwdTensors.GRAD_OUTPUT2
return tex.te_gemm(
B,
B_scale_inv,
B_dtype,
B_offset,
A,
A_scale_inv,
A_dtype,
A_offset,
get_workspace(),
use_bias,
bias,
False, # use_gelu
None, # gelu_input
True, # transa
False, # transb
False, # grad
False, # accumulate
use_split_accumulate,
TE_DType[output_dtype],
sid,
)
def layernorm_fwd_fp8_wrapper(
x, ln_gamma, ln_beta, epsilon, fp8_meta, amax_index, output_dtype, sid
):
"""Wrapper to call the tex.layernorm_fwd_fp8."""
scaling_key = "scaling_fwd"
scale = fp8_meta[scaling_key]["scale"].value()
amax = fp8_meta[scaling_key]["amax_history"].value()
scale_inv = fp8_meta[scaling_key]["scale_inv"].value()
ln_out, mu, rsigma = tex.layernorm_fwd_fp8(
x,
ln_gamma,
ln_beta,
epsilon,
scale,
output_dtype,
amax,
scale_inv,
amax_index,
sid,
)
return ln_out, mu, rsigma
# The DelayedScaling object is not supported in TF autograd. So, to avoid
# passing this object to the custom gradient function, we only extract the
# useful information.
def get_recipe_attrs(recipe):
"""Get attributes from the recipe."""
fp8_dtype_fwd = get_fp8_te_dtype(recipe, fprop_tensor=True)
fp8_dtype_bwd = get_fp8_te_dtype(recipe, fprop_tensor=False)
override_linear_precision = recipe.override_linear_precision
return (fp8_dtype_fwd, fp8_dtype_bwd, override_linear_precision)
# TransformerEngineBaseModule is a mixin class and its init function will pass
# through all the positional and keyword arguments to other subclasses. Make
# sure this class is inherited first.
class TransformerEngineBaseModule:
"""Base TE module."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# fp8 related
self.fp8 = False
self.fp8_meta = {}
self.fp8_meta["recipe"] = get_default_fp8_recipe()
self.fp8_meta_tensors_initialized = False
self.fp8_weight_shapes = []
self.stream_id = get_stream_id()
def set_meta_tensor(self, fwd):
"""Init scales and amaxes for fwd | bwd."""
fp8_meta_tensor_key = "scaling_fwd" if fwd else "scaling_bwd"
num_fp8_tensors = (
self.fp8_meta["num_gemms"] * 2 if fwd else
self.fp8_meta["num_gemms"]
)
self.fp8_meta[fp8_meta_tensor_key] = {}
self.fp8_meta[fp8_meta_tensor_key]["scale"] = tf.Variable(
tf.ones((num_fp8_tensors), dtype=tf.float32), trainable=False
)
self.fp8_meta[fp8_meta_tensor_key]["scale_inv"] = tf.Variable(
tf.ones((num_fp8_tensors), dtype=tf.float32), trainable=False
)
self.fp8_meta[fp8_meta_tensor_key]["amax_history"] = tf.Variable(
tf.zeros(
(self.fp8_meta["recipe"].amax_history_len, num_fp8_tensors),
dtype=tf.float32,
),
trainable=False,
)
def init_fp8_meta_tensors(self):
"""Init scales and amaxes."""
# Checkpoint loaded
if self.fp8_meta_tensors_initialized:
return
self.set_meta_tensor(True)
self.set_meta_tensor(False)
def fp8_init(self, num_gemms=1):
"""Initialize fp8 related metadata and tensors during fprop."""
if not is_fp8_enabled():
self.fp8 = False
return
# FP8 is already enabled and recipe is the same, don't do anything.
if self.fp8 and get_fp8_recipe() == self.fp8_meta["recipe"]:
return
# Set FP8, recipe, and other FP8 metadata
self.fp8 = True
self.fp8_meta["recipe"] = get_fp8_recipe()
self.fp8_meta["num_gemms"] = num_gemms
# Set FP8_MAX per tensor according to recipe
fp8_format_val = self.fp8_meta["recipe"].fp8_format.value
self.fp8_meta["fp8_max_fwd"] = fp8_format_val.max_fwd
self.fp8_meta["fp8_max_bwd"] = fp8_format_val.max_bwd
# Allocate scales and amaxes
self.init_fp8_meta_tensors()
def pre_forward(self, training, num_gemms=1):
"""Checks and prep for FWD."""
self.fp8_init(num_gemms=num_gemms)
if self.fp8:
if self.fp8_meta.get("update_amax_and_scale_fwd", False):
# Previous iteration was grad_enabled
amax_and_scale_update(self.fp8_meta, True)
set_amax_buffer_key_deletion(self.fp8_meta, forward=True)
if training:
self.fp8_meta["first_module"] = is_first_fp8_module()
if self.fp8_meta["first_module"]:
self.fp8_meta["autocast_id_fwd"] = new_fp8_context_id()
set_fp8_context_id(self.fp8_meta["autocast_id_fwd"])
else:
self.fp8_meta["autocast_id_fwd"] = get_fp8_context_id()
self.fp8_meta["update_amax_and_scale_fwd"] = True
# Create an empty tensor as a placeholder for the backprop to
# correctly know how many tensors to autograd.
self.fp8_meta["autocast_id_bwd"] = -1
else:
self.fp8_meta["update_amax_and_scale_fwd"] = False
def pre_backward(self):
"""Checks and prep for BWD."""
# From previous iteration
amax_and_scale_update(self.fp8_meta, False)
set_amax_buffer_key_deletion(self.fp8_meta, forward=False)
class Dense(TransformerEngineBaseModule, layers.Layer):
"""
Applies a linear transformation to the incoming data :math:`y = xW + b`
On NVIDIA GPUs it is a drop-in replacement for `tf.keras.layers.Dense`.
Parameters
----------
units : int
size of each output sample.
use_bias : bool, default = `True`
if set to `False`, the layer will not learn an additive bias.
kernel_initializer: Callable, default = `None`
used for initializing weights in the following way:
`kernel_initializer(weight)`. When set to `None`, defaults to
                         `tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.023)`.
bias_initializer: Callable, default = `None`
used for initializing biases in the following way:
`bias_initializer(weight)`. When set to `None`, defaults to `zeros`.
Parallelism parameters
----------------------
skip_weight_param_allocation: bool, default = `False`
if set to `True`, weight parameter is not allocated and must be passed as
                                  a keyword argument `kernel` during the forward pass.
Optimization parameters
-----------------------
return_bias : bool, default = `False`
when set to `True`, this module will not apply the additive bias itself,
but instead return the bias value during the forward pass together with
the output of the linear transformation :math:`y = xW`. This is useful
when the bias addition can be fused to subsequent operations.
"""
def __init__(
self,
units: int,
use_bias: bool = True,
return_bias: bool = False,
kernel_initializer: Union[Callable, str, None] = None,
bias_initializer: Union[Callable, str, None] = None,
skip_weight_param_allocation: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.units = units
self.use_bias = use_bias
self.return_bias = return_bias
self.kernel_initializer = get_init_method(
kernel_initializer, initializers.RandomNormal(mean=0.0,
stddev=0.023)
)
self.bias_initializer = get_init_method(
bias_initializer, initializers.get("zeros")
)
self.skip_weight_param_allocation = skip_weight_param_allocation
def build(self, input_shape):
"""One-time allocation of the variables."""
input_shape = tf.TensorShape(input_shape)
last_dim = tf.compat.dimension_value(input_shape[-1])
if last_dim is None:
raise ValueError(
"The last dimension of the inputs to a Dense layer should be "
f"defined. Found None. Full input shape received: {input_shape}"
)
self.kernel = None
self.bias = None
if not self.skip_weight_param_allocation:
self.kernel = self.add_weight(
name="kernel",
shape=(last_dim, self.units),
initializer=self.kernel_initializer,
trainable=True,
)
if self.use_bias or self.return_bias:
self.bias = self.add_weight(
name="bias",
shape=(self.units,),
initializer=self.bias_initializer,
trainable=True,
)
# fp8 related
self.fp8_weight_shapes.append((last_dim, self.units))
self.built = True
def _get_training_value(self, training=None):
if training is None:
training = backend.learning_phase()
if isinstance(training, int):
training = bool(training)
if not self.trainable:
# When the layer is not trainable, it overrides the value passed
# from model.
training = False
return training
def non_fp8_matmul(
self,
inp: tf.Tensor,
kernel_var: tf.Variable,
bias_var: Union[tf.Variable, None] = None,
):
"""Prep fwd+bwd non-fp8 matmul."""
@tf.custom_gradient
def non_fp8_matmul_func(x):
# We need to pass the EagerTensor instead of Variable when calling
# into the pybind functions. So, we use value() for the explicit
            # conversion.
kernel_val = kernel_var.value()
bias = get_autocast_bias(
self.compute_dtype, bias_var, self.use_bias, use_fp8=False,
)
output_dtype = self._compute_dtype_object
outputs = matmul_wrapper(
x, kernel_val, "fwd", output_dtype, self.stream_id,
self.use_bias, bias,
)
def grad_fn(upstream, variables=None):
grad_x = matmul_wrapper(
upstream, kernel_val, "bwd_input", output_dtype,
self.stream_id,
)
grad_weight = matmul_wrapper(
x, upstream, "bwd_weight", output_dtype, self.stream_id
)
if self.use_bias:
grad_bias = tf.math.reduce_sum(upstream, axis=0)
grad_inputs = [grad_x]
grad_vars = []
for v in variables:
if v.name.endswith("bias:0") and self.use_bias:
grad_vars.append(grad_bias)
elif v.name.endswith("kernel:0"):
grad_vars.append(grad_weight)
return grad_inputs, grad_vars
return outputs, grad_fn
return non_fp8_matmul_func(inp)
def fp8_matmul(
self,
inp: tf.Tensor,
kernel_var: tf.Variable,
bias_var: Union[tf.Variable, None] = None,
):
"""Prep fwd+bwd fp8 matmul."""
fp8_meta = self.fp8_meta
fp8_dtype_fwd, fp8_dtype_bwd, override_linear_precision = \
get_recipe_attrs(fp8_meta["recipe"])
@tf.custom_gradient
def fp8_matmul_func(x):
# We need to pass the EagerTensor instead of Variable when calling
# into the pybind functions. So, we use value() for the explicit
            # conversion.
kernel_val = kernel_var.value()
bias = get_autocast_bias(
self.compute_dtype, bias_var, self.use_bias, use_fp8=True,
)
if not override_linear_precision.wgrad:
x_fp8, x_t_fp8 = fp8_cast_transpose_fused_wrapper(
x,
fp8_meta,
tex.FP8FwdTensors.GEMM1_INPUT,
True,
fp8_dtype_fwd,
self.stream_id,
)
else:
x_fp8 = cast_to_fp8_wrapper(
x,
fp8_meta,
tex.FP8FwdTensors.GEMM1_INPUT,
True,
fp8_dtype_fwd,
self.stream_id,
)
weight_fp8, weight_t_fp8 = fp8_cast_transpose_fused_wrapper(
kernel_val,
fp8_meta,
tex.FP8FwdTensors.GEMM1_WEIGHT,
True,
fp8_dtype_fwd,
self.stream_id,
)
output_dtype = self._compute_dtype_object
outputs = fp8_matmul_wrapper(
x_fp8,
weight_t_fp8,
fp8_meta,
"fwd",
fp8_dtype_fwd,
fp8_dtype_fwd,
output_dtype,
_2X_ACC_FPROP,
self.stream_id,
self.use_bias,
bias,
)
def grad_fn(upstream, variables=None):
self.pre_backward()
if self.use_bias:
(
grad_bias,
grad_fp8,
grad_t_fp8,
) = fp8_cast_transpose_bgrad_fused_wrapper(
upstream,
fp8_meta,
tex.FP8BwdTensors.GRAD_OUTPUT1,
False,
fp8_dtype_bwd,
self.stream_id,
)
else:
if not override_linear_precision.wgrad:
grad_fp8, grad_t_fp8 = fp8_cast_transpose_fused_wrapper(
upstream,
fp8_meta,
tex.FP8BwdTensors.GRAD_OUTPUT1,
False,
fp8_dtype_bwd,
self.stream_id,
)
else:
grad_fp8 = cast_to_fp8_wrapper(
upstream,
fp8_meta,
tex.FP8BwdTensors.GRAD_OUTPUT1,
False,
fp8_dtype_bwd,
self.stream_id,
)
grad_x = fp8_matmul_wrapper(
grad_fp8,
weight_fp8,
fp8_meta,
"bwd_input",
fp8_dtype_bwd,
fp8_dtype_fwd,
output_dtype,
_2X_ACC_DGRAD,
self.stream_id,
)
if not override_linear_precision.wgrad:
grad_weight = fp8_matmul_wrapper(
x_t_fp8,
grad_t_fp8,
fp8_meta,
"bwd_weight",
fp8_dtype_fwd,
fp8_dtype_bwd,
output_dtype,
_2X_ACC_WGRAD,
self.stream_id,
)
else:
grad_weight = matmul_wrapper(
x, upstream, "bwd_weight", output_dtype, self.stream_id
)
grad_inputs = [grad_x]
grad_vars = []
for v in variables:
if v.name.endswith("bias:0") and self.use_bias:
grad_vars.append(grad_bias)
elif v.name.endswith("kernel:0"):
grad_vars.append(grad_weight)
return grad_inputs, grad_vars
return outputs, grad_fn
return fp8_matmul_func(inp)
def call(
self,
inputs,
kernel=None,
bias=None,
training=None,
):
"""
Apply the linear transformation to the input.
Parameters
----------
        inputs : tf.Tensor
               Input tensor.
        kernel : tf.Variable, default = None
               An optional weight tensor for the module. This argument is compulsory
               if the module is initialized with `skip_weight_param_allocation=True`.
        bias : tf.Variable, default = None
               An optional bias tensor for the module. This argument is compulsory if
               the module is initialized with `skip_weight_param_allocation=True` and
               one of `use_bias` or `return_bias` is `True`.
training : {True, False, None}, default = None
Whether this is in the training context.
"""
# self.pre_forward needs to be called outside the following branch,
# since it will set the self.fp8 if the autocast is detected.
training = self._get_training_value(training)
self.pre_forward(training)
kernel_var = (kernel if self.skip_weight_param_allocation else
self.kernel)
bias_var = bias if self.skip_weight_param_allocation else self.bias
if kernel_var is None:
raise ValueError("No valid kernel is provided")
inputmat = tf.reshape(inputs, shape=(-1, inputs.shape[-1]))
if self.fp8:
outputmat = self.fp8_matmul(inputmat, kernel_var, bias_var)
else:
outputmat = self.non_fp8_matmul(inputmat, kernel_var, bias_var)
outputs = tf.reshape(
outputmat, shape=(-1, *inputs.shape[1:-1], outputmat.shape[-1])
)
if self.return_bias:
return outputs, bias_var
return outputs
def get_config(self):
"""Returns the config of the layer."""
config = super().get_config()
config.update(
{
"units": self.units,
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer),
"bias_initializer": initializers.serialize(
self.bias_initializer),
"skip_weight_param_allocation":
self.skip_weight_param_allocation,
}
        )
        return config
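# Illustrative usage sketch (comments only, not part of the library): using the
# TE Dense layer, optionally under FP8 autocast. The sizes are placeholders and
# are divisible by 16 as required for FP8 execution.
#
#     import tensorflow as tf
#     from transformer_engine.tensorflow import fp8_autocast
#
#     dense = Dense(units=1024)
#     x = tf.random.normal([16, 1024])
#     with fp8_autocast(enabled=True):
#         y = dense(x, training=True)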
class LayerNorm(layers.Layer):
"""
Applies Layer Normalization over a mini-batch of inputs.
Parameters
----------
epsilon : float, default = 1e-3
a value added to the denominator of layer normalization for numerical
stability.
gamma_initializer: Callable, default = `None`
used for initializing LayerNorm gamma in the following way:
`gamma_initializer(weight)`. When set to `None`, defaults to `ones`.
beta_initializer: Callable, default = `None`
used for initializing LayerNorm beta in the following way:
`beta_initializer(weight)`. When set to `None`, defaults to `zeros`.
"""
def __init__(
self, epsilon=1e-3, gamma_initializer="ones", beta_initializer="zeros",
**kwargs
):
super().__init__(**kwargs)
self.epsilon = epsilon
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.stream = get_stream_id()
def build(self, input_shape):
"""One-time allocation of the variables."""
input_shape = tf.TensorShape(input_shape)
last_dim = tf.compat.dimension_value(input_shape[-1])
if last_dim is None:
raise ValueError(
"The last dimension of the inputs to a Dense layer should be "
f"defined. Found None. Full input shape received: {input_shape}"
)
self.gamma = self.add_weight(
name="gamma",
shape=(last_dim,),
initializer=self.gamma_initializer,
trainable=True,
)
self.beta = self.add_weight(
name="beta",
shape=(last_dim,),
initializer=self.beta_initializer,
trainable=True,
)
self.built = True
@tf.custom_gradient
def layernorm(self, inp: tf.Tensor):
"""Prep fwd+bwd non-fp8 layernorm."""
gamma = self.gamma.value()
ln_out, mu, rsigma = tex.layernorm_fwd(
inp, gamma, self.beta.value(), self.epsilon, self.stream
)
def grad_fn(upstream, variables=None):
# pylint: disable=unused-argument
dxmat, dgamma, dbeta = tex.layernorm_bwd(
upstream, inp, mu, rsigma, gamma, self.stream
)
grad_inputs = [tf.reshape(dxmat, inp.shape)]
grad_vars = [dgamma, dbeta]
return grad_inputs, grad_vars
return ln_out, grad_fn
def call(self, inputs):
"""LayerNorm FWD"""
inputmat = tf.reshape(inputs, shape=(-1, inputs.shape[-1]))
outputmat = self.layernorm(inputmat)
outputs = tf.reshape(outputmat, shape=inputs.shape)
return outputs
def get_config(self):
"""Returns the config of the layer."""
config = super().get_config()
config.update(
{
"epsilon": self.epsilon,
"gamma_initializer": initializers.serialize(
self.gamma_initializer),
"beta_initializer": initializers.serialize(
self.beta_initializer),
}
        )
        return config
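# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of the LayerNorm layer defined above; the shape, dtype and
# helper name below are arbitrary assumptions, and running it requires the same
# transformer_engine_tensorflow extension environment as the rest of this file.
def _layernorm_usage_sketch():
    """Hypothetical helper: normalize a random [batch, seq, hidden] tensor."""
    layer = LayerNorm(epsilon=1e-3)
    x = tf.random.uniform((4, 16, 64), dtype=tf.float32)
    # Output has the same shape as `x`, normalized over the last dimension.
    return layer(x)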
class LayerNormDense(TransformerEngineBaseModule, layers.Layer):
"""
Applies layer normalization followed by linear transformation to the
incoming data.
Parameters
----------
units : int
size of each output sample.
epsilon : float, default = 1e-3
a value added to the denominator of layer normalization for numerical
stability.
use_bias : bool, default = `True`
if set to `False`, the layer will not learn an additive bias.
gamma_initializer: Callable, default = `None`
used for initializing LayerNorm gamma in the following way:
`gamma_initializer(weight)`. When set to `None`, defaults to `ones`.
beta_initializer: Callable, default = `None`
used for initializing LayerNorm beta in the following way:
`beta_initializer(weight)`. When set to `None`, defaults to `zeros`.
kernel_initializer : Callable, default = `None`
used for initializing GEMM weights in the following way:
`kernel_initializer(weight)`. When set to `None`, defaults to
`tf.keras.initializers.RandomNormal(mean=0.0, std=0.023)`.
bias_initializer : Callable, default = `None`
used for initializing GEMM bias in the following way:
`bias_initializer(weight)`. When set to `None`, defaults to `zeros`.
return_layernorm_output : bool, default = `False`
if set to `True`, output of layernorm is returned from the forward
together with the output of the linear transformation.
Example use case: residual connection for transformer module is taken post
layernorm.
Parallelism parameters
----------------------
skip_weight_param_allocation: bool, default = `False`
                                 if set to `True`, the weight parameter is not allocated and must be
                                 passed as the keyword argument `kernel` during the forward pass.
Optimization parameters
-----------------------
return_bias : bool, default = `False`
when set to `True`, this module will not apply the additive bias itself,
but instead return the bias value during the forward pass together with
the output of the linear transformation :math:`y = xW`. This is useful
when the bias addition can be fused to subsequent operations.
"""
def __init__(
self,
units,
epsilon=1e-3,
gamma_initializer: Union[Callable, str, None] = None,
beta_initializer: Union[Callable, str, None] = None,
return_layernorm_output=False,
use_bias=True,
return_bias=False,
kernel_initializer: Union[Callable, str, None] = None,
bias_initializer: Union[Callable, str, None] = None,
skip_weight_param_allocation=False,
**kwargs,
):
super().__init__(**kwargs)
self.units = units
self.epsilon = epsilon
self.gamma_initializer = get_init_method(
gamma_initializer, initializers.get("ones")
)
self.beta_initializer = get_init_method(
beta_initializer, initializers.get("zeros")
)
self.return_layernorm_output = return_layernorm_output
self.use_bias = use_bias
self.return_bias = return_bias
self.kernel_initializer = get_init_method(
kernel_initializer, initializers.RandomNormal(mean=0.0,
stddev=0.023)
)
self.bias_initializer = get_init_method(
bias_initializer, initializers.get("zeros")
)
self.skip_weight_param_allocation = skip_weight_param_allocation
def build(self, input_shape):
"""One-time allocation of the variables."""
input_shape = tf.TensorShape(input_shape)
last_dim = tf.compat.dimension_value(input_shape[-1])
if last_dim is None:
raise ValueError(
"The last dimension of the inputs to a Dense layer should be "
f"defined. Found None. Full input shape received: {input_shape}"
)
self.gamma = self.add_weight(
name="gamma",
shape=(last_dim,),
initializer=self.gamma_initializer,
trainable=True,
)
self.beta = self.add_weight(
name="beta",
shape=(last_dim,),
initializer=self.beta_initializer,
trainable=True,
)
self.kernel = None
self.bias = None
if not self.skip_weight_param_allocation:
self.kernel = self.add_weight(
name="kernel",
shape=(last_dim, self.units),
initializer=self.kernel_initializer,
trainable=True,
)
if self.use_bias or self.return_bias:
self.bias = self.add_weight(
name="bias",
shape=(self.units,),
initializer=self.bias_initializer,
trainable=True,
)
# fp8 related
self.fp8_weight_shapes.append((last_dim, self.units))
self.built = True
def _get_training_value(self, training=None):
if training is None:
training = backend.learning_phase()
if isinstance(training, int):
training = bool(training)
if not self.trainable:
# When the layer is not trainable, it overrides the value passed
# from model.
training = False
return training
def non_fp8_layernorm_matmul(
self,
inp: tf.Tensor,
gamma_var: tf.Variable,
beta_var: tf.Variable,
kernel_var: tf.Variable,
bias_var: Union[tf.Variable, None] = None,
):
"""Prep fwd+bwd non-fp8 layernorm followed by matmul."""
@tf.custom_gradient
def non_fp8_layernorm_matmul_func(x):
# We need to pass the EagerTensor instead of Variable when calling
# into the pybind functions. So, we use value() for the explicit
            # conversion.
kernel_val = kernel_var.value()
gamma_val = gamma_var.value()
beta_val = beta_var.value()
ln_out, mu, rsigma = tex.layernorm_fwd(
x, gamma_val, beta_val, self.epsilon, self.stream_id
)
bias = get_autocast_bias(
self.compute_dtype, bias_var, self.use_bias, use_fp8=False,
)
output_dtype = self._compute_dtype_object
outputs = matmul_wrapper(
ln_out,
kernel_val,
"fwd",
output_dtype,
self.stream_id,
self.use_bias,
bias,
)
def grad_fn(*upstream, variables=None):
grad_x = matmul_wrapper(
upstream[0], kernel_val, "bwd_input", output_dtype,
self.stream_id,
)
grad_weight = matmul_wrapper(
ln_out, upstream[0], "bwd_weight", output_dtype,
self.stream_id,
)
if self.use_bias:
grad_bias = tf.math.reduce_sum(upstream[0], axis=0)
if self.return_layernorm_output:
assert len(upstream) == 2
grad_x = grad_x + upstream[1]
dxmat, dgamma, dbeta = tex.layernorm_bwd(
grad_x, x, mu, rsigma, gamma_val, self.stream_id
)
grad_inputs = [dxmat]
grad_vars = []
for v in variables:
if v.name.endswith("gamma:0"):
grad_vars.append(dgamma)
elif v.name.endswith("bias:0") and self.use_bias:
grad_vars.append(grad_bias)
elif v.name.endswith("kernel:0"):
grad_vars.append(grad_weight)
elif v.name.endswith("beta:0"):
grad_vars.append(dbeta)
return grad_inputs, grad_vars
if self.return_layernorm_output:
return (outputs, ln_out), grad_fn
return outputs, grad_fn
return non_fp8_layernorm_matmul_func(inp)
def fp8_layernorm_matmul(
self,
inp: tf.Tensor,
gamma_var: tf.Variable,
beta_var: tf.Variable,
kernel_var: tf.Variable,
bias_var: Union[tf.Variable, None] = None,
):
"""Prep fwd+bwd fp8 layernorm followed by matmul."""
fp8_meta = self.fp8_meta
fp8_dtype_fwd, fp8_dtype_bwd, override_linear_precision = \
get_recipe_attrs(fp8_meta["recipe"])
@tf.custom_gradient
def fp8_layernorm_matmul_func(x):
# We need to pass the EagerTensor instead of Variable when calling
# into the pybind functions. So, we use value() for the explicit
            # conversion.
kernel_val = kernel_var.value()
gamma_val = gamma_var.value()
beta_val = beta_var.value()
if not self.return_layernorm_output:
ln_out, mu, rsigma = layernorm_fwd_fp8_wrapper(
x,
gamma_val,
beta_val,
self.epsilon,
fp8_meta,
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_fwd,
self.stream_id,
)
else:
ln_out_return, mu, rsigma = tex.layernorm_fwd(
x, gamma_val, beta_val, self.epsilon, self.stream_id
)
ln_out = cast_to_fp8_wrapper(
ln_out_return,
fp8_meta,
tex.FP8FwdTensors.GEMM1_INPUT,
True,
fp8_dtype_fwd,
self.stream_id,
)
bias = get_autocast_bias(
self.compute_dtype, bias_var, self.use_bias, use_fp8=True,
)
weight_fp8, weight_t_fp8 = fp8_cast_transpose_fused_wrapper(
kernel_val,
fp8_meta,
tex.FP8FwdTensors.GEMM1_WEIGHT,
True,
fp8_dtype_fwd,
self.stream_id,
)
output_dtype = self._compute_dtype_object
outputs = fp8_matmul_wrapper(
ln_out,
weight_t_fp8,
fp8_meta,
"fwd",
fp8_dtype_fwd,
fp8_dtype_fwd,
output_dtype,
_2X_ACC_FPROP,
self.stream_id,
self.use_bias,
bias,
)
def grad_fn(*upstream, variables=None):
self.pre_backward()
if self.use_bias:
(
grad_bias,
grad_fp8,
grad_t_fp8,
) = fp8_cast_transpose_bgrad_fused_wrapper(
upstream[0],
fp8_meta,
tex.FP8BwdTensors.GRAD_OUTPUT1,
False,
fp8_dtype_bwd,
self.stream_id,
)
else:
if not override_linear_precision.wgrad:
grad_fp8, grad_t_fp8 = fp8_cast_transpose_fused_wrapper(
upstream[0],
fp8_meta,
tex.FP8BwdTensors.GRAD_OUTPUT1,
False,
fp8_dtype_bwd,
self.stream_id,
)
else:
grad_fp8 = cast_to_fp8_wrapper(
upstream[0],
fp8_meta,
tex.FP8BwdTensors.GRAD_OUTPUT1,
False,
fp8_dtype_bwd,
self.stream_id,
)
grad_x = fp8_matmul_wrapper(
grad_fp8,
weight_fp8,
fp8_meta,
"bwd_input",
fp8_dtype_bwd,
fp8_dtype_fwd,
output_dtype,
_2X_ACC_DGRAD,
self.stream_id,
)
if not override_linear_precision.wgrad:
ln_out_t = tex.fp8_transpose(ln_out, fp8_dtype_fwd,
self.stream_id)
grad_weight = fp8_matmul_wrapper(
ln_out_t,
grad_t_fp8,
fp8_meta,
"bwd_weight",
fp8_dtype_fwd,
fp8_dtype_bwd,
output_dtype,
_2X_ACC_WGRAD,
self.stream_id,
)
else:
ln_out_c = cast_from_fp8_wrapper(
ln_out,
fp8_meta,
tex.FP8FwdTensors.GEMM1_INPUT,
True,
fp8_dtype_fwd,
TE_DType[x.dtype],
self.stream_id,
)
grad_weight = matmul_wrapper(
ln_out_c,
upstream[0],
"bwd_weight",
output_dtype,
self.stream_id,
)
if self.return_layernorm_output:
assert len(upstream) == 2
grad_x = grad_x + upstream[1]
dxmat, dgamma, dbeta = tex.layernorm_bwd(
grad_x, x, mu, rsigma, gamma_val, self.stream_id
)
grad_inputs = [dxmat]
grad_vars = []
for v in variables:
if v.name.endswith("gamma:0"):
grad_vars.append(dgamma)
elif v.name.endswith("bias:0") and self.use_bias:
grad_vars.append(grad_bias)
elif v.name.endswith("kernel:0"):
grad_vars.append(grad_weight)
elif v.name.endswith("beta:0"):
grad_vars.append(dbeta)
return grad_inputs, grad_vars
if self.return_layernorm_output:
return (outputs, ln_out_return), grad_fn
return outputs, grad_fn
return fp8_layernorm_matmul_func(inp)
def call(
self,
inputs,
kernel=None,
bias=None,
training=None,
):
"""
Apply layer normalization to the input followed by a linear
transformation.
Parameters
----------
inputs : tf.Tensor
Input tensor.
kernel : tf.Variable, default = None
            An optional weight tensor for the module. This argument is compulsory
            if the module is initialized with `skip_weight_param_allocation=True`.
        bias : tf.Variable, default = None
            An optional bias tensor for the module. This argument is compulsory if
            the module is initialized with `skip_weight_param_allocation=True` and
            one of `use_bias` or `return_bias` is `True`.
training : {True, False, None}, default = None
Whether this is in the training context.
"""
# self.pre_forward needs to be called outside the following branch,
        # since it has the side effect of setting self.fp8 when autocast is
        # detected.
training = self._get_training_value(training)
self.pre_forward(training)
kernel_var = (kernel if self.skip_weight_param_allocation else
self.kernel)
bias_var = bias if self.skip_weight_param_allocation else self.bias
if kernel_var is None:
raise ValueError("No valid kernel is provided")
inputmat = tf.reshape(inputs, shape=(-1, inputs.shape[-1]))
if self.fp8:
outputs = self.fp8_layernorm_matmul(
inputmat, self.gamma, self.beta, kernel_var, bias_var
)
else:
outputs = self.non_fp8_layernorm_matmul(
inputmat, self.gamma, self.beta, kernel_var, bias_var
)
if self.return_layernorm_output:
outputmat, ln_outputmat = outputs
else:
outputmat = outputs
outputs = tf.reshape(
outputmat, shape=(-1, *inputs.shape[1:-1], outputmat.shape[-1])
)
if self.return_bias:
if self.return_layernorm_output:
ln_outputs = tf.reshape(ln_outputmat, shape=inputs.shape)
return (outputs, bias_var, ln_outputs)
return outputs, bias_var
if self.return_layernorm_output:
ln_outputs = tf.reshape(ln_outputmat, shape=inputs.shape)
return (outputs, ln_outputs)
return outputs
def get_config(self):
"""Returns the config of the layer."""
config = super().get_config()
config.update(
{
"units": self.units,
"epsilon": self.epsilon,
"gamma_initializer": initializers.serialize(
self.gamma_initializer),
"beta_initializer": initializers.serialize(
self.beta_initializer),
"return_layernorm_output": self.return_layernorm_output,
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer),
"bias_initializer": initializers.serialize(
self.bias_initializer),
"skip_weight_param_allocation":
self.skip_weight_param_allocation,
}
        )
        return config
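# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of LayerNormDense with `return_layernorm_output=True`;
# shapes and the helper name are arbitrary assumptions, and running it requires
# the same CUDA / TE extension environment as the module itself.
def _layernorm_dense_usage_sketch():
    """Hypothetical helper: project [8, 32, 128] inputs to 256 features."""
    layer = LayerNormDense(units=256, return_layernorm_output=True)
    x = tf.random.uniform((8, 32, 128), dtype=tf.float32)
    out, ln_out = layer(x)  # out: [8, 32, 256], ln_out: [8, 32, 128]
    return out, ln_out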
class LayerNormMLP(TransformerEngineBaseModule, layers.Layer):
"""
Applies layer normalization on the input followed by the MLP module,
consisting of 2 successive linear transformations, separated by the GeLU
activation.
Parameters
----------
units : int
size of each input sample.
ffn_units : int
intermediate size to which input samples are projected.
epsilon : float, default = 1e-3
a value added to the denominator of layer normalization for numerical
stability.
gamma_initializer: Callable, default = `None`
used for initializing LayerNorm gamma in the following way:
`gamma_initializer(weight)`. When set to `None`, defaults to `ones`.
beta_initializer: Callable, default = `None`
used for initializing LayerNorm beta in the following way:
`beta_initializer(weight)`. When set to `None`, defaults to `zeros`.
use_bias : bool, default = `True`
if set to `False`, the FC2 layer will not learn an additive bias.
kernel_initializer: Callable, default = `None`
used for initializing FC1 weights in the following way:
`kernel_initializer(weight)`. When set to `None`, defaults to
`tf.keras.initializers.RandomNormal(mean=0.0, std=0.023)`.
ffn_kernel_initializer: Callable, default = `None`
used for initializing FC2 weights in the following way:
`ffn_kernel_initializer(weight)`. When set to `None`, defaults to
`tf.keras.initializers.RandomNormal(mean=0.0, std=0.023)`.
return_layernorm_output : bool, default = `False`
if set to `True`, output of layernorm is returned from the forward
together with the output of the linear transformation.
Example use case: residual connection for transformer module is taken post
layernorm.
bias_initializer: Callable, default = `None`
used for initializing FC1 and FC2 bias in the following way:
`bias_initializer(weight)`. When set to `None`, defaults to `zeros`.
Optimization parameters
-----------------------
return_bias : bool, default = `False`
when set to `True`, this module will not apply the additive bias itself,
but instead return the bias value during the forward pass together with
the output of the linear transformation :math:`y = xW`. This is useful
when the bias addition can be fused to subsequent operations.
"""
def __init__(
self,
units: int,
ffn_units: int,
epsilon: float = 1e-3,
gamma_initializer: Union[Callable, str, None] = None,
beta_initializer: Union[Callable, str, None] = None,
return_layernorm_output: bool = False,
use_bias: bool = True,
return_bias: bool = False,
kernel_initializer: Union[Callable, str, None] = None,
ffn_kernel_initializer: Union[Callable, str, None] = None,
bias_initializer: Union[Callable, str, None] = None,
**kwargs,
):
super().__init__(**kwargs)
self.fc1_units = units
self.fc2_units = ffn_units
self.epsilon = epsilon
self.gamma_initializer = get_init_method(
gamma_initializer, initializers.get("ones")
)
self.beta_initializer = get_init_method(
beta_initializer, initializers.get("zeros")
)
self.return_layernorm_output = return_layernorm_output
self.use_bias = use_bias
self.return_bias = return_bias
self.kernel1_initializer = get_init_method(
kernel_initializer, initializers.RandomNormal(mean=0.0,
stddev=0.023)
)
self.kernel2_initializer = get_init_method(
ffn_kernel_initializer, initializers.RandomNormal(mean=0.0,
stddev=0.023)
)
self.bias_initializer = get_init_method(
bias_initializer, initializers.get("zeros")
)
def build(self, input_shape):
"""One-time allocation of the variables."""
input_shape = tf.TensorShape(input_shape)
last_dim = tf.compat.dimension_value(input_shape[-1])
if last_dim is None:
raise ValueError(
"The last dimension of the inputs to a Dense layer should be "
f"defined. Found None. Full input shape received: {input_shape}"
)
self.gamma = self.add_weight(
name="gamma",
shape=(last_dim,),
initializer=self.gamma_initializer,
trainable=True,
)
self.beta = self.add_weight(
name="beta",
shape=(last_dim,),
initializer=self.beta_initializer,
trainable=True,
)
self.fc1_kernel = self.add_weight(
name="fc1_kernel",
shape=(last_dim, self.fc1_units),
initializer=self.kernel1_initializer,
trainable=True,
)
self.fc1_bias = self.add_weight(
name="fc1_bias",
shape=(self.fc1_units,),
initializer=self.bias_initializer,
trainable=True,
)
# fp8 related
self.fp8_weight_shapes.append((last_dim, self.fc1_units))
self.fc2_kernel = self.add_weight(
name="fc2_kernel",
shape=(self.fc1_units, self.fc2_units),
initializer=self.kernel2_initializer,
trainable=True,
)
self.fc2_bias = None
if self.use_bias or self.return_bias:
self.fc2_bias = self.add_weight(
name="fc2_bias",
shape=(self.fc2_units,),
initializer=self.bias_initializer,
trainable=True,
)
# fp8 related
self.fp8_weight_shapes.append((self.fc1_units, self.fc2_units))
self.built = True
def _get_training_value(self, training=None):
if training is None:
training = backend.learning_phase()
if isinstance(training, int):
training = bool(training)
if not self.trainable:
            # When the layer is not trainable, it overrides the value passed
            # from the model.
training = False
return training
def non_fp8_layernorm_mlp(
self,
inp: tf.Tensor,
gamma_var: tf.Variable,
beta_var: tf.Variable,
fc1_kernel_var: tf.Variable,
fc1_bias_var: tf.Variable,
fc2_kernel_var: tf.Variable,
fc2_bias_var: Union[tf.Variable, None] = None,
):
"""Prep fwd+bwd non-fp8 layernorm followed by mlp."""
@tf.custom_gradient
def non_fp8_layernorm_mlp_func(x):
# We need to pass the EagerTensor instead of Variable when calling
# into the pybind functions. So, we use value() for the explicit
            # conversion.
fc1_kernel_val = fc1_kernel_var.value()
fc2_kernel_val = fc2_kernel_var.value()
gamma_val = gamma_var.value()
beta_val = beta_var.value()
ln_out, mu, rsigma = tex.layernorm_fwd(
x, gamma_val, beta_val, self.epsilon, self.stream_id
)
fc1_bias = get_autocast_bias(
self.compute_dtype, fc1_bias_var, use_bias=True, use_fp8=False,
)
fc2_bias = get_autocast_bias(
self.compute_dtype, fc2_bias_var, self.use_bias, use_fp8=False,
)
output_dtype = self._compute_dtype_object
# TODO(kaixih): Ideally, we should set gelu=True to fuse the gelu in
# cuBlasLt calls. However, it seems it is slower than the unfused
# version. Fix this when cuBlasLt improves the issue.
fc1_out = matmul_wrapper(
ln_out,
fc1_kernel_val,
"fc1_fwd",
output_dtype,
self.stream_id,
use_bias=True,
bias=fc1_bias,
)
gelu_out = tex.te_gelu(
fc1_out, None, TE_DType[output_dtype], None, None, 0,
self.stream_id,
)
fc2_out = matmul_wrapper(
gelu_out,
fc2_kernel_val,
"fc2_fwd",
output_dtype,
self.stream_id,
use_bias=self.use_bias,
bias=fc2_bias,
)
def grad_fn(*upstream, variables=None):
fc2_dgrad = matmul_wrapper(
upstream[0],
fc2_kernel_val,
"fc2_bwd_input",
output_dtype,
self.stream_id,
grad=True,
gelu=True,
gelu_input=fc1_out,
)
fc2_wgrad = matmul_wrapper(
gelu_out, upstream[0], "bwd_weight", output_dtype,
self.stream_id,
)
if self.use_bias:
fc2_bias_grad = tf.math.reduce_sum(upstream[0], axis=0)
dgelu = fc2_dgrad
fc1_dgrad = matmul_wrapper(
dgelu, fc1_kernel_val, "fc1_bwd_input", output_dtype,
self.stream_id,
)
fc1_wgrad = matmul_wrapper(
ln_out, dgelu, "bwd_weight", output_dtype, self.stream_id
)
fc1_bias_grad = tf.math.reduce_sum(dgelu, axis=0)
d_ln_out = fc1_dgrad
if self.return_layernorm_output:
assert len(upstream) == 2
d_ln_out = d_ln_out + upstream[1]
dxmat, dgamma, dbeta = tex.layernorm_bwd(
d_ln_out, x, mu, rsigma, gamma_val, self.stream_id
)
grad_inputs = [dxmat]
grad_vars = []
for v in variables:
if v.name.endswith("gamma:0"):
grad_vars.append(dgamma)
elif v.name.endswith("fc1_kernel:0"):
grad_vars.append(fc1_wgrad)
elif v.name.endswith("fc1_bias:0"):
grad_vars.append(fc1_bias_grad)
elif v.name.endswith("fc2_kernel:0"):
grad_vars.append(fc2_wgrad)
elif v.name.endswith("fc2_bias:0") and self.use_bias:
grad_vars.append(fc2_bias_grad)
elif v.name.endswith("beta:0"):
grad_vars.append(dbeta)
return grad_inputs, grad_vars
if self.return_layernorm_output:
return (fc2_out, ln_out), grad_fn
return fc2_out, grad_fn
return non_fp8_layernorm_mlp_func(inp)
def fp8_layernorm_mlp(
self,
inp: tf.Tensor,
gamma_var: tf.Variable,
beta_var: tf.Variable,
fc1_kernel_var: tf.Variable,
fc1_bias_var: tf.Variable,
fc2_kernel_var: tf.Variable,
fc2_bias_var: Union[tf.Variable, None] = None,
):
"""Prep fwd+bwd fp8 layernorm followed by mlp."""
fp8_meta = self.fp8_meta
fp8_dtype_fwd, fp8_dtype_bwd, override_linear_precision = \
get_recipe_attrs(fp8_meta["recipe"])
@tf.custom_gradient
def fp8_layernorm_mlp_func(x):
# We need to pass the EagerTensor instead of Variable when calling
# into the pybind functions. So, we use value() for the explicit
            # conversion.
fc1_kernel_val = fc1_kernel_var.value()
fc2_kernel_val = fc2_kernel_var.value()
gamma_val = gamma_var.value()
beta_val = beta_var.value()
if not self.return_layernorm_output:
ln_out, mu, rsigma = layernorm_fwd_fp8_wrapper(
x,
gamma_val,
beta_val,
self.epsilon,
fp8_meta,
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_fwd,
self.stream_id,
)
else:
ln_out_return, mu, rsigma = tex.layernorm_fwd(
x, gamma_val, beta_val, self.epsilon, self.stream_id
)
ln_out = cast_to_fp8_wrapper(
ln_out_return,
fp8_meta,
tex.FP8FwdTensors.GEMM1_INPUT,
True,
fp8_dtype_fwd,
self.stream_id,
)
fc1_bias = get_autocast_bias(
self.compute_dtype, fc1_bias_var, use_bias=True, use_fp8=True,
)
fc2_bias = get_autocast_bias(
self.compute_dtype, fc2_bias_var, self.use_bias, use_fp8=True,
)
fc1_weight_fp8, fc1_weight_t_fp8 = fp8_cast_transpose_fused_wrapper(
fc1_kernel_val,
fp8_meta,
tex.FP8FwdTensors.GEMM1_WEIGHT,
True,
fp8_dtype_fwd,
self.stream_id,
)
fc2_weight_fp8, fc2_weight_t_fp8 = fp8_cast_transpose_fused_wrapper(
fc2_kernel_val,
fp8_meta,
tex.FP8FwdTensors.GEMM2_WEIGHT,
True,
fp8_dtype_fwd,
self.stream_id,
)
output_dtype = self._compute_dtype_object
fc1_out = fp8_matmul_wrapper(
ln_out,
fc1_weight_t_fp8,
fp8_meta,
"fc1_fwd",
fp8_dtype_fwd,
fp8_dtype_fwd,
output_dtype,
_2X_ACC_FPROP,
self.stream_id,
use_bias=True,
bias=fc1_bias,
)
gelu_out = fp8_gelu_wrapper(
fc1_out,
fp8_meta,
tex.FP8FwdTensors.GEMM2_INPUT,
True,
fp8_dtype_fwd,
self.stream_id,
)
fc2_out = fp8_matmul_wrapper(
gelu_out,
fc2_weight_t_fp8,
fp8_meta,
"fc2_fwd",
fp8_dtype_fwd,
fp8_dtype_fwd,
output_dtype,
_2X_ACC_FPROP,
self.stream_id,
use_bias=self.use_bias,
bias=fc2_bias,
)
def grad_fn(*upstream, variables=None):
self.pre_backward()
if self.use_bias:
(
fc2_bias_grad,
grad_fp8,
grad_t_fp8,
) = fp8_cast_transpose_bgrad_fused_wrapper(
upstream[0],
fp8_meta,
tex.FP8BwdTensors.GRAD_OUTPUT1,
False,
fp8_dtype_bwd,
self.stream_id,
)
else:
if not override_linear_precision.wgrad:
grad_fp8, grad_t_fp8 = fp8_cast_transpose_fused_wrapper(
upstream[0],
fp8_meta,
tex.FP8BwdTensors.GRAD_OUTPUT1,
False,
fp8_dtype_bwd,
self.stream_id,
)
else:
grad_fp8 = cast_to_fp8_wrapper(
upstream[0],
fp8_meta,
tex.FP8BwdTensors.GRAD_OUTPUT1,
False,
fp8_dtype_bwd,
self.stream_id,
)
fc2_dgrad = fp8_matmul_wrapper(
grad_fp8,
fc2_weight_fp8,
fp8_meta,
"fc2_bwd_input",
fp8_dtype_bwd,
fp8_dtype_fwd,
output_dtype,
_2X_ACC_DGRAD,
self.stream_id,
)
if not override_linear_precision.wgrad:
gelu_out_t = tex.fp8_transpose(
gelu_out, fp8_dtype_fwd, self.stream_id
)
fc2_wgrad = fp8_matmul_wrapper(
gelu_out_t,
grad_t_fp8,
fp8_meta,
"fc2_bwd_weight",
fp8_dtype_fwd,
fp8_dtype_bwd,
output_dtype,
_2X_ACC_WGRAD,
self.stream_id,
)
(
fc1_bias_grad,
dgelu,
dgelu_t,
) = fp8_cast_transpose_bgrad_dgelu_fused_wrapper(
fc2_dgrad,
fc1_out,
fp8_meta,
tex.FP8BwdTensors.GRAD_OUTPUT2,
False,
fp8_dtype_bwd,
self.stream_id,
)
else:
gelu_out_c = cast_from_fp8_wrapper(
gelu_out,
fp8_meta,
tex.FP8FwdTensors.GEMM2_INPUT,
True,
fp8_dtype_fwd,
TE_DType[x.dtype],
self.stream_id,
)
fc2_wgrad = matmul_wrapper(
gelu_out_c,
upstream[0],
"bwd_weight",
output_dtype,
self.stream_id,
)
                    # Unlike the PyTorch implementation, fc1_out already has
                    # the bias added, so we don't need to pass fc1_bias here.
fc1_bias_grad, dgelu_no_fp8 = bgrad_dgelu_fused(fc2_dgrad,
fc1_out)
dgelu = cast_to_fp8_wrapper(
dgelu_no_fp8,
fp8_meta,
tex.FP8BwdTensors.GRAD_OUTPUT2,
False,
fp8_dtype_bwd,
self.stream_id,
)
dgelu_t = None
fc1_dgrad = fp8_matmul_wrapper(
dgelu,
fc1_weight_fp8,
fp8_meta,
"fc1_bwd_input",
fp8_dtype_bwd,
fp8_dtype_fwd,
output_dtype,
_2X_ACC_DGRAD,
self.stream_id,
)
if not override_linear_precision.wgrad:
ln_out_t = tex.fp8_transpose(ln_out, fp8_dtype_fwd,
self.stream_id)
fc1_wgrad = fp8_matmul_wrapper(
ln_out_t,
dgelu_t,
fp8_meta,
"fc1_bwd_weight",
fp8_dtype_fwd,
fp8_dtype_bwd,
output_dtype,
_2X_ACC_WGRAD,
self.stream_id,
)
else:
ln_out_c = cast_from_fp8_wrapper(
ln_out,
fp8_meta,
tex.FP8FwdTensors.GEMM1_INPUT,
True,
fp8_dtype_fwd,
TE_DType[x.dtype],
self.stream_id,
)
fc1_wgrad = matmul_wrapper(
ln_out_c,
dgelu_no_fp8,
"bwd_weight",
output_dtype,
self.stream_id,
)
d_ln_out = fc1_dgrad
if self.return_layernorm_output:
assert len(upstream) == 2
d_ln_out = d_ln_out + upstream[1]
dxmat, dgamma, dbeta = tex.layernorm_bwd(
d_ln_out, x, mu, rsigma, gamma_val, self.stream_id
)
grad_inputs = [dxmat]
grad_vars = []
for v in variables:
if v.name.endswith("gamma:0"):
grad_vars.append(dgamma)
elif v.name.endswith("fc1_kernel:0"):
grad_vars.append(fc1_wgrad)
elif v.name.endswith("fc1_bias:0"):
grad_vars.append(fc1_bias_grad)
elif v.name.endswith("fc2_kernel:0"):
grad_vars.append(fc2_wgrad)
elif v.name.endswith("fc2_bias:0") and self.use_bias:
grad_vars.append(fc2_bias_grad)
elif v.name.endswith("beta:0"):
grad_vars.append(dbeta)
return grad_inputs, grad_vars
if self.return_layernorm_output:
return (fc2_out, ln_out_return), grad_fn
return fc2_out, grad_fn
return fp8_layernorm_mlp_func(inp)
def call(
self,
inputs,
training=None,
):
"""
Apply layer normalization to the input followed by a feedforward network
(MLP Block).
Parameters
----------
inputs : tf.Tensor
Input tensor.
training : {True, False, None}, default = None
Whether this is in the training context.
"""
# self.pre_forward needs to be called outside the following branch,
        # since it has the side effect of setting self.fp8 when autocast is
        # detected.
training = self._get_training_value(training)
self.pre_forward(training, num_gemms=2)
inputmat = tf.reshape(inputs, shape=(-1, inputs.shape[-1]))
if self.fp8:
outputs = self.fp8_layernorm_mlp(
inputmat,
self.gamma,
self.beta,
self.fc1_kernel,
self.fc1_bias,
self.fc2_kernel,
self.fc2_bias,
)
else:
outputs = self.non_fp8_layernorm_mlp(
inputmat,
self.gamma,
self.beta,
self.fc1_kernel,
self.fc1_bias,
self.fc2_kernel,
self.fc2_bias,
)
if self.return_layernorm_output:
outputmat, ln_outputmat = outputs
else:
outputmat = outputs
outputs = tf.reshape(
outputmat, shape=(-1, *inputs.shape[1:-1], outputmat.shape[-1])
)
if self.return_bias:
if self.return_layernorm_output:
ln_outputs = tf.reshape(ln_outputmat, shape=inputs.shape)
return (outputs, self.fc2_bias, ln_outputs)
return outputs, self.fc2_bias
if self.return_layernorm_output:
ln_outputs = tf.reshape(ln_outputmat, shape=inputs.shape)
return (outputs, ln_outputs)
return outputs
def get_config(self):
"""Returns the config of the layer."""
config = super().get_config()
config.update(
{
"hidden_size": self.fc1_units,
"ffn_hidden_size": self.fc2_units,
"epsilon": self.epsilon,
"gamma_init_method": initializers.serialize(
self.gamma_initializer),
"beta_init_method": initializers.serialize(
self.beta_initializer),
"return_layernorm_output": self.return_layernorm_output,
"use_bias": self.use_bias,
"init_method": initializers.serialize(self.kernel1_initializer),
"output_layer_init_method": initializers.serialize(
self.kernel2_initializer
),
"bias_init_method": initializers.serialize(
self.bias_initializer),
}
        )
        return config
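# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of LayerNormMLP as implemented above: FC1 projects the
# layer-normalized input from its last dimension to `units`, and FC2 projects
# from `units` to `ffn_units`. Shapes and the helper name are assumptions.
def _layernorm_mlp_usage_sketch():
    """Hypothetical helper: run the MLP block on a random [4, 16, 64] input."""
    layer = LayerNormMLP(units=256, ffn_units=64)
    x = tf.random.uniform((4, 16, 64), dtype=tf.float32)
    return layer(x)  # shape [4, 16, 64]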
| TransformerEngine-main | transformer_engine/tensorflow/module.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Fused scaled masked softmax functions"""
from typing import Callable
import os
import transformer_engine_tensorflow as tex
import tensorflow as tf
from .module import get_stream_id
THREADS_PER_WARP = 32
THREADS_PER_BLOCK = 128
_default_causal_mask = {}
def _get_default_causal_mask(sq: int) -> tf.Tensor:
"""Return the causal upper triangular mask for softmax input"""
if sq not in _default_causal_mask:
# In TF, the mask specifies 1 to keep and 0 to mask. In "causal" mask
        # mode, we compute the softmax over the lower triangular part.
mask_operator = tf.linalg.LinearOperatorLowerTriangular(
tf.ones((sq, sq), dtype=tf.bool))
mask = mask_operator.to_dense()
_default_causal_mask[sq] = mask
return _default_causal_mask[sq]
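# --- Illustrative example (not part of the original module) ---
# For sq=3 the cached causal mask keeps the lower triangle (True = keep in the
# TF convention):
#   [[ True, False, False],
#    [ True,  True, False],
#    [ True,  True,  True]]
def _causal_mask_example():
    """Hypothetical helper returning the 3x3 causal mask shown above."""
    return _get_default_causal_mask(3)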
class FusedScaleMaskSoftmax(tf.keras.Model):
"""
fused operation: scaling + mask + softmax
Arguments:
attn_mask_type: attention mask type (pad or causal)
mask_func: mask function to be applied.
      softmax_in_fp32: if true, softmax is performed at fp32 precision.
scale: scaling factor used in input tensor scaling.
"""
def __init__(
self,
attn_mask_type: str,
mask_func: Callable,
softmax_in_fp32: bool,
scale: float,
) -> None:
super().__init__()
self.attn_mask_type = attn_mask_type
self.scaled_masked_softmax_fusion = bool(
int(os.getenv("NVTE_MASKED_SOFTMAX_FUSION", "1"))
)
self.mask_func = mask_func
self.softmax_in_fp32 = softmax_in_fp32
self.scale = scale
self.stream = get_stream_id()
assert (
self.scale is None or softmax_in_fp32
), "softmax should be in fp32 when scaled"
def __call__(self, inp: tf.Tensor, mask: tf.Tensor) -> tf.Tensor:
"""FusedScaleMaskSoftmax fprop"""
# [b, np, sq, sk]
assert len(inp.shape) == 4
self.input_in_fp16 = inp.dtype == tf.float16
self.input_in_bf16 = inp.dtype == tf.bfloat16
self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
if self.is_kernel_available(*inp.shape):
return self.forward_fused_softmax(inp, mask)
return self.forward_tf_softmax(inp, mask)
def is_kernel_available(self, b: int, np: int, sq: int, sk: int) -> bool:
"""Check FusedScaleMaskSoftmax kernel availability based on size"""
attn_batches = b * np
if (
            self.scaled_masked_softmax_fusion  # user wants to fuse
            and self.input_in_float16  # input must be fp16 or bf16
            and 16 < sk <= 4096  # sk must be in (16, 4096]
            and sq % 4 == 0  # sq must be divisible by 4
            and attn_batches % 4 == 0  # b * np must be divisible by 4
):
if 0 <= sk <= 4096:
batch_per_block = self.get_batch_per_block(int(sk))
if self.attn_mask_type == "causal":
if attn_batches % batch_per_block == 0:
return True
else:
if sq % batch_per_block == 0:
return True
return False
@tf.custom_gradient
def scaled_masked_softmax(self, x: tf.Tensor, mask: tf.Tensor,
scale: float):
"""Scaled masked softmax."""
y = tex.scaled_masked_softmax_forward(x, mask, scale, self.stream)
def grad_fn(upstream):
dx = tex.scaled_masked_softmax_backward(upstream, y, scale,
self.stream)
return dx, None, None
return y, grad_fn
@tf.custom_gradient
def scaled_softmax(self, x: tf.Tensor, scale: float):
"""Scaled softmax."""
y = tex.scaled_softmax_forward(x, scale, self.stream)
def grad_fn(upstream):
dx = tex.scaled_softmax_backward(upstream, y, scale, self.stream)
return dx, None
return y, grad_fn
@tf.custom_gradient
def scaled_upper_triang_masked_softmax(self, x: tf.Tensor, scale: float):
"""Scaled upper triangular masked softmax."""
y = tex.scaled_upper_triang_masked_softmax_forward(x, scale,
self.stream)
def grad_fn(upstream):
dx = tex.scaled_upper_triang_masked_softmax_backward(
upstream, y, scale, self.stream
)
return dx, None
return y, grad_fn
def forward_fused_softmax(
self,
inp: tf.Tensor,
mask: tf.Tensor,
) -> tf.Tensor:
"""Fused masked softmax kernel"""
sq, sk = inp.shape[2], inp.shape[3]
scale = self.scale if self.scale is not None else 1.0
if self.attn_mask_type == "causal":
assert sq == sk, "causal mask is only for self attention"
# input is 3D tensor (attn_batches, sq, sk)
inp = tf.reshape(inp, (-1, sq, sk))
probs = self.scaled_upper_triang_masked_softmax(inp, scale)
return tf.reshape(probs, inp.shape)
# input is 4D tensor (b, np, sq, sk)
if mask is not None:
            # The mask convention in TE kernels differs from TF: in TE, the
            # mask specifies 1 to mask out and 0 to keep.
mask = tf.math.logical_not(mask)
ndims = len(mask.shape)
assert ndims <= 4, "mask ndims should be <= 4"
if len(mask.shape) < 4:
# Broadcasting the first dims of mask to match the input ndims.
broadcast_shape = [1] * (4 - ndims) + mask.shape[:]
mask = tf.reshape(mask, broadcast_shape)
return self.scaled_masked_softmax(inp, mask, scale)
return self.scaled_softmax(inp, scale)
def forward_tf_softmax(self, inp: tf.Tensor, mask: tf.Tensor) -> tf.Tensor:
"""Framework softmax"""
if self.input_in_float16 and self.softmax_in_fp32:
inp = tf.cast(inp, tf.float32)
if self.scale is not None:
inp = inp * self.scale
if self.attn_mask_type == "causal":
mask = _get_default_causal_mask(inp.shape[2])
mask_output = self.mask_func(inp, mask) if mask is not None else inp
probs = tf.nn.softmax(mask_output, axis=-1)
if self.input_in_float16 and self.softmax_in_fp32:
if self.input_in_fp16:
probs = tf.cast(probs, tf.half)
else:
probs = tf.cast(probs, tf.bfloat16)
return probs
@staticmethod
def get_batch_per_block(key_seq_len: int) -> int:
"""Softmax utility"""
pow2 = 1 << (key_seq_len - 1).bit_length()
warp_size = pow2 if pow2 < THREADS_PER_WARP else THREADS_PER_WARP
batches_per_warp = 2 if pow2 <= 128 else 1
        warps_per_block = THREADS_PER_BLOCK // warp_size
batches_per_block = warps_per_block * batches_per_warp
return batches_per_block
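# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of FusedScaleMaskSoftmax with a causal mask. The lambda
# mask_func below is an assumption for illustration (it fills masked-out
# positions with a large negative value); shapes and dtypes are arbitrary.
def _fused_softmax_usage_sketch():
    """Hypothetical helper: causal softmax over random [b, np, sq, sk] scores."""
    softmax = FusedScaleMaskSoftmax(
        attn_mask_type="causal",
        mask_func=lambda scores, mask: tf.where(
            mask, scores, tf.constant(-1e9, dtype=scores.dtype)),
        softmax_in_fp32=True,
        scale=None,
    )
    scores = tf.random.uniform((2, 4, 16, 16), dtype=tf.float16)
    return softmax(scores, None)  # probabilities, same shape as `scores`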
| TransformerEngine-main | transformer_engine/tensorflow/softmax.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Attention."""
import os
import warnings
import math
from importlib.metadata import version
from contextlib import nullcontext
from typing import Any, Callable, Optional, Tuple, Union, Dict
from pkg_resources import packaging
import torch
import transformer_engine_extensions as tex
from transformer_engine.pytorch.cpp_extensions.fused_attn import (
fused_attn_fwd_qkvpacked,
fused_attn_bwd_qkvpacked,
fused_attn_fwd_kvpacked,
fused_attn_bwd_kvpacked,
QKVLayout,
AttnBiasType,
AttnMaskType,
FusedAttnBackend,
)
from transformer_engine.pytorch.module import LayerNormLinear, Linear
from transformer_engine.pytorch.utils import (
divide,
attention_mask_func,
split_tensor_along_dim,
get_device_compute_capability,
get_default_init_method,
)
from transformer_engine.pytorch.constants import (
AttnMaskTypes,
AttnTypes,
AttnBiasTypes,
dist_group_type,
TE_DType,
)
from transformer_engine.pytorch.softmax import FusedScaleMaskSoftmax
from transformer_engine.pytorch.distributed import (
get_distributed_world_size,
checkpoint,
)
from transformer_engine.pytorch.export import is_in_onnx_export_mode
_flash_attn_version = packaging.version.Version(version("flash-attn"))
_flash_attn_version_required = packaging.version.Version("1.0.6")
_flash_attn_2_available = _flash_attn_version >= packaging.version.Version("2")
if _flash_attn_2_available:
from flash_attn.flash_attn_interface import flash_attn_varlen_func as flash_attn_forward_func # pylint: disable=no-name-in-module
from flash_attn_2_cuda import varlen_bwd as flash_attn_cuda_bwd # pylint: disable=no-name-in-module
else:
from flash_attn.flash_attn_interface import flash_attn_unpadded_func as flash_attn_forward_func # pylint: disable=no-name-in-module,ungrouped-imports
__all__ = ["DotProductAttention", "MultiheadAttention"]
def _rotate_half(x: torch.Tensor) -> torch.Tensor:
"""
change sign so the last dimension becomes [-odd, +even]
"""
x = x.view(x.shape[:-1] + torch.Size((2, x.shape[-1] // 2)))
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(t: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor:
"""
input tensor t is of shape [seq_length, ..., dim]
    rotary positional embedding tensor `freqs` is of shape [seq_length, ..., dim]
"""
rot_dim = freqs.shape[-1]
    # ideally t_pass is empty so the rotary embedding is applied to the entire tensor t
t, t_pass = t[..., :rot_dim], t[..., rot_dim:]
# first part is cosine component
# second part is sine component, need to change signs with _rotate_half method
t = (t * freqs.cos()) + (_rotate_half(t) * freqs.sin())
return torch.cat((t, t_pass), dim=-1)
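# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of apply_rotary_pos_emb: rotary embeddings are applied to
# the first `freqs.shape[-1]` channels of `t`, and the remaining channels pass
# through unchanged. Shapes and the helper name are arbitrary assumptions.
def _rope_usage_sketch():
    """Hypothetical helper: rotate the first 16 of 32 channels per head."""
    t = torch.randn(8, 2, 4, 32)        # [seq, batch, heads, dim]
    freqs = torch.randn(8, 1, 1, 16)    # broadcast over batch and heads
    return apply_rotary_pos_emb(t, freqs)  # same shape as `t`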
class _SplitLastDim(torch.autograd.Function):
""""""
@staticmethod
def forward(ctx,
mixed_x_layer: torch.Tensor,
num_parts: int
) -> Tuple[torch.Tensor, ...]:
return split_tensor_along_dim(mixed_x_layer, -1, num_parts)
@staticmethod
def backward(ctx,
*grad_outputs):
assert len(grad_outputs) > 0, "No gradients received for backprop!"
noop_ok = True
strides = grad_outputs[0].stride()
data_ptr = grad_outputs[0].storage().data_ptr()
shape = grad_outputs[0].shape
last_dim_size = grad_outputs[0].shape[-1]
for i, tensor in enumerate(grad_outputs):
if (tensor.stride() != strides or
tensor.shape != shape or
tensor.storage().data_ptr() != data_ptr or
tensor.storage_offset() != i * last_dim_size):
noop_ok = False
break
if noop_ok:
            ret = torch.Tensor().to(device=grad_outputs[0].device,
                                    dtype=grad_outputs[0].dtype)
new_shape = list(shape)
new_shape[-1] = new_shape[-1] * len(grad_outputs)
ret.set_(grad_outputs[0].storage(),
grad_outputs[0].storage_offset(),
new_shape,
grad_outputs[0].stride()
)
return ret, None
return torch.cat(grad_outputs, dim = -1), None
class _CombineQKV(torch.autograd.Function):
""""""
@staticmethod
def forward(ctx,
query_layer: torch.Tensor,
key_layer: torch.Tensor, # pylint: disable=unused-argument
value_layer: torch.Tensor, # pylint: disable=unused-argument
dim: int,
) -> torch.Tensor:
mixed_layer = torch.Tensor().to(device=query_layer.device,
dtype=query_layer.dtype)
new_shape = list(query_layer.shape)
new_shape[dim] = new_shape[dim] * 3
mixed_layer.set_(query_layer.untyped_storage(),
query_layer.storage_offset(),
new_shape,
query_layer.stride())
ctx.dim = dim
return mixed_layer
@staticmethod
def backward(ctx,
*grad_outputs,
) -> Tuple[torch.Tensor, ...]:
assert len(grad_outputs) > 0, "No gradients received for backprop!"
tensors = split_tensor_along_dim(grad_outputs[0], ctx.dim, 3)
return tensors[0], tensors[1], tensors[2], None
class _CombineKV(torch.autograd.Function):
""""""
@staticmethod
def forward(ctx,
key_layer: torch.Tensor,
value_layer: torch.Tensor, # pylint: disable=unused-argument
dim: int,
) -> torch.Tensor:
mixed_layer = torch.Tensor().to(device=key_layer.device,
dtype=key_layer.dtype)
new_shape = list(key_layer.shape)
new_shape[dim] = new_shape[dim] * 2
mixed_layer.set_(key_layer.untyped_storage(),
key_layer.storage_offset(),
new_shape,
key_layer.stride())
ctx.dim = dim
return mixed_layer
@staticmethod
def backward(ctx,
*grad_outputs,
) -> Tuple[torch.Tensor, ...]:
assert len(grad_outputs) > 0, "No gradients received for backprop!"
tensors = split_tensor_along_dim(grad_outputs[0], ctx.dim, 2)
return tensors[0], tensors[1], None
class UnfusedDotProductAttention(torch.nn.Module):
"""Parallel attention w/o QKV and Proj Gemms
BMM1 -> softmax + dropout -> BMM2
"""
def __init__(
self,
norm_factor: float,
attention_dropout: float = 0.0,
attention_dropout_ctx: Optional[Callable] = nullcontext,
layer_number: Optional[int] = None,
) -> None:
super().__init__()
self.norm_factor = norm_factor
self.attention_dropout_ctx = attention_dropout_ctx
self.layer_number = layer_number
self.scale_mask_softmax = FusedScaleMaskSoftmax(attention_mask_func)
# Dropout. Note that for a single iteration, this layer will generate
# different outputs on different number of parallel partitions but
# on average it should not be partition dependent.
self.attention_dropout = torch.nn.Dropout(attention_dropout)
# An FP16 training trick required for certain GPT-like models.
self.apply_qk_layer_scaling = (
bool(int(os.getenv("NVTE_APPLY_QK_LAYER_SCALING", "0"))) and layer_number is not None)
def forward(
self,
query_layer: torch.Tensor,
key_layer: torch.Tensor,
value_layer: torch.Tensor,
attn_mask_type: str = "causal",
attention_mask: Optional[torch.Tensor] = None,
core_attention_bias_type: str = "no_bias",
core_attention_bias: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""core attention fprop"""
assert (
attn_mask_type in AttnMaskTypes
), f"attn_mask_type {attn_mask_type} not supported"
batch_size, seqlen = query_layer.shape[1], query_layer.shape[0]
apply_qk_layer_scaling = self.apply_qk_layer_scaling and key_layer.dtype == torch.float16
# [b, np, sq, sk]
output_size = (
query_layer.size(1),
query_layer.size(2),
query_layer.size(0),
key_layer.size(0),
)
assert key_layer.shape == value_layer.shape, "Keys and values must have the same shape!"
if key_layer.shape[2] != query_layer.shape[2]:
assert (query_layer.shape[2]%key_layer.shape[2]==0
),"The number of attention heads must be divisible by the number of GQA groups!"
key_layer = key_layer.repeat_interleave(
int(query_layer.shape[2]/key_layer.shape[2]), dim = 2)
value_layer = value_layer.repeat_interleave(
int(query_layer.shape[2]/value_layer.shape[2]), dim = 2)
# [sq, b, np, hn] -> [sq, b * np, hn]
query_layer = query_layer.reshape(
output_size[2], output_size[0] * output_size[1], -1
)
# [sk, b, np, hn] -> [sk, b * np, hn]
key_layer = key_layer.reshape(output_size[3], output_size[0] * output_size[1], -1)
# preallocting result tensor: [b * np, sq, sk]
# WAR to set dtype to FP32 as ONNX lacks BF16 support for ConstantOfShape operator
is_bf16 = query_layer.dtype == torch.bfloat16
matmul_result = torch.empty(
output_size[0] * output_size[1],
output_size[2],
output_size[3],
dtype=torch.float32 if is_in_onnx_export_mode() and is_bf16 else query_layer.dtype,
device=torch.cuda.current_device(),
)
if is_in_onnx_export_mode() and is_bf16:
matmul_result = matmul_result.bfloat16()
scale = self.norm_factor
if apply_qk_layer_scaling:
scale *= self.layer_number
# Raw attention scores. [b * np, sq, sk]
if core_attention_bias_type == "no_bias":
matmul_result = torch.baddbmm(
matmul_result,
query_layer.transpose(0, 1), # [b * np, sq, hn]
key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
beta=0.0,
alpha=(1.0 / scale),
)
elif core_attention_bias_type == "pre_scale_bias":
assert core_attention_bias is not None, "core_attention_bias should not be None!"
            assert (core_attention_bias.shape == torch.Size([1, *output_size[1:]])
                ), "core_attention_bias must be in [1, h, sq, skv] shape!"
matmul_result = torch.bmm(
query_layer.transpose(0, 1), # [b * np, sq, hn]
key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
)
matmul_result = (matmul_result.view(
output_size[0], output_size[1], output_size[2], output_size[3])
+ core_attention_bias).view(-1, output_size[2], output_size[3])
matmul_result /= scale
elif core_attention_bias_type == "post_scale_bias":
assert core_attention_bias is not None, "core_attention_bias should not be None!"
assert (core_attention_bias.shape == torch.Size([1, *output_size[1:]])
), "core_attention_bias must be in [1, h, sq, skv] shape!"
matmul_result = torch.baddbmm(
matmul_result,
query_layer.transpose(0, 1), # [b * np, sq, hn]
key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk]
beta=0.0,
alpha=(1.0 / scale),
)
matmul_result = (matmul_result.view(
output_size[0], output_size[1], output_size[2], output_size[3])
+ core_attention_bias).view(-1, output_size[2], output_size[3])
# change view to [b, np, sq, sk]
attention_scores = matmul_result.view(*output_size)
# attention scores and attention mask [b, np, sq, sk]
softmax_scale = self.layer_number if apply_qk_layer_scaling else None
attention_probs = self.scale_mask_softmax(
attention_scores, attention_mask, attn_mask_type, softmax_scale)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
with self.attention_dropout_ctx():
attention_probs = self.attention_dropout(attention_probs)
# value_layer -> context layer.
# [sk, b, np, hn] --> [b, np, sq, hn]
output_size = (
value_layer.size(1),
value_layer.size(2),
query_layer.size(0),
value_layer.size(3),
)
# change view [sk, b * np, hn]
value_layer = value_layer.reshape(
value_layer.size(0), output_size[0] * output_size[1], -1
)
# change view [b * np, sq, sk]
attention_probs = attention_probs.view(
output_size[0] * output_size[1], output_size[2], -1
)
# matmul: [b * np, sq, hn]
context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))
# change view [b, np, sq, hn]
context_layer = context_layer.view(*output_size)
# [b, np, sq, hn] --> [sq, b, np, hn]
context_layer = context_layer.permute(2, 0, 1, 3).contiguous()
# [sq, b, np, hn] --> [sq, b, hp]
context_layer = context_layer.view(seqlen, batch_size, -1)
return context_layer
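# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of UnfusedDotProductAttention; it assumes a CUDA device is
# available and uses arbitrary shapes. Inputs are [sq, b, np, hn] tensors and
# the returned context layer is [sq, b, np * hn].
def _unfused_attention_usage_sketch():
    """Hypothetical helper: causal core attention on random FP16 tensors."""
    sq, b, heads, head_dim = 16, 2, 4, 64
    attn = UnfusedDotProductAttention(norm_factor=math.sqrt(head_dim))
    q = torch.randn(sq, b, heads, head_dim, dtype=torch.float16, device="cuda")
    k, v = torch.randn_like(q), torch.randn_like(q)
    return attn(q, k, v, attn_mask_type="causal")  # [16, 2, 256]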
class _PrepareQKVForFA(torch.autograd.Function):
"""This class converts QKV from interleaved (s, b, ...) layout
to separate contiguous q, k, v tensors in (b, s, ...) layout."""
@staticmethod
def forward(ctx,
query_layer: torch.Tensor,
key_layer: torch.Tensor,
value_layer: torch.Tensor
) -> torch.Tensor:
# All inputs received are non-contiguous tensors.
# The `query_layer` tensor is used to access the
# full memory region of the QKV tensor.
qkv = tex.fa_prepare_fwd(query_layer)
q, k, v = split_tensor_along_dim(qkv, 0, 3)
query_layer = torch.squeeze(q, 0)
key_layer = torch.squeeze(k, 0)
value_layer = torch.squeeze(v, 0)
return query_layer, key_layer, value_layer
@staticmethod
def backward(ctx,
dq: torch.Tensor,
dk: torch.Tensor,
dv: torch.Tensor
) -> Tuple[Union[torch.Tensor, None], ...]:
dqkv = tex.fa_prepare_bwd(dq, dk, dv)
dq, dk, dv = split_tensor_along_dim(dqkv, -1, 3)
return dq, dk, dv
def _check_qkv_layout(q, k, v):
data_ptr = q.untyped_storage().data_ptr()
check_ptrs = all(x.untyped_storage().data_ptr() == data_ptr for x in [q, k, v])
if not check_ptrs:
return False
stride = q.stride()
check_strides = all(stride == x.stride() for x in [q, k, v])
if not check_strides:
return False
shape = q.shape
check_shapes = all(shape == x.shape for x in [q, k, v])
if not check_shapes:
return False
last_dim_size = shape[-1]
check_offsets = all(i * last_dim_size == x.storage_offset()
for i, x in enumerate([q, k, v]))
if check_offsets:
return "sbh3d"
last_dims_size = shape[-1] * shape[-2]
check_offsets = all(i * last_dims_size == x.storage_offset()
for i, x in enumerate([q, k, v]))
if check_offsets:
return "sb3hd"
return "other"
def _check_kv_layout(k, v):
data_ptr = k.untyped_storage().data_ptr()
check_ptrs = all(x.untyped_storage().data_ptr() == data_ptr for x in [k, v])
if not check_ptrs:
return False
stride = k.stride()
check_strides = all(stride == x.stride() for x in [k, v])
if not check_strides:
return False
shape = k.shape
check_shapes = all(shape == x.shape for x in [k, v])
if not check_shapes:
return False
last_dim_size = shape[-1]
check_offsets = all(i * last_dim_size == x.storage_offset()
for i, x in enumerate([k, v]))
if check_offsets:
return "sbh2d"
last_dims_size = shape[-1] * shape[-2]
check_offsets = all(i * last_dims_size == x.storage_offset()
for i, x in enumerate([k, v]))
if check_offsets:
return "sb2hd"
return "other"
class FlashAttention(torch.nn.Module):
"""Dot product attention, using HazyResearch flash-attn package:
https://github.com/Dao-AILab/flash-attention
"""
def __init__(
self,
norm_factor: float,
attention_dropout: float = 0.0,
attention_dropout_ctx: Optional[Callable] = nullcontext,
deterministic: bool = False,
) -> None:
super().__init__()
assert (
_flash_attn_version >= _flash_attn_version_required
), f"FlashAttention minimum version {_flash_attn_version_required} is required."
self.norm_factor = norm_factor
self.attention_dropout_ctx = attention_dropout_ctx
self.attention_dropout = attention_dropout
self.deterministic = deterministic
def forward(
self,
query_layer: torch.Tensor,
key_layer: torch.Tensor,
value_layer: torch.Tensor,
attn_mask_type: str = "causal",
) -> torch.Tensor:
"""flash-attn fprop"""
assert (
query_layer.dtype in [torch.float16, torch.bfloat16]
and key_layer.dtype in [torch.float16, torch.bfloat16]
and value_layer.dtype in [torch.float16, torch.bfloat16]
), 'FlashAttention currently only supports FP16 and BF16.'
assert (
query_layer.is_cuda and key_layer.is_cuda and value_layer.is_cuda
), 'FlashAttention currently only supports CUDA tensors.'
# For now just 128, will make it more general in the future
if (query_layer.shape[-1] == 128 and
query_layer.shape[0] * query_layer.shape[1] >= 512 and
_check_qkv_layout(query_layer, key_layer, value_layer) == "sbh3d"):
query_layer, key_layer, value_layer = _PrepareQKVForFA.apply(query_layer,
key_layer,
value_layer)
else:
query_layer, key_layer, value_layer = [x.transpose(0,1).contiguous()
for x in (query_layer, key_layer, value_layer)]
batch_size, seqlen = query_layer.shape[0], query_layer.shape[1]
# [b, sq, np, hn]
query_layer, key_layer, value_layer = [
x.view(x.shape[0] * x.shape[1], *x.shape[2:])
for x in [query_layer, key_layer, value_layer]
]
max_seqlen = seqlen
cu_seqlens = torch.arange(
0,
(batch_size + 1) * seqlen,
step=seqlen,
dtype=torch.int32,
device=query_layer.device)
with self.attention_dropout_ctx():
fa_optional_forward_kwargs = {}
if not _flash_attn_2_available:
fa_optional_forward_kwargs["deterministic"] = self.deterministic
output = flash_attn_forward_func(
query_layer, key_layer, value_layer, cu_seqlens, cu_seqlens, max_seqlen, max_seqlen,
self.attention_dropout if self.training else 0.0,
softmax_scale=1.0/self.norm_factor, causal=attn_mask_type=="causal",
**fa_optional_forward_kwargs
)
# [(b sq), np, hn] -> [sq, b, (np hn)]
return output.view(batch_size, seqlen, -1).transpose(0, 1).contiguous()
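# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of the FlashAttention wrapper; it assumes a CUDA device and
# the flash-attn package are available, and uses arbitrary shapes. Inputs are
# [sq, b, np, hn] FP16/BF16 tensors and the output is [sq, b, np * hn].
def _flash_attention_usage_sketch():
    """Hypothetical helper: causal flash attention on random FP16 tensors."""
    sq, b, heads, head_dim = 32, 2, 4, 64
    attn = FlashAttention(norm_factor=math.sqrt(head_dim))
    q = torch.randn(sq, b, heads, head_dim, dtype=torch.float16, device="cuda")
    k, v = torch.randn_like(q), torch.randn_like(q)
    return attn(q, k, v, attn_mask_type="causal")  # [32, 2, 256]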
class FusedAttnFunc_qkvpacked(torch.autograd.Function):
"""Function for FusedAttention with packed QKV input"""
@staticmethod
def forward(ctx, is_training, max_seqlen, cu_seqlens, qkv, qkv_dtype, attn_bias, attn_scale,
dropout_p, fast_zero_fill, qkv_layout, attn_bias_type, attn_mask_type,
rng_gen, fused_attention_backend, use_FAv2_bwd):
out, aux_ctx_tensors = fused_attn_fwd_qkvpacked(
is_training, max_seqlen, cu_seqlens, qkv, qkv_dtype,
fused_attention_backend, attn_bias,
None, None, None, None, None,
attn_scale, dropout_p, fast_zero_fill, qkv_layout, attn_bias_type, attn_mask_type,
rng_gen)
ctx.save_for_backward(qkv, out, cu_seqlens)
ctx.aux_ctx_tensors = aux_ctx_tensors
ctx.max_seqlen = max_seqlen
ctx.qkv_dtype = qkv_dtype
ctx.attn_scale = attn_scale
ctx.dropout_p = dropout_p
ctx.fast_zero_fill = fast_zero_fill
ctx.qkv_layout = qkv_layout
ctx.attn_bias_type = attn_bias_type
ctx.attn_mask_type = attn_mask_type
ctx.fused_attention_backend = fused_attention_backend
ctx.use_FAv2_bwd = use_FAv2_bwd
return out
@staticmethod
def backward(ctx, d_out):
qkv, out, cu_seqlens = ctx.saved_tensors
if ctx.use_FAv2_bwd:
softmax_lse, rng_state = ctx.aux_ctx_tensors
dqkv = torch.empty_like(qkv)
maybe_contiguous = lambda x: x.contiguous() if x.stride(-1) != 1 else x
d_out, q, k, v, out = [maybe_contiguous(x)
for x in (d_out, qkv[:,0], qkv[:,1], qkv[:,2], out)]
flash_attn_cuda_bwd(
d_out, q, k, v, out, softmax_lse, dqkv[:,0], dqkv[:,1], dqkv[:,2],
cu_seqlens, cu_seqlens, ctx.max_seqlen, ctx.max_seqlen,
ctx.dropout_p, ctx.attn_scale, False,
ctx.attn_mask_type == "causal", None, rng_state
)
dqkv = dqkv[..., :d_out.shape[-1]]
else:
dqkv, *rest = fused_attn_bwd_qkvpacked(
ctx.max_seqlen, cu_seqlens, qkv, out, d_out,
ctx.qkv_dtype, ctx.aux_ctx_tensors,
ctx.fused_attention_backend,
None, None, None, None, None, None, None, None, None,
ctx.attn_scale, ctx.dropout_p, ctx.fast_zero_fill,
ctx.qkv_layout, ctx.attn_bias_type, ctx.attn_mask_type)
# if no_bias, return dqkv
if ctx.attn_bias_type == "no_bias":
return (None, None, None, dqkv, None, None, None,
None, None, None, None, None, None,
None, None, None, None, None, None)
# else, return (dqkv, dbias)
return (None, None, None, dqkv, None, rest[0], None,
None, None, None, None, None, None,
None, None, None, None, None, None)
class FusedAttnFunc_kvpacked(torch.autograd.Function):
"""Function for FusedAttention with packed KV input"""
@staticmethod
def forward(ctx, is_training, max_seqlen_q, max_seqlen_kv, cu_seqlens_q, cu_seqlens_kv,
q, kv, qkv_dtype, attn_bias, attn_scale, dropout_p, fast_zero_fill,
qkv_layout, attn_bias_type, attn_mask_type,
rng_gen, fused_attention_backend, use_FAv2_bwd):
out, aux_ctx_tensors = fused_attn_fwd_kvpacked(
is_training, max_seqlen_q, max_seqlen_kv, cu_seqlens_q, cu_seqlens_kv,
q, kv, qkv_dtype, fused_attention_backend, attn_bias,
None, None, None, None, None,
attn_scale, dropout_p, fast_zero_fill, qkv_layout, attn_bias_type, attn_mask_type,
rng_gen)
ctx.save_for_backward(q, kv, out, cu_seqlens_q, cu_seqlens_kv)
ctx.aux_ctx_tensors = aux_ctx_tensors
ctx.max_seqlen_q = max_seqlen_q
ctx.max_seqlen_kv = max_seqlen_kv
ctx.qkv_dtype = qkv_dtype
ctx.attn_scale = attn_scale
ctx.dropout_p = dropout_p
ctx.fast_zero_fill = fast_zero_fill
ctx.qkv_layout = qkv_layout
ctx.attn_bias_type = attn_bias_type
ctx.attn_mask_type = attn_mask_type
ctx.fused_attention_backend = fused_attention_backend
ctx.use_FAv2_bwd = use_FAv2_bwd
return out
@staticmethod
def backward(ctx, d_out):
q, kv, out, cu_seqlens_q, cu_seqlens_kv = ctx.saved_tensors
if ctx.use_FAv2_bwd:
softmax_lse, rng_state = ctx.aux_ctx_tensors
dq = torch.empty_like(q)
dkv = torch.empty_like(kv)
maybe_contiguous = lambda x: x.contiguous() if x.stride(-1) != 1 else x
d_out, q, k, v, out = [maybe_contiguous(x)
for x in (d_out, q, kv[:,0], kv[:,1], out)]
flash_attn_cuda_bwd(
d_out, q, k, v, out, softmax_lse, dq, dkv[:,0], dkv[:,1],
cu_seqlens_q, cu_seqlens_kv, ctx.max_seqlen_q, ctx.max_seqlen_kv,
ctx.dropout_p, ctx.attn_scale, False,
ctx.attn_mask_type == "causal", None, rng_state
)
dq = dq[..., :d_out.shape[-1]]
dkv = dkv[..., :d_out.shape[-1]]
else:
dq, dkv, *rest = fused_attn_bwd_kvpacked(
ctx.max_seqlen_q, ctx.max_seqlen_kv, cu_seqlens_q, cu_seqlens_kv,
q, kv, out, d_out,
ctx.qkv_dtype, ctx.aux_ctx_tensors,
ctx.fused_attention_backend,
None, None, None, None, None, None, None, None, None,
ctx.attn_scale, ctx.dropout_p, ctx.fast_zero_fill,
ctx.qkv_layout, ctx.attn_bias_type, ctx.attn_mask_type)
        # if no_bias, return (dq, dkv)
if ctx.attn_bias_type == "no_bias":
return (None, None, None, None, None, dq, dkv, None, None, None,
None, None, None, None, None, None,
None, None, None, None, None, None)
        # else, return (dq, dkv, dbias)
return (None, None, None, None, None, dq, dkv, None, rest[0], None,
None, None, None, None, None, None,
None, None, None, None, None, None)
class FusedAttention(torch.nn.Module):
"""Dot product attention, with multiple backends:
1. FusedAttnBackend["F16_max512_seqlen"]
cuDNN based fused attention for FP16/BF16 and <=512 sequence length.
2. FusedAttnBackend["F16_arbitrary_seqlen"]
cuDNN based fused attention for FP16/BF16 and any sequence length.
Support matrix:
| backend | 1 | 2 |
| flash based | no | yes |
| cuDNN based | yes | yes |
| qkv dtype | fp16/bf16 | fp16/bf16 |
| attn_type | self/cross | self |
| qkv_layout | | |
| - qkv | qkv_interleaved | qkv_interleaved |
| - (q,kv) | kv_interleaved | |
| mask_type | causal/no_mask | causal |
| bias_type | no_bias/post_scale_bias | no_bias |
| dropout | yes | yes |
| max_seqlen | <=512 | any |
| head_dim | 64 | 64,128 |
| output dtype | fp16/bf16 | fp16/bf16 |
"""
def __init__(
self,
norm_factor: float,
attention_dropout: float = 0.0,
attention_dropout_ctx: Optional[Callable] = nullcontext,
attention_type: str = "self",
) -> None:
super().__init__()
self.norm_factor = norm_factor
self.attention_dropout = attention_dropout
self.attention_dropout_ctx = attention_dropout_ctx
self.attention_type = attention_type
self.use_FAv2_bwd = (os.getenv("NVTE_FUSED_ATTN_USE_FAv2_BWD", "0") == "1"
and _flash_attn_2_available
and get_device_compute_capability() == 9.0)
def forward(
self,
query_layer: torch.Tensor,
key_layer: torch.Tensor,
value_layer: torch.Tensor,
attn_mask_type: str = "causal",
fused_attention_backend:
tex.NVTE_Fused_Attn_Backend = tex.NVTE_Fused_Attn_Backend.NVTE_No_Backend,
core_attention_bias_type: str = "no_bias",
core_attention_bias: Optional[torch.Tensor] = None,
fast_zero_fill: bool = True,
) -> torch.Tensor:
"""fused attention fprop"""
assert (fused_attention_backend
!= tex.NVTE_Fused_Attn_Backend.NVTE_No_Backend
), 'No fused attention backend supports this input combination!'
assert (
(query_layer.dtype in [torch.float16, torch.bfloat16])
and (key_layer.dtype in [torch.float16, torch.bfloat16])
and (value_layer.dtype in [torch.float16, torch.bfloat16])
), 'FusedAttention only supports FP16 and BF16 data types.'
assert (
query_layer.is_cuda and key_layer.is_cuda and value_layer.is_cuda
), 'FusedAttention only supports CUDA tensors.'
qkv_dtype = TE_DType[query_layer.dtype]
seqlen_q, batch_size = query_layer.shape[0], query_layer.shape[1]
seqlen_kv = key_layer.shape[0]
max_seqlen_q = seqlen_q
max_seqlen_kv = seqlen_kv
if self.attention_type == "self":
qkv_layout = _check_qkv_layout(query_layer, key_layer, value_layer)
if qkv_layout == "sbh3d":
mixed_layer = _CombineQKV.apply(query_layer, key_layer, value_layer, 3)
# [s, b, h, 3, d]
mixed_layer = mixed_layer.view(
*mixed_layer.shape[0:3], 3, query_layer.shape[-1])
# [b, s, 3, h, d]
mixed_layer = mixed_layer.transpose(2, 3).transpose(0, 1).contiguous()
elif qkv_layout == "sb3hd":
mixed_layer = _CombineQKV.apply(query_layer, key_layer, value_layer, 2)
# [s, b, 3, h, d]
mixed_layer = mixed_layer.view(
*mixed_layer.shape[0:2], 3, *query_layer.shape[2:])
# [b, s, 3, h, d]
mixed_layer = mixed_layer.transpose(0, 1).contiguous()
else:
raise Exception("FusedAttention only supports qkv layout sbh3d or sb3hd!")
# [total_seqs, 3, h, d]
mixed_layer = mixed_layer.view(
mixed_layer.shape[0] * mixed_layer.shape[1], *mixed_layer.shape[2:])
qkv_layout = "qkv_interleaved"
max_seqlen = seqlen_q
cu_seqlens = torch.arange(
0,
(batch_size + 1) * seqlen_q,
step=seqlen_q,
dtype=torch.int32,
device=query_layer.device)
use_FAv2_bwd = (self.use_FAv2_bwd
and (fused_attention_backend
== tex.NVTE_Fused_Attn_Backend.NVTE_F16_arbitrary_seqlen)
and core_attention_bias_type == "no_bias")
with self.attention_dropout_ctx():
output = FusedAttnFunc_qkvpacked.apply(
self.training,
max_seqlen,
cu_seqlens,
mixed_layer,
qkv_dtype,
core_attention_bias,
1.0/self.norm_factor,
self.attention_dropout if self.training else 0.0,
fast_zero_fill,
qkv_layout,
core_attention_bias_type,
attn_mask_type,
None, # rng_gen
fused_attention_backend,
use_FAv2_bwd
)
output = output.view(batch_size, seqlen_q, -1).transpose(0, 1).contiguous()
if self.attention_type == "cross":
kv_layout = _check_kv_layout(key_layer, value_layer)
if kv_layout == "sbh2d":
key_value = _CombineKV.apply(key_layer, value_layer, 3)
# [s, b, h, 2, d]
key_value = key_value.view(
*key_value.shape[0:3], 2, key_layer.shape[-1])
# [b, s, 2, h, d]
key_value = key_value.transpose(2, 3).transpose(0, 1).contiguous()
            elif kv_layout == "sb2hd":
key_value = _CombineKV.apply(key_layer, value_layer, 2)
# [s, b, 2, h, d]
key_value = key_value.view(
*key_value.shape[0:2], 2, *key_layer.shape[2:])
# [b, s, 2, h, d]
key_value = key_value.transpose(0, 1).contiguous()
else:
raise Exception("FusedAttention only supports kv layout sbh2d or sb2hd!")
# [total_seqs, h, d]
query_layer = query_layer.transpose(0, 1).contiguous()
query_layer = query_layer.view(
query_layer.shape[0] * query_layer.shape[1], *query_layer.shape[2:])
# [total_seqs, 2, h, d]
            key_value = key_value.view(
                key_value.shape[0] * key_value.shape[1], *key_value.shape[2:])
qkv_layout = "kv_interleaved"
cu_seqlens_q = torch.arange(
0,
(batch_size + 1) * seqlen_q,
step=seqlen_q,
dtype=torch.int32,
device=query_layer.device)
cu_seqlens_kv = torch.arange(
0,
(batch_size + 1) * seqlen_kv,
step=seqlen_kv,
dtype=torch.int32,
device=key_layer.device)
with self.attention_dropout_ctx():
outputs = FusedAttnFunc_kvpacked.apply(
self.training,
max_seqlen_q, max_seqlen_kv,
cu_seqlens_q, cu_seqlens_kv,
query_layer, key_value,
qkv_dtype,
core_attention_bias,
1.0/self.norm_factor,
self.attention_dropout if self.training else 0.0,
fast_zero_fill,
qkv_layout,
core_attention_bias_type,
attn_mask_type,
None, # rng_gen
fused_attention_backend,
use_FAv2_bwd
)
            output = outputs.view(batch_size, seqlen_q, -1).transpose(0, 1).contiguous()
return output
class DotProductAttention(torch.nn.Module):
"""Allows the model to jointly attend to information from different
representation subspaces as described in the paper:
`Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
.. note::
Argument :attr:`attention_mask` will be ignored in the `forward` call when
:attr:`attn_mask_type` is set to `"causal"`.
.. warning::
FlashAttention uses a non-deterministic algorithm for optimal performance. To observe
deterministic behavior at the cost of performance, use FlashAttention version < `2.0.0`
and set the environment variable :attr:`NVTE_ALLOW_NONDETERMINISTIC_ALGO=0`. In order
        to disable `flash-attn` entirely, set :attr:`NVTE_FLASH_ATTN=0`.
.. warning::
Argument :attr:`attn_mask_type` has been moved to the `forward` method and
is deprecated. It will be fully removed in future releases.
Parameters
----------
num_attention_heads : int
number of attention heads in the transformer layer.
kv_channels : int
number of key-value channels.
num_gqa_groups : Optional[int] = None
number of GQA groups in the transformer layer.
Grouped Query Attention is described in
`this paper <https://arxiv.org/pdf/2305.13245.pdf>`_.
This only affects the keys and values, not the queries.
GQA-1 is equivalent to Multi-Query Attention
(`MQA <https://arxiv.org/pdf/1911.02150.pdf>`_), while GQA-H
is equivalent to MHA, i.e. `num_gqa_groups = num_attention_heads`.
attention_dropout: float, default = 0.0
dropout probability for the dropout op during multi-head attention.
layer_number: int, default = `None`
layer number of the current `DotProductAttention` when multiple such modules
are concatenated, for instance in consecutive transformer blocks.
Parallelism parameters
----------------------
sequence_parallel : bool, default = `False`
if set to `True`, uses sequence parallelism.
tp_size : int, default = 1
tensor parallel world size.
tp_group : ProcessGroup, default = `None`
tensor parallel process group.
"""
def __init__(
self,
num_attention_heads: int,
kv_channels: int,
num_gqa_groups: Optional[int] = None,
attention_dropout: float = 0.0,
attn_mask_type: Optional[str] = None,
sequence_parallel: bool = False,
tp_size: int = 1,
get_rng_state_tracker: Optional[Callable] = None,
tp_group: Optional[dist_group_type] = None,
layer_number: Optional[int] = None,
attention_type: str = "self",
) -> None:
super().__init__()
if attn_mask_type is not None:
warnings.warn(
"Argument :attr:`attn_mask_type` has been moved to the `forward` method and"
"is deprecated. It will be fully removed in future releases.",
category=DeprecationWarning,
)
self.attn_mask_type = attn_mask_type
self.tp_size = tp_size if tp_group is None else get_distributed_world_size(tp_group)
self.tp_group = tp_group
self.get_rng_state_tracker = get_rng_state_tracker
self.num_attention_heads = num_attention_heads
self.hidden_size_per_attention_head = kv_channels
self.num_gqa_groups = (
num_attention_heads if num_gqa_groups is None else num_gqa_groups
)
self.num_gqa_groups_per_partition = int(self.num_gqa_groups // tp_size)
assert (num_attention_heads % self.num_gqa_groups == 0
), "The number of attention heads must be divisible by the number of GQA groups!"
if sequence_parallel or get_rng_state_tracker is None:
attention_dropout_ctx = nullcontext
else:
attention_dropout_ctx = get_rng_state_tracker().fork
norm_factor = math.sqrt(self.hidden_size_per_attention_head)
self.device_compute_capability = get_device_compute_capability()
self.deterministic = not bool(int(os.getenv("NVTE_ALLOW_NONDETERMINISTIC_ALGO", "1")))
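        # Backend availability: flash-attn and cuDNN fused attention both require
        # compute capability >= 8.0, and either can be turned off via the
        # NVTE_FLASH_ATTN / NVTE_FUSED_ATTN environment variables (both default to "1").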
self.use_flash_attention = (
int(os.getenv("NVTE_FLASH_ATTN", "1"))
and self.device_compute_capability >= 8.0
)
if _flash_attn_2_available and self.deterministic:
self.use_flash_attention = False
warnings.warn(
"Disabling usage of FlashAttention since version 2 does not support deterministic"
"execution. In order to use FA with deterministic behavior, please install"
"FlashAttention version 1."
)
self.use_fused_attention = (
int(os.getenv("NVTE_FUSED_ATTN", "1"))
and self.device_compute_capability >= 8.0
)
attn_kwargs = {
"attention_dropout": attention_dropout,
"attention_dropout_ctx": attention_dropout_ctx,
}
self.attention_type = attention_type
self.attention_dropout = attention_dropout
if self.use_flash_attention:
self.flash_attention = FlashAttention(
norm_factor, **attn_kwargs,
deterministic=self.deterministic)
# Instantiating three types since use of flash-attn and FusedAttention
# might be ruled out due to forward inputs.
if self.use_fused_attention:
self.fused_attention = FusedAttention(
norm_factor, **attn_kwargs,
attention_type = attention_type)
self.unfused_attention = UnfusedDotProductAttention(
norm_factor, **attn_kwargs, layer_number=layer_number)
def _checkpointed_attention_forward(
self,
attention_func: Callable,
*forward_args: Tuple[torch.Tensor, ...],
**forward_kwargs: Dict[str, Any],
) -> torch.Tensor:
"""Forward method with activation checkpointing."""
def custom_forward(*input_args, **input_kwargs):
return attention_func(*input_args, **input_kwargs)
hidden_states = checkpoint(
custom_forward,
False,
self.get_rng_state_tracker,
self.tp_group,
*forward_args,
**forward_kwargs,
)
return hidden_states
def forward(
self,
query_layer: torch.Tensor,
key_layer: torch.Tensor,
value_layer: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
attn_mask_type: str = "causal",
checkpoint_core_attention: bool = False,
core_attention_bias_type: str = "no_bias",
core_attention_bias: Optional[torch.Tensor] = None,
fast_zero_fill: bool = True,
) -> torch.Tensor:
"""
Dot Product Attention Layer.
.. note::
Argument :attr:`attention_mask` will be ignored when :attr:`attn_mask_type`
is set to `"causal"`.
.. note::
Input tensors :attr:`query_layer`, :attr:`key_layer`, and :attr:`value_layer`
must each be of shape (:attr:`sequence_length`, :attr:`batch_size`,
:attr:`num_attention_heads`, :attr:`kv_channels`). Output of shape
(:attr:`sequence_length`, :attr:`batch_size`, :attr:`num_attention_heads`
* :attr:`kv_channels`) is returned.
.. note::
DotProductAttention supports three backends: 1) FlashAttention which calls
            HazyResearch/Dao-AILab's `flash-attn <https://arxiv.org/pdf/2205.14135.pdf>`_
PyTorch API, 2) FusedAttention which has multiple fused attention implementations
based on `cuDNN Graph API
<https://docs.nvidia.com/deeplearning/cudnn/developer-guide/index.html#op-fusion>`_
(see :attr:`FusedAttention` for more details on FusedAttention backends), and 3)
UnfusedDotProductAttention which is the native PyTorch implementation
with fused scaled masked softmax.
.. note::
Users can use environment variables :attr:`NVTE_FLASH_ATTN`, :attr:`NVTE_FUSED_ATTN`,
and :attr:`NVTE_FUSED_ATTN_BACKEND` to control which DotProductAttention backend,
and FusedAttention backend if applicable, to use. TransformerEngine prioritizes
FlashAttention over FusedAttention and over UnfusedDotProductAttention.
If FusedAttention is being used, users can also choose to switch to flash-attn's
implementation for backward by setting :attr:`NVTE_FUSED_ATTN_USE_FAv2_BWD=1`
(default: 0), because of the performance differences between various versions of
flash-attn and FusedAttention. Further, :attr:`NVTE_FUSED_ATTN_DP_WORKSPACE_LIMIT`
can be used to enable the workspace related optimizations in FusedAttention
(default: 256MB; raise the limit to enable these performance optimizations).
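        A minimal usage sketch is shown below. All sizes are arbitrary and for
        illustration only, and the import assumes `DotProductAttention` is
        re-exported from `transformer_engine.pytorch`.
        .. code-block:: python
            import torch
            from transformer_engine.pytorch import DotProductAttention
            # seq_len=128, batch=2, heads=16, head_dim=64 (illustrative sizes only)
            attn = DotProductAttention(num_attention_heads=16, kv_channels=64)
            q, k, v = [torch.randn(128, 2, 16, 64, dtype=torch.bfloat16, device="cuda")
                       for _ in range(3)]
            out = attn(q, k, v)  # shape [128, 2, 16 * 64]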
Parameters
----------
query_layer : torch.Tensor
Query tensor.
key_layer : torch.Tensor
Key tensor.
value_layer : torch.Tensor
Value tensor.
attention_mask : Optional[torch.Tensor], default = `None`
Boolean tensor used to mask out softmax input when not using flash-attn.
attn_mask_type: {'causal', 'padding', 'no_mask'}, default = `causal`
type of attention mask passed into softmax operation.
checkpoint_core_attention : bool, default = `False`
If true, forward activations for attention are recomputed
during the backward pass in order to save memory that would
otherwise be occupied to store the forward activations until
backprop.
core_attention_bias_type: str, default = `no_bias`
            Bias type, {`no_bias`, `pre_scale_bias`, `post_scale_bias`}
core_attention_bias: Optional[torch.Tensor], default = `None`
Bias tensor for Q * K.T
fast_zero_fill: bool, default = `True`
Whether to use the fast path to set output tensors to 0 or not.
"""
if self.attn_mask_type is not None:
warnings.warn(
"Argument :attr:`attn_mask_type` has been moved to the `forward` method and"
"is deprecated. It will be fully removed in future releases.",
category=DeprecationWarning,
)
# Keep previous functionality for current users.
attn_mask_type = self.attn_mask_type
assert (key_layer.shape[-2] == self.num_gqa_groups_per_partition
and value_layer.shape[-2] == self.num_gqa_groups_per_partition
), f"Keys and values must have {self.num_gqa_groups} heads!"
use_flash_attention = self.use_flash_attention
use_fused_attention = self.use_fused_attention
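        # Rule out backends that cannot handle the runtime configuration
        # (dtype, head size, compute capability, GQA, mask/bias type, ONNX export).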
if (query_layer.dtype not in [torch.bfloat16, torch.float16]
or key_layer.dtype not in [torch.bfloat16, torch.float16]
or value_layer.dtype not in [torch.bfloat16, torch.float16]
):
use_flash_attention = False
if key_layer.shape[-1] > 64:
if self.device_compute_capability in (8.6, 8.7):
use_flash_attention = False
elif not _flash_attn_2_available and self.device_compute_capability == 8.9:
use_flash_attention = False
if not _flash_attn_2_available and self.num_gqa_groups != self.num_attention_heads:
use_flash_attention = False
if attn_mask_type == "padding" and attention_mask is not None:
use_flash_attention = False
use_fused_attention = False
if core_attention_bias_type != "no_bias" or core_attention_bias is not None:
use_flash_attention = False
if is_in_onnx_export_mode():
use_flash_attention = False
use_fused_attention = False
qkv_layout = "qkv_interleaved" if self.attention_type == "self" else "kv_interleaved"
if use_fused_attention:
fused_attention_backend = tex.get_fused_attn_backend(
TE_DType[query_layer.dtype],
TE_DType[key_layer.dtype],
QKVLayout[qkv_layout],
AttnBiasType[core_attention_bias_type],
AttnMaskType[attn_mask_type],
self.attention_dropout,
query_layer.shape[0], key_layer.shape[0],
query_layer.shape[-1])
# DPA does not support FP8; for FP8, use cpp_extensions modules directly
is_backend_avail = (fused_attention_backend in
[FusedAttnBackend["F16_max512_seqlen"], FusedAttnBackend["F16_arbitrary_seqlen"]])
use_fused_attention = (use_fused_attention
and is_backend_avail
and self.num_gqa_groups == self.num_attention_heads)
if (self.deterministic
and fused_attention_backend == FusedAttnBackend["F16_arbitrary_seqlen"]):
use_fused_attention = False
warnings.warn(
"Disabling usage of FusedAttention since the FusedAttention"
"backend does not support deterministic exection."
)
if use_flash_attention:
if checkpoint_core_attention:
return self._checkpointed_attention_forward(self.flash_attention,
query_layer,
key_layer,
value_layer,
attn_mask_type=attn_mask_type)
return self.flash_attention(
query_layer, key_layer, value_layer, attn_mask_type=attn_mask_type)
if use_fused_attention:
if checkpoint_core_attention:
return self._checkpointed_attention_forward(self.fused_attention,
query_layer,
key_layer,
value_layer,
attn_mask_type=attn_mask_type,
fused_attention_backend=fused_attention_backend,
core_attention_bias_type=core_attention_bias_type,
core_attention_bias=core_attention_bias,
fast_zero_fill=fast_zero_fill)
return self.fused_attention(query_layer, key_layer, value_layer,
attn_mask_type=attn_mask_type,
fused_attention_backend=fused_attention_backend,
core_attention_bias_type=core_attention_bias_type,
core_attention_bias=core_attention_bias,
fast_zero_fill=fast_zero_fill)
if checkpoint_core_attention:
return self._checkpointed_attention_forward(
self.unfused_attention,
query_layer,
key_layer,
value_layer,
attn_mask_type=attn_mask_type,
attention_mask=attention_mask,
core_attention_bias_type=core_attention_bias_type,
core_attention_bias=core_attention_bias,
)
return self.unfused_attention(query_layer,
key_layer,
value_layer,
attn_mask_type=attn_mask_type,
attention_mask=attention_mask,
core_attention_bias_type=core_attention_bias_type,
core_attention_bias=core_attention_bias,
)
class MultiheadAttention(torch.nn.Module):
r"""
Multi-head Attention (MHA), including Query,
Key, Value and Output projection.
.. note::
Argument :attr:`attention_mask` will be ignored in the `forward` call when
:attr:`attn_mask_type` is set to `"causal"`.
.. warning::
Argument :attr:`attn_mask_type` has been moved to the `forward` method and
is deprecated. It will be fully removed in future releases.
Parameters
----------
hidden_size : int
size of each input sample.
num_attention_heads : int
number of attention heads in the transformer layer.
kv_channels: int, default = `None`
number of key-value channels. defaults to
:attr:`hidden_size` / :attr:`num_attention_heads` if `None`.
attention_dropout: float, default = 0.1
dropout probability for the dropout op during multi-head attention.
layernorm_epsilon : float, default = 1e-5
a value added to the denominator of layer normalization
for numerical stability.
init_method : Callable, default = `None`
used for initializing weights of QKV and FC1 weights in the following way:
`init_method(weight)`. When set to `None`, defaults to
`torch.nn.init.normal_(mean=0.0, std=0.023)`.
output_layer_init_method : Callable, default = `None`
used for initializing weights of PROJ and FC2 in the following way:
`output_layer_init_method(weight)`. When set to `None`, defaults to
`torch.nn.init.normal_(mean=0.0, std=0.023)`.
layer_number: int, default = `None`
layer number of the current `TransformerLayer` when multiple such modules are
concatenated to form a transformer block.
num_gqa_groups : int, default = `None`
number of GQA groups in the transformer layer.
Grouped Query Attention is described in
`this paper <https://arxiv.org/pdf/2305.13245.pdf>`_.
        This only affects the keys and values, not the queries.
GQA-1 is equivalent to Multi-Query Attention
(`MQA <https://arxiv.org/pdf/1911.02150.pdf>`_), while GQA-H
is equivalent to MHA, i.e. `num_gqa_groups = num_attention_heads`.
return_layernorm_output : bool, default = `False`
if set to `True`, output of layernorm is returned from the forward
together with the output of the linear transformation.
Example use case: residual connection for transformer module is
taken post layernorm.
input_layernorm: bool, default = `True`
if set to `False`, layer normalization to the input is not applied.
attention_type: { 'self', 'cross' }, default = 'self'
type of attention applied.
zero_centered_gamma : bool, default = 'False'
if set to 'True', gamma parameter in LayerNorm is initialized to 0 and
the LayerNorm formula changes to
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \varepsilon}} *
(1 + \gamma) + \beta
normalization : { 'LayerNorm', 'RMSNorm' }, default = 'LayerNorm'
type of normalization applied.
qkv_weight_interleaved : bool, default = `True`
if set to `False`, the QKV weight is interpreted as a concatenation of
query, key, and value weights along the `0th` dimension. The default
interpretation is that the individual `q`, `k`, and `v` weights for each
attention head are interleaved. This parameter is set to `False` when
using :attr:`fuse_qkv_params=False`.
bias : bool, default = `True`
if set to `False`, the transformer layer will not learn any additive biases.
device : Union[torch.device, str], default = "cuda"
The device on which the parameters of the model will allocated. It is the user's
responsibility to ensure all parameters are moved to the GPU before running the
forward pass.
Parallelism parameters
----------------------
set_parallel_mode : bool, default = `False`
if set to `True`, QKV and FC1 layers are used as Column Parallel
whereas PROJ and FC2 is used as Row Parallel as described
`here <https://arxiv.org/pdf/1909.08053.pdf>`_.
sequence_parallel : bool, default = `False`
if set to `True`, uses sequence parallelism.
tp_group : ProcessGroup, default = `None`
tensor parallel process group.
tp_size : int, default = 1
used as TP (tensor parallel) world size when TP groups are not formed during
initialization. In this case, users must call the
`set_tensor_parallel_group(tp_group)` method on the initialized module before the
forward pass to supply the tensor parallel group needed for tensor and sequence
parallel collectives.
Optimization parameters
-----------------------
fuse_wgrad_accumulation : bool, default = 'False'
if set to `True`, enables fusing of creation and accumulation of
the weight gradient. When enabled, it is assumed that the weights
have an additional `main_grad` attribute (used instead of the
regular `grad`) which is a pre-allocated buffer of the correct
size to accumulate gradients in.
params_dtype : torch.dtype, default = `torch.get_default_dtype()`
it controls the type used to allocate the initial parameters. Useful when
the model is trained with lower precision and the original FP32 parameters
would not fit in GPU memory.
return_bias : bool, default = `False`
when set to `True`, this module will not apply the additive bias itself, but
instead return the bias value during the forward pass together with the
output of the linear transformation :math:`y = xA^T`. This is useful when
the bias addition can be fused to subsequent operations.
fuse_qkv_params: bool, default = 'False'
if set to `True`, `TransformerLayer` module exposes a single fused
parameter for query-key-value. This enables optimizations such as QKV
        fusion without concatenations/splits and also enables the argument
`fuse_wgrad_accumulation`.
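    A minimal construction sketch (sizes are arbitrary and for illustration only;
    the import assumes `MultiheadAttention` is re-exported from
    `transformer_engine.pytorch`):
    .. code-block:: python
        import torch
        from transformer_engine.pytorch import MultiheadAttention
        mha = MultiheadAttention(hidden_size=1024, num_attention_heads=16)
        x = torch.randn(128, 2, 1024, device="cuda")  # [seq, batch, hidden]
        y = mha(x)  # causal self-attention by default, output [128, 2, 1024]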
"""
def __init__(
self,
hidden_size: int,
num_attention_heads: int,
kv_channels: Optional[int] = None,
attention_dropout: float = 0.1,
layernorm_epsilon: float = 1e-5,
init_method: Optional[Callable] = None,
output_layer_init_method: Optional[Callable] = None,
layer_number: Optional[int] = None,
attn_mask_type: Optional[str] = None,
tp_group: Optional[dist_group_type] = None,
tp_size: int = 1,
num_gqa_groups: Optional[int] = None,
fuse_wgrad_accumulation: bool = False,
get_rng_state_tracker: Optional[Callable] = None,
sequence_parallel: bool = False,
params_dtype: Optional[torch.dtype] = None,
return_bias: bool = False,
return_layernorm_output: bool = False,
input_layernorm: bool = False,
attention_type: str = "self",
set_parallel_mode: bool = False,
fuse_qkv_params: bool = False,
zero_centered_gamma: bool = False,
qkv_weight_interleaved: bool = True,
ub_bulk_wgrad: bool = False,
ub_bulk_dgrad: bool = False,
ub_split_rs: bool = False,
ub_split_ag: bool = False,
bias: bool = True,
normalization: str = "LayerNorm",
device: Union[torch.device, str] = "cuda",
) -> None:
super().__init__()
if attn_mask_type is not None:
warnings.warn(
"Argument :attr:`attn_mask_type` has been moved to the `forward` method and"
"is deprecated. It will be fully removed in future releases.",
category=DeprecationWarning,
)
self.attn_mask_type = attn_mask_type
self.layer_number = layer_number
self.input_layernorm = input_layernorm
self.attention_type = attention_type
self.get_rng_state_tracker = get_rng_state_tracker
self.tp_group = tp_group
self.return_layernorm_output = return_layernorm_output
self.params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
self.num_attention_heads = num_attention_heads
self.return_bias = return_bias
kv_channels = kv_channels if kv_channels else (hidden_size // num_attention_heads)
if init_method is None:
init_method = get_default_init_method()
if output_layer_init_method is None:
output_layer_init_method = get_default_init_method()
if not fuse_qkv_params:
qkv_weight_interleaved = False
self.qkv_weight_interleaved = qkv_weight_interleaved
assert attention_type in AttnTypes, f"attention_type {attention_type} not supported"
if layer_number is not None:
assert layer_number > 0, "layer_number must be a positive integer"
tp_size = tp_size if tp_group is None else get_distributed_world_size(tp_group)
self.tp_size = tp_size
self.sequence_parallel = (tp_size > 1) and sequence_parallel
self.hidden_size_per_attention_head = kv_channels
self.num_attention_heads_per_partition = divide(num_attention_heads, tp_size)
self.num_gqa_groups = (
num_attention_heads if num_gqa_groups is None else num_gqa_groups
)
assert (num_attention_heads % self.num_gqa_groups == 0
), "The number of GQA groups must be divisible by the number of attention heads!"
        assert (self.num_gqa_groups % tp_size == 0
                ), "The number of GQA groups must be divisible by the tensor parallel size!"
self.num_gqa_groups_per_partition = int(self.num_gqa_groups // tp_size)
self.hidden_size_kv = int(hidden_size * self.num_gqa_groups // num_attention_heads)
common_gemm_kwargs = {
"fuse_wgrad_accumulation": fuse_wgrad_accumulation,
"tp_group": tp_group,
"tp_size": tp_size,
"get_rng_state_tracker": get_rng_state_tracker,
"sequence_parallel": sequence_parallel,
"params_dtype": self.params_dtype,
"device": device,
}
qkv_parallel_mode = "column" if set_parallel_mode else None
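        # Plain self-attention (num_gqa_groups == num_attention_heads) uses a single
        # fused QKV projection; cross-attention and GQA instead use a separate query
        # projection plus a packed key/value projection sized for the GQA groups.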
if self.attention_type == "self" and self.num_gqa_groups == self.num_attention_heads:
if self.input_layernorm:
self.layernorm_qkv = LayerNormLinear(
hidden_size,
3 * hidden_size,
eps=layernorm_epsilon,
init_method=init_method,
bias=bias,
return_bias=False,
parallel_mode=qkv_parallel_mode,
return_layernorm_output=return_layernorm_output,
parameters_split=("query_", "key_", "value_") if not fuse_qkv_params else None,
zero_centered_gamma=zero_centered_gamma,
ub_bulk_wgrad=ub_bulk_wgrad,
ub_bulk_dgrad=ub_bulk_dgrad,
ub_split_ag=ub_split_ag,
normalization=normalization,
**common_gemm_kwargs,
)
else:
self.qkv = Linear(
hidden_size,
3 * hidden_size,
init_method=init_method,
bias=bias,
return_bias=False,
parallel_mode=qkv_parallel_mode,
parameters_split=("query_", "key_", "value_") if not fuse_qkv_params else None,
**common_gemm_kwargs,
)
elif ((self.attention_type == "cross")
or (self.attention_type == "self"
and self.num_gqa_groups != self.num_attention_heads)):
if self.input_layernorm:
self.layernorm_query = LayerNormLinear(
hidden_size,
hidden_size,
eps=layernorm_epsilon,
init_method=init_method,
bias=bias,
return_bias=False,
parallel_mode=qkv_parallel_mode,
return_layernorm_output=return_layernorm_output,
zero_centered_gamma=zero_centered_gamma,
ub_bulk_wgrad=ub_bulk_wgrad,
ub_bulk_dgrad=ub_bulk_dgrad,
ub_split_ag=ub_split_ag,
normalization=normalization,
**common_gemm_kwargs,
)
else:
self.query_layer = Linear(
hidden_size,
hidden_size,
init_method=init_method,
bias=bias,
return_bias=False,
parallel_mode=qkv_parallel_mode,
**common_gemm_kwargs,
)
self.key_value = Linear(
hidden_size,
2 * self.hidden_size_kv,
init_method=init_method,
bias=bias,
return_bias=False,
parallel_mode=qkv_parallel_mode,
parameters_split=("key_", "value_") if not fuse_qkv_params else None,
**common_gemm_kwargs,
)
# Attention.
self.core_attention = DotProductAttention(
num_attention_heads,
kv_channels,
num_gqa_groups=self.num_gqa_groups,
attention_dropout=attention_dropout,
tp_size=tp_size,
get_rng_state_tracker=get_rng_state_tracker,
sequence_parallel=sequence_parallel,
tp_group=tp_group,
layer_number=self.layer_number,
)
# Linear
self.proj = Linear(
hidden_size,
hidden_size,
init_method=output_layer_init_method,
bias=bias,
return_bias=return_bias,
parallel_mode="row" if set_parallel_mode else None,
ub_split_rs=ub_split_rs,
ub_split_ag=ub_split_ag,
**common_gemm_kwargs,
)
def _allocate_memory(
self, inference_max_sequence_len: int, batch_size: int, dtype: torch.dtype
) -> torch.Tensor:
return torch.empty(
inference_max_sequence_len,
batch_size,
self.num_gqa_groups_per_partition,
self.hidden_size_per_attention_head,
dtype=dtype,
device=torch.cuda.current_device(),
)
def set_tensor_parallel_group(self, tp_group: Union[dist_group_type, None]) -> None:
"""Set TP group"""
self.tp_group = tp_group
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_output: Optional[torch.Tensor] = None,
attn_mask_type: str = "causal",
is_first_microbatch: Optional[bool] = None,
checkpoint_core_attention: bool = False,
inference_params: Optional[Any] = None,
rotary_pos_emb: Optional[Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]] = None,
core_attention_bias_type: str = "no_bias",
core_attention_bias: Optional[torch.Tensor] = None,
fast_zero_fill: bool = True,
) -> Tuple[Union[torch.Tensor, None], ...]:
"""
Forward propagation for MultiheadAttention layer.
.. note::
Argument :attr:`attention_mask` will be ignored when :attr:`attn_mask_type`
is set to `"causal"`.
Parameters
----------
hidden_states : torch.Tensor
Input tensor.
attention_mask : Optional[torch.Tensor], default = `None`
Boolean tensor used to mask out self-attention softmax input.
attn_mask_type: {'causal', 'padding', 'no_mask'}, default = `causal`
type of attention mask passed into softmax operation.
encoder_output : Optional[torch.Tensor], default = `None`
Output of the encoder block to be fed into the decoder block if using
`layer_type="decoder"`.
is_first_microbatch : {True, False, None}, default = None
During training using either gradient accumulation or
pipeline parallelism a minibatch of data is further split
into microbatches. Between the microbatches of the same minibatch
the model weights are not updated. Setting this parameter indicates
whether the current microbatch is the first in a minibatch or not.
When set, this parameter enables additional optimizations:
* during FP8 training, it allows caching of the FP8 versions of
the weights
* it also allows skipping gradient accumulation during the
first microbatch (since it is the first gradient being
produced)
checkpoint_core_attention: bool, default = `False`
If true, forward activations for core attention are recomputed
during the backward pass in order to save memory that would
otherwise be occupied to store the forward activations until
backprop.
rotary_pos_emb: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]], default = `None`
Embeddings for query and key tensors for applying rotary position
embedding. By default no input embedding is applied.
core_attention_bias_type: str, default = `no_bias`
            Bias type, {`no_bias`, `pre_scale_bias`, `post_scale_bias`}
core_attention_bias: Optional[torch.Tensor], default = `None`
Bias tensor for Q * K.T
fast_zero_fill: bool, default = `True`
Whether to set output tensors to 0 or not before use.
"""
# hidden_states: [sq, b, h]
if self.attn_mask_type is not None:
warnings.warn(
"Argument :attr:`attn_mask_type` has been moved to the `forward` method and"
"is deprecated. It will be fully removed in future releases.",
category=DeprecationWarning,
)
# Keep previous functionality for current users.
attn_mask_type = self.attn_mask_type
if attn_mask_type == "padding" and attention_mask is not None:
assert (
attention_mask.dtype == torch.bool
), "Attention mask must be a boolean tensor"
assert (core_attention_bias_type in AttnBiasTypes
), f"core_attention_bias_type {core_attention_bias_type} is not supported!"
# =================================================
# Pre-allocate memory for key-values for inference.
# =================================================
is_first_step = False
if inference_params and self.layer_number is not None:
if self.layer_number not in inference_params.key_value_memory_dict:
inf_max_seq_len = inference_params.max_sequence_len
inf_max_batch_size = inference_params.max_batch_size
inference_key_memory = self._allocate_memory(
inf_max_seq_len, inf_max_batch_size, hidden_states.dtype
)
inference_value_memory = self._allocate_memory(
inf_max_seq_len, inf_max_batch_size, hidden_states.dtype
)
inference_params.key_value_memory_dict[self.layer_number] = (
inference_key_memory,
inference_value_memory,
)
is_first_step = True
else:
(
inference_key_memory,
inference_value_memory,
) = inference_params.key_value_memory_dict[self.layer_number]
# =====================
# Query, Key, and Value
# =====================
if self.attention_type == "self" and self.num_gqa_groups == self.num_attention_heads:
# Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]
if self.input_layernorm:
layernorm_qkv_outputs = self.layernorm_qkv(
hidden_states,
is_first_microbatch=is_first_microbatch,
)
if self.return_layernorm_output:
mixed_x_layer, layernorm_output = layernorm_qkv_outputs
else:
mixed_x_layer = layernorm_qkv_outputs
else:
mixed_x_layer = self.qkv(
hidden_states,
is_first_microbatch=is_first_microbatch,
)
if self.qkv_weight_interleaved:
# [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
new_tensor_shape = mixed_x_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
3 * self.hidden_size_per_attention_head,
)
# split along last dimension
split_dim = -1
else:
# [sq, b, (np * 3 * hn)] --> [sq, b, 3 * np, hn]
new_tensor_shape = mixed_x_layer.size()[:-1] + (
3 * self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
)
# split along second last dimension
split_dim = -2
mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
# mixed_x_layer --> 3 [sq, b, np, hn]
if split_dim == -1 and not is_in_onnx_export_mode():
query_layer, key_layer, value_layer = _SplitLastDim.apply(mixed_x_layer, 3)
else:
query_layer, key_layer, value_layer = split_tensor_along_dim(
mixed_x_layer, split_dim, 3
)
elif ((self.attention_type == "cross")
or (self.attention_type == "self"
and self.num_gqa_groups != self.num_attention_heads)):
if self.attention_type == "cross":
input_tensor = encoder_output
else:
input_tensor = hidden_states
# Attention heads [sk, b, h] --> [sk, b, (np * 2 * hn)]
mixed_kv_layer = self.key_value(
input_tensor,
is_first_microbatch=is_first_microbatch,
)
if self.qkv_weight_interleaved:
# [sq, b, (np * 2 * hn)] --> [sq, b, np, 2 * hn]
new_tensor_shape = mixed_kv_layer.size()[:-1] + (
self.num_gqa_groups_per_partition,
2 * self.hidden_size_per_attention_head,
)
# split along last dimension
split_dim = -1
else:
# [sq, b, (np * 2 * hn)] --> [sq, b, 2 * np, hn]
new_tensor_shape = mixed_kv_layer.size()[:-1] + (
2 * self.num_gqa_groups_per_partition,
self.hidden_size_per_attention_head,
)
# split along second last dimension
split_dim = -2
mixed_kv_layer = mixed_kv_layer.view(*new_tensor_shape)
# mixed_kv_layer --> 2 [sk, b, np, hn]
if split_dim == -1 and not is_in_onnx_export_mode():
key_layer, value_layer = _SplitLastDim.apply(mixed_kv_layer, 2)
else:
key_layer, value_layer = split_tensor_along_dim(mixed_kv_layer, split_dim, 2)
# Attention head [sq, b, h] --> [sq, b, hp]
if self.input_layernorm:
layernorm_query_outputs = self.layernorm_query(
hidden_states,
is_first_microbatch=is_first_microbatch,
)
if self.return_layernorm_output:
query_layer, layernorm_output = layernorm_query_outputs
else:
query_layer = layernorm_query_outputs
else:
query_layer = self.query_layer(
hidden_states,
is_first_microbatch=is_first_microbatch,
)
# [sq, b, hp] --> [sq, b, np, hn]
new_tensor_shape = query_layer.size()[:-1] + (
self.num_attention_heads_per_partition,
self.hidden_size_per_attention_head,
)
query_layer = query_layer.view(*new_tensor_shape)
# ==================================
# Adjust key and value for inference
# ==================================
# duplicate the pos_emb for self attention
if rotary_pos_emb is not None:
if not isinstance(rotary_pos_emb, tuple):
rotary_pos_emb = ((rotary_pos_emb,) * 2)
if inference_params and self.layer_number is not None:
batch_start = inference_params.batch_size_offset
batch_end = batch_start + key_layer.size(1)
assert batch_end <= inference_key_memory.size(1)
sequence_start = inference_params.sequence_len_offset
sequence_end = sequence_start + key_layer.size(0)
assert sequence_end <= inference_key_memory.size(0)
# Copy key and values.
inference_key_memory[
sequence_start:sequence_end, batch_start:batch_end, ...
] = key_layer
inference_value_memory[
sequence_start:sequence_end, batch_start:batch_end, ...
] = value_layer
key_layer = inference_key_memory[:sequence_end, batch_start:batch_end, ...]
value_layer = inference_value_memory[
:sequence_end, batch_start:batch_end, ...
]
# adjust the key rotary positional embedding
if rotary_pos_emb is not None:
q_pos_emb, k_pos_emb = rotary_pos_emb
# need to cross check this condition during inference
# if not set_inference_key_value_memory:
if not is_first_step:
# In inference, we compute one token at a time.
# Select the correct positional embedding
# (only the last token in the sequence)
q_pos_emb = q_pos_emb[sequence_end - 1 : sequence_end]
else:
# In the first forward pass of inference,
# we use the entire provided prefix.
# q_pos_emb here has the rope embeddings of the entire
# prefix + to-be-generated output so
# we slice to just the prefix.
q_pos_emb = q_pos_emb[:sequence_end, :, :, :]
k_pos_emb = k_pos_emb[:sequence_end, :, :, :]
rotary_pos_emb = (q_pos_emb, k_pos_emb)
# ==================================
# core attention computation
# ==================================
# apply relative positional encoding (rotary embedding)
if rotary_pos_emb is not None:
q_pos_emb, k_pos_emb = rotary_pos_emb
query_layer = apply_rotary_pos_emb(query_layer, q_pos_emb)
key_layer = apply_rotary_pos_emb(key_layer, k_pos_emb)
context_layer = self.core_attention(
query_layer,
key_layer,
value_layer,
attention_mask=attention_mask,
attn_mask_type=attn_mask_type,
checkpoint_core_attention=checkpoint_core_attention,
core_attention_bias_type=core_attention_bias_type,
core_attention_bias=core_attention_bias,
fast_zero_fill=fast_zero_fill,
)
# =================
# Output. [sq, b, h]
# =================
projection_output = self.proj(
context_layer, is_first_microbatch=is_first_microbatch
)
if self.return_bias:
attention_output, attention_bias = projection_output
else:
attention_output, attention_bias = projection_output, None
outputs = (attention_output,)
if self.return_bias:
outputs += (attention_bias,)
if self.input_layernorm and self.return_layernorm_output:
outputs += (layernorm_output,)
return outputs if len(outputs) > 1 else outputs[0]
| TransformerEngine-main | transformer_engine/pytorch/attention.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Utilities for debugging numerical issues with FP8"""
from typing import Tuple
import torch
from transformer_engine.common import recipe
_NUMERICS_DEBUG = False
def debug(enabled: bool = True) -> None:
"""Set FP8 debug mode"""
global _NUMERICS_DEBUG
_NUMERICS_DEBUG = enabled
def fp8_tensor_statistics(
tensor: torch.Tensor, fp8_format: str = "E4M3"
) -> Tuple[int, ...]:
"""Print FP8 tensor stats"""
fp8_format = fp8_format.upper()
assert fp8_format in (
"E4M3",
"E5M2",
), "fp8_format must be 'E4M3' or 'E5M2' for amax"
fmt = recipe.Format[fp8_format]
FP8_MAX = fmt.value.max_fwd
num_overflows = (tensor == FP8_MAX).sum().item()
num_underflows = (tensor == 0).sum().item()
return (num_underflows, num_overflows)
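# Illustrative usage sketch; `fp8_weight` below is a hypothetical FP8-quantized tensor:
#     underflows, overflows = fp8_tensor_statistics(fp8_weight, "E4M3")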
| TransformerEngine-main | transformer_engine/pytorch/numerics_debug.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""FP8 utilities for TransformerEngine"""
import os
from contextlib import contextmanager
from collections import deque
from typing import Callable, List, Optional, Dict, Any, Tuple, Union
import torch
import transformer_engine_extensions as tex
from transformer_engine.common.recipe import DelayedScaling, Format
from .constants import dist_group_type
from .utils import get_device_compute_capability
from .jit import jit_fuser
__all__ = ["fp8_autocast"]
def check_fp8_support() -> Tuple[bool, str]:
"""Return if fp8 support is available"""
if get_device_compute_capability() >= 9.0: # hopper and above
return True, ""
if get_device_compute_capability() < 8.9: # pre-ada
return False, "Device compute capability 8.9 or higher required for FP8 execution."
if tex.get_cublasLt_version() < 120103:
return False, "CublasLt version 12.1.3.x or higher required for FP8 execution on Ada."
if float(torch.version.cuda) < 12.1:
return False, "Cuda version 12.1 or higher required for FP8 execution on Ada."
return True, ""
def get_default_fp8_recipe() -> DelayedScaling:
"""FP8 recipe if not provided by user
Margin = 0, interval = 1, E4M3
"""
return DelayedScaling()
def get_fp8_te_dtype(
fp8_recipe: DelayedScaling, fprop_tensor: bool = True
) -> tex.DType:
"""Get fp8 data type according to recipe and tensor"""
if fp8_recipe.fp8_format == Format.E4M3 or (
fp8_recipe.fp8_format == Format.HYBRID and fprop_tensor
):
return tex.DType.kFloat8E4M3
return tex.DType.kFloat8E5M2
class FP8GlobalStateManager:
"""Class to keep track of and manipulate the global
FP8 state at different stages of execution.
"""
FP8_ENABLED = False
FP8_CALIBRATION = False
FP8_RECIPE = None
FP8_DISTRIBUTED_GROUP = None
IS_FIRST_FP8_MODULE = False
FP8_AUTOCAST_COUNTER = 0
FP8_CURRENT_CONTEXT_ID = 0
FP8_AUTOCAST_DEPTH = 0
global_fp8_buffer = {}
fp8_tensors_recompute_buffer = []
amax_forward_global_reduce_func = None
buffer_delete_key_fwd = None
buffer_delete_key_bwd = None
amax_reduce_handle_fwd = None
fp8_available = None
reason_for_no_fp8 = ""
dp_amax_reduce_interval = None
dp_amax_reduce_forward_idx = 0
dp_amax_reduce_backward_idx = 0
@classmethod
def is_fp8_available(cls) -> Tuple[bool, str]:
"""Return if fp8 support is available"""
if cls.fp8_available is None:
cls.fp8_available, cls.reason_for_no_fp8 = check_fp8_support()
return cls.fp8_available, cls.reason_for_no_fp8
@classmethod
def get_global_fp8_state_checkpoint(cls) -> Dict[str, Union[int, str]]:
"""Returns global fp8 state variables."""
        # Convert attributes to a dictionary so that restoring a checkpoint stays
        # backwards compatible if the set of global state variables changes later.
global_fp8_state = {}
global_fp8_state["FP8_AUTOCAST_COUNTER"] = cls.FP8_AUTOCAST_COUNTER
global_fp8_state["FP8_CURRENT_CONTEXT_ID"] = cls.FP8_CURRENT_CONTEXT_ID
global_fp8_state["FP8_AUTOCAST_DEPTH"] = cls.FP8_AUTOCAST_DEPTH
global_fp8_state["buffer_delete_key_fwd"] = cls.buffer_delete_key_fwd
global_fp8_state["buffer_delete_key_bwd"] = cls.buffer_delete_key_bwd
global_fp8_state["dp_amax_reduce_interval"] = cls.dp_amax_reduce_interval
global_fp8_state["dp_amax_reduce_forward_idx"] = cls.dp_amax_reduce_forward_idx
global_fp8_state["dp_amax_reduce_backward_idx"] = cls.dp_amax_reduce_backward_idx
return global_fp8_state
@classmethod
def set_global_fp8_state_checkpoint(cls, state: Dict[str, Union[int, str]]) -> None:
"""Sets global fp8 state variables."""
for k, v in state.items():
if hasattr(cls, k):
setattr(cls, k, v)
@classmethod
def get_global_fp8_buffer_checkpoint(cls) -> Dict[str, List[torch.Tensor]]:
"""Returns global fp8 amax buffer."""
return cls.global_fp8_buffer
@classmethod
def set_global_fp8_buffer_checkpoint(cls, buffer: Dict[str, List[torch.Tensor]]) -> None:
"""Sets global fp8 amax buffer."""
# Map all tensors back to GPU.
for k, v in buffer.items():
buffer[k] = [tensor.cuda() for tensor in v]
cls.global_fp8_buffer = buffer
@staticmethod
def get_meta_tensor_key(forward: bool = True) -> str:
"""Returns scaling key in `fp8_meta`."""
if forward:
return "scaling_fwd"
return "scaling_bwd"
@staticmethod
def get_buffer_position_key(forward: bool = True) -> str:
"""Returns module position key in `fp8_meta`."""
if forward:
return "global_fp8_buffer_pos_fwd"
return "global_fp8_buffer_pos_bwd"
@staticmethod
def get_autocast_key(forward: bool = True) -> str:
"""Returns module position key in `fp8_meta`."""
if forward:
return "autocast_id_fwd"
return "autocast_id_bwd"
@staticmethod
def get_amax_buffer_key(fp8_meta: Dict[str, Any], forward: bool = True) -> str:
"""Return a key in `_global_fp8_buffer` for the AMAX storage."""
if forward:
return f"FWD_AMAX_{fp8_meta['autocast_id_fwd']}"
return f"BWD_AMAX_{fp8_meta['autocast_id_bwd']}"
@classmethod
def get_amax_reduce_handle_fwd(cls) -> Union[bool, None]:
"""Return AMAX reduction wait handle of forward prop."""
return cls.amax_reduce_handle_fwd
@classmethod
def setup_amax_forward_global_reduce_func(cls, f: Callable) -> None:
"""Sets up the function to call during autocast exit."""
cls.amax_forward_global_reduce_func = f
@classmethod
def add_amax_to_global_buffer(cls, fp8_meta: Dict[str, Any], forward: bool = True) -> None:
"""Append 1D tensor `amax` to global buffer."""
buffer_key = cls.get_amax_buffer_key(fp8_meta, forward=forward)
fp8_meta_tensor_key = cls.get_meta_tensor_key(forward=forward)
buffer_position_key = cls.get_buffer_position_key(forward=forward)
if buffer_key not in cls.global_fp8_buffer:
cls.global_fp8_buffer[buffer_key] = [fp8_meta[fp8_meta_tensor_key].amax_history[0]]
else:
cls.global_fp8_buffer[buffer_key].append(
fp8_meta[fp8_meta_tensor_key].amax_history[0]
)
if buffer_position_key not in fp8_meta:
fp8_meta[buffer_position_key] = len(cls.global_fp8_buffer[buffer_key]) - 1
# Catch incorrect fp8_autocast usage.
assert fp8_meta[buffer_position_key] == len(cls.global_fp8_buffer[buffer_key]) - 1, \
"Same module is being invoked more than once inside an `fp8_autocast` " \
"region when using FP8 with amax reduction. This behavior is currently" \
" unsupported. For more details and correct usage, please see " \
"https://github.com/NVIDIA/TransformerEngine/pull/93."
@classmethod
def copy_amax_from_global_buffer(
cls, fp8_meta: Dict[str, Any], forward: bool = True
) -> None:
"""Populate current amax with the correct location from buffer."""
fp8_meta_tensor_key = cls.get_meta_tensor_key(forward=forward)
buffer_position_key = cls.get_buffer_position_key(forward=forward)
if buffer_position_key not in fp8_meta:
return
amax_buffer_key = cls.get_amax_buffer_key(fp8_meta, forward=forward)
assert amax_buffer_key in cls.global_fp8_buffer, "TE internal error."
fp8_meta[fp8_meta_tensor_key].amax_history[0] = cls.global_fp8_buffer[amax_buffer_key][
fp8_meta[buffer_position_key]
]
@classmethod
def set_amax_buffer_key_deletion(
cls, fp8_meta: Dict[str, Any], forward: bool = True
) -> None:
"""Delete this amax key from global buffer during autocast end."""
if cls.get_autocast_key(forward=forward) not in fp8_meta:
return
if forward:
cls.buffer_delete_key_fwd = cls.get_amax_buffer_key(fp8_meta, forward=forward)
else:
cls.buffer_delete_key_bwd = cls.get_amax_buffer_key(fp8_meta, forward=forward)
@classmethod
def delete_key_from_amax_buffer(cls, forward: bool = True) -> None:
"""Delete the key from global amax buffer."""
if forward:
if (
cls.buffer_delete_key_fwd is not None
and cls.buffer_delete_key_fwd in cls.global_fp8_buffer
):
del cls.global_fp8_buffer[cls.buffer_delete_key_fwd]
else:
if (
cls.buffer_delete_key_bwd is not None
and cls.buffer_delete_key_bwd in cls.global_fp8_buffer
):
del cls.global_fp8_buffer[cls.buffer_delete_key_bwd]
@classmethod
def get_fp8_context_id(cls) -> int:
"""Returns an ID for the current FP8 context."""
return cls.FP8_CURRENT_CONTEXT_ID
@classmethod
def set_fp8_context_id(cls, ctx_id: int) -> None:
"""Sets the current FP8 context."""
cls.FP8_CURRENT_CONTEXT_ID = ctx_id
@classmethod
def new_fp8_context_id(cls) -> int:
"""Returns global autocast counter as a proxy to be used
as the autocast ID for FP8 modules.
"""
return cls.FP8_AUTOCAST_COUNTER
@classmethod
def is_fp8_enabled(cls) -> bool:
"""Is FP8 enabled"""
return cls.FP8_ENABLED
@classmethod
def is_fp8_calibration(cls) -> bool:
"""Is FP8 calibration"""
return cls.FP8_CALIBRATION
@classmethod
def is_first_fp8_module(cls):
"""Returns `True` only the first time when called multiple
times from within the same `fp8_autocast` context.
"""
tmp = cls.IS_FIRST_FP8_MODULE
cls.IS_FIRST_FP8_MODULE = False
return tmp
@classmethod
def get_fp8_recipe(cls) -> DelayedScaling:
"""Return the fp8 recipe"""
return cls.FP8_RECIPE
@classmethod
def get_fp8_group(cls) -> Union[dist_group_type, None]:
"""Return the fp8 group for scale/amax comm"""
return cls.FP8_DISTRIBUTED_GROUP
@classmethod
def get_fp8_autocast_state(cls) -> Tuple[bool, bool, DelayedScaling, dist_group_type, bool]:
"""FP8 autocast state getter"""
return (
cls.FP8_ENABLED,
cls.FP8_CALIBRATION,
cls.FP8_RECIPE,
cls.FP8_DISTRIBUTED_GROUP,
cls.IS_FIRST_FP8_MODULE)
@classmethod
def set_fp8_autocast_state(
cls,
fp8_state: Tuple[bool, bool, DelayedScaling, dist_group_type, bool]
) -> None:
"""FP8 autocast state setter"""
(cls.FP8_ENABLED,
cls.FP8_CALIBRATION,
cls.FP8_RECIPE,
cls.FP8_DISTRIBUTED_GROUP,
cls.IS_FIRST_FP8_MODULE) = fp8_state
@staticmethod
def reduce_tensor_across_group_op_max(
tensor: torch.Tensor, group: dist_group_type, async_op: bool
) -> None:
"""Reduce tensor across given group."""
if torch.distributed.is_initialized():
wait_handle = torch.distributed.all_reduce(
tensor,
op=torch.distributed.ReduceOp.MAX,
group=group,
async_op=async_op,
)
return wait_handle
return None
@classmethod
def global_amax_reduction(
cls,
fp8_meta: Dict[str, Any],
tp_group: dist_group_type,
tp_size: int,
forward: bool = True,
) -> None:
"""Concatenate, reduce, and split amaxes in the global buffer."""
amax_buffer_key = cls.get_amax_buffer_key(fp8_meta, forward=forward)
# Key already deleted.
if amax_buffer_key not in cls.global_fp8_buffer:
return None
# Reduce AMAX in DP-domain at an interval.
if cls.dp_amax_reduce_interval is None:
cls.dp_amax_reduce_interval = int(os.getenv("NVTE_DP_AMAX_REDUCE_INTERVAL", "1"))
tp_amax_reduce = False
if forward:
if cls.dp_amax_reduce_forward_idx == 0:
reduce_group = fp8_meta["fp8_group"]
else:
tp_amax_reduce = True
cls.dp_amax_reduce_forward_idx = (
(cls.dp_amax_reduce_forward_idx + 1) % cls.dp_amax_reduce_interval)
else:
if cls.dp_amax_reduce_backward_idx == 0:
reduce_group = fp8_meta["fp8_group"]
else:
tp_amax_reduce = True
cls.dp_amax_reduce_backward_idx = (
(cls.dp_amax_reduce_backward_idx + 1) % cls.dp_amax_reduce_interval)
if tp_amax_reduce:
if tp_size > 1:
reduce_group = tp_group
else:
return None
chunk_sizes = [x.numel() for x in cls.global_fp8_buffer[amax_buffer_key]]
contiguous_amax = torch.cat(cls.global_fp8_buffer[amax_buffer_key])
wait_handle = cls.reduce_tensor_across_group_op_max(
contiguous_amax,
reduce_group,
fp8_meta["async_amax_reduction"],
)
cls.global_fp8_buffer[amax_buffer_key] = list(contiguous_amax.split(chunk_sizes))
return wait_handle
@classmethod
def fp8_autocast_enter(
cls,
enabled: bool = False,
calibrating: bool = False,
fp8_recipe: Optional[DelayedScaling] = None,
fp8_group: Optional[dist_group_type] = None,
) -> None:
"""Set state and tracking variables for entry into FP8 region."""
cls.FP8_ENABLED = enabled
cls.FP8_CALIBRATION = calibrating
cls.FP8_RECIPE = get_default_fp8_recipe() if fp8_recipe is None else fp8_recipe
cls.FP8_DISTRIBUTED_GROUP = fp8_group
if cls.FP8_AUTOCAST_DEPTH == 0:
cls.IS_FIRST_FP8_MODULE = True
cls.FP8_AUTOCAST_COUNTER += 1
cls.FP8_AUTOCAST_DEPTH += 1
if enabled:
fp8_available, reason_for_no_fp8 = cls.is_fp8_available()
assert fp8_available, reason_for_no_fp8
@classmethod
def fp8_autocast_exit(cls):
"""Set state and tracking variables for exit from FP8 region."""
cls.FP8_AUTOCAST_DEPTH -= 1
if cls.FP8_AUTOCAST_DEPTH == 0:
if callable(cls.amax_forward_global_reduce_func):
cls.amax_reduce_handle_fwd = cls.amax_forward_global_reduce_func() # pylint: disable=not-callable
cls.delete_key_from_amax_buffer(forward=True)
@classmethod
def copy_forward_fp8_meta_tensors_for_recompute(cls, fp8_meta: Dict[str, Any]) -> None:
"""Copy the scaling factors and amaxes for recompute forward phase
to ensure both forward steps are numerically same.
"""
buffer_position_key = "global_fp8_buffer_pos_fwd_recompute"
to_copy = [
fp8_meta["scaling_fwd"].amax_history.clone(),
fp8_meta["scaling_fwd"].scale.clone(),
fp8_meta["scaling_fwd"].scale_inv.clone(),
]
if buffer_position_key in fp8_meta:
cls.fp8_tensors_recompute_buffer[fp8_meta[buffer_position_key]].append(to_copy)
else:
if len(cls.fp8_tensors_recompute_buffer) == 0:
cls.fp8_tensors_recompute_buffer = [deque()]
else:
cls.fp8_tensors_recompute_buffer.append(deque())
cls.fp8_tensors_recompute_buffer[-1].append(to_copy)
fp8_meta[buffer_position_key] = len(cls.fp8_tensors_recompute_buffer) - 1
@classmethod
def get_old_fp8_meta_tensors_for_recompute(cls, fp8_meta: Dict[str, Any]) -> None:
"""Switch to the copied scaling factors and amaxes from phase
        1 forward for identical numerical outputs.
"""
# Store updated amaxes and scales from phase 1 post forward.
fp8_meta["updated_amax_history_fwd"] = fp8_meta["scaling_fwd"].amax_history
fp8_meta["updated_scale_fwd"] = fp8_meta["scaling_fwd"].scale
fp8_meta["updated_scale_inv_fwd"] = fp8_meta["scaling_fwd"].scale_inv
# Retrieve stashed amaxes and scales from phase 1 pre forward.
buffer_position_key = "global_fp8_buffer_pos_fwd_recompute"
stashed_fp8_meta = cls.fp8_tensors_recompute_buffer[
fp8_meta[buffer_position_key]
].popleft()
# Replace amaxes and scales with stashed values for phase 2 forward
fp8_meta["scaling_fwd"].amax_history = stashed_fp8_meta[0]
fp8_meta["scaling_fwd"].scale = stashed_fp8_meta[1]
fp8_meta["scaling_fwd"].scale_inv = stashed_fp8_meta[2]
@staticmethod
def restore_fp8_meta_tensors(fp8_meta: Dict[str, Any]) -> None:
"""Restore latest scaling factors and amaxes after recompute forward run."""
fp8_meta["scaling_fwd"].amax_history = fp8_meta["updated_amax_history_fwd"]
fp8_meta["scaling_fwd"].scale = fp8_meta["updated_scale_fwd"]
fp8_meta["scaling_fwd"].scale_inv = fp8_meta["updated_scale_inv_fwd"]
@contextmanager
def fp8_autocast(
enabled: bool = False,
calibrating: bool = False,
fp8_recipe: Optional[DelayedScaling] = None,
fp8_group: Optional[dist_group_type] = None,
) -> None:
"""
Context manager for FP8 usage.
.. code-block:: python
with fp8_autocast(enabled=True):
out = model(inp)
.. note::
Support for FP8 in the Linear layer of Transformer Engine is currently limited to tensors
with shapes where both dimensions are divisible by 16. In terms of the input to the full
Transformer network, this typically requires padding sequence length to be multiple of 16.
.. note::
When :attr:`fp8_recipe.reduce_amax==True`, any module must not be invoked more than once
inside a single `fp8_autocast` region. This is unsupported behavior because the amax
reduction is handled during the exit of the `fp8_autocast` context. Calling the same
module more than once inside an `fp8_autocast` region overrides the amax tensors
before reduction can occur.
Parameters
----------
enabled: bool, default = `False`
whether or not to enable fp8
calibrating: bool, default = `False`
calibration mode allows collecting statistics such as amax and scale
data of fp8 tensors even when executing without fp8 enabled. This is
useful for saving an inference ready fp8 checkpoint while training
using a higher precision.
fp8_recipe: recipe.DelayedScaling, default = `None`
recipe used for FP8 training.
fp8_group: torch._C._distributed_c10d.ProcessGroup, default = `None`
distributed group over which amaxes for the fp8 tensors
are reduced at the end of each training step.
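    A custom recipe can also be supplied. The values below are illustrative only;
    see `DelayedScaling` for all available options.
    .. code-block:: python
        from transformer_engine.common.recipe import DelayedScaling, Format
        fp8_recipe = DelayedScaling(fp8_format=Format.HYBRID,
                                    amax_history_len=16,
                                    amax_compute_algo="max")
        with fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
            out = model(inp)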
"""
try:
fp8_state = FP8GlobalStateManager.get_fp8_autocast_state()
FP8GlobalStateManager.fp8_autocast_enter(enabled, calibrating, fp8_recipe, fp8_group)
yield
finally:
FP8GlobalStateManager.set_fp8_autocast_state(fp8_state) # pylint: disable=used-before-assignment
FP8GlobalStateManager.fp8_autocast_exit()
def _update_amax_history(amax_history: torch.Tensor) -> torch.Tensor:
"""Update amax history and set next amax to zero."""
if amax_history.shape[0] > 1:
amax_history = torch.roll(amax_history, -1, 0)
amax_history[0].fill_(0.0)
return amax_history
@jit_fuser
def _default_get_amax(
amax_history: torch.Tensor,
amax_compute_algo: str,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Default function to obtain amax from history."""
if amax_compute_algo == "max":
amax = torch.max(amax_history, dim=0).values
else: # amax_compute_algo == "most_recent"
amax = amax_history[0].clone()
amax_history = _update_amax_history(amax_history)
return amax_history, amax
@jit_fuser
def _default_sf_compute(
amax: torch.Tensor,
scale: torch.Tensor,
fp8_max: float,
margin: int,
) -> torch.Tensor:
"""Default function to convert amax to scaling factor."""
exp = torch.floor(torch.log2(fp8_max / amax)) - margin
sf = torch.round(torch.pow(2, torch.abs(exp)))
sf = torch.where(amax > 0.0, sf, scale)
sf = torch.where(torch.isfinite(amax), sf, scale)
sf = torch.where(exp < 0, 1 / sf, sf)
return sf
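# Worked example (illustrative numbers, not taken from the library): with
# fp8_max = 448 (the E4M3 maximum), amax = 0.5 and margin = 0,
#     exp = floor(log2(448 / 0.5)) - 0 = 9
#     sf  = round(2 ** 9)            = 512
# amax is positive and finite and exp >= 0, so the scale stays at 512;
# casting multiplies by 512 (0.5 * 512 = 256 <= 448) and dequantization
# later uses scale_inv = 1 / 512.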
@jit_fuser
def _compute_scaling_factor_inverse(
scale: torch.Tensor,
scale_inv: torch.Tensor,
non_weight_mask: torch.Tensor,
update_weight_scale_inv: bool,
) -> torch.Tensor:
"""Compute inverse of scaling factor."""
if update_weight_scale_inv:
return 1.0 / scale
return torch.where(non_weight_mask, 1.0 / scale, scale_inv)
@jit_fuser
def _fused_amax_and_scale_update(
amax_history: torch.Tensor,
scale: torch.Tensor,
scale_inv: torch.Tensor,
fp8_max: float,
margin: int,
amax_compute_algo: str,
non_weight_mask: torch.Tensor,
update_weight_scale_inv: bool,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Amax to scale conversion."""
# Get amax from history.
amax_history, amax = _default_get_amax(
amax_history,
amax_compute_algo,
)
# Calculate new scaling factor.
scale = _default_sf_compute(
amax,
scale,
fp8_max,
margin,
)
# Calculate new inverse of scaling factor.
scale_inv = _compute_scaling_factor_inverse(
scale,
scale_inv,
non_weight_mask,
update_weight_scale_inv,
)
return amax_history, scale, scale_inv
def _compute_amax(
amax_history: torch.Tensor,
recipe: DelayedScaling,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Obtain the amax from the history."""
if callable(recipe.amax_compute_algo):
amax = recipe.amax_compute_algo(amax_history)
amax_history = _update_amax_history(amax_history)
return amax_history, amax
return _default_get_amax(
amax_history,
recipe.amax_compute_algo,
)
def _compute_scaling_factor(
amax: torch.Tensor,
scale: torch.Tensor,
fp8_max: float,
recipe: DelayedScaling,
) -> torch.Tensor:
"""Convert amax to scaling factor."""
if recipe.scaling_factor_compute_algo is None:
return _default_sf_compute(
amax,
scale,
fp8_max,
recipe.margin,
)
return recipe.scaling_factor_compute_algo(amax, scale, fp8_max, recipe)
def amax_and_scale_update(
fp8_meta: Dict[str, Any],
fwd_update: bool,
update_weight_scale_inv: bool = True,
) -> None:
"""Updates fp8 amaxes/scales for fwd | bwd."""
amax_compute = fp8_meta["recipe"].amax_compute_algo
sf_compute = fp8_meta["recipe"].scaling_factor_compute_algo
fp8_meta_tensor_key = "scaling_fwd" if fwd_update else "scaling_bwd"
fp8_max_key = "fp8_max_fwd" if fwd_update else "fp8_max_bwd"
if not callable(amax_compute) and sf_compute is None:
(
fp8_meta[fp8_meta_tensor_key].amax_history,
fp8_meta[fp8_meta_tensor_key].scale,
fp8_meta[fp8_meta_tensor_key].scale_inv,
) = _fused_amax_and_scale_update(
fp8_meta[fp8_meta_tensor_key].amax_history,
fp8_meta[fp8_meta_tensor_key].scale,
fp8_meta[fp8_meta_tensor_key].scale_inv,
fp8_meta[fp8_max_key],
fp8_meta["recipe"].margin,
fp8_meta["recipe"].amax_compute_algo,
fp8_meta[fp8_meta_tensor_key + "_non_weight_mask"],
update_weight_scale_inv,
)
else:
fp8_meta[fp8_meta_tensor_key].amax_history, amax = _compute_amax(
fp8_meta[fp8_meta_tensor_key].amax_history,
fp8_meta["recipe"],
)
fp8_meta[fp8_meta_tensor_key].scale = _compute_scaling_factor(
amax,
fp8_meta[fp8_meta_tensor_key].scale,
fp8_meta[fp8_max_key],
fp8_meta["recipe"],
)
fp8_meta[fp8_meta_tensor_key].scale_inv = _compute_scaling_factor_inverse(
fp8_meta[fp8_meta_tensor_key].scale,
fp8_meta[fp8_meta_tensor_key].scale_inv,
fp8_meta[fp8_meta_tensor_key + "_non_weight_mask"],
update_weight_scale_inv,
)
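# Sketch of how this update is typically driven once per training step
# (assumed call sites; the surrounding bookkeeping lives elsewhere):
#
#     amax_and_scale_update(fp8_meta, fwd_update=True)   # "scaling_fwd" tensors
#     amax_and_scale_update(fp8_meta, fwd_update=False)  # "scaling_bwd" tensors
#
# Passing update_weight_scale_inv=False leaves the weight entries of scale_inv
# (those masked out by the *_non_weight_mask tensors) untouched.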
| TransformerEngine-main | transformer_engine/pytorch/fp8.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""NVFuser functions and JIT utilities"""
import os
from typing import Callable, Optional, Tuple
import torch
jit_fuser = torch.jit.script
if torch.__version__ >= "2" and bool(int(os.getenv("NVTE_TORCH_COMPILE", "1"))):
jit_fuser = torch.compile
# Decorator to disable Torch Dynamo
# See: https://github.com/NVIDIA/TransformerEngine/issues/308
no_torch_dynamo = lambda func: func
if torch.__version__ >= "2":
import torch._dynamo
no_torch_dynamo = torch._dynamo.disable
def set_jit_fusion_options() -> None:
"""Set PyTorch JIT layer fusion options."""
# flags required to enable jit fusion kernels
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10):
# nvfuser
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(True)
torch._C._debug_set_autodiff_subgraph_inlining(False)
else:
# legacy pytorch fuser
torch._C._jit_set_profiling_mode(False)
torch._C._jit_set_profiling_executor(False)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
@jit_fuser
def bias_gelu_fused_(inp: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
"""Bias-GeLU fused"""
x = inp + bias
return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
@jit_fuser
def gelu_fused_(inp: torch.Tensor) -> torch.Tensor:
"""
    GeLU fused; this is a copy of bias_gelu_fused because jit fusion doesn't allow conditioning.
"""
x = inp
return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
# gradient of tanh approximation of gelu
# gradient of actual gelu is:
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
@jit_fuser
def bgrad_dgelu_fused_(
grad_output: torch.Tensor, inp: torch.Tensor, bias: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Bgrad-Dgelu fused"""
x = inp + bias
tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
ff = 0.5 * x * (
(1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)
) + 0.5 * (1 + tanh_out)
dgelu = ff * grad_output
bgrad = dgelu.sum(dim=0)
return bgrad, dgelu
@jit_fuser
def dgelu_fused_(
grad_output: torch.Tensor, inp: torch.Tensor
) -> torch.Tensor:
"""
    Dgelu fused; this is a copy of bgrad_dgelu_fused_ because jit fusion doesn't allow conditioning.
"""
x = inp
tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
# sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
ff = 0.5 * x * (
(1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)
) + 0.5 * (1 + tanh_out)
dgelu = ff * grad_output
return dgelu
def bias_gelu_fused(inp: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
"""Disable native AMP for bias_gelu_fused_"""
with torch.cuda.amp.autocast(enabled=False):
if bias.numel() != 0:
return bias_gelu_fused_(inp, bias)
return gelu_fused_(inp)
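# Illustrative usage (assumed shapes/devices, not from the source):
#
#     inp = torch.randn(8, 1024, device="cuda")
#     bias = torch.randn(1024, device="cuda")
#     out = bias_gelu_fused(inp, bias)                            # bias + GELU
#     out = bias_gelu_fused(inp, torch.empty(0, device="cuda"))   # GELU only
#
# An empty bias tensor routes to gelu_fused_, matching the numel() check above.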
def bgrad_dgelu_fused(
grad_output: torch.Tensor, inp: torch.Tensor, bias: torch.Tensor
) -> Tuple[Optional[torch.Tensor], torch.Tensor]:
"""Disable native AMP for `bgrad_dgelu_fused_`"""
with torch.cuda.amp.autocast(enabled=False):
if bias.numel() != 0:
return bgrad_dgelu_fused_(grad_output, inp, bias)
return None, dgelu_fused_(grad_output, inp)
def bias_dropout_add(
x: torch.Tensor,
bias: torch.Tensor,
residual: torch.Tensor,
prob: float,
training: bool,
) -> torch.Tensor:
"""dropout(inp + bias) + residual"""
out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
out = residual + out
return out
def get_bias_dropout_add(training: bool) -> Callable:
"""bias_dropout_add based on training or not"""
def _bias_dropout_add(x, bias, residual, prob):
return bias_dropout_add(x, bias, residual, prob, training)
return _bias_dropout_add
@torch.jit.script
def bias_dropout_add_fused_train_(
x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float
) -> torch.Tensor:
"""Jit fused bias_dropout_add for training"""
return bias_dropout_add(x, bias, residual, prob, True)
def bias_dropout_add_fused_train(
x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float
) -> torch.Tensor:
"""Disable native AMP and enable grad for BDA"""
with torch.enable_grad():
with torch.cuda.amp.autocast(enabled=False):
return bias_dropout_add_fused_train_(x, bias, residual, prob)
@torch.jit.script
def bias_dropout_add_fused_inference_(
x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float
) -> torch.Tensor:
"""Jit fused bias_dropout_add for inference"""
return bias_dropout_add(x, bias, residual, prob, False)
def bias_dropout_add_fused_inference(
x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float
) -> torch.Tensor:
"""Disable native AMP for BDA"""
with torch.cuda.amp.autocast(enabled=False):
return bias_dropout_add_fused_inference_(x, bias, residual, prob)
def warmup_jit_bias_dropout_add(
hidden_size: int, dtype: torch.dtype, seq_length: int, micro_batch_size: int
) -> None:
"""Compile BDA JIT function before the main training steps"""
# Save cuda RNG state to ensure warmup does not affect reproducibility.
rng_state = torch.cuda.get_rng_state()
inp = torch.rand(
(seq_length, micro_batch_size, hidden_size), dtype=dtype, device="cuda"
)
residual = torch.rand(
(seq_length, micro_batch_size, hidden_size), dtype=dtype, device="cuda"
)
bias = torch.rand((hidden_size), dtype=dtype, device="cuda")
dropout_rate = 0.1
# Warmup JIT fusions with the input grad_enable state of both forward
# prop and recomputation
for input_grad, bias_grad, residual_grad in zip(
[False, True], [True, True], [True, True]
):
inp.requires_grad = input_grad
bias.requires_grad = bias_grad
residual.requires_grad = residual_grad
for _ in range(5):
output = bias_dropout_add_fused_train(inp, bias, residual, dropout_rate)
del bias, inp, residual, output
torch.cuda.empty_cache()
torch.cuda.set_rng_state(rng_state)
def warmup_jit_bias_dropout_add_all_dtypes(
hidden_size: int, seq_length: int, micro_batch_size: int
) -> None:
"""Call `warmup_jit_bias_dropout_add` for all training dtypes"""
for dtype in [torch.float32, torch.bfloat16, torch.float16]:
warmup_jit_bias_dropout_add(hidden_size, dtype, seq_length, micro_batch_size)
def warmup_jit_bias_gelu(
ffn_hidden_size_per_partition: int,
dtype: torch.dtype,
seq_length: int,
micro_batch_size: int,
) -> None:
"""Compile bias-gelu JIT function before the main training steps"""
# Save cuda RNG state to ensure warmup does not affect reproducibility.
rng_state = torch.cuda.get_rng_state()
bias = torch.rand(ffn_hidden_size_per_partition, dtype=dtype, device="cuda")
inp = torch.rand(
(seq_length * micro_batch_size, ffn_hidden_size_per_partition),
dtype=dtype,
device="cuda",
)
# Warmup JIT fusions with the input grad_enable state of both forward
# prop and recomputation
for bias_grad, input_grad in zip([True, True], [False, True]):
bias.requires_grad, inp.requires_grad = bias_grad, input_grad
for _ in range(5):
_ = bias_gelu_fused_(inp, bias)
_ = gelu_fused_(inp)
del bias, inp
torch.cuda.empty_cache()
torch.cuda.set_rng_state(rng_state)
def warmup_jit_bias_gelu_all_dtypes(
ffn_hidden_size: int, seq_length: int, micro_batch_size: int
) -> None:
"""Call `warmup_jit_bias_gelu` for all training dtypes"""
for dtype in [torch.float32, torch.bfloat16, torch.float16]:
warmup_jit_bias_gelu(ffn_hidden_size, dtype, seq_length, micro_batch_size)
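# Hedged warm-up sketch (assumed sizes) run before the first training step so
# that the forward and recompute phases hit the same compiled kernels:
#
#     set_jit_fusion_options()
#     warmup_jit_bias_gelu_all_dtypes(4096, seq_length=2048, micro_batch_size=2)
#     warmup_jit_bias_dropout_add_all_dtypes(1024, seq_length=2048,
#                                            micro_batch_size=2)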
| TransformerEngine-main | transformer_engine/pytorch/jit.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Enums for e2e transformer"""
import torch
import torch.distributed
import transformer_engine_extensions as tex
"""
This is a map: torch.dtype -> int
Used for passing dtypes into cuda
extension. Has one to one mapping
with enum in transformer_engine.h
"""
TE_DType = {
torch.uint8: tex.DType.kByte,
torch.int32: tex.DType.kInt32,
torch.float32: tex.DType.kFloat32,
torch.half: tex.DType.kFloat16,
torch.bfloat16: tex.DType.kBFloat16,
}
AttnMaskTypes = ("causal", "padding", "no_mask")
AttnTypes = ("self", "cross")
AttnBiasTypes = ("pre_scale_bias", "post_scale_bias", "no_bias")
LayerTypes = ("encoder", "decoder")
GemmParallelModes = ("row", "column", None)
dist_group_type = torch.distributed.ProcessGroup
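# Example lookup (illustrative): TE_DType[torch.bfloat16] evaluates to
# tex.DType.kBFloat16, the enum value the CUDA extension expects.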
| TransformerEngine-main | transformer_engine/pytorch/constants.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Transformer Engine bindings for pyTorch"""
from .module import LayerNormLinear
from .module import Linear
from .module import LayerNormMLP
from .module import LayerNorm
from .module import RMSNorm
from .attention import DotProductAttention
from .attention import MultiheadAttention
from .transformer import TransformerLayer
from .fp8 import fp8_autocast
from .export import onnx_export
from .distributed import checkpoint
# Register custom op symbolic ONNX functions
from .te_onnx_extensions import (
onnx_cast_to_fp8,
onnx_cast_from_fp8,
onnx_fp8_gelu,
onnx_fp8_relu,
onnx_te_gemm,
onnx_layernorm_fwd_fp8,
onnx_layernorm_fwd,
onnx_rmsnorm_fwd,
onnx_rmsnorm_fwd_fp8
)
| TransformerEngine-main | transformer_engine/pytorch/__init__.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Export utilities for TransformerEngine"""
from contextlib import contextmanager
_IN_ONNX_EXPORT_MODE = False
@contextmanager
def onnx_export(
enabled: bool = False,
) -> None:
"""
Context manager for exporting to ONNX.
.. code-block:: python
with onnx_export(enabled=True):
torch.onnx.export(model)
Parameters
----------
enabled: bool, default = `False`
whether or not to enable export
"""
global _IN_ONNX_EXPORT_MODE
    onnx_export_state = _IN_ONNX_EXPORT_MODE
try:
_IN_ONNX_EXPORT_MODE = enabled
yield
finally:
_IN_ONNX_EXPORT_MODE = onnx_export_state
def is_in_onnx_export_mode() -> bool:
"""Returns True if onnx export mode is enabled, False otherwise."""
return _IN_ONNX_EXPORT_MODE
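# Illustrative export sketch (assumed model/inputs; extends the docstring above):
#
#     with onnx_export(enabled=True):
#         torch.onnx.export(model, (inp,), "model.onnx")
#
# Inside the context, is_in_onnx_export_mode() returns True, which module code
# can query to pick ONNX-friendly paths.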
| TransformerEngine-main | transformer_engine/pytorch/export.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""
ONNX symbolic functions for Transformer Engine
Warnings of the type pasted below are a known Pytorch issue
(https://github.com/pytorch/pytorch/issues/81693):
tests/test_onnx_export.py::test_export_cast_ops[112]
/opt/conda/lib/python3.8/site-packages/torch/onnx/utils.py:649:
UserWarning: The shape inference of trt::TRT_FP8DequantizeLinear type is missing,
so it may result in wrong shape inference for the exported graph.
Please consider adding it in symbolic function. (Triggered internally at
/opt/pytorch/pytorch/torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1880.)
_C._jit_pass_onnx_graph_shape_type_inference(
Scale tensors are treated as lists ("fs") instead of tensors ("v") because we need to access
specific entries using the index passed as `fp8_tensor`. If you fail to do this you will get
the following error when accessing a specific scale element (e.g. `scale_inv[fp8_tensor]`):
TypeError: 'torch._C.Value' object is not subscriptable
"""
import torch
from torch.onnx import symbolic_helper, register_custom_op_symbolic, _type_utils
import torch._C._onnx as _C_onnx
# Monkey-patch graph manipulation methods on Graph, used for the ONNX symbolics
from torch.onnx._internal import jit_utils
import transformer_engine_extensions as tex
# This file registers custom op symbolic ONNX functions and does not export any symbols.
__all__ = []
# Custom ops spec version
VER = 1
UNSPECIFIED_TYPE = -1
def make_op_name(op_name: str) -> str:
"""custom op name"""
return "trt::" + op_name
def get_TensorProtoDataType(t):
"""Return the _C_onnx.TensorProtoDataType of the input tensor"""
try:
return {
"Float": _C_onnx.TensorProtoDataType.FLOAT,
"Half": _C_onnx.TensorProtoDataType.FLOAT16,
"BFloat16": _C_onnx.TensorProtoDataType.BFLOAT16,
}[t.type().scalarType()]
except KeyError as e:
raise TypeError(f"Onnx export for dtype {t.type().scalarType()} not supported.") from e
def is_dtype_fp32(t):
"""Check fp32 dtype"""
return t.type().scalarType() == "Float"
def is_dtype_fp16(t):
"""Check fp16 dtype"""
return t.type().scalarType() == "Half"
def is_dtype_bf16(t):
"""Check bf16 dtype"""
return t.type().scalarType() == "BFloat16"
def quantize(g, inputs, scale_inv, fp8_tensor):
"""Helper Function for Quantization"""
output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs)
# Q inputs are currently constrained to FP32 due to a similar limitation in ORT
# custom ops, so cast the input if needed.
if not is_dtype_fp32(inputs):
inputs = g.op("Cast", inputs, to_i=_C_onnx.TensorProtoDataType.FLOAT)
scale = g.op("Constant", value_t=torch.tensor(scale_inv[fp8_tensor]))
q_op = g.op(
make_op_name("TRT_FP8QuantizeLinear"), inputs, scale).setType(
inputs.type().with_dtype(torch.uint8).with_sizes(output_shape))
return q_op
def dequantize(g, inputs, scale_inv, fp8_tensor, otype):
"""Helper Function for Dequantization"""
output_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs)
scale = g.op("Constant", value_t=torch.tensor(scale_inv[fp8_tensor]))
out = g.op(make_op_name("TRT_FP8DequantizeLinear"), inputs, scale).setType(
inputs.type().with_dtype(torch.float32).with_sizes(output_shape))
# DQ outputs are currently constrained to FP32 due to a similar limitation in ORT
# custom ops, so cast the output if needed.
if otype == int(tex.DType.kFloat16):
out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.FLOAT16)
elif otype == int(tex.DType.kBFloat16):
out = g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.BFLOAT16)
return out
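# Together, quantize/dequantize emit the Q/DQ pattern used throughout this file:
# an optional Cast to FP32, a Constant holding scale_inv[fp8_tensor], the custom
# trt::TRT_FP8QuantizeLinear / trt::TRT_FP8DequantizeLinear node, and (for DQ)
# an optional Cast back to FP16/BF16 depending on `otype`.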
def compute_in_fp32(g, inp, subgraph, *args, **kwargs):
"""Wrap subgraph with casts to/from FP32 so that its precision is FP32.
If `inp` data type is not FP32, add a cast of `inp` to FP32 and feed that into `subgraph`;
    then cast the subgraph's output back to `inp`'s data type.
"""
inp_dtype = get_TensorProtoDataType(inp)
is_fp32 = inp_dtype == _type_utils.JitScalarType.FLOAT
if not is_fp32:
inp = g.op("Cast", inp, to_i=_C_onnx.TensorProtoDataType.FLOAT)
sg_out = subgraph(g, inp, *args, **kwargs)
if not is_fp32:
sg_out = g.op("Cast", sg_out, to_i=inp_dtype)
return sg_out
@symbolic_helper.parse_args("v", "v", "v", "fs", "i", "i")
def onnx_cast_to_fp8(g, inputs, scale, amax, scale_inv, fp8_tensor, otype):
"""ONNX graph for cast_to_fp8"""
# pylint: disable=unused-argument
return quantize(g, inputs, scale_inv, fp8_tensor)
@symbolic_helper.parse_args("v", "fs", "i", "i", "i")
def onnx_cast_from_fp8(g, inputs, scale_inv, fp8_tensor, itype, otype):
"""ONNX graph for cast_from_fp8"""
# pylint: disable=unused-argument
return dequantize(g, inputs, scale_inv, fp8_tensor, otype)
@symbolic_helper.parse_args("v", "v", "v", "fs", "i", "i")
def onnx_fp8_gelu(g, inputs, scale, amax, scale_inv, fp8_tensor, otype):
"""ONNX graph for fp8_gelu"""
# pylint: disable=unused-argument
# TE computes GELU using float32 precision so wrap the GELU subgraph with
# conversion to/from float32.
gelu = compute_in_fp32(g, inputs, torch.onnx.symbolic_opset9.gelu, "tanh")
if scale_inv:
gelu = quantize(g, gelu, scale_inv, fp8_tensor)
return gelu
@symbolic_helper.parse_args("v", "v", "v", "fs", "i", "i")
def onnx_fp8_relu(g, inputs, scale, amax, scale_inv, fp8_tensor, otype):
"""ONNX graph for fp8_relu"""
# pylint: disable=unused-argument
relu = compute_in_fp32(g, inputs, torch.onnx.symbolic_opset9.relu)
if scale_inv:
relu = quantize(g, relu, scale_inv, fp8_tensor)
return relu
@symbolic_helper.parse_args("v", "i")
def onnx_swiglu(g: jit_utils.GraphContext, inp, dim):
"""ONNX graph for swiglu"""
dim_size = symbolic_helper._get_tensor_dim_size(inp, dim)
if dim_size is not None:
assert dim_size % 2 == 0
first, second = g.op("Split", inp, axis_i=dim, outputs=2)
return g.op("Mul", g.op("Sigmoid", first), second)
@symbolic_helper.parse_args("v", "v", "v", "fs", "i", "i")
def onnx_fp8_swiglu(g, inputs, scale, amax, scale_inv, fp8_tensor, otype):
"""ONNX graph for fp8_swiglu"""
# pylint: disable=unused-argument
swiglu = compute_in_fp32(g, inputs, onnx_swiglu, 1)
if scale_inv:
swiglu = quantize(g, swiglu, scale_inv, fp8_tensor)
return swiglu
@symbolic_helper.parse_args("v", "i")
def onnx_reglu(g: jit_utils.GraphContext, inp, dim):
"""ONNX graph for reglu"""
dim_size = symbolic_helper._get_tensor_dim_size(inp, dim)
if dim_size is not None:
assert dim_size % 2 == 0
first, second = g.op("Split", inp, axis_i=dim, outputs=2)
return g.op("Mul", g.op("Relu", first), second)
@symbolic_helper.parse_args("v", "v", "v", "fs", "i", "i")
def onnx_fp8_reglu(g, inputs, scale, amax, scale_inv, fp8_tensor, otype):
"""ONNX graph for fp8_reglu"""
# pylint: disable=unused-argument
reglu = compute_in_fp32(g, inputs, onnx_reglu, 1)
if scale_inv:
reglu = quantize(g, reglu, scale_inv, fp8_tensor)
return reglu
@symbolic_helper.parse_args("v", "i")
def onnx_geglu(g: jit_utils.GraphContext, inp, dim):
"""ONNX graph for geglu"""
dim_size = symbolic_helper._get_tensor_dim_size(inp, dim)
if dim_size is not None:
assert dim_size % 2 == 0
first, second = g.op("Split", inp, axis_i=dim, outputs=2)
first_gelu = torch.onnx.symbolic_opset9.gelu(g, first, "tanh")
return g.op("Mul", first_gelu, second)
@symbolic_helper.parse_args("v", "v", "v", "fs", "i", "i")
def onnx_fp8_geglu(g, inputs, scale, amax, scale_inv, fp8_tensor, otype):
"""ONNX graph for fp8_geglu"""
# pylint: disable=unused-argument
geglu = compute_in_fp32(g, inputs, onnx_geglu, 1)
if scale_inv:
geglu = quantize(g, geglu, scale_inv, fp8_tensor)
return geglu
@symbolic_helper.parse_args("v", "fs", "i", "i", "i",
"v", "fs", "i", "i", "i",
"v", "fs", "i", "fs", "v", "i", "v", "i",
"v", "i", "i", "i")
def onnx_te_gemm(
g,
weight,
weight_scale_inverse,
weight_fp8_tensor,
weight_type,
trans_weight,
inputs,
input_scale_inverse,
input_fp8_tensor,
input_type,
trans_input,
out,
out_scale,
out_type,
out_amax,
bias,
bias_type,
pre_gelu_out,
grad,
workspace,
workspaceSize,
accumulate,
use_split_accumulator):
"""ONNX graph for te_gemm"""
# pylint: disable=unused-argument
is_fp16 = is_dtype_fp16(inputs)
is_bf16 = is_dtype_bf16(inputs)
if input_type == int(tex.DType.kFloat8E4M3):
inputs = dequantize(g, inputs, input_scale_inverse, input_fp8_tensor, out_type)
if weight_type == int(tex.DType.kFloat8E4M3):
weight = dequantize(g, weight, weight_scale_inverse, weight_fp8_tensor, out_type)
empty_tensor_size = [0]
bias_empty = torch.onnx.symbolic_helper._get_tensor_sizes(bias) == empty_tensor_size
pre_gelu_out_empty = torch.onnx.symbolic_helper._get_tensor_sizes(pre_gelu_out) \
== empty_tensor_size
if not bias_empty:
output = g.op("Gemm", inputs, weight, bias, transA_i=trans_input, transB_i=trans_weight)
else:
output = g.op("Gemm", inputs, weight, transA_i=trans_input, transB_i=trans_weight)
if not bias_empty:
if not pre_gelu_out_empty:
# TE computes GELU using float32 precision so wrap the GELU subgraph with
# conversion to/from float32.
output = compute_in_fp32(g, output, torch.onnx.symbolic_opset9.gelu, "tanh")
else:
if is_fp16:
output = g.op("Cast", output, to_i=_C_onnx.TensorProtoDataType.FLOAT16)
elif is_bf16:
output = g.op("Cast", output, to_i=_C_onnx.TensorProtoDataType.BFLOAT16)
return output
def _ones_like(g, inp, dtype):
"""Returns a tensor filled with the scalar value 1, with the same size as input and
with dtype data-type"""
shape = g.op("Shape", inp)
    # Work around an ONNX spec gap: ConstantOfShape accepts all data types except BF16.
    # As a workaround, create a ConstantOfShape with type FP32 and then add a Cast to BF16.
is_bf16 = dtype == torch.bfloat16
one = g.op("ConstantOfShape", shape, value_t=torch.tensor([1],
dtype=torch.float32 if is_bf16 else dtype))
if is_bf16:
one = g.op("Cast", one, to_i=_C_onnx.TensorProtoDataType.BFLOAT16)
return one
@symbolic_helper.parse_args("v", "v", "v", "f", "v", "v", "fs", "i", "i", "b")
def onnx_layernorm_fwd_fp8(g, inputs, weight, bias, eps, scale, amax,
scale_inv, fp8_tensor, otype, zero_centered_gamma):
"""ONNX graph for layernorm_fwd_fp8"""
# pylint: disable=unused-argument
inp_dtype = get_TensorProtoDataType(inputs)
if inp_dtype != get_TensorProtoDataType(weight):
weight = g.op("Cast", weight, to_i=inp_dtype)
if inp_dtype != get_TensorProtoDataType(bias):
bias = g.op("Cast", bias, to_i=inp_dtype)
ln = onnx_layernorm_fwd(g, inputs, weight, bias, eps, zero_centered_gamma)
fp8_ln = quantize(g, ln, scale_inv, fp8_tensor)
return fp8_ln
@symbolic_helper.parse_args("v", "v", "v", "f", "b")
def onnx_layernorm_fwd(g, inputs, weight, bias, eps, zero_centered_gamma):
"""ONNX graph for layernorm_fwd"""
# pylint: disable=unused-argument
normalized_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs)
if normalized_shape is None:
ndim = torch.onnx.symbolic_helper._get_tensor_rank(inputs)
assert ndim is not None
normalized_shape = list(range(0, ndim))
        # Normalize over all dims except dim 0, so drop dim 0 from normalized_shape
normalized_shape = normalized_shape[1:]
if zero_centered_gamma:
inputs_dtype = inputs.type().dtype()
one = _ones_like(g, weight, inputs_dtype)
weight = g.op("Add", weight, one)
axis = -len(normalized_shape)
ln = g.op(
"LayerNormalization",
inputs,
weight,
bias,
epsilon_f=eps,
axis_i=axis,
# This sets the LN compute precision - use FP32 always as does TE.
stash_type_i=_C_onnx.TensorProtoDataType.FLOAT,
)
return ln
@symbolic_helper.parse_args("v", "v", "f", "v", "v", "fs", "i", "i", "b")
def onnx_rmsnorm_fwd_fp8(g, inputs, weight, eps, scale, amax,
scale_inv, fp8_tensor, otype, zero_centered_gamma):
"""ONNX graph for rmsnorm_fwd_fp8"""
# pylint: disable=unused-argument
inp_dtype = get_TensorProtoDataType(inputs)
if inp_dtype != get_TensorProtoDataType(weight):
weight = g.op("Cast", weight, to_i=inp_dtype)
ln = onnx_rmsnorm_fwd(g, inputs, weight, eps, zero_centered_gamma)
fp8_ln = quantize(g, ln, scale_inv, fp8_tensor)
return fp8_ln
@symbolic_helper.parse_args("v", "v", "f", "b")
def onnx_rmsnorm_fwd(g, inputs, weight, eps, zero_centered_gamma):
"""ONNX graph for rmsnorm_fwd"""
# pylint: disable=unused-argument
normalized_shape = torch.onnx.symbolic_helper._get_tensor_sizes(inputs)
if normalized_shape is None:
ndim = torch.onnx.symbolic_helper._get_tensor_rank(inputs)
assert ndim is not None
normalized_shape = list(range(0, ndim))
        # Normalize over all dims except dim 0, so drop dim 0 from normalized_shape
normalized_shape = normalized_shape[1:]
if zero_centered_gamma:
inputs_dtype = inputs.type().dtype()
one = _ones_like(g, weight, inputs_dtype)
weight = g.op("Add", weight, one)
axis = -len(normalized_shape)
inputs_float = g.op("Cast", inputs, to_i=_C_onnx.TensorProtoDataType.FLOAT)
sum_square = g.op("ReduceSumSquare", inputs_float, axes_i=[axis])
shape = g.op("Shape", inputs_float, start_i=-1)
shape_f = g.op("Cast", shape, to_i=_C_onnx.TensorProtoDataType.FLOAT)
mean_squared = g.op("Div", sum_square, shape_f)
eps_tensor = g.op("ConstantOfShape", shape, value_t=torch.tensor([eps], dtype=torch.float32))
rms_squared = g.op("Add", mean_squared, eps_tensor)
rms_eps = g.op("Sqrt", rms_squared)
normalized_input = g.op("Div", inputs_float, rms_eps)
result = g.op("Mul", weight, normalized_input)
result = g.op("Cast", result, to_i=get_TensorProtoDataType(inputs))
return result
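# For the common 2-D input case (tokens, hidden) the axis above resolves to -1
# and the graph computes, in FP32,
#     y = weight * x / sqrt(mean(x ** 2, axis=-1) + eps)
# (with weight replaced by 1 + weight when zero_centered_gamma is set), then
# casts the result back to the input dtype.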
register_custom_op_symbolic('tex_ts::cast_to_fp8_ts', onnx_cast_to_fp8, VER)
register_custom_op_symbolic('tex_ts::cast_from_fp8_ts', onnx_cast_from_fp8, VER)
register_custom_op_symbolic('tex_ts::gelu_ts', onnx_fp8_gelu, VER)
register_custom_op_symbolic('tex_ts::relu_ts', onnx_fp8_relu, VER)
register_custom_op_symbolic('tex_ts::reglu_ts', onnx_fp8_reglu, VER)
register_custom_op_symbolic('tex_ts::geglu_ts', onnx_fp8_geglu, VER)
register_custom_op_symbolic('tex_ts::swiglu_ts', onnx_fp8_swiglu, VER)
register_custom_op_symbolic('tex_ts::te_gemm_ts', onnx_te_gemm, VER)
register_custom_op_symbolic('tex_ts::layernorm_fwd_fp8_inf_ts', onnx_layernorm_fwd_fp8, VER)
register_custom_op_symbolic('tex_ts::layernorm_fwd_inf_ts', onnx_layernorm_fwd, VER)
register_custom_op_symbolic('tex_ts::rmsnorm_fwd_fp8_inf_ts', onnx_rmsnorm_fwd_fp8, VER)
register_custom_op_symbolic('tex_ts::rmsnorm_fwd_inf_ts', onnx_rmsnorm_fwd, VER)
| TransformerEngine-main | transformer_engine/pytorch/te_onnx_extensions.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Methods needed for distributed training (DP/TP)."""
from contextlib import contextmanager
from typing import Any, Dict, Union, Optional, Callable, Tuple
import torch
from torch.cuda import _lazy_call
from torch.utils.checkpoint import detach_variable
from .utils import safely_set_viewless_tensor_data
from .constants import dist_group_type
from .fp8 import FP8GlobalStateManager
_MODEL_PARALLEL_ATTRIBUTE_DEFAULTS = {
"tensor_model_parallel": False,
"partition_dim": -1,
"partition_stride": 1,
}
_FP8_ACTIVATION_RECOMPUTE_ENABLED = False
_FP8_ACTIVATION_RECOMPUTE_PHASE = False
def _set_cuda_rng_state(new_state: torch.Tensor, device: Union[int, str] = -1) -> None:
"""Sets the random number generator state of the current GPU.
Arguments:
new_state (torch.ByteTensor): The desired state
This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
with a single change: the input state is not cloned. Cloning caused
    major performance issues for cases with 4+ GPUs.
"""
if device == -1:
device = torch.device("cuda")
elif isinstance(device, str):
device = torch.device(device)
elif isinstance(device, int):
device = torch.device("cuda", device)
def cb() -> None:
idx = device.index
if idx is None:
idx = torch.cuda.current_device()
default_generator = torch.cuda.default_generators[idx]
default_generator.set_state(new_state)
_lazy_call(cb)
def set_tensor_model_parallel_attributes(
tensor: torch.Tensor, is_parallel: bool, dim: int, stride: int
) -> None:
"""set attributes needed for TP"""
for attribute in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
assert not hasattr(tensor, attribute)
# Set the attributes.
setattr(tensor, "tensor_model_parallel", is_parallel)
setattr(tensor, "partition_dim", dim)
setattr(tensor, "partition_stride", stride)
def get_distributed_world_size(group: Optional[dist_group_type] = None) -> int:
"""Return world size for the distributed group."""
if not torch.distributed.is_initialized():
return 1
return torch.distributed.get_world_size(group=group)
def get_distributed_rank(group: Optional[dist_group_type] = None) -> int:
"""Return my rank for the distributed group."""
assert torch.distributed.is_initialized(), "torch.distributed is not initialized."
return torch.distributed.get_rank(group=group)
def initialize_affine_weight_gpu(
weight: torch.Tensor,
init_method: Callable,
get_rng_state_tracker: Callable,
partition_dim: int,
stride: int = 1,
) -> None:
"""Initialize affine weight for model parallel on GPU."""
set_tensor_model_parallel_attributes(
tensor=weight, is_parallel=True, dim=partition_dim, stride=stride
)
if get_rng_state_tracker is None:
init_method(weight)
return
with get_rng_state_tracker().fork():
init_method(weight)
def split_tensor_into_1d_equal_chunks(
tensor: torch.Tensor, tp_group: dist_group_type, new_buffer: bool = False
) -> torch.Tensor:
"""Break a tensor into equal 1D chunks."""
partition_size = torch.numel(tensor) // get_distributed_world_size(tp_group)
start_index = partition_size * get_distributed_rank(tp_group)
end_index = start_index + partition_size
if new_buffer:
data = torch.empty(
partition_size,
dtype=tensor.dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
data.copy_(tensor.view(-1)[start_index:end_index])
else:
data = tensor.view(-1)[start_index:end_index]
return data
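# Illustrative split (assumed sizes): for a tensor with 8 elements and a 2-way
# tensor-parallel group, partition_size is 4, so rank 0 owns the flattened view
# [0:4] and rank 1 owns [4:8]. With new_buffer=True the chunk is copied into
# fresh storage instead of aliasing the original tensor.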
def gather_split_1d_tensor(
tensor: torch.Tensor, tp_group: dist_group_type
) -> torch.Tensor:
"""Opposite of above function, gather values from model parallel ranks."""
numel_gathered = torch.numel(tensor) * get_distributed_world_size(tp_group)
gathered = torch.empty(
numel_gathered,
dtype=tensor.dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
torch.distributed.all_gather_into_tensor(gathered, tensor, group=tp_group)
return gathered
@contextmanager
def activation_recompute_forward(
activation_recompute: bool = False,
recompute_phase: bool = False,
) -> None:
"""Context manager used to control the forward runtime behavior when executed
under the `CheckpointFunction` function. For running FP8, the forward pass will
run without storing intermediate activations. Instead, the forward pass saves
the inputs tuple and the calling function. In the backwards pass, these are
retrieved, and the forward pass is computed again while tracking the intermediate
activations, followed by calculation of gradients using these values.
"""
global _FP8_ACTIVATION_RECOMPUTE_ENABLED, _FP8_ACTIVATION_RECOMPUTE_PHASE
try:
_FP8_ACTIVATION_RECOMPUTE_ENABLED = (
activation_recompute and FP8GlobalStateManager.is_fp8_enabled())
_FP8_ACTIVATION_RECOMPUTE_PHASE = recompute_phase
yield
finally:
_FP8_ACTIVATION_RECOMPUTE_ENABLED = False
_FP8_ACTIVATION_RECOMPUTE_PHASE = False
def is_fp8_activation_recompute_enabled() -> bool:
"""Return global boolean"""
return _FP8_ACTIVATION_RECOMPUTE_ENABLED
def in_fp8_activation_recompute_phase() -> bool:
"""Return global boolean"""
return _FP8_ACTIVATION_RECOMPUTE_PHASE
class CheckpointFunction(torch.autograd.Function):
"""This function is adapted from torch.utils.checkpoint with
two main changes:
1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
2) the states in the model parallel tracker are also properly
tracked/set/reset.
"""
@staticmethod
def forward(
ctx,
run_function: Callable,
distribute_saved_activations: bool,
get_cuda_rng_tracker: Callable,
tp_group: dist_group_type,
kwargs: Dict[str, Any],
*args: Tuple[torch.Tensor, ...],
) -> Tuple[torch.Tensor, ...]:
"""Call forward function while saving state to be able to
redo the computation later."""
ctx.run_function = run_function
ctx.distribute_saved_activations = distribute_saved_activations
# Copy the rng states.
ctx.fwd_cpu_rng_state = torch.get_rng_state()
ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state()
ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
with torch.no_grad():
with activation_recompute_forward(
activation_recompute=True, recompute_phase=False
):
outputs = run_function(*args, **kwargs)
# Divide hidden states across model parallel group and only keep
# the chunk corresponding to the current rank.
if distribute_saved_activations:
ctx.input_0_shape = args[0].data.shape
safely_set_viewless_tensor_data(
args[0],
split_tensor_into_1d_equal_chunks(
args[0].data, tp_group, new_buffer=True
),
)
# Store everything.
ctx.save_for_backward(*args)
ctx.get_cuda_rng_tracker = get_cuda_rng_tracker
ctx.tp_group = tp_group
ctx.kwargs = kwargs
return outputs
@staticmethod
def backward(
ctx, *args: Tuple[Union[torch.Tensor, None], ...]
) -> Tuple[Union[torch.Tensor, None], ...]:
"""Call backward function with activation recomputation."""
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError(
"Checkpointing is not compatible with .grad(), "
"please use .backward() if possible"
)
inputs = ctx.saved_tensors
get_cuda_rng_tracker = ctx.get_cuda_rng_tracker
if ctx.distribute_saved_activations:
safely_set_viewless_tensor_data(
inputs[0],
gather_split_1d_tensor(inputs[0].data, ctx.tp_group).view(
ctx.input_0_shape
),
)
# Store the current states.
bwd_cpu_rng_state = torch.get_rng_state()
bwd_cuda_rng_state = torch.cuda.get_rng_state()
bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
        # Set the states to what they were before the forward pass.
torch.set_rng_state(ctx.fwd_cpu_rng_state)
_set_cuda_rng_state(ctx.fwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
# Compute the forward pass.
detached_inputs = detach_variable(inputs)
with torch.enable_grad():
with activation_recompute_forward(
activation_recompute=True, recompute_phase=True
):
outputs = ctx.run_function(*detached_inputs, **ctx.kwargs)
        # Set the states back to what they were at the start of this function.
torch.set_rng_state(bwd_cpu_rng_state)
_set_cuda_rng_state(bwd_cuda_rng_state)
get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
if isinstance(outputs, torch.Tensor):
outputs = (outputs,)
torch.autograd.backward(outputs, args)
grads = tuple(
inp.grad if isinstance(inp, torch.Tensor) else inp
for inp in detached_inputs
)
return (None, None, None, None, None) + grads
def checkpoint(
function: Callable,
distribute_saved_activations: bool,
get_cuda_rng_tracker: Callable,
tp_group: dist_group_type,
*args: Tuple[torch.Tensor, ...],
**kwargs: Dict[str, Any],
) -> Tuple[torch.Tensor, ...]:
"""
Checkpoint a part of the model by trading compute for memory. This function is based on
`torch.utils.checkpoint.checkpoint <https://pytorch.org/docs/stable/checkpoint.html>`_.
.. warning::
It is the user's responsibility to ensure identical behavior when calling
:attr:`function` from the forward and backward pass. If different output is
produced (e.g. due to global state), then the checkpointed version won't
be numerically equivalent.
.. warning::
The tuple :attr:`args` must contain only tensors (or :attr:`None`) in order to comply with
PyTorch's :attr:`save_for_backward` method. :attr:`function` must be callable to produce
valid outputs with the inputs :attr:`args` and :attr:`kwargs`.
Parameters
----------
function: Callable
               the function or module to checkpoint; it is called with :attr:`args` and
               :attr:`kwargs` both during the forward pass and during recomputation.
distribute_saved_activations: bool
if set to `True`, the first tensor argument is distributed across the
specified tensor parallel group (`tp_group`) before saving it for the
backward pass.
get_cuda_rng_tracker: `Callable`
python function with the functionality to retrieve a state via
:attr:`state = get_cuda_rng_tracker().get_states()` and to reset the state via
:attr:`get_cuda_rng_tracker().set_states(state)`. This is used to ensure any
extra cuda rng state or general global state can be reproduced across the 2
forward phases; original and recompute.
tp_group : ProcessGroup, default = `None`
tensor parallel process group.
args : tuple
tuple of torch tensors for inputs to :attr:`function`.
kwargs : dict
dictionary of string keys for keyword arguments to :attr:`function`.
"""
return CheckpointFunction.apply(
function,
distribute_saved_activations,
get_cuda_rng_tracker,
tp_group,
kwargs,
*args,
)
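# Hedged usage sketch (assumed module/tracker names, not from the source):
#
#     out = checkpoint(
#         transformer_layer,        # callable run twice (forward + recompute)
#         False,                    # distribute_saved_activations
#         get_cuda_rng_tracker,     # RNG-state tracker accessor
#         tp_group,
#         hidden_states,
#         attention_mask=mask,
#     )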
def reduce_scatter_along_first_dim(
input_: torch.Tensor, tp_group: dist_group_type, async_op: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Reduce-scatter the input tensor across model parallel group."""
world_size = get_distributed_world_size(tp_group)
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_, None
dim_size = list(input_.size())
assert (
dim_size[0] % world_size == 0
), "First dimension of the tensor should be divisible by tensor parallel size"
dim_size[0] = dim_size[0] // world_size
output = torch.empty(
dim_size, dtype=input_.dtype, device=torch.cuda.current_device()
)
handle = torch.distributed.reduce_scatter_tensor(
output, input_.contiguous(), group=tp_group, async_op=async_op
)
return output, handle
def gather_along_first_dim(
input_: torch.Tensor, tp_group: dist_group_type, async_op: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Gather tensors and concatinate along the first dimension."""
world_size = get_distributed_world_size(tp_group)
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_, None
dim_size = list(input_.size())
dim_size[0] = dim_size[0] * world_size
output = torch.empty(
dim_size, dtype=input_.dtype, device=torch.cuda.current_device()
)
handle = torch.distributed.all_gather_into_tensor(
output, input_.contiguous(), group=tp_group, async_op=async_op
)
return output, handle
def gather_along_last_dim(
input_: torch.Tensor, tp_group: dist_group_type, async_op: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Gather tensors and concatinate along the last dimension."""
world_size = get_distributed_world_size(tp_group)
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_, None
dim_size = list(input_.size())
dim_size[-1] = dim_size[-1] * world_size
output = torch.empty(
dim_size, dtype=input_.dtype, device=torch.cuda.current_device()
)
handle = torch.distributed.all_gather_into_tensor(
output, input_.contiguous(), group=tp_group, async_op=async_op
)
return output, handle
def allreduce(
input_: torch.Tensor,
tp_group: Optional[dist_group_type] = None,
async_op: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""All-reduce the input tensor across model parallel group."""
# Bypass the function if we are using only 1 GPU.
if get_distributed_world_size(tp_group) == 1:
return input_, None
# All-reduce.
handle = torch.distributed.all_reduce(input_, group=tp_group, async_op=async_op)
return input_, handle
| TransformerEngine-main | transformer_engine/pytorch/distributed.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Utility functions for Transformer Engine modules"""
import math
from typing import Any, Callable, Optional, Tuple
import torch
def get_device_compute_capability() -> float:
"""Returns the cuda compute capability of current GPU"""
major = torch.cuda.get_device_properties(torch.cuda.current_device()).major
minor = torch.cuda.get_device_properties(torch.cuda.current_device()).minor
return major + minor / 10
def attention_mask_func(
attention_scores: torch.Tensor, attention_mask: torch.Tensor
) -> torch.Tensor:
"""Get attention mask"""
attention_scores.masked_fill_(attention_mask, -10000.0)
return attention_scores
def get_default_init_method() -> Callable:
"""Weight initialization method if not provided by user"""
return init_method_normal(0.023)
def init_method_normal(sigma: float) -> Callable:
"""Init method based on N(0, sigma)."""
def init_(tensor: torch.Tensor) -> Callable:
return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
return init_
def scaled_init_method_normal(sigma: float, num_layers: int) -> Callable:
"""Init method based on N(0, sigma/sqrt(2*num_layers)."""
std = sigma / math.sqrt(2.0 * num_layers)
def init_(tensor: torch.Tensor) -> Callable:
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_
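# Example (illustrative numbers): sigma = 0.023 with num_layers = 12 gives
# std = 0.023 / sqrt(24), about 0.0047, so deeper stacks receive proportionally
# smaller output-projection initializations.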
def all_close(a: torch.Tensor, b: torch.Tensor) -> bool:
"""torch.allclose with cpu to not run into OOMs"""
return torch.allclose(a.cpu(), b.cpu())
def print_rank_0(*args: Any) -> None:
"""print on rank 0"""
if torch.cuda.current_device() == 0:
print(*args)
def compare_tensors(a: torch.Tensor, b: torch.Tensor) -> None:
"""util function to show some tensor stats"""
if a.shape != b.shape:
print_rank_0("Tensors have different shape")
return
print_rank_0(a)
print_rank_0(b)
max_err = torch.max(torch.abs(a - b))
max_a = torch.max(a)
max_b = torch.max(b)
print_rank_0(f"max err={max_err}, max a={max_a}, max_b={max_b}")
def ensure_divisibility(numerator: int, denominator: int) -> None:
"""Ensure that numerator is divisible by the denominator."""
assert (
numerator % denominator == 0
), f"{numerator} is not divisible by {denominator}"
def divide(numerator: int, denominator: int) -> int:
"""Ensure that numerator is divisible by the denominator and return
the division value."""
ensure_divisibility(numerator, denominator)
return numerator // denominator
def split_tensor_along_dim(
tensor: torch.Tensor, dim: int, num_partitions: int, contiguous_split_chunks: bool = False
) -> Tuple[torch.Tensor, ...]:
"""Split a tensor along its last dimension.
Arguments:
tensor: input tensor.
num_partitions: number of partitions to split the tensor
contiguous_split_chunks: If True, make each chunk contiguous
in memory.
"""
# Get the size and dimension.
split_size = divide(tensor.size()[dim], num_partitions)
# Split.
tensor_list = torch.split(tensor, split_size, dim=dim)
# Note: torch.split does not create contiguous tensors by default.
if contiguous_split_chunks:
return tuple(chunk.contiguous() for chunk in tensor_list)
return tensor_list
def validate_ctx_manager(ctx: Callable) -> None:
"""Checks if passed in object can be used as a context manager."""
try:
with ctx():
pass
except Exception as e:
raise ValueError("Object must be a valid ctx manager") from e
def validate_rng_states_func(get_rng_tracker: Callable) -> None:
"""Checks if passed in param function has everything
required for tensor/model and sequence parallel.
"""
assert callable(get_rng_tracker), "get_rng_tracker is not a valid function"
rng_tracker = None
try:
rng_tracker = get_rng_tracker()
except Exception as e:
raise RuntimeError("Cannot call get_rng_tracker function") from e
assert hasattr(rng_tracker, "get_states") and callable(
rng_tracker.get_states
), "rng_tracker object does not have valid method get_states"
assert hasattr(rng_tracker, "set_states") and callable(
rng_tracker.set_states
), "rng_tracker object does not have valid method set_states"
assert hasattr(rng_tracker, "fork") and callable(
rng_tracker.fork
), "rng_tracker object does not have valid method fork"
validate_ctx_manager(rng_tracker.fork)
def assert_viewless_tensor(
tensor: torch.Tensor, extra_msg: Optional[str] = None
) -> torch.Tensor:
"""Assert that a tensor is not a view (i.e., its '._base' field is
not set)."""
if isinstance(tensor, list):
return [assert_viewless_tensor(t) for t in tensor]
if not isinstance(tensor, torch.Tensor):
return tensor
assert tensor._base is None, (
f"Ensure tensor._base is None before setting tensor.data or storing "
f"tensor to memory buffer. Otherwise, a memory leak will occur (and "
f"likely accumulate over iterations). {extra_msg}"
)
return tensor
def safely_set_viewless_tensor_data(
tensor: torch.Tensor, new_data_tensor: torch.Tensor
) -> None:
"""Safely set tensor's '.data' field.
Check first that the tensor is viewless (i.e., '._base' not set). If not,
raise an exception.
"""
extra_msg = (
f"FYI, tensor._base has shape "
f"{'--' if tensor._base is None else tensor._base.shape},"
f"and new_data_tensor has shape {new_data_tensor.shape}."
)
assert_viewless_tensor(tensor, extra_msg=extra_msg)
tensor.data = new_data_tensor
def cast_if_needed(tensor: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
"""Cast tensor to dtype"""
with torch.enable_grad():
return tensor if tensor is None or tensor.dtype == dtype else tensor.to(dtype)
def check_dim_for_fp8_exec(tensor: torch.Tensor) -> bool:
"""For fp8 fprop (TN layout), inputs and weights must be such
that dim0 is divisible by 8 and dim1 is divisible by 16.
"""
return not tensor.shape[0] % 8 and not tensor.shape[1] % 16
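# Examples (illustrative): a (256, 1024) tensor passes (256 % 8 == 0 and
# 1024 % 16 == 0), while a (256, 1000) tensor fails because 1000 % 16 != 0.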
def assert_dim_for_fp8_exec(tensor: torch.Tensor) -> None:
"""For fp8 fprop (TN layout), inputs and weights must be such
that dim0 is divisible by 8 and dim1 is divisible by 16.
"""
# single tensor check so it's clear which tensor is triggering the assertion
assert check_dim_for_fp8_exec(tensor), (
"Tensor dimensions are not compatible for FP8 execution: "
f"({tensor.shape[0]} % 8 != 0, {tensor.shape[1]} % 16 != 0)"
)
| TransformerEngine-main | transformer_engine/pytorch/utils.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Transformer."""
import os
import warnings
from contextlib import nullcontext
from typing import Any, Callable, Optional, Tuple, Union
import torch
import transformer_engine_extensions as tex
from transformer_engine.pytorch.module import LayerNormMLP, LayerNorm, RMSNorm
from transformer_engine.pytorch.attention import MultiheadAttention
from transformer_engine.pytorch.jit import (
set_jit_fusion_options,
warmup_jit_bias_dropout_add_all_dtypes,
get_bias_dropout_add,
bias_dropout_add_fused_train,
bias_dropout_add_fused_inference,
)
from transformer_engine.pytorch.utils import (
cast_if_needed,
get_default_init_method,
)
from transformer_engine.pytorch.constants import (
AttnMaskTypes,
LayerTypes,
dist_group_type,
)
from transformer_engine.pytorch.distributed import get_distributed_world_size
warnings.filterwarnings("module", category=DeprecationWarning, module="transformer")
__all__ = ["TransformerLayer"]
class DropPath(torch.nn.Module):
"""Drop paths (Stochastic Depth) per sample
(when applied in main path of residual blocks).
"""
def __init__(self, drop_prob: float = 0.0) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
"""DropPath FWD"""
if self.drop_prob == 0.0 or not self.training:
return hidden_state
keep_prob = 1 - self.drop_prob
        # work with tensors of arbitrary dimensionality, not just 2D ConvNets
shape = (hidden_state.shape[0],) + (1,) * (hidden_state.ndim - 1)
random_tensor = keep_prob + torch.rand(
shape, dtype=hidden_state.dtype, device=hidden_state.device
)
random_tensor.floor_() # binarize
output = hidden_state.div(keep_prob) * random_tensor
return output
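# Worked example (illustrative): with drop_prob = 0.2 and training enabled,
# keep_prob = 0.8; roughly 20% of samples have their residual branch zeroed and
# the surviving samples are scaled by 1 / 0.8 = 1.25, keeping the expected value
# of the output unchanged.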
class TransformerLayer(torch.nn.Module):
r"""
TransformerLayer is made up of an attention block and a feedforward network (MLP).
This standard layer is based on the paper "Attention Is All You Need".
.. warning::
Arguments :attr:`attention_softmax_in_fp32` and :attr:`apply_query_key_layer_scaling`
are deprecated and will be fully removed in future releases.
.. warning::
Argument :attr:`self_attn_mask_type` has been moved to the `forward` method and
is deprecated. It will be fully removed in future releases.
Parameters
----------
hidden_size : int
size of each input sample.
ffn_hidden_size : int
intermediate size to which input samples are projected.
num_attention_heads : int
number of attention heads in the transformer layer.
num_gqa_groups : int, default = `None`
number of GQA groups in the transformer layer.
Grouped Query Attention is described in
`this paper <https://arxiv.org/pdf/2305.13245.pdf>`_.
                     This only affects the keys and values, not the queries.
GQA-1 is equivalent to Multi-Query Attention
(`MQA <https://arxiv.org/pdf/1911.02150.pdf>`_), while GQA-H
is equivalent to MHA, i.e. `num_gqa_groups = num_attention_heads`.
layernorm_epsilon : float, default = 1e-5
a value added to the denominator of layer normalization
for numerical stability.
hidden_dropout: float, default = 0.1
dropout probability for the dropout op after FC2 layer.
attention_dropout: float, default = 0.1
dropout probability for the dropout op during multi-head attention.
init_method : Callable, default = `None`
used for initializing weights of QKV and FC1 weights in the following way:
`init_method(weight)`. When set to `None`, defaults to
`torch.nn.init.normal_(mean=0.0, std=0.023)`.
output_layer_init_method : Callable, default = `None`
used for initializing weights of PROJ and FC2 in the following way:
`output_layer_init_method(weight)`. When set to `None`, defaults to
`torch.nn.init.normal_(mean=0.0, std=0.023)`.
apply_residual_connection_post_layernorm : bool, default = `False`
if set to `True`, residual connections are taken
from the output of layer norm (default is taken
from input of layer norm)
layer_number: int, default = `None`
layer number of the current `TransformerLayer` when multiple such modules are
concatenated to form a transformer block.
output_layernorm: bool, default = `False`
if set to `True`, layer normalization is applied on the output side,
after the final dropout-add. default behavior is to apply layer
normalization on the input side, before the QKV transformation.
layer_type: {'encoder', 'decoder'}, default = `encoder`
if set to `decoder`, an additional cross-attn block is added after self-attn.
This can be used for structures like `T5` Transformer in conjunction with the
`encoder` option.
kv_channels: int, default = `None`
number of key-value channels. defaults to
:attr:`hidden_size` / :attr:`num_attention_heads` if `None`.
zero_centered_gamma : bool, default = 'False'
if set to 'True', gamma parameter in LayerNorm is initialized to 0 and
the LayerNorm formula changes to
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \varepsilon}} *
(1 + \gamma) + \beta
normalization : { 'LayerNorm', 'RMSNorm' }, default = 'LayerNorm'
type of normalization applied.
qkv_weight_interleaved : bool, default = `True`
if set to `False`, the QKV weight is interpreted as a concatenation of
query, key, and value weights along the `0th` dimension. The default
interpretation is that the individual `q`, `k`, and `v` weights for each
attention head are interleaved. This parameter is set to `False` when
using :attr:`fuse_qkv_params=False`.
bias : bool, default = `True`
if set to `False`, the transformer layer will not learn any additive biases.
activation : str, default = 'gelu'
Type of activation used in MLP block.
Options are: 'gelu', 'relu', 'reglu', 'geglu' and 'swiglu'.
device : Union[torch.device, str], default = "cuda"
              The device on which the parameters of the model will be allocated. It is the user's
responsibility to ensure all parameters are moved to the GPU before running the
forward pass.
Parallelism parameters
----------------------
set_parallel_mode : bool, default = `False`
if set to `True`, QKV and FC1 layers are used as Column Parallel
                      whereas PROJ and FC2 are used as Row Parallel as described
`here <https://arxiv.org/pdf/1909.08053.pdf>`_.
sequence_parallel : bool, default = `False`
if set to `True`, uses sequence parallelism.
tp_group : ProcessGroup, default = `None`
tensor parallel process group.
tp_size : int, default = 1
used as TP (tensor parallel) world size when TP groups are not formed during
initialization. In this case, users must call the
`set_tensor_parallel_group(tp_group)` method on the initialized module before the
forward pass to supply the tensor parallel group needed for tensor and sequence
parallel collectives.
Optimization parameters
-----------------------
fuse_wgrad_accumulation : bool, default = 'False'
if set to `True`, enables fusing of creation and accumulation of
the weight gradient. When enabled, it is assumed that the weights
have an additional `main_grad` attribute (used instead of the
regular `grad`) which is a pre-allocated buffer of the correct
size to accumulate gradients in.
params_dtype : torch.dtype, default = `torch.get_default_dtype()`
it controls the type used to allocate the initial parameters. Useful when
the model is trained with lower precision and the original FP32 parameters
would not fit in GPU memory.
seq_length: int
sequence length of input samples. Needed for JIT Warmup, a technique where jit
                fused functions are warmed up before training to ensure the same kernels are used
                for the forward propagation and activation recompute phases.
micro_batch_size: int
batch size per training step. Needed for JIT Warmup, a technique where jit
                      fused functions are warmed up before training to ensure the same kernels are
                      used for the forward propagation and activation recompute phases.
drop_path_rate: float, default = 0.0
when > 0.0, applies stochastic depth per sample in
the main path of the residual block.
fuse_qkv_params: bool, default = 'False'
if set to `True`, `TransformerLayer` module exposes a single fused
parameter for query-key-value. This enables optimizations such as QKV
                    fusion without concatenations/splits and also enables the argument
`fuse_wgrad_accumulation`.
"""
def __init__(
self,
hidden_size: int,
ffn_hidden_size: int,
num_attention_heads: int,
num_gqa_groups: Optional[int] = None,
layernorm_epsilon: float = 1e-5,
hidden_dropout: float = 0.1,
attention_dropout: float = 0.1,
init_method: Optional[Callable] = None,
output_layer_init_method: Optional[Callable] = None,
layer_number: Optional[int] = None,
kv_channels: Optional[int] = None,
self_attn_mask_type: Optional[str] = None,
tp_group: Optional[dist_group_type] = None,
tp_size: int = 1,
params_dtype: Optional[torch.dtype] = None,
get_rng_state_tracker: Optional[Callable] = None,
fuse_wgrad_accumulation: bool = False,
apply_query_key_layer_scaling: bool = False, # pylint: disable=unused-argument
attention_softmax_in_fp32: bool = True, # pylint: disable=unused-argument
seq_length: Optional[int] = None,
micro_batch_size: Optional[int] = None,
sequence_parallel: bool = False,
apply_residual_connection_post_layernorm: bool = False,
output_layernorm: bool = False,
layer_type: str = "encoder",
drop_path_rate: float = 0.0,
set_parallel_mode: bool = False,
fuse_qkv_params: bool = False,
zero_centered_gamma: bool = False,
qkv_weight_interleaved: bool = True,
ub_tp_comm_overlap: bool = False,
bias: bool = True,
activation: str = 'gelu',
normalization: str = "LayerNorm",
device: Union[torch.device, str] = "cuda",
) -> None:
super().__init__()
if self_attn_mask_type is not None:
warnings.warn(
"Argument :attr:`self_attn_mask_type` has been moved to the `forward` method and"
"is deprecated. It will be fully removed in future releases.",
category=DeprecationWarning,
)
warnings.warn(
"Arguments `attention_softmax_in_fp32` and `apply_query_key_layer_scaling`"
"are deprecated and will be fully removed in future releases.",
category=DeprecationWarning,
)
if ub_tp_comm_overlap:
assert (
tex.userbuf_comm_available()
), "Userbuffer communication backend not available."
self.self_attn_mask_type = self_attn_mask_type
params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
ub_tp_comm_overlap = ub_tp_comm_overlap and bool(int(os.getenv("NVTE_UB_OVERLAP", "1")))
ub_bulk_wgrad = ub_tp_comm_overlap and bool(int(os.getenv("NVTE_UB_BULK_WGRAD", "1")))
ub_bulk_dgrad = ub_tp_comm_overlap and bool(int(os.getenv("NVTE_UB_BULK_DGRAD", "1")))
ub_split_ag = ub_tp_comm_overlap and bool(int(os.getenv("NVTE_UB_SPLIT_AG", "1")))
ub_split_rs = ub_tp_comm_overlap and bool(int(os.getenv("NVTE_UB_SPLIT_RS", "1")))
bias_dropout_fusion = bool(int(os.getenv("NVTE_BIAS_DROPOUT_FUSION", "1")))
self.layer_number = layer_number
self.output_layernorm = output_layernorm
self.layer_type = layer_type
self.apply_residual_connection_post_layernorm = (
apply_residual_connection_post_layernorm
)
assert layer_type in LayerTypes, f"layer_type {layer_type} not supported"
if not fuse_qkv_params:
assert (
not fuse_wgrad_accumulation
), "Gradient accumulation fusion requires single QKV parameter."
if not fuse_qkv_params:
qkv_weight_interleaved = False
self.kv_channels = (
kv_channels if kv_channels else (hidden_size // num_attention_heads)
)
if init_method is None:
init_method = get_default_init_method()
if output_layer_init_method is None:
output_layer_init_method = get_default_init_method()
self.tp_size = tp_size if tp_group is None else get_distributed_world_size(tp_group)
self.sequence_parallel = (self.tp_size > 1) and sequence_parallel
self.seq_length = seq_length
self.get_rng_state_tracker = get_rng_state_tracker
attention_args = (
hidden_size,
num_attention_heads,
self.kv_channels,
attention_dropout,
layernorm_epsilon,
init_method,
output_layer_init_method,
)
common_attention_kwargs = {
"layer_number": layer_number,
"tp_group": tp_group,
"tp_size": self.tp_size,
"num_gqa_groups": num_gqa_groups,
"fuse_wgrad_accumulation": fuse_wgrad_accumulation,
"get_rng_state_tracker": get_rng_state_tracker,
"sequence_parallel": self.sequence_parallel,
"params_dtype": params_dtype,
"return_layernorm_output": apply_residual_connection_post_layernorm,
"set_parallel_mode": set_parallel_mode,
"fuse_qkv_params": fuse_qkv_params,
"zero_centered_gamma": zero_centered_gamma,
"qkv_weight_interleaved" : qkv_weight_interleaved,
"ub_bulk_wgrad" : ub_bulk_wgrad,
"ub_bulk_dgrad" : ub_bulk_dgrad,
"ub_split_ag" : ub_split_ag,
"ub_split_rs" : ub_split_rs,
}
self.self_attention = MultiheadAttention(
*attention_args,
**common_attention_kwargs,
input_layernorm=not output_layernorm,
attention_type="self",
bias=bias,
return_bias=True,
normalization=normalization,
device=device,
)
if layer_type == "decoder":
self.inter_attention = MultiheadAttention(
*attention_args,
**common_attention_kwargs,
attn_mask_type="padding",
input_layernorm=True,
attention_type="cross",
bias=bias,
return_bias=True,
normalization=normalization,
device=device,
)
# LayerNorm -> activation(Linear + Bias) -> Linear
# parallel_mode not supported for LayerNormMLP,
# FC1 is CPL and FC2 is RPL
# In the case of GLU activation, FC1 handles both
# Linear layers before the activation
self.layernorm_mlp = LayerNormMLP(
hidden_size,
ffn_hidden_size,
eps=layernorm_epsilon,
fuse_wgrad_accumulation=fuse_wgrad_accumulation,
tp_group=tp_group,
tp_size=self.tp_size,
get_rng_state_tracker=get_rng_state_tracker,
init_method=init_method,
output_layer_init_method=output_layer_init_method,
bias=bias,
return_bias=True,
sequence_parallel=self.sequence_parallel,
params_dtype=params_dtype,
return_layernorm_output=apply_residual_connection_post_layernorm,
seq_length=seq_length,
micro_batch_size=micro_batch_size,
set_parallel_mode=set_parallel_mode,
zero_centered_gamma=zero_centered_gamma,
ub_bulk_wgrad=ub_bulk_wgrad,
ub_bulk_dgrad=ub_bulk_dgrad,
ub_split_rs=ub_split_rs,
ub_split_ag=ub_split_ag,
activation=activation,
normalization=normalization,
device=device,
)
self.hidden_dropout = hidden_dropout
self.bias_dropout_fusion = bias_dropout_fusion
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else None
# Set bias+dropout+add fusion grad_enable execution handler.
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
use_nvfuser = TORCH_MAJOR > 1 or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10)
self.bias_dropout_add_exec_handler = (
nullcontext if use_nvfuser else torch.enable_grad
)
if self.bias_dropout_fusion:
set_jit_fusion_options()
if seq_length and micro_batch_size:
if self.sequence_parallel:
seq_length = seq_length // self.tp_size
warmup_jit_bias_dropout_add_all_dtypes(
hidden_size, seq_length, micro_batch_size
)
norm_module = {
"LayerNorm": LayerNorm,
"RMSNorm": RMSNorm,
}
if self.output_layernorm:
self.layernorm = norm_module[normalization](
hidden_size,
eps=layernorm_epsilon,
sequence_parallel=self.sequence_parallel,
params_dtype=params_dtype,
zero_centered_gamma=zero_centered_gamma,
device=device,
)
def set_tensor_parallel_group(self, tp_group: Union[dist_group_type, None]) -> None:
"""Set TP group"""
# Deep iterate but skip self to avoid infinite recursion.
for index, child in enumerate(self.modules()):
if index == 0:
continue
if hasattr(child, "set_tensor_parallel_group"):
child.set_tensor_parallel_group(tp_group)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
self_attn_mask_type: str = "causal",
encoder_output: Optional[torch.Tensor] = None,
enc_dec_attn_mask: Optional[torch.Tensor] = None,
is_first_microbatch: Optional[bool] = None,
checkpoint_core_attention: bool = False,
inference_params: Optional[Any] = None,
rotary_pos_emb: Optional[Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]] = None,
core_attention_bias_type: str = "no_bias",
core_attention_bias: Optional[torch.Tensor] = None,
fast_zero_fill: bool = True,
) -> torch.Tensor:
"""
Transformer Layer: attention block and a feedforward network (MLP)
.. note::
Argument :attr:`attention_mask` will be ignored when :attr:`self_attn_mask_type`
is set to `"causal"`.
Parameters
----------
hidden_states : torch.Tensor
Input tensor.
attention_mask : Optional[torch.Tensor], default = `None`
Boolean tensor used to mask out self-attention softmax input.
self_attn_mask_type: {'causal', 'padding'}, default = `causal`
type of attention mask passed into softmax operation.
encoder_output : Optional[torch.Tensor], default = `None`
Output of the encoder block to be fed into the decoder block if using
`layer_type="decoder"`.
enc_dec_attn_mask : Optional[torch.Tensor], default = `None`
Boolean tensor used to mask out inter-attention softmax input if using
`layer_type="decoder"`.
is_first_microbatch : {True, False, None}, default = None
During training using either gradient accumulation or
pipeline parallelism a minibatch of data is further split
into microbatches. Between the microbatches of the same minibatch
the model weights are not updated. Setting this parameter indicates
whether the current microbatch is the first in a minibatch or not.
When set, this parameter enables additional optimizations:
* during FP8 training, it allows caching of the FP8 versions of
the weights
* it also allows skipping gradient accumulation during the
first microbatch (since it is the first gradient being
produced)
checkpoint_core_attention: bool, default = `False`
If true, forward activations for core attention are recomputed
during the backward pass in order to save memory that would
otherwise be occupied to store the forward activations until
backprop.
rotary_pos_emb: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]], default = `None`
Embeddings for query and key tensors for applying rotary position
embedding. By default no input embedding is applied.
core_attention_bias_type: str, default = `no_bias`
            Bias type, {`no_bias`, `pre_scale_bias`, `post_scale_bias`}
core_attention_bias: Optional[torch.Tensor], default = `None`
Bias tensor for Q * K.T
fast_zero_fill: bool, default = `True`
Whether to set output tensors to 0 or not before use.
"""
if self.self_attn_mask_type is not None:
warnings.warn(
"Argument :attr:`self_attn_mask_type` has been moved to the `forward` method and"
"is deprecated. It will be fully removed in future releases.",
category=DeprecationWarning,
)
# Keep previous functionality for current users.
self_attn_mask_type = self.self_attn_mask_type
assert (
self_attn_mask_type in AttnMaskTypes
), f"self_attn_mask_type {self_attn_mask_type} not supported"
hidden_states = hidden_states.contiguous()
if self.sequence_parallel and self.seq_length is not None:
assert (
hidden_states.shape[0] == self.seq_length // self.tp_size
), "Sequence dimension must be split across TP group when using sequence parallel."
if self_attn_mask_type != "causal" and attention_mask is not None:
assert (
attention_mask.dtype == torch.bool
), "Attention mask must be a boolean tensor"
# For AMP
if torch.is_autocast_enabled():
hidden_states = cast_if_needed(
hidden_states, torch.get_autocast_gpu_dtype()
)
# Self attention.
self_attention_outputs = self.self_attention(
hidden_states,
attention_mask=attention_mask,
attn_mask_type=self_attn_mask_type,
inference_params=inference_params,
is_first_microbatch=is_first_microbatch,
checkpoint_core_attention=checkpoint_core_attention,
rotary_pos_emb=rotary_pos_emb,
core_attention_bias_type=core_attention_bias_type,
core_attention_bias=core_attention_bias,
fast_zero_fill=fast_zero_fill,
)
if self.apply_residual_connection_post_layernorm and not self.output_layernorm:
attention_output, attention_bias, residual = self_attention_outputs
else:
attention_output, attention_bias = self_attention_outputs
residual = hidden_states
# Set BDA func.
if self.bias_dropout_fusion:
if self.training:
bias_dropout_add_func = bias_dropout_add_fused_train
else:
bias_dropout_add_func = bias_dropout_add_fused_inference
else:
bias_dropout_add_func = get_bias_dropout_add(self.training)
        # Bias dropout add.
if self.drop_path is None and attention_bias.numel() != 0:
with self.bias_dropout_add_exec_handler():
bda_output = bias_dropout_add_func(
attention_output, attention_bias, residual, self.hidden_dropout
)
else:
if attention_bias.numel() != 0:
attention_output = attention_output + attention_bias
out = torch.nn.functional.dropout(
attention_output,
p=self.hidden_dropout,
training=self.training,
)
if self.drop_path is not None:
out = self.drop_path(out)
bda_output = residual + out
# Cross attention.
if self.layer_type == "decoder":
inter_attention_outputs = self.inter_attention(
bda_output,
attention_mask=enc_dec_attn_mask,
attn_mask_type=self_attn_mask_type,
encoder_output=encoder_output,
is_first_microbatch=is_first_microbatch,
checkpoint_core_attention=checkpoint_core_attention,
core_attention_bias_type=core_attention_bias_type,
core_attention_bias=core_attention_bias,
fast_zero_fill=fast_zero_fill,
)
if self.apply_residual_connection_post_layernorm:
attention_output, attention_bias, residual = inter_attention_outputs
else:
attention_output, attention_bias = inter_attention_outputs
residual = bda_output
if attention_bias.numel() != 0:
with self.bias_dropout_add_exec_handler():
bda_output = bias_dropout_add_func(
attention_output, attention_bias, residual, self.hidden_dropout
)
else:
out = torch.nn.functional.dropout(
attention_output,
p=self.hidden_dropout,
training=self.training,
)
bda_output = residual + out
# MLP.
mlp_outputs = self.layernorm_mlp(
bda_output, is_first_microbatch=is_first_microbatch
)
if self.apply_residual_connection_post_layernorm:
mlp_output, mlp_bias, residual = mlp_outputs
else:
mlp_output, mlp_bias = mlp_outputs
residual = bda_output
        # Bias dropout add.
if self.drop_path is None and mlp_bias.numel() != 0:
with self.bias_dropout_add_exec_handler():
output = bias_dropout_add_func(
mlp_output, mlp_bias, residual, self.hidden_dropout
)
else:
if mlp_bias.numel() != 0:
mlp_output = mlp_output + mlp_bias
out = torch.nn.functional.dropout(
mlp_output, p=self.hidden_dropout, training=self.training
)
if self.drop_path is not None:
out = self.drop_path(out)
output = residual + out
# For BERT like architectures.
if self.output_layernorm:
output = self.layernorm(output)
# output: [b, s, h]
return output
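# A minimal usage sketch: constructing a TransformerLayer and running one causal
# forward pass in BF16. The helper name below is illustrative and not part of this
# module's public API; it assumes a CUDA device and the module's default
# sequence-first activation layout.
def _example_transformer_layer_usage() -> torch.Tensor:
    """Build a small TransformerLayer and run a single causal forward pass."""
    layer = TransformerLayer(
        hidden_size=1024,
        ffn_hidden_size=4096,
        num_attention_heads=16,
        params_dtype=torch.bfloat16,
    )
    # Random activations shaped [sequence_length, batch_size, hidden_size].
    hidden_states = torch.randn(128, 2, 1024, dtype=torch.bfloat16, device="cuda")
    return layer(hidden_states, self_attn_mask_type="causal")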
| TransformerEngine-main | transformer_engine/pytorch/transformer.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Fused scaled masked softmax functions"""
import os
from typing import Callable, Tuple, Union, Optional
import torch
from torch import nn
import torch._C._onnx as _C_onnx
from torch.onnx import _type_utils
import transformer_engine_extensions as tex
from transformer_engine.pytorch.export import is_in_onnx_export_mode
from transformer_engine.pytorch.te_onnx_extensions import compute_in_fp32
THREADS_PER_WARP = 32
THREADS_PER_BLOCK = 128
_default_causal_mask = {}
def _get_default_causal_mask(sq: int) -> torch.Tensor:
"""Return the causal upper triangular mask for softmax input"""
if sq not in _default_causal_mask:
_default_causal_mask[sq] = torch.triu(torch.ones(sq, sq, device="cuda"), diagonal=1).bool()
return _default_causal_mask[sq]
def _get_onnx_export_causal_mask(
seq_q: int, seq_k: int, onnx_causal_mask: torch.Tensor
) -> torch.Tensor:
"""Return the causal upper triangular mask for softmax input, for ONNX export.
ONNX does not support dynamic control-flow and requires non-square masks when
    using a KV-cache (seq_k's length is len(context)+len(generative) while seq_q's length is 1).
Argument `onnx_causal_mask` is a square triu (k=1) mask that is sliced to the correct
shape for GPT context and generation phases.
In the context phase the derived mask is a square triu of shape (seq_k, seq_k), and in
the generation phase the mask is rectangular with shape (1, seq_k).
"""
assert len(onnx_causal_mask.size()) == 2
assert onnx_causal_mask.size(0) == onnx_causal_mask.size(1)
assert onnx_causal_mask.size(0) >= (seq_k-seq_q) >= 0
derived_mask = onnx_causal_mask[seq_k-seq_q:seq_k, :seq_k]
return derived_mask
def fp32_compute(onnx_symbolic_fn):
"""A decorator that wraps an ONNX symoblic function with FP32 compute operators."""
def wrapper(g: torch.Graph, inp: torch._C.Value, scale: float, *args, **kwargs):
return compute_in_fp32(g, inp, onnx_symbolic_fn, scale, *args, **kwargs)
return wrapper
class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
"""
Fused operation which performs following three operations in sequence
1. Scale the tensor.
2. Apply upper triangular mask (typically used in gpt models).
3. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs: torch.Tensor, scale: float) -> torch.Tensor:
"""ScaledUpperTriangMaskedSoftmax fwd"""
scale_t = torch.tensor([scale])
softmax_results = tex.scaled_upper_triang_masked_softmax_forward(
inputs, scale_t[0]
)
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(
ctx, output_grads: torch.Tensor
) -> Tuple[Union[torch.Tensor, None], ...]:
"""ScaledUpperTriangMaskedSoftmax bwd"""
softmax_results, scale_t = ctx.saved_tensors
input_grads = tex.scaled_upper_triang_masked_softmax_backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None
@staticmethod
@fp32_compute
def symbolic(g: torch.Graph, inputs: torch._C.Value, scale: float) -> torch._C.Value:
"""ScaledUpperTriangMaskedSoftmax symbolic method"""
def triangular_mask():
dtype = _type_utils.JitScalarType.INT64
ones = torch.onnx.symbolic_opset9.ones_like(g, inputs, dtype)
k = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))
mask = g.op("Trilu", ones, k, upper_i=1)
mask = g.op("Cast", mask, to_i=_C_onnx.TensorProtoDataType.BOOL)
return mask
# Captures the logic of function scaled_upper_triang_masked_softmax_warp_forward
mask = triangular_mask()
one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))
inv_mask = g.op("Sub", one, mask)
neg_tenK = g.op("Constant", value_t=torch.tensor(-10000., dtype=torch.float16))
softmax_mask = g.op("Mul", mask, neg_tenK)
scale_input = g.op("Constant", value_t=torch.tensor(scale, dtype=torch.float16))
scaled = g.op("Mul", inputs, scale_input)
masked_scaled = g.op("Mul", inv_mask, scaled)
masked = g.op("Add", masked_scaled, softmax_mask)
out = g.op("Softmax", masked)
return out
class ScaledMaskedSoftmax(torch.autograd.Function):
"""
Fused operation which performs following three operations in sequence
1. Scale the tensor.
2. Apply the mask.
3. Perform softmax.
"""
@staticmethod
def forward(
ctx, inputs: torch.Tensor, mask: torch.Tensor, scale: float
) -> torch.Tensor:
"""ScaledMaskedSoftmax fwd"""
scale_t = torch.tensor([scale])
softmax_results = tex.scaled_masked_softmax_forward(inputs, mask, scale_t[0])
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(
ctx, output_grads: torch.Tensor
) -> Tuple[Union[torch.Tensor, None], ...]:
"""ScaledMaskedSoftmax bwd"""
softmax_results, scale_t = ctx.saved_tensors
input_grads = tex.scaled_masked_softmax_backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None, None
@staticmethod
@fp32_compute
def symbolic(
g: torch.Graph,
inputs: torch._C.Value,
mask: torch._C.Value,
scale: float) -> torch._C.Value:
"""ScaledMaskedSoftmax symbolic method"""
# Captures the logic of function scaled_masked_softmax_warp_forward.
        # output = softmax(mask(input*scale))
# Computed as:
# masked_scaled = (1 - mask)*(input*scale)
# softmax_mask = mask * -10000
# output = softmax(masked_scaled + softmax_mask)
scale_input = g.op("Constant", value_t=torch.tensor(scale, dtype=torch.float16))
scaled = g.op("Mul", inputs, scale_input)
one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64))
inv_mask = g.op("Sub", one, mask)
# Note: type is hard coded because softmax uses FP16 or BF16
neg_tenK = g.op("Constant", value_t=torch.tensor(-10000., dtype=torch.float16))
softmax_mask = g.op("Mul", mask, neg_tenK)
masked_scaled = g.op("Mul", inv_mask, scaled)
masked = g.op("Add", masked_scaled, softmax_mask)
out = g.op("Softmax", masked)
return out
class ScaledSoftmax(torch.autograd.Function):
"""
Fused operation which performs following two operations in sequence
1. Scale the tensor.
2. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs: torch.Tensor, scale: float) -> torch.Tensor:
"""ScaledSoftmax fwd"""
scale_t = torch.tensor([scale])
softmax_results = tex.scaled_softmax_forward(inputs, scale_t[0])
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(
ctx, output_grads: torch.Tensor
) -> Tuple[Union[torch.Tensor, None], ...]:
"""ScaledSoftmax bwd"""
softmax_results, scale_t = ctx.saved_tensors
input_grads = tex.scaled_softmax_backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None, None
@staticmethod
@fp32_compute
def symbolic(g: torch.Graph, inputs: torch._C.Value, scale: float) -> torch._C.Value:
"""ScaledSoftmax symbolic method"""
scale_input = g.op("Constant", value_t=torch.tensor(scale, dtype=torch.float16))
scaled = g.op("Mul", inputs, scale_input)
out = g.op("Softmax", scaled)
return out
class FusedScaleMaskSoftmax(nn.Module):
"""
fused operation: scaling + mask + softmax
Arguments:
mask_func: mask function to be applied.
        softmax_in_fp32: if true, softmax is performed in fp32 precision.
"""
def __init__(
self,
mask_func: Callable,
softmax_in_fp32: bool = True,
) -> None:
super().__init__()
self.scaled_masked_softmax_fusion = bool(
int(os.getenv("NVTE_MASKED_SOFTMAX_FUSION", "1"))
)
self.mask_func = mask_func
self.softmax_in_fp32 = softmax_in_fp32
# Users exporting to ONNX can optimize the attention mask for GPT text generation.
self.kvcache_max_seq = int(os.getenv("NVTE_ONNX_KVCACHE_MAX_SEQ_LEN", "-1"))
if self.kvcache_max_seq > 0:
self.register_buffer(
"onnx_causal_mask",
torch.triu(
torch.ones(self.kvcache_max_seq, self.kvcache_max_seq, device="cuda"),
diagonal=1
).bool(),
persistent=False)
def forward(
self,
inp: torch.Tensor,
mask: torch.Tensor,
attn_mask_type: str,
scale: Optional[float] = None,
) -> torch.Tensor:
"""FusedScaleMaskSoftmax fprop"""
# [b, np, sq, sk]
assert inp.dim() == 4
self.input_in_fp16 = inp.dtype == torch.float16
self.input_in_bf16 = inp.dtype == torch.bfloat16
self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
self.attn_mask_type = attn_mask_type
assert (
scale is None or self.softmax_in_fp32
), "softmax should be in fp32 when scaled"
if self.is_kernel_available(*inp.size()) and not is_in_onnx_export_mode():
return self.forward_fused_softmax(inp, mask, scale)
return self.forward_torch_softmax(inp, mask, scale)
def is_kernel_available(self, b: int, np: int, sq: int, sk: int) -> bool:
"""Check FusedScaleMaskSoftmax kernel availability based on size"""
attn_batches = b * np
if ( # pylint: disable=too-many-boolean-expressions
            self.scaled_masked_softmax_fusion # user wants to fuse
            and self.input_in_float16 # input must be fp16 or bf16
            and 16 < sk <= 4096 # sk must be in (16, 4096]
            and sk % 8 == 0 # sk must be divisible by 8
            and sq % 4 == 0 # sq must be divisible by 4
            and attn_batches % 4 == 0 # np * b must be divisible by 4
):
if 0 <= sk <= 4096:
batch_per_block = self.get_batch_per_block(int(sk))
if self.attn_mask_type == "causal":
if attn_batches % batch_per_block == 0:
return True
else:
if sq % batch_per_block == 0:
return True
return False
def forward_fused_softmax(
self, inp: torch.Tensor, mask: torch.Tensor, scale: Optional[float] = None
) -> torch.Tensor:
"""Fused masked softmax kernel"""
b, np, sq, sk = inp.size()
scale = 1.0 if scale is None else scale
if self.attn_mask_type == "causal":
assert sq == sk, "causal mask is only for self attention"
# input is 3D tensor (attn_batches, sq, sk)
inp = inp.view(-1, sq, sk)
probs = ScaledUpperTriangMaskedSoftmax.apply(inp, scale)
return probs.view(b, np, sq, sk)
# input is 4D tensor (b, np, sq, sk)
if mask is not None:
return ScaledMaskedSoftmax.apply(inp, mask, scale)
return ScaledSoftmax.apply(inp, scale)
def forward_torch_softmax(
self, inp: torch.Tensor, mask: torch.Tensor, scale: Optional[float] = None
) -> torch.Tensor:
"""Framework softmax"""
if self.input_in_float16 and self.softmax_in_fp32:
inp = inp.float()
if scale is not None:
inp = inp * scale
if self.attn_mask_type == "causal":
if is_in_onnx_export_mode() and self.kvcache_max_seq > 0:
seq_len_q, seq_len_k = inp.size(2), inp.size(3)
assert self.kvcache_max_seq >= seq_len_k
mask = _get_onnx_export_causal_mask(seq_len_q, seq_len_k, self.onnx_causal_mask)
else:
mask = _get_default_causal_mask(inp.size(2))
mask_output = self.mask_func(inp, mask) if mask is not None else inp
probs = torch.nn.Softmax(dim=-1)(mask_output)
if self.input_in_float16 and self.softmax_in_fp32:
if self.input_in_fp16:
probs = probs.half()
else:
probs = probs.bfloat16()
return probs
@staticmethod
def get_batch_per_block(key_seq_len: int) -> int:
"""Softmax utility"""
pow2 = 1 << (key_seq_len - 1).bit_length()
warp_size = pow2 if pow2 < THREADS_PER_WARP else THREADS_PER_WARP
batches_per_warp = 2 if pow2 <= 128 else 1
warps_per_block = THREADS_PER_BLOCK // warp_size
batches_per_block = warps_per_block * batches_per_warp
return batches_per_block
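# A minimal usage sketch: driving FusedScaleMaskSoftmax with a padding mask.
# Both helpers below are illustrative and not part of this module's public API;
# callers normally supply their own mask function that fills masked positions
# with a large negative value before the softmax.
def _example_fused_scale_mask_softmax() -> torch.Tensor:
    """Apply fused scale+mask+softmax to random [b, np, sq, sk] attention scores."""
    def _example_mask_func(scores: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        # Mask out positions where `mask` is True.
        return scores.masked_fill(mask, -10000.0)
    softmax_fn = FusedScaleMaskSoftmax(mask_func=_example_mask_func)
    scores = torch.randn(2, 16, 128, 128, dtype=torch.float16, device="cuda")
    mask = torch.zeros(2, 1, 128, 128, dtype=torch.bool, device="cuda")
    # Scale by 1/sqrt(head_dim) for an assumed head_dim of 64.
    return softmax_fn(scores, mask, attn_mask_type="padding", scale=0.125)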
| TransformerEngine-main | transformer_engine/pytorch/softmax.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Python interface for cast extensions"""
from typing import Optional, Union
import torch
import transformer_engine_extensions as tex
__all__ = ['cast_to_fp8',
'cast_from_fp8']
def cast_to_fp8(
inp: torch.Tensor,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
out: Optional[torch.Tensor] = None,
) -> Optional[torch.Tensor]:
"""Cast input to FP8"""
if out is not None:
tex.cast_to_fp8_noalloc(
inp,
fp8_meta_tensor.scale[fp8_tensor],
out,
fp8_meta_tensor.amax_history[0][fp8_tensor],
fp8_meta_tensor.scale_inv[fp8_tensor],
otype
)
return None
return torch.ops.tex_ts.cast_to_fp8_ts(
inp,
fp8_meta_tensor.scale,
fp8_meta_tensor.amax_history,
fp8_meta_tensor.scale_inv,
fp8_tensor,
otype,
)
def cast_from_fp8(
inp: torch.Tensor,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
itype: tex.DType,
otype: tex.DType,
) -> torch.Tensor:
"""Cast input from FP8"""
return torch.ops.tex_ts.cast_from_fp8_ts(
inp,
fp8_meta_tensor.scale_inv,
fp8_tensor,
itype,
otype,
)
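# A minimal usage sketch: round-tripping a tensor through cast_to_fp8 /
# cast_from_fp8. The helper and the single-slot FP8TensorMeta set up below are
# illustrative assumptions; real modules size scale, scale_inv and amax_history
# to the number of FP8 tensors they track and update them via the FP8 recipe.
def _example_fp8_cast_roundtrip() -> torch.Tensor:
    """Cast a BF16 tensor to FP8 (E4M3) and back to BF16."""
    meta = tex.FP8TensorMeta()
    meta.scale = torch.ones(1, dtype=torch.float32, device="cuda")
    meta.scale_inv = torch.ones(1, dtype=torch.float32, device="cuda")
    meta.amax_history = torch.zeros(1, 1, dtype=torch.float32, device="cuda")
    inp = torch.randn(128, 128, dtype=torch.bfloat16, device="cuda")
    fp8_data = cast_to_fp8(inp, meta, tex.FP8FwdTensors.GEMM1_INPUT, tex.DType.kFloat8E4M3)
    return cast_from_fp8(fp8_data, meta, tex.FP8FwdTensors.GEMM1_INPUT,
                         tex.DType.kFloat8E4M3, tex.DType.kBFloat16)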
| TransformerEngine-main | transformer_engine/pytorch/cpp_extensions/cast.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Python interface for GEMM extensions"""
from typing import Optional, Tuple, Union
import torch
import transformer_engine_extensions as tex
from ..constants import TE_DType
from ..utils import assert_dim_for_fp8_exec
__all__ = ['gemm', 'fp8_gemm']
def fp8_gemm(
A: torch.Tensor,
A_scale_inv: torch.Tensor,
A_fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
A_dtype: tex.DType,
B: torch.Tensor,
B_scale_inv: torch.Tensor,
B_fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
B_dtype: tex.DType,
out_dtype: torch.dtype,
workspace: torch.Tensor,
gelu: bool = False,
accumulate: bool = False,
out: Optional[torch.Tensor] = None,
out_index = None,
fp8_meta_tensor: tex.FP8TensorMeta = None,
bias: Optional[torch.Tensor] = None,
use_bias: bool = False,
use_split_accumulator: bool = False,
D_dtype: Optional[tex.DType] = None,
ub_algo: tex.UbufOverlapAlgo = None,
ub: Union[tex.UbufCommOverlap, tex.UbufP2PCommOverlap] = None,
extra_output_tensor: torch.Tensor = None,
) -> torch.Tensor:
"""TN layout GEMM with fp8 inputs."""
empty_tensor = torch.Tensor()
if D_dtype is not None and D_dtype in [tex.DType.kFloat8E4M3, tex.DType.kFloat8E5M2]:
assert fp8_meta_tensor is not None and out_index is not None
assert_dim_for_fp8_exec(A)
assert_dim_for_fp8_exec(B)
return_output = False
if out is None:
out = torch.empty(
B.shape[0],
A.shape[0],
dtype=out_dtype,
device="cuda",
)
return_output = True
# Use bfloat16 as default bias_dtype
bias_dtype = torch.bfloat16 if bias is None else bias.dtype
if gelu:
gelu_input = torch.empty_like(out, dtype=bias_dtype)
else:
gelu_input = empty_tensor
bias_dtype = TE_DType[bias_dtype]
out_dtype = TE_DType[out.dtype] if D_dtype is None else D_dtype
args = (
A,
A_scale_inv,
A_fp8_tensor,
A_dtype,
True, # transa
B,
B_scale_inv,
B_fp8_tensor,
B_dtype,
False, # transb
out,
empty_tensor if out_index is None else fp8_meta_tensor.scale[out_index],
out_dtype,
empty_tensor if out_index is None else fp8_meta_tensor.amax_history[0][out_index],
bias if use_bias else empty_tensor,
bias_dtype,
gelu_input, # this is pre_gelu_out
False, # grad
workspace,
workspace.shape[0],
accumulate,
use_split_accumulator)
fn = torch.ops.tex_ts.te_gemm_ts
if ub_algo is not None:
assert ub is not None, 'ub object is None!'
if ub_algo == tex.UbufOverlapAlgo.BULK_OVERLAP_AG:
fn = ub.bulk_overlap
args = tuple(args + (1,))
elif ub_algo == tex.UbufOverlapAlgo.BULK_OVERLAP_RS:
fn = ub.bulk_overlap
args = tuple(args + (0,))
elif ub_algo == tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG:
fn = ub.split_overlap_ag
extra_output_tensor = (
empty_tensor if extra_output_tensor is None else extra_output_tensor
)
args = tuple(args + (extra_output_tensor,))
elif ub_algo == tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS:
fn = ub.split_overlap_rs
assert (
extra_output_tensor is not None
), 'SPLIT_PIPELINED_RS requires extra output tensor'
args = tuple(args + (True, extra_output_tensor,))
_ = fn(*args)
if return_output:
if gelu:
return out, gelu_input
return out
if gelu:
return gelu_input
return None
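# A minimal usage sketch: calling fp8_gemm on operands already quantized to FP8
# (E4M3) and stored as raw uint8 bytes with unit scale factors. The helper, the
# byte values, the scale-tensor size and the 32 MiB workspace are illustrative
# assumptions; real callers obtain FP8 data and scales from cast_to_fp8 and an
# fp8_meta recipe.
def _example_fp8_gemm() -> torch.Tensor:
    """Multiply two FP8 operands in the TN layout, producing a BF16 output."""
    # Raw FP8 bytes standing in for properly quantized data (values are arbitrary).
    A = torch.randint(0, 126, (256, 128), dtype=torch.uint8, device="cuda")  # weight-like
    B = torch.randint(0, 126, (64, 128), dtype=torch.uint8, device="cuda")   # input-like
    scale_inv = torch.ones(16, dtype=torch.float32, device="cuda")
    workspace = torch.empty(32 * 1024 * 1024, dtype=torch.uint8, device="cuda")
    out = fp8_gemm(
        A, scale_inv, tex.FP8FwdTensors.GEMM1_WEIGHT, tex.DType.kFloat8E4M3,
        B, scale_inv, tex.FP8FwdTensors.GEMM1_INPUT, tex.DType.kFloat8E4M3,
        torch.bfloat16, workspace,
    )
    return out  # shape [64, 256]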
def gemm(
A: torch.Tensor,
B: torch.Tensor,
dtype: torch.dtype,
workspace: torch.Tensor,
gelu: bool = False,
gelu_input: Optional[torch.Tensor] = None,
grad: bool = False,
accumulate: bool = False,
layout: str = "TN",
out: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
use_bias: bool = False,
ub_algo: tex.UbufOverlapAlgo = None,
ub: tex.UbufCommOverlap = None,
extra_output_tensor: torch.Tensor = None,
) -> Tuple[Union[torch.Tensor, None], ...]:
"""Non FP8 GEMM."""
assert layout in ("TN", "NN", "NT"), f"GEMM layout {layout} not supported."
transa = layout[0] == "T"
transb = layout[1] == "T"
empty_tensor = torch.Tensor()
fp8_index = -1 # dummy index
return_output = False
if out is None:
out = torch.empty(
B.shape[1] if transb else B.shape[0],
A.shape[0] if transa else A.shape[1],
dtype=dtype,
device="cuda",
)
return_output = True
if gelu and not grad:
gelu_input = torch.empty_like(out, dtype=dtype)
elif not gelu:
gelu_input = empty_tensor
if grad and use_bias:
grad_bias = torch.empty(B.shape[1], dtype=out.dtype, device="cuda")
else:
grad_bias = empty_tensor
bias = bias if use_bias else empty_tensor
assert A.dtype == dtype and B.dtype == dtype, \
f'Expected dtype={dtype}, but found A.dtype={A.dtype} and B.dtype={B.dtype}'
input_dtype = TE_DType[dtype]
output_dtype = TE_DType[out.dtype]
if use_bias:
bias_dtype = TE_DType[grad_bias.dtype] if grad else TE_DType[bias.dtype]
else:
bias_dtype = output_dtype
args = (
A,
empty_tensor,
fp8_index,
input_dtype,
transa,
B,
empty_tensor,
fp8_index,
input_dtype,
transb,
out,
empty_tensor, # out_scale
output_dtype,
empty_tensor, # out_amax
grad_bias if grad else bias,
bias_dtype,
gelu_input,
grad,
workspace,
workspace.shape[0],
accumulate,
False, # use_split_accumulator
)
fn = torch.ops.tex_ts.te_gemm_ts
if ub_algo is not None:
assert ub is not None, 'ub object is None!'
if ub_algo == tex.UbufOverlapAlgo.BULK_OVERLAP_AG:
fn = ub.bulk_overlap
args = tuple(args + (1,))
elif ub_algo == tex.UbufOverlapAlgo.BULK_OVERLAP_RS:
fn = ub.bulk_overlap
args = tuple(args + (0,))
elif ub_algo == tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG:
fn = ub.split_overlap_ag
extra_output_tensor = (
empty_tensor if extra_output_tensor is None else extra_output_tensor
)
args = tuple(args + (extra_output_tensor,))
elif ub_algo == tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS:
fn = ub.split_overlap_rs
assert (
extra_output_tensor is not None
), 'SPLIT_PIPELINED_RS requires extra output tensor'
args = tuple(args + (False, extra_output_tensor,))
_ = fn(*args)
if return_output:
return out, grad_bias, gelu_input
return None, grad_bias, gelu_input
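# A minimal usage sketch: a plain BF16 GEMM through the same extension. The
# helper and the 32 MiB workspace size are illustrative assumptions; callers
# normally reuse a persistent cuBLASLt workspace.
def _example_bf16_gemm() -> torch.Tensor:
    """Compute out = B @ A.T with the default TN layout."""
    A = torch.randn(256, 128, dtype=torch.bfloat16, device="cuda")  # weight-like operand
    B = torch.randn(64, 128, dtype=torch.bfloat16, device="cuda")   # input-like operand
    workspace = torch.empty(32 * 1024 * 1024, dtype=torch.uint8, device="cuda")
    out, _, _ = gemm(A, B, torch.bfloat16, workspace, layout="TN")
    return out  # shape [64, 256]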
| TransformerEngine-main | transformer_engine/pytorch/cpp_extensions/gemm.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Python interface for c++ extensions"""
from transformer_engine_extensions import *
from .fused_attn import *
from .gemm import *
from .transpose import *
from .activation import *
from .normalization import *
from .cast import *
| TransformerEngine-main | transformer_engine/pytorch/cpp_extensions/__init__.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Python interface for fused attention extensions"""
import math
from typing import Tuple, List, Union
import torch
import transformer_engine_extensions as tex
from transformer_engine_extensions import (
NVTE_QKV_Layout,
NVTE_Bias_Type,
NVTE_Mask_Type,
NVTE_Fused_Attn_Backend
)
__all__ = ['fused_attn_fwd_qkvpacked',
'fused_attn_bwd_qkvpacked',
'fused_attn_fwd_kvpacked',
'fused_attn_bwd_kvpacked']
TORCH_DType = {
tex.DType.kFloat8E4M3: torch.uint8,
tex.DType.kFloat8E5M2: torch.uint8,
tex.DType.kFloat16: torch.half,
tex.DType.kBFloat16: torch.bfloat16,
tex.DType.kFloat32: torch.float32,
tex.DType.kInt32: torch.int32,
}
QKVLayout = {
"not_interleaved": NVTE_QKV_Layout.NVTE_NOT_INTERLEAVED,
"qkv_interleaved": NVTE_QKV_Layout.NVTE_QKV_INTERLEAVED,
"kv_interleaved": NVTE_QKV_Layout.NVTE_KV_INTERLEAVED,
}
AttnBiasType = {
"no_bias": NVTE_Bias_Type.NVTE_NO_BIAS,
"pre_scale_bias": NVTE_Bias_Type.NVTE_PRE_SCALE_BIAS,
"post_scale_bias": NVTE_Bias_Type.NVTE_POST_SCALE_BIAS,
}
AttnMaskType = {
"no_mask": NVTE_Mask_Type.NVTE_NO_MASK,
"padding": NVTE_Mask_Type.NVTE_PADDING_MASK,
"causal": NVTE_Mask_Type.NVTE_CAUSAL_MASK,
}
FusedAttnBackend = {
"F16_max512_seqlen": NVTE_Fused_Attn_Backend.NVTE_F16_max512_seqlen,
"F16_arbitrary_seqlen": NVTE_Fused_Attn_Backend.NVTE_F16_arbitrary_seqlen,
"FP8": NVTE_Fused_Attn_Backend.NVTE_FP8,
"No_Backend": NVTE_Fused_Attn_Backend.NVTE_No_Backend,
}
BACKEND_F16m512_FP8_THREADS_PER_CTA = 128
BACKEND_F16arb_ELTS_PER_THREADS = 16
def check_tensor(x: torch.Tensor):
"""Check tensor properties."""
assert (x.is_cuda and x.is_contiguous()
), "Tensor should be a GPU tensor and contiguous."
def check_qkv(qkv: torch.Tensor, dtype: torch.dtype):
"""Check tensor properties."""
check_tensor(qkv)
assert (qkv.dtype is dtype
and qkv.dim() == 4
and qkv.shape[1] == 3
), """QKV should be in [total_seqs, 3, num_heads, head_dim] shape
and {dtype} dtype."""
def check_q(q: torch.Tensor, dtype: torch.dtype):
"""Check tensor properties."""
check_tensor(q)
assert (q.dtype is dtype
and q.dim() == 3
), """Q should be in [total_seqs, num_heads, head_dim] shape
and {dtype} dtype."""
def check_kv(kv: torch.Tensor, dtype: torch.dtype):
"""Check tensor properties."""
check_tensor(kv)
assert (kv.dtype is dtype
and kv.dim() == 4
and kv.shape[1] == 2
), """KV should be in [total_seqs, 2, num_heads, head_dim] shape
and {dtype} dtype."""
def check_o(o: torch.Tensor, dtype: torch.dtype):
"""Check tensor properties."""
check_tensor(o)
assert (o.dtype is dtype
and o.dim() == 3
), """O and dO should be in [total_seqs, num_heads, head_dim] shape
and {dtype} dtype."""
def check_stats(stats: torch.Tensor, b: int, h: int, s: int):
"""Check tensor properties."""
check_tensor(stats)
assert (stats.dtype is torch.float32
and stats.dim() == 4
and stats.shape == torch.Size([b, h, s, 1])
), """M and ZInv should be in [batch_size, num_heads, max_seqlen_q, 1]
shape and float32 dtype."""
def check_cu_seqlens(cu_seqlens: torch.Tensor):
"""Check tensor properties."""
check_tensor(cu_seqlens)
assert (cu_seqlens.dtype is torch.int32
and cu_seqlens.dim() == 1
), """cu_seqlens should be in [batch_size +1] shape and int32 dtype."""
def check_scalar(scalar: torch.Tensor):
"""Check tensor properties."""
check_tensor(scalar)
assert (scalar.dtype is torch.float32
and scalar.dim() <= 1
and scalar.numel() == 1
), "amax/scale/descale tensors should be scalars in float32 dtype."
def check_rng_state(rng_state: torch.Tensor):
"""Check tensor properties."""
check_tensor(rng_state)
assert (rng_state.dtype is torch.int64
and rng_state.numel() == 2
), "rng_state should be [seed, offset] and in int64 dtype."
def fused_attn_fwd_qkvpacked(
is_training: bool,
max_seqlen: int,
cu_seqlens: torch.Tensor,
qkv: torch.Tensor,
qkv_dtype: tex.DType,
fused_attention_backend: tex.NVTE_Fused_Attn_Backend,
attn_bias: torch.Tensor = None,
d_scale_qkv: torch.Tensor = None,
q_scale_s: torch.Tensor = None,
q_scale_o: torch.Tensor = None,
amax_s: torch.Tensor = None,
amax_o: torch.Tensor = None,
attn_scale: float = None,
dropout: float = 0.0,
fast_zero_fill: bool = True,
qkv_layout: str = "qkv_interleaved",
attn_bias_type: str = "no_bias",
attn_mask_type: str = "padding",
rng_gen: torch.Generator = None,
) -> Tuple[Union[torch.Tensor, None], ...]:
"""Fused Attention FWD for packed QKV input.
Parameters
----------
is_training: bool
if True, runs training and produces auxiliary tensors aux_ctx_tensors
for the backward; if False, runs inference and doesn't produce aux_ctx_tensors
max_seqlen: int
max sequence length for QKV, used for padding; may be larger than max(cu_seqlens)
cu_seqlens: torch.Tensor
accumulative sequence lengths for QKV; shape [batch_size + 1]
qkv: torch.Tensor
input tensor QKV;
shape [total_seqs, 3, num_heads, head_dim], where total_seqs = cu_seqlens[-1]
qkv_dtype: tex.DType
data type of QKV; in tex.DType, not torch.dtype
fused_attention_backend: tex.NVTE_Fused_Attn_Backend
please see FusedAttention module for details on supported backends.
attn_bias: torch.Tensor, default = None
input tensor Bias when attn_bias_type is "pre_scale_bias" or "post_scale_bias";
shape [1, num_heads, max_seqlen, max_seqlen], same data type as qkv
d_scale_qkv: torch.Tensor, default = None
input tensor for the dequantization of QKV in FP8 computations
q_scale_s: torch.Tensor, default = None
input tensor for the quantization of S in FP8 computations, S = Softmax(Q * K.T)
q_scale_o: torch.Tensor, default = None
input tensor for the quantization of O in FP8 computations
amax_s: torch.Tensor, default = None
output tensor, amax of S, used by the next iteration in FP8 computations
amax_o: torch.Tensor, default = None
output tensor, amax of O, used by the next iteration in FP8 computations
attn_scale: float, default = None
if not None, use attn_scale as the attention scale for Q*K.T BMM;
if None, use 1.0/sqrt(head_dim) as the default
dropout: float, default = 0.0
dropout probability, 0.0 means no dropout, 1.0 means no output;
dropout must be 0.0 if is_training is False
fast_zero_fill: bool, default = True
if True, initializes the output tensor O to zero using the fast filling method;
if False, uses PyTorch's .fill_() method
qkv_layout: str, default = "qkv_interleaved"
layout of QKV; {"qkv_interleaved", "kv_interleaved", "not_interleaved"}
attn_bias_type: str, default = "no_bias"
type of the bias; {"no_bias", "pre_scale_bias", "post_scale_bias"}
attn_mask_type: str, default = "padding"
type of the attention mask; {"padding", "causal", "no_mask"}
rng_gen: torch.Generator, default = None
random number generator;
if None, uses the default CUDA generator from PyTorch; otherwise, uses rng_gen
Returns
----------
o: torch.Tensor
output tensor O, of the attention calculation; same data type as QKV;
shape [total_seqs, num_heads, head_dim], where total_seqs = cu_seqlens[-1]
aux_ctx_tensors: List[torch.Tensor]
auxiliary output tensors used for the backward;
if is_training is True, aux_ctx_tensors = [softmax-related tensors, rng_state]
if is_training is False, aux_ctx_tensors = None
softmax-related tensors:
1. if fused_attention_backend == FusedAttnBackend["F16_max512_seqlen"]
softmax: torch.Tensor
Softmax(Q*K.T)
shape [batch_size, num_heads, max_seqlen, max_seqlen], dtype float32
2. if fused_attention_backend == FusedAttnBackend["F16_arbitrary_seqlen"]
softmaxStats: torch.Tensor
log(sum(e^(x - max(x)))), where x=Q*K.T
shape [batch_size, num_heads, max_seqlen, 1], dtype float32
3. if fused_attention_backend == FusedAttnBackend["FP8"]
M: torch.Tensor
max(Q*K.T)
shape [batch_size, num_heads, max_seqlen, 1], dtype float32
ZInv: torch.Tensor
1/sum(e^(x - max(x))), where x=Q*K.T
shape [batch_size, num_heads, max_seqlen, 1], dtype float32
rng_state: torch.Tensor, optional, if backend is not F16_max512_seqlen
state of the random number generator;
[seed, offset], dtype uint64
"""
check_cu_seqlens(cu_seqlens)
b = cu_seqlens.numel() - 1
qkv_type = TORCH_DType[qkv_dtype]
check_qkv(qkv, qkv_type)
total_seqs = qkv.size(0)
h = qkv.size(2)
d = qkv.size(3)
if attn_scale is None:
attn_scale = 1.0 / math.sqrt(d)
if attn_bias_type != "no_bias":
assert (attn_bias is not None
), "attn_bias tensor cannot be None when attn_bias_type is not no_bias."
assert (attn_bias.shape == torch.Size([1, h, max_seqlen, max_seqlen])
), "attn_bias tensor must be in [1, h, max_seqlen, max_seqlen] shape."
assert (attn_bias.dtype == qkv.dtype
), "attn_bias tensor must be in the same dtype as qkv."
assert (fused_attention_backend != FusedAttnBackend["No_Backend"]
), "Fused attention does not support this input combination."
# BF16/FP16 fused attention API from fmha_v1 apex
if fused_attention_backend == FusedAttnBackend["F16_max512_seqlen"]:
rng_elts_per_thread = (max_seqlen * max_seqlen
+ BACKEND_F16m512_FP8_THREADS_PER_CTA - 1)//BACKEND_F16m512_FP8_THREADS_PER_CTA
# BF16/FP16 fused attention API from fmha_v2
if fused_attention_backend == FusedAttnBackend["F16_arbitrary_seqlen"]:
rng_elts_per_thread = BACKEND_F16arb_ELTS_PER_THREADS
# FP8 fused attention API from fmha_v2
if fused_attention_backend == FusedAttnBackend["FP8"]:
rng_elts_per_thread = (max_seqlen * max_seqlen
+ BACKEND_F16m512_FP8_THREADS_PER_CTA - 1)//BACKEND_F16m512_FP8_THREADS_PER_CTA
assert (d_scale_qkv is not None
), "d_scale_qkv is required as an input for FP8 fused attention."
assert (q_scale_s is not None
), "q_scale_s is required as an input for FP8 fused attention."
assert (q_scale_o is not None
), "q_scale_o is required as an input for FP8 fused attention."
assert (amax_s is not None
), "amax_s is required as an input for FP8 fused attention."
assert (amax_o is not None
), "amax_o is required as an input for FP8 fused attention."
check_scalar(d_scale_qkv)
check_scalar(q_scale_s)
check_scalar(q_scale_o)
check_scalar(amax_s)
check_scalar(amax_o)
# execute kernel
output_tensors = tex.fused_attn_fwd_qkvpacked(
b, max_seqlen, total_seqs, h, d,
is_training, attn_scale, dropout, fast_zero_fill,
QKVLayout[qkv_layout], AttnBiasType[attn_bias_type], AttnMaskType[attn_mask_type],
cu_seqlens, qkv, qkv_dtype,
d_scale_qkv, q_scale_s, q_scale_o, amax_s, amax_o, attn_bias,
rng_gen, rng_elts_per_thread,
)
# out, aux_ctx_tensors
return output_tensors[0], output_tensors[1:]
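# A minimal usage sketch: a BF16 forward call with interleaved packed QKV for two
# length-128 sequences. The helper is illustrative and assumes the
# F16_arbitrary_seqlen backend is supported on the current device; backend
# selection is normally handled by the higher-level attention module.
def _example_fused_attn_fwd_qkvpacked():
    """Run the packed-QKV fused attention forward; returns (out, aux_ctx_tensors)."""
    max_seqlen, num_heads, head_dim = 128, 16, 64
    cu_seqlens = torch.tensor([0, 128, 256], dtype=torch.int32, device="cuda")
    qkv = torch.randn(256, 3, num_heads, head_dim, dtype=torch.bfloat16, device="cuda")
    return fused_attn_fwd_qkvpacked(
        True,                                   # is_training
        max_seqlen,
        cu_seqlens,
        qkv,
        tex.DType.kBFloat16,
        FusedAttnBackend["F16_arbitrary_seqlen"],
        attn_mask_type="causal",
    )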
def fused_attn_bwd_qkvpacked(
max_seqlen: int,
cu_seqlens: torch.Tensor,
qkv: torch.Tensor,
o: torch.Tensor,
d_o: torch.Tensor,
qkv_dtype: tex.DType,
aux_ctx_tensors: List[torch.Tensor],
fused_attention_backend: tex.NVTE_Fused_Attn_Backend,
d_scale_qkv: torch.Tensor = None,
d_scale_s: torch.Tensor = None,
d_scale_o: torch.Tensor = None,
d_scale_do: torch.Tensor = None,
q_scale_s: torch.Tensor = None,
q_scale_dp: torch.Tensor = None,
q_scale_dqkv: torch.Tensor = None,
amax_dp: torch.Tensor = None,
amax_dqkv: torch.Tensor = None,
attn_scale: float = None,
dropout: float = 0.0,
fast_zero_fill: bool = True,
qkv_layout: str = "qkv_interleaved",
attn_bias_type: str = "no_bias",
attn_mask_type: str = "padding",
) -> Tuple[Union[torch.Tensor, None], ...]:
"""Fused Attention BWD for packed QKV input.
Parameters
----------
max_seqlen: int
                max sequence length for QKV, used for padding; may be larger than max(cu_seqlens)
cu_seqlens: torch.Tensor
accumulative sequence lengths for QKV; shape [batch_size + 1]
qkv: torch.Tensor
input tensor QKV;
shape [total_seqs, 3, num_heads, head_dim], where total_seqs = cu_seqlens[-1]
o: torch.Tensor
input tensor O (output of forward);
shape [total_seqs, num_heads, head_dim], where total_seqs = cu_seqlens[-1]
d_o: torch.Tensor
input tensor dO (gradient of O);
shape [total_seqs, num_heads, head_dim], where total_seqs = cu_seqlens[-1]
qkv_dtype: tex.DType
data type of QKV; in tex.DType, not torch.dtype
aux_ctx_tensors: List[torch.Tensor]
auxiliary output tensors of the forward pass when its is_training is True,
e.g. aux_ctx_tensors = [M, ZInv, rng_state]
fused_attention_backend: tex.NVTE_Fused_Attn_Backend
please see FusedAttention module for details on supported backends.
d_scale_qkv: torch.Tensor, default = None
input tensor for the dequantization of QKV in FP8 computations
d_scale_s: torch.Tensor, default = None
input tensor for the dequantization of S in FP8 computations, S = Softmax(Q * K.T)
d_scale_o: torch.Tensor, default = None
input tensor for the dequantization of O in FP8 computations
d_scale_do: torch.Tensor, default = None
input tensor for the dequantization of dO in FP8 computations
q_scale_s: torch.Tensor, default = None
input tensor for the quantization of S in FP8 computations
q_scale_dp: torch.Tensor, default = None
input tensor for the quantization of dP in FP8 computations, P = Q * K.T
q_scale_dqkv: torch.Tensor, default = None
input tensor for the quantization of dQKV in FP8 computations
amax_dp: torch.Tensor, default = None
output tensor, amax of dP, used by the next iteration in FP8 computations
amax_dqkv: torch.Tensor, default = None
output tensor, amax of dQKV, used by the next iteration in FP8 computations
attn_scale: float, default = None
if not None, use attn_scale as the attention scale for Q*K.T BMM;
if None, use 1.0/sqrt(head_dim) as the default
dropout: float, default = 0.0
dropout probability, 0.0 means no dropout, 1.0 means no output;
dropout must be 0.0 if is_training is False
fast_zero_fill: bool, default = True
if True, initializes the output tensor O to zero using the fast filling method;
if False, uses PyTorch's .fill_() method
qkv_layout: str, default = "qkv_interleaved"
layout of QKV; {"qkv_interleaved", "kv_interleaved", "not_interleaved"}
attn_bias_type: str, default = "no_bias"
type of the bias; {"no_bias", "pre_scale_bias", "post_scale_bias"}
attn_mask_type: str, default = "padding"
type of the attention mask; {"padding", "causal", "no_mask"}
Returns
----------
d_qkv: torch.Tensor
gradient tensor of QKV; same data type and shape as QKV
d_bias: torch.Tensor, optional
gradient tensor of Bias when attn_bias_type is "pre_scale_bias"
or "post_scale_bias"; same data type and shape as Bias
"""
check_cu_seqlens(cu_seqlens)
b = cu_seqlens.numel() - 1
qkv_type = TORCH_DType[qkv_dtype]
check_qkv(qkv, qkv_type)
check_o(o, qkv_type)
check_o(d_o, qkv_type)
total_seqs = qkv.size(0)
h = qkv.size(2)
d = qkv.size(3)
if attn_scale is None:
attn_scale = 1.0 / math.sqrt(d)
assert (fused_attention_backend != FusedAttnBackend["No_Backend"]
), "Fused attention does not support this input combination."
if fused_attention_backend != FusedAttnBackend["F16_max512_seqlen"]:
assert (len(aux_ctx_tensors) >= 1
), "aux_ctx_tensors must contain rng_state as its last element."
rng_state = aux_ctx_tensors[-1]
check_rng_state(rng_state)
if fused_attention_backend == FusedAttnBackend["FP8"]:
assert (d_scale_qkv is not None), "d_scale_qkv is required for FP8 fused attention."
assert (d_scale_s is not None), "d_scale_s is required for FP8 fused attention."
assert (d_scale_o is not None), "d_scale_o is required for FP8 fused attention."
assert (d_scale_do is not None), "d_scale_do is required for FP8 fused attention."
assert (q_scale_s is not None), "q_scale_s is required for FP8 fused attention."
assert (q_scale_dp is not None), "q_scale_dp is required for FP8 fused attention."
assert (q_scale_dqkv is not None), "q_scale_dqkv is required for FP8 fused attention."
assert (amax_dp is not None), "amax_dp is required for FP8 fused attention."
assert (amax_dqkv is not None), "amax_dqkv is required for FP8 fused attention."
assert (len(aux_ctx_tensors) == 3
), "aux_ctx_tensors is required to be [M, ZInv, rng_state] for FP8 fused attention."
check_scalar(d_scale_qkv)
check_scalar(d_scale_s)
check_scalar(d_scale_o)
check_scalar(d_scale_do)
check_scalar(q_scale_s)
check_scalar(q_scale_dp)
check_scalar(q_scale_dqkv)
check_scalar(amax_dp)
check_scalar(amax_dqkv)
m, z_inv = aux_ctx_tensors[:2]
check_stats(m, b, h, max_seqlen)
check_stats(z_inv, b, h, max_seqlen)
# execute kernel
output_tensors = tex.fused_attn_bwd_qkvpacked(
b, max_seqlen, total_seqs, h, d,
attn_scale, dropout, fast_zero_fill,
QKVLayout[qkv_layout], AttnBiasType[attn_bias_type], AttnMaskType[attn_mask_type],
cu_seqlens, qkv, o, d_o, qkv_dtype, aux_ctx_tensors,
d_scale_qkv, d_scale_s, d_scale_o, d_scale_do,
q_scale_s, q_scale_dp, q_scale_dqkv, amax_dp, amax_dqkv,
)
if attn_bias_type == "no_bias":
# return d_qkv when attn_bias_type is no_bias
return output_tensors
# otherwise return (d_qkv, d_bias)
return output_tensors[0], output_tensors[1]
def fused_attn_fwd_kvpacked(
is_training: bool,
max_seqlen_q: int,
max_seqlen_kv: int,
cu_seqlens_q: torch.Tensor,
cu_seqlens_kv: torch.Tensor,
q: torch.Tensor,
kv: torch.Tensor,
qkv_dtype: tex.DType,
fused_attention_backend: tex.NVTE_Fused_Attn_Backend,
attn_bias: torch.Tensor = None,
d_scale_qkv: torch.Tensor = None,
q_scale_s: torch.Tensor = None,
q_scale_o: torch.Tensor = None,
amax_s: torch.Tensor = None,
amax_o: torch.Tensor = None,
attn_scale: float = None,
dropout: float = 0.0,
fast_zero_fill: bool = True,
qkv_layout: str = "qkv_interleaved",
attn_bias_type: str = "no_bias",
attn_mask_type: str = "padding",
rng_gen: torch.Generator = None,
) -> Tuple[Union[torch.Tensor, None], ...]:
"""Fused Attention FWD for packed KV input.
Parameters
----------
is_training: bool
if True, runs training and produces auxiliary tensors aux_ctx_tensors
for the backward; if False, runs inference and doesn't produce aux_ctx_tensors
max_seqlen_q: int
max sequence length for Q, used for padding; may be larger than max(cu_seqlens_q)
max_seqlen_kv: int
max sequence length for KV, used for padding; may be larger than max(cu_seqlens_kv)
cu_seqlens_q: torch.Tensor
accumulative sequence lengths for Q; shape [batch_size + 1]
cu_seqlens_kv: torch.Tensor
accumulative sequence lengths for KV; shape [batch_size + 1]
q: torch.Tensor
input tensor Q;
shape [total_seqs_q, num_heads, head_dim], where total_seqs_q = cu_seqlens_q[-1]
kv: torch.Tensor
packed input tensor KV;
shape [total_seqs_kv, 2, num_heads, head_dim],
where total_seqs_kv = cu_seqlens_kv[-1]
qkv_dtype: tex.DType
data type of Q and KV; in tex.DType, not torch.dtype
fused_attention_backend: tex.NVTE_Fused_Attn_Backend
please see FusedAttention module for details on supported backends.
attn_bias: torch.Tensor, default = None
input tensor Bias when attn_bias_type is "pre_scale_bias" or "post_scale_bias";
shape [1, num_heads, max_seqlen_q, max_seqlen_kv], same data type as q and kv
d_scale_qkv: torch.Tensor, default = None
input tensor for the dequantization of QKV in FP8 computations
q_scale_s: torch.Tensor, default = None
input tensor for the quantization of S in FP8 computations, S = Softmax(Q * K.T)
q_scale_o: torch.Tensor, default = None
input tensor for the quantization of O in FP8 computations
amax_s: torch.Tensor, default = None
output tensor, amax of S, used by the next iteration in FP8 computations
amax_o: torch.Tensor, default = None
output tensor, amax of O, used by the next iteration in FP8 computations
attn_scale: float, default = None
if not None, use attn_scale as the attention scale for Q*K.T BMM;
if None, use 1.0/sqrt(head_dim) as the default
dropout: float, default = 0.0
dropout probability, 0.0 means no dropout, 1.0 means no output;
dropout must be 0.0 if is_training is False
fast_zero_fill: bool, default = True
if True, initializes the output tensor O to zero using the fast filling method;
if False, uses PyTorch's .fill_() method
qkv_layout: str, default = "qkv_interleaved"
layout of QKV; {"qkv_interleaved", "kv_interleaved", "not_interleaved"}
attn_bias_type: str, default = "no_bias"
type of the bias; {"no_bias", "pre_scale_bias", "post_scale_bias"}
attn_mask_type: str, default = "padding"
type of the attention mask; {"padding", "causal", "no_mask"}
rng_gen: torch.Generator, default = None
random number generator;
if None, uses the default CUDA generator from PyTorch; otherwise, uses rng_gen
Returns
----------
o: torch.Tensor
output tensor O, of the attention calculation; same data type as QKV;
        shape [total_seqs_q, num_heads, head_dim], where total_seqs_q = cu_seqlens_q[-1]
aux_ctx_tensors: List[torch.Tensor]
auxiliary output tensors used for the backward;
if is_training is True, aux_ctx_tensors = [softmax-related tensors, rng_state]
if is_training is False, aux_ctx_tensors = None
softmax-related tensors:
1. if fused_attention_backend == FusedAttnBackend["F16_max512_seqlen"]
softmax: torch.Tensor
Softmax(Q*K.T)
shape [batch_size, num_heads, max_seqlen_q, max_seqlen_kv], dtype float32
2. if fused_attention_backend == FusedAttnBackend["F16_arbitrary_seqlen"]
softmaxStats: torch.Tensor
log(sum(e^(x - max(x)))), where x=Q*K.T
shape [batch_size, num_heads, max_seqlen_q, 1], dtype float32
3. if fused_attention_backend == FusedAttnBackend["FP8"]
M: torch.Tensor
max(Q*K.T)
shape [batch_size, num_heads, max_seqlen_q, 1], dtype float32
ZInv: torch.Tensor
1/sum(e^(x - max(x))), where x=Q*K.T
shape [batch_size, num_heads, max_seqlen_q, 1], dtype float32
rng_state: torch.Tensor, optional, if backend is not F16_max512_seqlen
state of the random number generator;
[seed, offset], dtype uint64
"""
check_cu_seqlens(cu_seqlens_q)
check_cu_seqlens(cu_seqlens_kv)
assert (cu_seqlens_q.numel() == cu_seqlens_kv.numel()
), "cu_seqlens_q and cu_seqlens_kv must have the same length."
b = cu_seqlens_q.numel() - 1
qkv_type = TORCH_DType[qkv_dtype]
check_q(q, qkv_type)
check_kv(kv, qkv_type)
assert (q.size(1) == kv.size(2)
and q.size(2) == kv.size(3)
), "Q and KV must have the same num_heads and head_dim."
total_seqs_q = q.size(0)
total_seqs_kv = kv.size(0)
h = q.size(1)
d = q.size(2)
if attn_scale is None:
attn_scale = 1.0 / math.sqrt(d)
if attn_bias_type != "no_bias":
assert (attn_bias is not None
), "attn_bias tensor cannot be None when attn_bias_type is not no_bias."
assert (attn_bias.shape == torch.Size([1, h, max_seqlen_q, max_seqlen_kv])
), "attn_bias tensor must be in [1, h, max_seqlen_q, max_seqlen_kv] shape."
assert (attn_bias.dtype == q.dtype
), "attn_bias tensor must be in the same dtype as q and kv."
assert (fused_attention_backend != FusedAttnBackend["No_Backend"]
), "Fused attention does not support this input combination."
# BF16/FP16 fused attention API from fmha_v1 apex
if fused_attention_backend == FusedAttnBackend["F16_max512_seqlen"]:
rng_elts_per_thread = (max_seqlen_q * max_seqlen_kv
+ BACKEND_F16m512_FP8_THREADS_PER_CTA - 1)//BACKEND_F16m512_FP8_THREADS_PER_CTA
# BF16/FP16 fused attention API from fmha_v2
if fused_attention_backend == FusedAttnBackend["F16_arbitrary_seqlen"]:
rng_elts_per_thread = BACKEND_F16arb_ELTS_PER_THREADS
# FP8 fused attention API from fmha_v2
if fused_attention_backend == FusedAttnBackend["FP8"]:
rng_elts_per_thread = (max_seqlen_q * max_seqlen_q
+ BACKEND_F16m512_FP8_THREADS_PER_CTA - 1)//BACKEND_F16m512_FP8_THREADS_PER_CTA
# execute kernel
output_tensors = tex.fused_attn_fwd_kvpacked(
b, max_seqlen_q, max_seqlen_kv, total_seqs_q, total_seqs_kv, h, d,
is_training, attn_scale, dropout, fast_zero_fill,
QKVLayout[qkv_layout], AttnBiasType[attn_bias_type], AttnMaskType[attn_mask_type],
cu_seqlens_q, cu_seqlens_kv, q, kv, qkv_dtype,
d_scale_qkv, q_scale_s, q_scale_o, amax_s, amax_o,
attn_bias, rng_gen, rng_elts_per_thread,
)
# out, aux_ctx_tensors
return output_tensors[0], output_tensors[1:]
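# A minimal usage sketch: a cross-attention style BF16 forward with separate Q and
# packed KV. The helper is illustrative and assumes the F16_arbitrary_seqlen
# backend is supported on the current device.
def _example_fused_attn_fwd_kvpacked():
    """Run the packed-KV fused attention forward; returns (out, aux_ctx_tensors)."""
    num_heads, head_dim = 16, 64
    max_seqlen_q, max_seqlen_kv = 64, 128
    cu_seqlens_q = torch.tensor([0, 64, 128], dtype=torch.int32, device="cuda")
    cu_seqlens_kv = torch.tensor([0, 128, 256], dtype=torch.int32, device="cuda")
    q = torch.randn(128, num_heads, head_dim, dtype=torch.bfloat16, device="cuda")
    kv = torch.randn(256, 2, num_heads, head_dim, dtype=torch.bfloat16, device="cuda")
    return fused_attn_fwd_kvpacked(
        True,                                   # is_training
        max_seqlen_q,
        max_seqlen_kv,
        cu_seqlens_q,
        cu_seqlens_kv,
        q,
        kv,
        tex.DType.kBFloat16,
        FusedAttnBackend["F16_arbitrary_seqlen"],
        qkv_layout="kv_interleaved",
        attn_mask_type="padding",
    )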
def fused_attn_bwd_kvpacked(
max_seqlen_q: int,
max_seqlen_kv: int,
cu_seqlens_q: torch.Tensor,
cu_seqlens_kv: torch.Tensor,
q: torch.Tensor,
kv: torch.Tensor,
o: torch.Tensor,
d_o: torch.Tensor,
qkv_dtype: tex.DType,
aux_ctx_tensors: List[torch.Tensor],
fused_attention_backend: tex.NVTE_Fused_Attn_Backend,
d_scale_qkv: torch.Tensor = None,
d_scale_s: torch.Tensor = None,
d_scale_o: torch.Tensor = None,
d_scale_do: torch.Tensor = None,
q_scale_s: torch.Tensor = None,
q_scale_dp: torch.Tensor = None,
q_scale_dqkv: torch.Tensor = None,
amax_dp: torch.Tensor = None,
amax_dqkv: torch.Tensor = None,
attn_scale: float = None,
dropout: float = 0.0,
fast_zero_fill: bool = True,
qkv_layout: str = "qkv_interleaved",
attn_bias_type: str = "no_bias",
attn_mask_type: str = "padding",
) -> Tuple[Union[torch.Tensor, None], ...]:
"""Fused Attention BWD for packed KV input.
Parameters
----------
max_seqlen_q: int
max sequence length for Q, used for padding; may be larger than max(cu_seqlens_q)
max_seqlen_kv: int
max sequence length for KV, used for padding; may be larger than max(cu_seqlens_kv)
cu_seqlens_q: torch.Tensor
accumulative sequence lengths for Q; shape [batch_size + 1]
cu_seqlens_kv: torch.Tensor
accumulative sequence lengths for KV; shape [batch_size + 1]
q: torch.Tensor
input tensor Q;
shape [total_seqs_q, num_heads, head_dim], where total_seqs_q = cu_seqlens_q[-1]
kv: torch.Tensor
packed input tensor KV;
shape [total_seqs_kv, 2, num_heads, head_dim],
where total_seqs_kv = cu_seqlens_kv[-1]
o: torch.Tensor
input tensor O (output of forward);
shape [total_seqs_q, num_heads, head_dim], where total_seqs_q = cu_seqlens_q[-1]
d_o: torch.Tensor
input tensor dO (gradient of O);
shape [total_seqs_q, num_heads, head_dim], where total_seqs_q = cu_seqlens_q[-1]
qkv_dtype: tex.DType
data type of QKV; in tex.DType, not torch.dtype
aux_ctx_tensors: List[torch.Tensor]
auxiliary output tensors of the forward pass when its is_training is True,
e.g. aux_ctx_tensors = [M, ZInv, rng_state]
fused_attention_backend: tex.NVTE_Fused_Attn_Backend
please see FusedAttention module for details on supported backends.
d_scale_qkv: torch.Tensor, default = None
input tensor for the dequantization of QKV in FP8 computations
d_scale_s: torch.Tensor, default = None
input tensor for the dequantization of S in FP8 computations, S = Softmax(Q * K.T)
d_scale_o: torch.Tensor, default = None
input tensor for the dequantization of O in FP8 computations
d_scale_do: torch.Tensor, default = None
input tensor for the dequantization of dO in FP8 computations
q_scale_s: torch.Tensor, default = None
input tensor for the quantization of S in FP8 computations
q_scale_dp: torch.Tensor, default = None
input tensor for the quantization of dP in FP8 computations, P = Q * K.T
q_scale_dqkv: torch.Tensor, default = None
input tensor for the quantization of dQKV in FP8 computations
amax_dp: torch.Tensor, default = None
output tensor, amax of dP, used by the next iteration in FP8 computations,
P = Q * K.T
amax_dqkv: torch.Tensor, default = None
output tensor, amax of dQKV, used by the next iteration in FP8 computations
attn_scale: float, default = None
if not None, use attn_scale as the attention scale for Q*K.T BMM;
if None, use 1.0/sqrt(head_dim) as the default
dropout: float, default = 0.0
dropout probability, 0.0 means no dropout, 1.0 means no output;
dropout must be 0.0 if is_training is False
fast_zero_fill: bool, default = True
if True, initializes the output tensor O to zero using the fast filling method;
if False, uses PyTorch's .fill_() method
qkv_layout: str, default = "qkv_interleaved"
layout of QKV; {"qkv_interleaved", "kv_interleaved", "not_interleaved"}
attn_bias_type: str, default = "no_bias"
type of the bias; {"no_bias", "pre_scale_bias", "post_scale_bias"}
attn_mask_type: str, default = "padding"
type of the attention mask; {"padding", "causal", "no_mask"}
Returns
----------
d_q: torch.Tensor
gradient tensor of Q; same data type and shape as Q
d_kv: torch.Tensor
gradient tensor of KV; same data type and shape as KV
d_bias: torch.Tensor, optional
gradient tensor of Bias when attn_bias_type is "pre_scale_bias"
or "post_scale_bias"; same data type and shape as Bias
"""
check_cu_seqlens(cu_seqlens_q)
check_cu_seqlens(cu_seqlens_kv)
assert (cu_seqlens_q.numel() == cu_seqlens_kv.numel()
), "cu_seqlens_q and cu_seqlens_kv must have the same length."
b = cu_seqlens_q.numel() - 1
qkv_type = TORCH_DType[qkv_dtype]
check_q(q, qkv_type)
check_kv(kv, qkv_type)
check_o(o, qkv_type)
check_o(d_o, qkv_type)
assert (q.size(1) == kv.size(2)
and q.size(2) == kv.size(3)
), "Q and KV must have the same num_heads and head_dim."
total_seqs_q = q.size(0)
    total_seqs_kv = kv.size(0)
h = q.size(1)
d = q.size(2)
if attn_scale is None:
attn_scale = 1.0 / math.sqrt(d)
assert (fused_attention_backend != FusedAttnBackend["No_Backend"]
), "Fused attention does not support this input combination."
if fused_attention_backend != FusedAttnBackend["F16_max512_seqlen"]:
assert (len(aux_ctx_tensors) >= 1
), "aux_ctx_tensors must contain rng_state as its last element."
rng_state = aux_ctx_tensors[-1]
check_rng_state(rng_state)
if fused_attention_backend == FusedAttnBackend["FP8"]:
assert (d_scale_qkv is not None), "d_scale_qkv is required for FP8 fused attention."
assert (d_scale_s is not None), "d_scale_s is required for FP8 fused attention."
assert (d_scale_o is not None), "d_scale_o is required for FP8 fused attention."
assert (d_scale_do is not None), "d_scale_do is required for FP8 fused attention."
assert (q_scale_s is not None), "q_scale_s is required for FP8 fused attention."
assert (q_scale_dp is not None), "q_scale_dp is required for FP8 fused attention."
assert (q_scale_dqkv is not None), "q_scale_dqkv is required for FP8 fused attention."
assert (amax_dp is not None), "amax_dp is required for FP8 fused attention."
assert (amax_dqkv is not None), "amax_dqkv is required for FP8 fused attention."
assert (len(aux_ctx_tensors) == 3
), "aux_ctx_tensors is required to be [M, ZInv, rng_state] for FP8 fused attention."
check_scalar(d_scale_qkv)
check_scalar(d_scale_s)
check_scalar(d_scale_o)
check_scalar(d_scale_do)
check_scalar(q_scale_s)
check_scalar(q_scale_dp)
check_scalar(q_scale_dqkv)
check_scalar(amax_dp)
check_scalar(amax_dqkv)
m, z_inv = aux_ctx_tensors[:2]
check_stats(m, b, h, max_seqlen_q)
check_stats(z_inv, b, h, max_seqlen_q)
# execute kernel
output_tensors = tex.fused_attn_bwd_kvpacked(
b, max_seqlen_q, max_seqlen_kv, total_seqs_q, total_seqs_kv, h, d,
attn_scale, dropout, fast_zero_fill,
QKVLayout[qkv_layout], AttnBiasType[attn_bias_type], AttnMaskType[attn_mask_type],
cu_seqlens_q, cu_seqlens_kv, q, kv, o, d_o, qkv_dtype, aux_ctx_tensors,
d_scale_qkv, d_scale_s, d_scale_o, d_scale_do,
q_scale_s, q_scale_dp, q_scale_dqkv, amax_dp, amax_dqkv,
)
if attn_bias_type == "no_bias":
# return (d_q, d_kv) when attn_bias_type is no_bias
return output_tensors
# otherwise return (d_q, d_kv), d_bias
return output_tensors[:2], output_tensors[2]
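# --- Editor's note: hedged usage sketch, not part of the original source ---
# The backward entry point above is meant to be paired with
# fused_attn_fwd_kvpacked: the forward call returns the attention output and
# the auxiliary context tensors, which are then passed back in here together
# with the output gradient. The forward call is abbreviated and the variable
# names are illustrative; nothing below is executed.
#
#     out, aux_ctx_tensors = fused_attn_fwd_kvpacked(...)   # forward pass
#     # ... out participates in the model; autograd yields d_o ...
#     d_q, d_kv = fused_attn_bwd_kvpacked(
#         max_seqlen_q, max_seqlen_kv, cu_seqlens_q, cu_seqlens_kv,
#         q, kv, out, d_o, qkv_dtype, aux_ctx_tensors, backend,
#     )  # "no_bias" path; with a bias, a d_bias gradient is also returned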
| TransformerEngine-main | transformer_engine/pytorch/cpp_extensions/fused_attn.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Python interface for activation extensions"""
from typing import Union
import torch
import transformer_engine_extensions as tex
__all__ = ['gelu', 'relu', 'reglu', 'geglu', 'swiglu']
def gelu(
inp: torch.Tensor,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
) -> torch.Tensor:
"""GeLU with FP8 output"""
empty_tensor = torch.Tensor()
if fp8_meta_tensor is not None:
scale = fp8_meta_tensor.scale
amax_history = fp8_meta_tensor.amax_history
scale_inv = fp8_meta_tensor.scale_inv
else:
scale = empty_tensor
amax_history = empty_tensor
scale_inv = empty_tensor
return torch.ops.tex_ts.gelu_ts(
inp,
scale,
amax_history,
scale_inv,
fp8_tensor,
otype,
)
def relu(
inp: torch.Tensor,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
) -> torch.Tensor:
"""ReLU with FP8 output"""
empty_tensor = torch.Tensor()
if fp8_meta_tensor is not None:
scale = fp8_meta_tensor.scale
amax_history = fp8_meta_tensor.amax_history
scale_inv = fp8_meta_tensor.scale_inv
else:
scale = empty_tensor
amax_history = empty_tensor
scale_inv = empty_tensor
return torch.ops.tex_ts.relu_ts(
inp,
scale,
amax_history,
scale_inv,
fp8_tensor,
otype,
)
def geglu(
inp: torch.Tensor,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
) -> torch.Tensor:
"""GeGLU with FP8 output"""
empty_tensor = torch.Tensor()
if fp8_meta_tensor is not None:
scale = fp8_meta_tensor.scale
amax_history = fp8_meta_tensor.amax_history
scale_inv = fp8_meta_tensor.scale_inv
else:
scale = empty_tensor
amax_history = empty_tensor
scale_inv = empty_tensor
return torch.ops.tex_ts.geglu_ts(
inp,
scale,
amax_history,
scale_inv,
fp8_tensor,
otype,
)
def reglu(
inp: torch.Tensor,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
) -> torch.Tensor:
"""ReGLU with FP8 output"""
empty_tensor = torch.Tensor()
if fp8_meta_tensor is not None:
scale = fp8_meta_tensor.scale
amax_history = fp8_meta_tensor.amax_history
scale_inv = fp8_meta_tensor.scale_inv
else:
scale = empty_tensor
amax_history = empty_tensor
scale_inv = empty_tensor
return torch.ops.tex_ts.reglu_ts(
inp,
scale,
amax_history,
scale_inv,
fp8_tensor,
otype,
)
def swiglu(
inp: torch.Tensor,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
) -> torch.Tensor:
"""SwiGLU with FP8 output"""
empty_tensor = torch.Tensor()
if fp8_meta_tensor is not None:
scale = fp8_meta_tensor.scale
amax_history = fp8_meta_tensor.amax_history
scale_inv = fp8_meta_tensor.scale_inv
else:
scale = empty_tensor
amax_history = empty_tensor
scale_inv = empty_tensor
return torch.ops.tex_ts.swiglu_ts(
inp,
scale,
amax_history,
scale_inv,
fp8_tensor,
otype,
)
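# --- Editor's note: hedged usage sketch, not part of the original source ---
# These wrappers are invoked in two ways elsewhere in the code base (see
# LayerNormMLP): with an FP8 meta tensor to produce an FP8 output, or with
# `None` to run in the input precision. The names fc1_out, fp8_meta and
# fp8_dtype_forward are assumed to exist on the caller's side; nothing below
# is executed at import time.
#
#     from transformer_engine.pytorch.constants import TE_DType
#
#     # FP8 output path
#     act_out = gelu(fc1_out, fp8_meta["scaling_fwd"],
#                    tex.FP8FwdTensors.GEMM2_INPUT, fp8_dtype_forward)
#
#     # higher-precision path (no FP8 metadata)
#     act_out = gelu(fc1_out, None,
#                    tex.FP8FwdTensors.GEMM2_INPUT, TE_DType[fc1_out.dtype])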
| TransformerEngine-main | transformer_engine/pytorch/cpp_extensions/activation.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Python interface for transpose extensions"""
from typing import Optional, Tuple, Union
import torch
import transformer_engine_extensions as tex
from ..constants import TE_DType
__all__ = ['fp8_cast_transpose_fused',
'fp8_cast_transpose_bgrad_fused',
'fp8_cast_transpose_bgrad_dgelu_fused',
'fp8_transpose_bgrad_fused']
def fp8_cast_transpose_fused(
inp: torch.Tensor,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
cast_out: Optional[torch.Tensor] = None,
transpose_out: Optional[torch.Tensor] = None,
) -> Union[Tuple[torch.Tensor, torch.Tensor], None]:
"""Cast + Transpose with FP8 output"""
return_outputs = False
if cast_out is None or transpose_out is None:
cast_out = torch.empty_like(inp, dtype=torch.uint8)
transpose_out = torch.empty(
inp.shape[1], inp.shape[0], device="cuda", dtype=torch.uint8
)
return_outputs = True
tex.fused_cast_transpose(
inp,
fp8_meta_tensor.scale[fp8_tensor],
fp8_meta_tensor.amax_history[0][fp8_tensor],
fp8_meta_tensor.scale_inv[fp8_tensor],
cast_out,
transpose_out,
otype,
)
if return_outputs:
return cast_out, transpose_out
return None
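# --- Editor's note: hedged usage sketch, not part of the original source ---
# fp8_cast_transpose_fused has two calling modes: let it allocate and return
# both outputs, or pass pre-allocated uint8 buffers (as the FP8 weight-caching
# path in LayerNormMLP does), in which case it returns None. `inp`, `fp8_meta`
# and `fp8_dtype` are assumed to be prepared by the caller.
#
#     cast_out, transpose_out = fp8_cast_transpose_fused(
#         inp, fp8_meta["scaling_fwd"], tex.FP8FwdTensors.GEMM1_WEIGHT, fp8_dtype)
#
#     fp8_cast_transpose_fused(
#         inp, fp8_meta["scaling_fwd"], tex.FP8FwdTensors.GEMM1_WEIGHT, fp8_dtype,
#         cast_out=weight_fp8, transpose_out=weight_t_fp8)   # writes in place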
def fp8_cast_transpose_bgrad_fused(
inp: torch.Tensor,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Cast + Transpose + BGRAD with FP8 output"""
return tex.fused_cast_transpose_bgrad(
inp,
fp8_meta_tensor.scale[fp8_tensor],
fp8_meta_tensor.amax_history[0][fp8_tensor],
fp8_meta_tensor.scale_inv[fp8_tensor],
otype,
)
def fp8_transpose_bgrad_fused(
inp: torch.Tensor,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
grad_bias_type: torch.dtype,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Transpose + BGRAD with FP8 output"""
return tex.fused_fp8_transpose_bgrad(
inp,
fp8_meta_tensor.scale[fp8_tensor],
fp8_meta_tensor.amax_history[0][fp8_tensor],
fp8_meta_tensor.scale_inv[fp8_tensor],
otype,
TE_DType[grad_bias_type],
)
def fp8_cast_transpose_bgrad_dgelu_fused(
grad_output: torch.Tensor,
gelu_input: torch.Tensor,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Cast + Transpose + BGRAD + DGELU with FP8 output"""
return tex.fused_cast_transpose_bgrad_dgelu(
grad_output,
gelu_input,
fp8_meta_tensor.scale[fp8_tensor],
fp8_meta_tensor.amax_history[0][fp8_tensor],
fp8_meta_tensor.scale_inv[fp8_tensor],
otype,
)
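# --- Editor's note: hedged usage sketch, not part of the original source ---
# A hedged example of the fused bgrad + dGeLU variant, mirroring how it is
# called from the LayerNormMLP backward pass: `fc2_dgrad` is the incoming
# gradient, `fc1_out` the pre-GeLU activation, and `fp8_meta` /
# `fp8_dtype_backward` are assumed to be set up by the caller.
#
#     fc1_bias_grad, dgelu, dgelu_t = fp8_cast_transpose_bgrad_dgelu_fused(
#         fc2_dgrad, fc1_out, fp8_meta["scaling_bwd"],
#         tex.FP8BwdTensors.GRAD_OUTPUT2, fp8_dtype_backward)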
| TransformerEngine-main | transformer_engine/pytorch/cpp_extensions/transpose.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Python interface for normalization extensions"""
from typing import Optional, Tuple, Union
import torch
import transformer_engine_extensions as tex
__all__ = ['layernorm_fwd_fp8',
'layernorm_fwd_fp8_inf',
'layernorm_fwd_inf',
'rmsnorm_fwd_fp8',
'rmsnorm_fwd_fp8_inf',
'rmsnorm_fwd_inf']
def layernorm_fwd_fp8(
inp: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
eps: float,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
sm_margin: int,
zero_centered_gamma: bool,
ln_out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""LayerNorm with FP8 output"""
if ln_out is not None:
return tex.layernorm_fwd_fp8_noalloc(
inp,
weight,
bias,
eps,
fp8_meta_tensor.scale[fp8_tensor],
ln_out,
fp8_meta_tensor.amax_history[0][fp8_tensor],
fp8_meta_tensor.scale_inv[fp8_tensor],
otype,
sm_margin,
zero_centered_gamma
)
return tex.layernorm_fwd_fp8(
inp,
weight,
bias,
eps,
fp8_meta_tensor.scale[fp8_tensor],
fp8_meta_tensor.amax_history[0][fp8_tensor],
fp8_meta_tensor.scale_inv[fp8_tensor],
otype,
sm_margin,
zero_centered_gamma
)
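# --- Editor's note: hedged usage sketch, not part of the original source ---
# layernorm_fwd_fp8 has two code paths: when `ln_out` is supplied, the
# no-alloc kernel writes into the provided buffer; otherwise a fresh output is
# allocated and returned alongside the statistics. `inp`, `weight`, `bias`,
# `fp8_meta` and `fp8_dtype` are assumed caller-provided.
#
#     out, mu, rsigma = layernorm_fwd_fp8(
#         inp, weight, bias, 1e-5, fp8_meta["scaling_fwd"],
#         tex.FP8FwdTensors.GEMM1_INPUT, fp8_dtype,
#         sm_margin=0, zero_centered_gamma=False)
#
#     # passing ln_out=<pre-allocated uint8 buffer> routes to the no-alloc kernel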
def layernorm_fwd_fp8_inf(
inp: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
eps: float,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
    zero_centered_gamma: bool,
) -> torch.Tensor:
"""LayerNorm with FP8 output.
This version of layernorm_fwd_fp8 is specialized for inference, and returns
only the normalized output.
"""
ret = torch.ops.tex_ts.layernorm_fwd_fp8_inf_ts(
inp,
weight,
bias,
eps,
fp8_meta_tensor.scale,
fp8_meta_tensor.amax_history,
fp8_meta_tensor.scale_inv,
fp8_tensor,
otype,
zero_centered_gamma)
return ret
def layernorm_fwd_inf(
inp: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
eps: float,
zero_centered_gamma: bool,
) -> torch.Tensor:
"""LayerNorm with FP8 output"""
return torch.ops.tex_ts.layernorm_fwd_inf_ts(
inp,
weight,
bias,
eps,
zero_centered_gamma,
)
def rmsnorm_fwd_fp8(
inp: torch.Tensor,
weight: torch.Tensor,
eps: float,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
sm_margin: int,
zero_centered_gamma: bool,
rmsnorm_out: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""RMSNorm with FP8 output"""
if rmsnorm_out is not None:
return tex.rmsnorm_fwd_fp8_noalloc(
inp,
weight,
eps,
fp8_meta_tensor.scale[fp8_tensor],
rmsnorm_out,
fp8_meta_tensor.amax_history[0][fp8_tensor],
fp8_meta_tensor.scale_inv[fp8_tensor],
otype,
sm_margin,
zero_centered_gamma
)
return tex.rmsnorm_fwd_fp8(
inp,
weight,
eps,
fp8_meta_tensor.scale[fp8_tensor],
fp8_meta_tensor.amax_history[0][fp8_tensor],
fp8_meta_tensor.scale_inv[fp8_tensor],
otype,
sm_margin,
zero_centered_gamma
)
def rmsnorm_fwd_fp8_inf(
inp: torch.Tensor,
weight: torch.Tensor,
eps: float,
fp8_meta_tensor: tex.FP8TensorMeta,
fp8_tensor: Union[tex.FP8FwdTensors, tex.FP8BwdTensors],
otype: tex.DType,
    zero_centered_gamma: bool,
) -> torch.Tensor:
"""RMSNorm with FP8 output.
This version of rmsnorm_fwd_fp8 is specialized for inference, and returns
only the normalized output.
"""
ret = torch.ops.tex_ts.rmsnorm_fwd_fp8_inf_ts(
inp,
weight,
eps,
fp8_meta_tensor.scale,
fp8_meta_tensor.amax_history,
fp8_meta_tensor.scale_inv,
fp8_tensor,
otype,
zero_centered_gamma)
return ret
def rmsnorm_fwd_inf(
inp: torch.Tensor,
weight: torch.Tensor,
eps: float,
zero_centered_gamma: bool,
) -> torch.Tensor:
"""RMSNorm with FP8 output"""
return torch.ops.tex_ts.rmsnorm_fwd_inf_ts(
inp,
weight,
eps,
zero_centered_gamma,
)
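# --- Editor's note: hedged usage sketch, not part of the original source ---
# A minimal example of the non-FP8 inference helpers defined above; `x`,
# `gamma` and `beta` are assumed to be CUDA tensors of matching dtype, with
# eps = 1e-5 and zero_centered_gamma = False. Nothing below is executed.
#
#     y_ln = layernorm_fwd_inf(x, gamma, beta, 1e-5, False)
#     y_rms = rmsnorm_fwd_inf(x, gamma, 1e-5, False)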
| TransformerEngine-main | transformer_engine/pytorch/cpp_extensions/normalization.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""RMSNorm API"""
import os
from typing import Union, Tuple, Optional
import torch
from torch.nn.parameter import Parameter
from torch.nn import init
from .base import TransformerEngineBaseModule
from .. import cpp_extensions as tex
from ..jit import no_torch_dynamo
from ..utils import cast_if_needed
__all__ = ["RMSNorm"]
class _RMSNorm(torch.autograd.Function):
"""functional RMSNorm"""
@staticmethod
def forward(
ctx,
inp: torch.Tensor,
rmsnorm_weight: torch.Tensor,
eps: float,
fwd_rmsnorm_sm_margin: int,
bwd_rmsnorm_sm_margin: int,
zero_centered_gamma: bool,
is_grad_enabled: bool,
activation_dtype: torch.dtype,
) -> torch.Tensor:
# Make sure input dimensions are compatible
in_features = rmsnorm_weight.numel()
assert inp.is_cuda, "TransformerEngine needs CUDA."
assert inp.shape[-1] == in_features, "RMSNorm not possible"
inputmat = inp.view((-1, in_features))
# Cast for native AMP
inputmat = cast_if_needed(inputmat, activation_dtype)
rmsnorm_weight = cast_if_needed(rmsnorm_weight, activation_dtype)
if is_grad_enabled:
rmsnorm_out, rsigma = tex.rmsnorm_fwd(inputmat, rmsnorm_weight,
eps, fwd_rmsnorm_sm_margin,
zero_centered_gamma)
ctx.save_for_backward(inputmat, rmsnorm_weight, rsigma)
ctx.inp_shape = inp.shape
ctx.bwd_rmsnorm_sm_margin = bwd_rmsnorm_sm_margin
ctx.zero_centered_gamma = zero_centered_gamma
else:
rmsnorm_out = tex.rmsnorm_fwd_inf(inputmat, rmsnorm_weight,
eps,
zero_centered_gamma)
return rmsnorm_out.view_as(inp)
@staticmethod
def backward(
ctx, grad_output: torch.Tensor
) -> Tuple[Union[torch.Tensor, None], ...]:
inputmat, rmsnorm_weight, rsigma = ctx.saved_tensors
grad_output = grad_output.contiguous()
d_rmsnorm_out = grad_output.view(inputmat.shape)
dxmat, dgamma = tex.rmsnorm_bwd(
d_rmsnorm_out, inputmat, rsigma, rmsnorm_weight,
ctx.bwd_rmsnorm_sm_margin, ctx.zero_centered_gamma
)
return (
dxmat.view(ctx.inp_shape),
dgamma,
None,
None,
None,
None,
None,
None,
)
class RMSNorm(torch.nn.Module):
r"""
Applies Root Mean Square Layer Normalization over a mini-batch of inputs as described in
the paper `Root Mean Square Layer Normalization <https://arxiv.org/abs/1910.07467>`__
.. math::
y = \frac{x}{RMS_\varepsilon(x)} * \gamma
where
.. math::
        RMS_\varepsilon(x) = \sqrt{\frac{1}{n}\sum_{i=1}^{n} x_i^2 + \varepsilon}
:math:`\gamma` is a learnable affine transform parameter of size :attr:`hidden_size`
Parameters
----------
hidden_size : int
size of each input sample.
eps : float, default = 1e-5
a value added to the denominator of layer normalization for numerical stability.
sequence_parallel : bool, default = `False`
if set to `True`, uses sequence parallelism.
params_dtype : torch.dtype, default = `torch.get_default_dtype()`
it controls the type used to allocate the initial parameters. Useful when
the model is trained with lower precision and the original FP32 parameters
would not fit in GPU memory.
zero_centered_gamma : bool, default = 'False'
if set to 'True', gamma parameter in RMSNorm is initialized to 0 and
the RMSNorm formula changes to
.. math::
y = \frac{x}{RMS(x) + \varepsilon} * (1 + \gamma)
device : Union[torch.device, str], default = "cuda"
                The device on which the parameters of the model will be allocated. It is the user's
responsibility to ensure all parameters are moved to the GPU before running the
forward pass.
"""
def __init__(
self,
hidden_size: int,
eps: float = 1e-5,
sequence_parallel: bool = False,
params_dtype: Optional[torch.dtype] = None,
zero_centered_gamma: bool = False,
device: Union[torch.device, str] = "cuda",
) -> None:
super().__init__()
params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
self.eps = eps
self.zero_centered_gamma = zero_centered_gamma
self.weight = Parameter(
torch.empty(
hidden_size,
device=device,
dtype=params_dtype,
)
)
setattr(self.weight, "sequence_parallel", sequence_parallel)
self.reset_rms_norm_parameters()
# These many SMs are subtracted from the total SM count when calling forward
# and backward RMSNorm C APIs. These envvars can be used to prevent the LN
# kernels from using all SMs in the device. This is useful for cases such as
# communication overlap with RMSNorm.
self.fwd_rmsnorm_sm_margin = int(os.getenv("NVTE_FWD_LAYERNORM_SM_MARGIN", "0"))
self.bwd_rmsnorm_sm_margin = int(os.getenv("NVTE_BWD_LAYERNORM_SM_MARGIN", "0"))
def reset_rms_norm_parameters(self) -> None:
"""Init RMSNorm params"""
if not self.zero_centered_gamma:
init.ones_(self.weight)
else:
init.zeros_(self.weight)
@no_torch_dynamo
def forward(self, inp: torch.Tensor) -> torch.Tensor:
"""RMSNorm FWD"""
# Set the activation type for AMP.
TransformerEngineBaseModule.set_activation_dtype(self, inp)
if torch.is_grad_enabled():
fwd_fn = _RMSNorm.apply
args = []
else:
fwd_fn = _RMSNorm.forward
args = [None]
args += (
inp,
self.weight,
self.eps,
self.fwd_rmsnorm_sm_margin,
self.bwd_rmsnorm_sm_margin,
self.zero_centered_gamma,
torch.is_grad_enabled(),
self.activation_dtype,
)
return fwd_fn(*args)
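if __name__ == "__main__":
    # Editor's usage sketch (hedged, not part of the original source): a
    # minimal smoke test of the RMSNorm module above, assuming a CUDA device
    # and the compiled transformer_engine extensions are available. Shapes
    # are illustrative.
    layer = RMSNorm(hidden_size=1024)
    x = torch.randn(8, 32, 1024, device="cuda")
    y = layer(x)
    print(y.shape)  # expected: torch.Size([8, 32, 1024])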
| TransformerEngine-main | transformer_engine/pytorch/module/rmsnorm.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""LayerNormMLP API"""
import os
from typing import Union, Optional, Callable, Tuple, List, Dict, Any
import torch
from torch.nn.parameter import Parameter
from torch.nn import init
from .base import (
get_workspace,
_prepare_backward,
get_ub,
TransformerEngineBaseModule,
_2X_ACC_FPROP,
_2X_ACC_DGRAD,
_2X_ACC_WGRAD,
)
from ..fp8 import get_fp8_te_dtype
from ..jit import (
bias_gelu_fused,
bgrad_dgelu_fused,
set_jit_fusion_options,
warmup_jit_bias_gelu_all_dtypes,
)
from ..utils import (
divide,
get_default_init_method,
cast_if_needed,
assert_dim_for_fp8_exec,
)
from ..distributed import (
set_tensor_model_parallel_attributes,
get_distributed_world_size,
allreduce,
initialize_affine_weight_gpu,
reduce_scatter_along_first_dim,
gather_along_first_dim,
)
from .. import cpp_extensions as tex
from ..constants import dist_group_type, TE_DType
from ..jit import no_torch_dynamo
from ._common import _apply_normalization
__all__ = ["LayerNormMLP"]
def _act_func(activation: str):
funcs = {
'gelu': (tex.gelu, tex.dgelu),
'relu': (tex.relu, tex.drelu),
'geglu': (tex.geglu, tex.dgeglu),
'reglu': (tex.reglu, tex.dreglu),
'swiglu': (tex.swiglu, tex.dswiglu),
}
if activation not in funcs:
raise NotImplementedError("Activation type " + activation + " is not supported!")
return funcs[activation]
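# Editor's note (hedged, not part of the original source): `_act_func` returns
# the (forward, backward) kernel pair for a given activation name, e.g.
#
#     act_fwd, act_bwd = _act_func('gelu')   # (tex.gelu, tex.dgelu)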
class _LayerNormMLP(torch.autograd.Function):
"""LayerNormMLP semi-top level module
Calls custom cuda extensions.
"""
@staticmethod
def forward(
ctx,
inp: torch.Tensor,
ln_weight: torch.Tensor,
ln_bias: torch.Tensor,
fc1_weight: torch.Tensor,
fc1_weight_fp8: Union[torch.Tensor, None],
fc1_weight_t_fp8: Union[torch.Tensor, None],
fc1_bias: torch.Tensor,
use_fc1_bias: bool,
fc2_weight: torch.Tensor,
fc2_weight_fp8: Union[torch.Tensor, None],
fc2_weight_t_fp8: Union[torch.Tensor, None],
fc2_bias: torch.Tensor,
use_fc2_bias: bool,
eps: float,
is_first_microbatch: Union[bool, None],
fp8: bool,
fp8_calibration: bool,
fp8_meta: Dict[str, Any],
fuse_wgrad_accumulation: bool,
tp_group: Union[dist_group_type, None],
tp_size: int,
sequence_parallel: bool,
tensor_parallel: bool,
activation_dtype: torch.dtype,
return_layernorm_output: bool,
bias_gelu_nvfusion: bool,
set_parallel_mode: bool,
is_grad_enabled: bool,
fwd_ln_sm_margin: int,
bwd_ln_sm_margin: int,
zero_centered_gamma: bool,
ub_bulk_wgrad: bool,
ub_bulk_dgrad: bool,
ub_split_rs: bool,
ub_split_ag: bool,
activation: str,
normalization: str,
) -> Union[Tuple[torch.Tensor, ...], torch.Tensor]:
# Make sure input dimensions are compatible
in_features = ln_weight.numel()
assert inp.shape[-1] == in_features, "GEMM not possible"
inputmat = inp.view((-1, in_features))
if fp8:
assert_dim_for_fp8_exec(inputmat)
assert_dim_for_fp8_exec(fc1_weight)
assert_dim_for_fp8_exec(fc2_weight)
update_fp8_weights = is_first_microbatch is None or is_first_microbatch
activation_func = _act_func(activation)[0]
# Cast for native AMP
inputmat = cast_if_needed(inputmat, activation_dtype)
ln_weight = cast_if_needed(ln_weight, activation_dtype)
if ln_bias is not None:
ln_bias = cast_if_needed(ln_bias, activation_dtype)
if ub_split_ag:
tp_world_size = get_distributed_world_size(tp_group)
if tp_world_size == 1 or (not is_grad_enabled) or return_layernorm_output:
ub_split_ag = False
if ub_split_ag:
ub_obj_lnout = get_ub("fc1_fprop")
ln_out = ub_obj_lnout.get_ubuf_output(0)
else:
ln_out_dtype = torch.uint8 if (fp8 and not return_layernorm_output) else inputmat.dtype
ln_out = torch.empty_like(inputmat, dtype=ln_out_dtype)
if ub_split_rs:
tp_world_size = get_distributed_world_size(tp_group)
if tp_world_size == 1:
ub_split_rs = False
fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
ln_out, mu, rsigma = _apply_normalization(inputmat,
ln_out,
ln_weight,
ln_bias,
eps,
fp8 and not return_layernorm_output,
fp8_meta,
normalization,
fwd_ln_sm_margin,
zero_centered_gamma,
is_grad_enabled)
# If residual connection is after LN, we need `ln_out`
# tensor in higher precision, this comes at the cost
# of an extra fp8 cast.
if return_layernorm_output:
ln_out_return = ln_out
if fp8:
ln_out = tex.cast_to_fp8(
ln_out,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
)
# Column Parallel Linear
if ub_split_ag:
ln_out_total = ub_obj_lnout.get_ubuf_output(1)
ln_out = torch.empty_like(ln_out)
elif set_parallel_mode and sequence_parallel:
ln_out_total, _ = gather_along_first_dim(ln_out, tp_group)
else:
ln_out_total = ln_out
if fp8:
bias_dtype = (
torch.bfloat16
if activation_dtype == torch.float32
else activation_dtype
)
fc1_bias = cast_if_needed(fc1_bias, bias_dtype) if use_fc1_bias else fc1_bias
fc2_bias = cast_if_needed(fc2_bias, bias_dtype) if use_fc2_bias else fc2_bias
if update_fp8_weights:
if is_grad_enabled:
tex.fp8_cast_transpose_fused(
fc1_weight,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
cast_out=fc1_weight_fp8,
transpose_out=fc1_weight_t_fp8,
)
tex.fp8_cast_transpose_fused(
fc2_weight,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM2_WEIGHT,
fp8_dtype_forward,
cast_out=fc2_weight_fp8,
transpose_out=fc2_weight_t_fp8,
)
else:
fc1_weight_t_fp8 = None
fc1_weight_fp8 = tex.cast_to_fp8(
fc1_weight,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
)
fc2_weight_t_fp8 = None
fc2_weight_fp8 = tex.cast_to_fp8(
fc2_weight,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM2_WEIGHT,
fp8_dtype_forward,
)
fc1_out = tex.fp8_gemm(
fc1_weight_fp8,
fp8_meta["scaling_fwd"].scale_inv,
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
ln_out_total,
fp8_meta["scaling_fwd"].scale_inv,
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
activation_dtype,
get_workspace(),
bias=fc1_bias,
use_bias=use_fc1_bias,
use_split_accumulator=_2X_ACC_FPROP,
ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG if ub_split_ag else None,
ub=ub_obj_lnout if ub_split_ag else None,
extra_output_tensor=ln_out if ub_split_ag else None,
)
gelu_out = activation_func(
fc1_out,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM2_INPUT,
fp8_dtype_forward,
)
if ub_split_rs:
ub_obj_fc2out = get_ub("fc2_fprop")
fc2_out = ub_obj_fc2out.get_ubuf_output(1)
dim_size = list(gelu_out.size())
dim_size[0] = dim_size[0] // tp_world_size
dim_size[1] = fc2_weight.size(0)
rs_out = torch.empty(dim_size, dtype=activation_dtype, device=gelu_out.device)
else:
dim_size = list(gelu_out.size())
dim_size[1] = fc2_weight.size(0)
fc2_out = torch.empty(dim_size, dtype=activation_dtype, device=gelu_out.device)
_ = tex.fp8_gemm(
fc2_weight_fp8,
fp8_meta["scaling_fwd"].scale_inv,
tex.FP8FwdTensors.GEMM2_WEIGHT,
fp8_dtype_forward,
gelu_out,
fp8_meta["scaling_fwd"].scale_inv,
tex.FP8FwdTensors.GEMM2_INPUT,
fp8_dtype_forward,
activation_dtype,
get_workspace(),
bias=fc2_bias,
use_bias=use_fc2_bias,
use_split_accumulator=_2X_ACC_FPROP,
out=fc2_out,
ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS if ub_split_rs else None,
ub=ub_obj_fc2out if ub_split_rs else None,
extra_output_tensor=rs_out if ub_split_rs else None,
)
else:
# Cast for native AMP
fc1_weight = cast_if_needed(fc1_weight, activation_dtype)
fc2_weight = cast_if_needed(fc2_weight, activation_dtype)
fc1_bias = (
cast_if_needed(fc1_bias, activation_dtype) if use_fc1_bias else fc1_bias
)
fc2_bias = (
cast_if_needed(fc2_bias, activation_dtype) if use_fc2_bias else fc2_bias
)
if fp8_calibration:
# amax of fc1 input
fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_INPUT] = \
torch.amax(ln_out_total).float()
# amax of fc1 weight
fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_WEIGHT] = \
torch.amax(fc1_weight).float()
fc1_outputs = tex.gemm(
fc1_weight,
ln_out_total,
activation_dtype,
get_workspace(),
bias=fc1_bias,
use_bias=(not bias_gelu_nvfusion) and use_fc1_bias,
gelu=not bias_gelu_nvfusion and (activation == 'gelu'),
ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG if ub_split_ag else None,
ub=ub_obj_lnout if ub_split_ag else None,
extra_output_tensor=ln_out if ub_split_ag else None,
)
if bias_gelu_nvfusion:
fc1_out, _, _ = fc1_outputs
gelu_out = bias_gelu_fused(fc1_out, fc1_bias)
else:
if activation == 'gelu':
gelu_out, _, fc1_out = fc1_outputs
else:
fc1_out, _, _ = fc1_outputs
gelu_out = activation_func(fc1_out,
None,
tex.FP8FwdTensors.GEMM2_INPUT,
TE_DType[fc1_out.dtype])
if fp8_calibration:
# amax of fc2 input
fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM2_INPUT] = \
torch.amax(gelu_out).float()
# amax of fc2 weight
fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM2_WEIGHT] = \
torch.amax(fc2_weight).float()
if ub_split_rs:
ub_obj_fc2out = get_ub("fc2_fprop")
fc2_out = ub_obj_fc2out.get_ubuf_output(1)
dim_size = list(gelu_out.size())
dim_size[0] = dim_size[0] // tp_world_size
dim_size[1] = fc2_weight.size(0)
rs_out = torch.empty(dim_size, dtype=activation_dtype, device=gelu_out.device)
else:
dim_size = list(gelu_out.size())
dim_size[1] = fc2_weight.size(0)
fc2_out = torch.empty(dim_size, dtype=activation_dtype, device=gelu_out.device)
_, _, _ = tex.gemm(
fc2_weight,
gelu_out,
activation_dtype,
get_workspace(),
bias=fc2_bias,
use_bias=use_fc2_bias,
out=fc2_out,
ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS if ub_split_rs else None,
ub=ub_obj_fc2out if ub_split_rs else None,
extra_output_tensor=rs_out if ub_split_rs else None,
)
if is_grad_enabled:
ctx.save_for_backward(
inputmat,
ln_weight,
mu,
rsigma,
ln_out,
fc1_out,
gelu_out,
fc1_weight,
fc1_weight_t_fp8,
fc2_weight,
fc2_weight_t_fp8,
fc1_bias,
fp8_meta["scaling_fwd"].scale_inv.clone() if fp8 else None,
)
ctx.activation_dtype = activation_dtype
ctx.activation = activation
ctx.fp8 = fp8
ctx.fp8_meta = fp8_meta
ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation
ctx.is_first_microbatch = is_first_microbatch
ctx.use_fc1_bias = use_fc1_bias
ctx.use_fc2_bias = use_fc2_bias
ctx.sequence_parallel = sequence_parallel
ctx.tensor_parallel = tensor_parallel
ctx.inp_shape = inp.shape
ctx.tp_group = tp_group
ctx.tp_size = tp_size
ctx.bias_gelu_nvfusion = bias_gelu_nvfusion
ctx.return_layernorm_output = return_layernorm_output
ctx.set_parallel_mode = set_parallel_mode
ctx.bwd_ln_sm_margin = bwd_ln_sm_margin
ctx.zero_centered_gamma = zero_centered_gamma
ctx.ub_bulk_wgrad = ub_bulk_wgrad
ctx.ub_bulk_dgrad = ub_bulk_dgrad
ctx.ub_split_ag = ub_split_ag
ctx.requires_dgrad = inp.requires_grad
ctx.normalization = normalization
# Row Parallel Linear
if ub_split_rs:
fc2_out = rs_out
elif set_parallel_mode and sequence_parallel:
fc2_out, _ = reduce_scatter_along_first_dim(fc2_out, tp_group)
elif set_parallel_mode and tensor_parallel:
fc2_out, _ = allreduce(fc2_out, tp_group)
# [*, in_features] -> [*, out_features] except first dimension changes for SP
fc2_out = fc2_out.view(-1, *inp.shape[1:-1], fc2_out.shape[-1])
if return_layernorm_output:
return fc2_out, ln_out_return.view_as(inp)
return fc2_out
@staticmethod
def backward(
ctx, *grad_outputs: Tuple[torch.Tensor, ...]
) -> Tuple[Union[torch.Tensor, None], ...]:
with _prepare_backward(
ctx.fp8, ctx.fp8_meta, ctx.tp_group, ctx.tp_size, name="_LayerNormMLP"
):
(
inputmat,
ln_weight,
mu,
rsigma,
ln_out,
fc1_out,
gelu_out,
fc1_weight,
fc1_weight_t_fp8,
fc2_weight,
fc2_weight_t_fp8,
fc1_bias,
fwd_scale_inverses,
) = ctx.saved_tensors
activation_func = _act_func(ctx.activation)[1]
if ctx.ub_bulk_dgrad:
tp_world_size = get_distributed_world_size(ctx.tp_group)
if tp_world_size == 1:
ctx.ub_bulk_dgrad = False
if ctx.ub_bulk_dgrad:
dim_size = list(ln_out.size())
dim_size[0] = dim_size[0] * tp_world_size
ub_obj_lnout = get_ub("fc1_dgrad")
ub_obj_lnout.copy_input_to_ubuf(ln_out, 1)
if ctx.ub_split_ag:
tp_world_size = get_distributed_world_size(ctx.tp_group)
if tp_world_size == 1:
ctx.ub_split_ag = False
if ctx.ub_split_ag:
dim_size = list(grad_outputs[0].size())
dim_size[0] = dim_size[0] * tp_world_size
ctx.ub_obj_gradout = get_ub("fc2_dgrad")
ctx.use_bias = ctx.use_fc2_bias # For grad_output_preprocess
(
grad_output,
grad_output_c,
grad_output_t,
fc2_bias_grad,
) = TransformerEngineBaseModule.grad_output_preprocess(
ctx, grad_outputs[0], True
)
if ctx.ub_bulk_wgrad:
tp_world_size = get_distributed_world_size(ctx.tp_group)
if tp_world_size == 1:
ctx.ub_bulk_wgrad = False
# Column Parallel Linear
# Overlap input AG with dgrad
if (fc1_weight.requires_grad
and (not ctx.ub_bulk_dgrad)
and ctx.set_parallel_mode
and ctx.sequence_parallel):
ln_out_total, handle = gather_along_first_dim(
ln_out, ctx.tp_group, async_op=True
)
else:
ln_out_total = ln_out
handle = None
if ctx.is_first_microbatch is not None:
accumulate_wgrad_into_param_main_grad = (
ctx.fuse_wgrad_accumulation and not ctx.is_first_microbatch
)
else:
accumulate_wgrad_into_param_main_grad = ctx.fuse_wgrad_accumulation
if ctx.fp8:
fp8_dtype_forward = get_fp8_te_dtype(
ctx.fp8_meta["recipe"], fprop_tensor=True
)
fp8_dtype_backward = get_fp8_te_dtype(
ctx.fp8_meta["recipe"], fprop_tensor=False
)
# FC2 DGRAD; Unconditional
fc2_dgrad = tex.fp8_gemm(
fc2_weight_t_fp8,
fwd_scale_inverses,
tex.FP8FwdTensors.GEMM2_WEIGHT,
fp8_dtype_forward,
grad_output_c,
ctx.fp8_meta["scaling_bwd"].scale_inv,
tex.FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
ctx.activation_dtype,
get_workspace(),
use_split_accumulator=_2X_ACC_DGRAD,
ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG if ctx.ub_split_ag else None,
ub=ctx.ub_obj_gradout if ctx.ub_split_ag else None,
)
if ctx.ub_split_ag:
grad_output_t = tex.fp8_transpose(grad_output_c, fp8_dtype_backward)
# FC2 WGRAD
if not ctx.fp8_meta["recipe"].override_linear_precision.wgrad:
if fc2_weight.requires_grad:
gelu_out_t = tex.fp8_transpose(gelu_out, fp8_dtype_forward)
fc2_wgrad = tex.fp8_gemm(
gelu_out_t,
fwd_scale_inverses,
tex.FP8FwdTensors.GEMM2_INPUT,
fp8_dtype_forward,
grad_output_t,
ctx.fp8_meta["scaling_bwd"].scale_inv,
tex.FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
ctx.activation_dtype,
get_workspace(),
accumulate=accumulate_wgrad_into_param_main_grad,
out=fc2_weight.main_grad
if ctx.fuse_wgrad_accumulation
else None,
use_split_accumulator=_2X_ACC_WGRAD,
)
if ctx.activation == 'gelu':
fc1_bias_grad, dgelu, dgelu_t = tex.fp8_cast_transpose_bgrad_dgelu_fused(
fc2_dgrad,
fc1_out,
ctx.fp8_meta["scaling_bwd"],
tex.FP8BwdTensors.GRAD_OUTPUT2,
fp8_dtype_backward,
)
else:
dgelu = activation_func(fc2_dgrad, fc1_out,
TE_DType[fc2_dgrad.dtype])
fc1_bias_grad, dgelu, dgelu_t = tex.fp8_cast_transpose_bgrad_fused(
dgelu,
ctx.fp8_meta["scaling_bwd"],
tex.FP8BwdTensors.GRAD_OUTPUT2,
fp8_dtype_backward,
)
else:
if fc2_weight.requires_grad:
gelu_out_c = tex.cast_from_fp8(
gelu_out,
ctx.fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM2_INPUT,
fp8_dtype_forward,
TE_DType[ctx.activation_dtype],
)
fc2_wgrad, _, _ = tex.gemm(
gelu_out_c,
grad_output,
ctx.activation_dtype,
get_workspace(),
layout="NT",
grad=True,
use_bias=False,
accumulate=accumulate_wgrad_into_param_main_grad,
out=fc2_weight.main_grad
if ctx.fuse_wgrad_accumulation
else None,
)
if ctx.activation == 'gelu':
fc1_bias_grad, dgelu_no_fp8 = bgrad_dgelu_fused(
fc2_dgrad, fc1_out, fc1_bias
)
else:
dgelu_no_fp8 = activation_func(fc2_dgrad,
fc1_out,
TE_DType[fc2_dgrad.dtype])
fc1_bias_grad = dgelu_no_fp8.sum(dim=0)
dgelu = tex.cast_to_fp8(
dgelu_no_fp8,
ctx.fp8_meta["scaling_bwd"],
tex.FP8BwdTensors.GRAD_OUTPUT2,
fp8_dtype_backward,
)
dgelu_t = None
fc1_dgrad_size = list(dgelu.size())
fc1_dgrad_size[1] = fc1_weight.size(1)
if ctx.ub_bulk_wgrad: # allocate dgrad output
ub_obj_dgrad = get_ub("fc1_wgrad")
fc1_dgrad = ub_obj_dgrad.get_ubuf_output(1) # AllGather output
else:
fc1_dgrad = torch.empty(
fc1_dgrad_size, dtype=ctx.activation_dtype, device=fc1_weight.device
)
# FC1 DGRAD: Unconditional
_ = tex.fp8_gemm(
fc1_weight_t_fp8,
fwd_scale_inverses,
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
dgelu,
ctx.fp8_meta["scaling_bwd"].scale_inv,
tex.FP8BwdTensors.GRAD_OUTPUT2,
fp8_dtype_backward,
ctx.activation_dtype,
get_workspace(),
out=fc1_dgrad,
use_split_accumulator=_2X_ACC_DGRAD,
ub_algo=tex.UbufOverlapAlgo.BULK_OVERLAP_AG if ctx.ub_bulk_dgrad else None,
ub=ub_obj_lnout if ctx.ub_bulk_dgrad else None
)
else:
# FC2 DGRAD; Unconditional
fc2_dgrad, _, _ = tex.gemm(
fc2_weight,
grad_output,
ctx.activation_dtype,
get_workspace(),
layout="NN",
gelu=(not ctx.bias_gelu_nvfusion) and (ctx.activation == 'gelu'),
grad=True,
gelu_input=fc1_out,
ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG if ctx.ub_split_ag else None,
ub=ctx.ub_obj_gradout if ctx.ub_split_ag else None,
)
# FC2 WGRAD
if fc2_weight.requires_grad:
fc2_wgrad, fc2_bias_grad, _ = tex.gemm(
gelu_out,
grad_output,
ctx.activation_dtype,
get_workspace(),
layout="NT",
grad=True,
use_bias=ctx.use_fc2_bias,
accumulate=accumulate_wgrad_into_param_main_grad,
out=fc2_weight.main_grad if ctx.fuse_wgrad_accumulation else None,
)
if ctx.bias_gelu_nvfusion and ctx.activation == 'gelu':
fc1_bias_grad, dgelu = bgrad_dgelu_fused(fc2_dgrad, fc1_out, fc1_bias)
else:
if ctx.activation == 'gelu':
dgelu = fc2_dgrad
else:
dgelu = activation_func(fc2_dgrad,
fc1_out,
TE_DType[fc2_dgrad.dtype])
# For non-fp8 execution, FC1 bias gradient is fused with FC1 wgrad GEMM
# and will not be calculated in case wgrad is not required.
if not fc1_weight.requires_grad:
fc1_bias_grad = dgelu.sum(dim=0)
fc1_dgrad_size = list(dgelu.size())
fc1_dgrad_size[1] = fc1_weight.size(1)
if ctx.ub_bulk_wgrad: # allocate dgrad output
ub_obj_dgrad = get_ub("fc1_wgrad")
fc1_dgrad = ub_obj_dgrad.get_ubuf_output(1) # AllGather output
else:
fc1_dgrad = torch.empty(
fc1_dgrad_size, dtype=ctx.activation_dtype, device=fc1_weight.device
)
# FC1 DGRAD: Unconditional
_, _, _ = tex.gemm(
fc1_weight,
dgelu,
ctx.activation_dtype,
get_workspace(),
out=fc1_dgrad,
layout="NN",
grad=True,
ub_algo=tex.UbufOverlapAlgo.BULK_OVERLAP_AG if ctx.ub_bulk_dgrad else None,
ub=ub_obj_lnout if ctx.ub_bulk_dgrad else None
)
if ctx.ub_bulk_dgrad:
ln_out_total = ub_obj_lnout.get_ubuf_output(1)
# Overlap dgrad-RS/AR with wgrad
if ctx.set_parallel_mode and ctx.sequence_parallel:
if not ctx.ub_bulk_dgrad and handle is not None:
handle.wait()
if not ctx.ub_bulk_wgrad:
fc1_dgrad, handle = reduce_scatter_along_first_dim(
fc1_dgrad, ctx.tp_group, async_op=True
)
elif ctx.set_parallel_mode and ctx.tensor_parallel:
fc1_dgrad, handle = allreduce(fc1_dgrad, ctx.tp_group, async_op=True)
if fc1_weight.requires_grad:
if ctx.fp8:
# FC1 WGRAD
if not ctx.fp8_meta["recipe"].override_linear_precision.wgrad:
ln_out_total_t = tex.fp8_transpose(ln_out_total, fp8_dtype_forward)
fc1_wgrad = tex.fp8_gemm(
ln_out_total_t,
fwd_scale_inverses,
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
dgelu_t,
ctx.fp8_meta["scaling_bwd"].scale_inv,
tex.FP8BwdTensors.GRAD_OUTPUT2,
fp8_dtype_backward,
ctx.activation_dtype,
get_workspace(),
accumulate=accumulate_wgrad_into_param_main_grad,
out=fc1_weight.main_grad
if ctx.fuse_wgrad_accumulation
else None,
use_split_accumulator=_2X_ACC_WGRAD,
ub_algo=tex.UbufOverlapAlgo.BULK_OVERLAP_RS
if ctx.ub_bulk_wgrad else None,
ub=ub_obj_dgrad if ctx.ub_bulk_wgrad else None,
)
else:
ln_out_total_c = tex.cast_from_fp8(
ln_out_total,
ctx.fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
TE_DType[ctx.activation_dtype],
)
fc1_wgrad, _, _ = tex.gemm(
ln_out_total_c,
dgelu_no_fp8,
ctx.activation_dtype,
get_workspace(),
layout="NT",
grad=True,
accumulate=accumulate_wgrad_into_param_main_grad,
out=fc1_weight.main_grad
if ctx.fuse_wgrad_accumulation
else None,
ub_algo=tex.UbufOverlapAlgo.BULK_OVERLAP_RS
if ctx.ub_bulk_wgrad else None,
ub=ub_obj_dgrad if ctx.ub_bulk_wgrad else None,
)
else:
# FC1 WGRAD
fc1_wgrad_outputs = tex.gemm(
ln_out_total,
dgelu,
ctx.activation_dtype,
get_workspace(),
layout="NT",
grad=True,
use_bias=not ctx.bias_gelu_nvfusion,
accumulate=accumulate_wgrad_into_param_main_grad,
out=fc1_weight.main_grad if ctx.fuse_wgrad_accumulation else None,
ub_algo=tex.UbufOverlapAlgo.BULK_OVERLAP_RS if ctx.ub_bulk_wgrad else None,
ub=ub_obj_dgrad if ctx.ub_bulk_wgrad else None
)
if ctx.bias_gelu_nvfusion:
fc1_wgrad, _, _ = fc1_wgrad_outputs
else:
fc1_wgrad, fc1_bias_grad, _ = fc1_wgrad_outputs
# Column Parallel Linear
if ctx.ub_bulk_wgrad:
fc1_dgrad = ub_obj_dgrad.get_ubuf_output(0) # Reduce-scatter output
elif ctx.set_parallel_mode and ctx.tensor_parallel and handle is not None:
handle.wait()
# LayerNorm gradient
d_ln_out = fc1_dgrad.view(inputmat.shape)
# Residual gradient
if ctx.return_layernorm_output:
d_ln_out = d_ln_out + grad_outputs[1].view_as(d_ln_out)
if ctx.normalization == "LayerNorm":
dxmat, dgamma, dbeta = tex.layernorm_bwd(
d_ln_out, inputmat, mu, rsigma, ln_weight,
ctx.bwd_ln_sm_margin, ctx.zero_centered_gamma
)
elif ctx.normalization == "RMSNorm":
dxmat, dgamma = tex.rmsnorm_bwd(
d_ln_out, inputmat, rsigma, ln_weight,
ctx.bwd_ln_sm_margin, ctx.zero_centered_gamma
)
dbeta = None
return (
dxmat.view(ctx.inp_shape) if ctx.requires_dgrad else None,
dgamma,
dbeta,
fc1_wgrad if fc1_weight.requires_grad else None,
None,
None,
fc1_bias_grad if ctx.use_fc1_bias else None,
None,
fc2_wgrad if fc2_weight.requires_grad else None,
None,
None,
fc2_bias_grad if ctx.use_fc2_bias else None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)
class LayerNormMLP(TransformerEngineBaseModule):
r"""
Applies layer normalization on the input followed by the MLP module, consisting of
    2 successive linear transformations, separated by the configured activation function
    (GeLU by default).
Parameters
----------
hidden_size : int
size of each input sample.
ffn_hidden_size : int
intermediate size to which input samples are projected.
eps : float, default = 1e-5
a value added to the denominator of layer normalization for numerical stability.
bias : bool, default = `True`
if set to `False`, the FC1 and FC2 layers will not learn an additive bias.
normalization : { 'LayerNorm', 'RMSNorm' }, default = 'LayerNorm'
type of normalization applied.
activation : str, default = 'gelu'
activation function used.
                Options: 'gelu', 'geglu', 'relu', 'reglu', 'swiglu'.
init_method : Callable, default = `None`
used for initializing FC1 weights in the following way: `init_method(weight)`.
When set to `None`, defaults to `torch.nn.init.normal_(mean=0.0, std=0.023)`.
output_layer_init_method : Callable, default = `None`
used for initializing FC2 weights in the following way:
`output_layer_init_method(weight)`. When set to `None`, defaults to
`torch.nn.init.normal_(mean=0.0, std=0.023)`.
return_layernorm_output : bool, default = `False`
if set to `True`, output of layernorm is returned from the forward
together with the output of the linear transformation.
Example use case: residual connection for transformer module
is taken post layernorm.
zero_centered_gamma : bool, default = 'False'
if set to 'True', gamma parameter in LayerNorm is initialized to 0 and
the LayerNorm formula changes to
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \varepsilon}} *
(1 + \gamma) + \beta
device : Union[torch.device, str], default = "cuda"
                The device on which the parameters of the model will be allocated. It is the user's
responsibility to ensure all parameters are moved to the GPU before running the
forward pass.
Parallelism parameters
----------------------
set_parallel_mode : bool, default = `False`
if set to `True`, FC1 is used as Column Parallel and FC2 is used as Row
Parallel as described `here <https://arxiv.org/pdf/1909.08053.pdf>`_.
sequence_parallel : bool, default = `False`
if set to `True`, uses sequence parallelism.
tp_group : ProcessGroup, default = `None`
tensor parallel process group.
tp_size : int, default = 1
used as TP (tensor parallel) world size when TP groups are not formed during
initialization. In this case, users must call the
`set_tensor_parallel_group(tp_group)` method on the initialized module before the
forward pass to supply the tensor parallel group needed for tensor and sequence
parallel collectives.
Optimization parameters
-----------------------
fuse_wgrad_accumulation : bool, default = 'False'
if set to `True`, enables fusing of creation and accumulation of
the weight gradient. When enabled, it is assumed that the weights
have an additional `main_grad` attribute (used instead of the
regular `grad`) which is a pre-allocated buffer of the correct
size to accumulate gradients in.
return_bias : bool, default = `False`
when set to `True`, this module will not apply the additive bias for FC2, but
instead return the bias value during the forward pass together with the
output of the linear transformation :math:`y = xA^T`. This is useful when
the bias addition can be fused to subsequent operations.
params_dtype : torch.dtype, default = `torch.get_default_dtype()`
it controls the type used to allocate the initial parameters. Useful when
the model is trained with lower precision and the original FP32 parameters
would not fit in GPU memory.
seq_length: int
sequence length of input samples. Needed for JIT Warmup, a technique where jit fused
                functions are warmed up before training to ensure the same kernels are used for
                forward propagation and the activation recompute phase.
micro_batch_size: int
batch size per training step. Needed for JIT Warmup, a technique where jit
                fused functions are warmed up before training to ensure the same kernels are
                used for forward propagation and the activation recompute phase.
"""
def __init__(
self,
hidden_size: int,
ffn_hidden_size: int,
eps: float = 1e-5,
sequence_parallel: bool = False,
return_bias: bool = False,
get_rng_state_tracker: Optional[Callable] = None,
tp_group: Optional[dist_group_type] = None,
tp_size: int = 1,
init_method: Optional[Callable] = None,
bias: bool = True,
normalization: str = 'LayerNorm',
activation : str = "gelu",
output_layer_init_method: Optional[Callable] = None,
fuse_wgrad_accumulation: bool = False,
params_dtype: Optional[torch.dtype] = None,
return_layernorm_output: bool = False,
seq_length: Optional[int] = None,
micro_batch_size: Optional[int] = None,
set_parallel_mode: bool = False,
zero_centered_gamma: bool = False,
ub_bulk_wgrad: bool = False,
ub_bulk_dgrad: bool = False,
ub_split_rs: bool = False,
ub_split_ag: bool = False,
device: Union[torch.device, str] = "cuda",
) -> None:
super().__init__()
params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
self.fuse_wgrad_accumulation = fuse_wgrad_accumulation
self.normalization = normalization
assert normalization in ['LayerNorm', 'RMSNorm'], "Unsupported normalization type!"
self.use_bias = bias
self.activation = activation
self.return_bias = return_bias
self.apply_bias = bias and not return_bias
self.return_layernorm_output = return_layernorm_output
self.bias_gelu_nvfusion = (bool(int(os.getenv("NVTE_BIAS_GELU_NVFUSION", "1"))) and
self.activation == 'gelu')
self.set_parallel_mode = set_parallel_mode
self.zero_centered_gamma = zero_centered_gamma
self.ub_bulk_wgrad = ub_bulk_wgrad
self.ub_bulk_dgrad = ub_bulk_dgrad
self.ub_split_rs = ub_split_rs
self.ub_split_ag = ub_split_ag
if ub_bulk_wgrad or ub_bulk_dgrad or ub_split_rs or ub_split_ag:
assert (
tex.userbuf_comm_available()
), "Userbuffer communication backend not available."
if tp_group is None:
self.tp_size = tp_size
if tp_size == 1:
self.set_tensor_parallel_group(tp_group)
else:
self.tp_size = get_distributed_world_size(tp_group)
self.set_tensor_parallel_group(tp_group)
self.set_nccl_overlap_warning_if_tp()
if init_method is None:
init_method = get_default_init_method()
if output_layer_init_method is None:
output_layer_init_method = get_default_init_method()
self.sequence_parallel = (self.tp_size > 1) and sequence_parallel
self.size_per_partition = divide(ffn_hidden_size, self.tp_size)
# LN init
self.eps = eps
self.layer_norm_weight = Parameter(
torch.empty(hidden_size, device=device, dtype=params_dtype)
)
setattr(self.layer_norm_weight, "sequence_parallel", self.sequence_parallel)
if self.normalization != "RMSNorm":
self.layer_norm_bias = Parameter(
torch.empty(hidden_size, device=device, dtype=params_dtype)
)
setattr(self.layer_norm_bias, "sequence_parallel", self.sequence_parallel)
else:
self.layer_norm_bias = None
self.reset_layer_norm_parameters()
if self.activation in ['reglu', 'geglu', 'swiglu']:
fc1_output_features = 2 * self.size_per_partition
else:
fc1_output_features = self.size_per_partition
# FC1 init
self.fc1_weight = Parameter(
torch.empty(fc1_output_features, hidden_size, device=device, dtype=params_dtype)
)
self.fp8_weight_shapes.append(self.fc1_weight.shape)
initialize_affine_weight_gpu(
self.fc1_weight,
init_method,
get_rng_state_tracker,
partition_dim=0,
stride=1,
)
if self.use_bias:
self.fc1_bias = Parameter(
torch.empty(fc1_output_features, device=device, dtype=params_dtype)
)
set_tensor_model_parallel_attributes(self.fc1_bias, True, 0, 1)
else:
self.fc1_bias = torch.Tensor().to(dtype=params_dtype, device=device)
with torch.no_grad():
self.fc1_bias.zero_()
# FC2 init
self.fc2_weight = Parameter(
torch.empty(hidden_size, self.size_per_partition, device=device, dtype=params_dtype)
)
self.fp8_weight_shapes.append(self.fc2_weight.shape)
initialize_affine_weight_gpu(
self.fc2_weight,
output_layer_init_method,
get_rng_state_tracker,
partition_dim=1,
stride=1,
)
if self.use_bias:
self.fc2_bias = Parameter(
torch.empty(hidden_size, device=device, dtype=params_dtype)
)
else:
self.fc2_bias = torch.Tensor().to(dtype=params_dtype, device=device)
# For RPL, bias has to be added after TP collectives
# So it cannot be fused with the GEMM
if self.set_parallel_mode and self.apply_bias:
self.gemm_bias_unfused_add = True
else:
self.gemm_bias_unfused_add = False
with torch.no_grad():
self.fc2_bias.zero_()
if self.bias_gelu_nvfusion:
set_jit_fusion_options()
if seq_length and micro_batch_size:
warmup_jit_bias_gelu_all_dtypes(
self.size_per_partition, seq_length, micro_batch_size
)
# These many SMs are subtracted from the total SM count when calling forward
# and backward LayerNorm C APIs. These envvars can be used to prevent the LN
# kernels from using all SMs in the device. This is useful for cases such as
# communication overlap with LN.
self.fwd_ln_sm_margin = int(os.getenv("NVTE_FWD_LAYERNORM_SM_MARGIN", "0"))
self.bwd_ln_sm_margin = int(os.getenv("NVTE_BWD_LAYERNORM_SM_MARGIN", "0"))
def reset_layer_norm_parameters(self) -> None:
"""Init LN params"""
if not self.zero_centered_gamma:
init.ones_(self.layer_norm_weight)
else:
init.zeros_(self.layer_norm_weight)
if self.layer_norm_bias is not None:
init.zeros_(self.layer_norm_bias)
def get_fp8_weights_scratchpad(
self,
is_first_microbatch: Union[bool, None],
) -> List[torch.Tensor]:
"""
Fetch the fp8 weight tensor placeholders if they exist (when
`is_first_microbatch` is not `None`) or return empty fp8 weight
tensors (if `is_first_microbatch is None`)
"""
if not self.fp8:
return [None, None, None, None]
if is_first_microbatch is None:
# Return empty weight placeholders for each fwd/bwd pass
fp8_weight_tensors = self.get_fp8_weights_empty_tensors(
is_first_microbatch
)
else:
# These persistent weight placeholders should've been created in
# `set_fp8_weights` method
fp8_weight_tensors = [self.weight1_fp8, self.weight1_t_fp8,
self.weight2_fp8, self.weight2_t_fp8]
return fp8_weight_tensors
@no_torch_dynamo
def forward(
self, inp: torch.Tensor, is_first_microbatch: Optional[bool] = None
) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
"""
Apply layer normalization to the input followed by a feedforward network (MLP Block).
Parameters
----------
inp : torch.Tensor
Input tensor.
is_first_microbatch : {True, False, None}, default = None
During training using either gradient accumulation or
pipeline parallelism a minibatch of data is further split
into microbatches. Between the microbatches of the same minibatch
the model weights are not updated. Setting this parameter indicates
whether the current microbatch is the first in a minibatch or not.
When set, this parameter enables additional optimizations:
* during FP8 training, it allows caching of the FP8 versions of
the weights
* it also allows skipping gradient accumulation during the
first microbatch (since it is the first gradient being
produced)
"""
with self.prepare_forward(inp, is_first_microbatch, num_gemms=2) as inp:
# Fetch the fp8 weights placeholders (for linear/gemm)
weight1_fp8, weight1_t_fp8, weight2_fp8, weight2_t_fp8 = \
self.get_fp8_weights_scratchpad(
is_first_microbatch
)
if torch.is_grad_enabled():
fwd_fn = _LayerNormMLP.apply
args = []
else:
fwd_fn = _LayerNormMLP.forward
args = [None]
args += (
inp,
self.layer_norm_weight,
self.layer_norm_bias,
self.fc1_weight,
weight1_fp8,
weight1_t_fp8,
self.fc1_bias,
self.use_bias,
self.fc2_weight,
weight2_fp8,
weight2_t_fp8,
self.fc2_bias,
self.apply_bias and not self.gemm_bias_unfused_add,
self.eps,
is_first_microbatch,
self.fp8,
self.fp8_calibration,
self.fp8_meta,
self.fuse_wgrad_accumulation,
self.tp_group,
self.tp_size,
self.sequence_parallel,
self.tp_size > 1,
self.activation_dtype,
self.return_layernorm_output,
self.bias_gelu_nvfusion,
self.set_parallel_mode,
torch.is_grad_enabled(),
self.fwd_ln_sm_margin,
self.bwd_ln_sm_margin,
self.zero_centered_gamma,
self.ub_bulk_wgrad,
self.ub_bulk_dgrad,
self.ub_split_rs,
self.ub_split_ag,
self.activation,
self.normalization,
)
out = fwd_fn(*args)
if self.return_layernorm_output:
out, ln_out = out
if self.gemm_bias_unfused_add:
out = out + cast_if_needed(self.fc2_bias, self.activation_dtype)
if self.return_bias:
if self.return_layernorm_output:
return out, cast_if_needed(self.fc2_bias, self.activation_dtype), ln_out
return out, cast_if_needed(self.fc2_bias, self.activation_dtype)
if self.return_layernorm_output:
return out, ln_out
return out
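if __name__ == "__main__":
    # Editor's usage sketch (hedged, not part of the original source): a
    # minimal smoke test of LayerNormMLP in the default (non-FP8) path,
    # assuming a CUDA device and the compiled transformer_engine extensions
    # are available. Shapes are illustrative.
    layer = LayerNormMLP(hidden_size=1024, ffn_hidden_size=4096)
    x = torch.randn(128, 2, 1024, device="cuda")  # [seq, batch, hidden]
    y = layer(x)
    print(y.shape)  # expected: torch.Size([128, 2, 1024])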
| TransformerEngine-main | transformer_engine/pytorch/module/layernorm_mlp.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""LayerNorm API"""
import os
from typing import Union, Tuple, Any, Mapping, Optional
import torch
from torch.nn.parameter import Parameter
from torch.nn import init
import transformer_engine_extensions as tex
from .base import TransformerEngineBaseModule
from ..cpp_extensions import (
layernorm_fwd_inf,
)
from ..jit import no_torch_dynamo
from ..utils import cast_if_needed
__all__ = ["LayerNorm"]
class _LayerNorm(torch.autograd.Function):
"""functional LayerNorm"""
@staticmethod
def forward(
ctx,
inp: torch.Tensor,
ln_weight: torch.Tensor,
ln_bias: torch.Tensor,
eps: float,
fwd_ln_sm_margin: int,
bwd_ln_sm_margin: int,
zero_centered_gamma: bool,
is_grad_enabled: bool,
activation_dtype: torch.dtype,
) -> torch.Tensor:
# Make sure input dimensions are compatible
in_features = ln_weight.numel()
assert inp.is_cuda, "TransformerEngine needs CUDA."
assert inp.shape[-1] == in_features, "LayerNorm not possible"
inputmat = inp.view((-1, in_features))
# Cast for native AMP
inputmat = cast_if_needed(inputmat, activation_dtype)
ln_weight = cast_if_needed(ln_weight, activation_dtype)
ln_bias = cast_if_needed(ln_bias, activation_dtype)
if is_grad_enabled:
ln_out, mu, rsigma = tex.layernorm_fwd(inputmat, ln_weight,
ln_bias, eps, fwd_ln_sm_margin, zero_centered_gamma)
ctx.save_for_backward(inputmat, ln_weight, mu, rsigma)
ctx.inp_shape = inp.shape
ctx.bwd_ln_sm_margin = bwd_ln_sm_margin
ctx.zero_centered_gamma = zero_centered_gamma
else:
ln_out, mu, rsigma = layernorm_fwd_inf(inputmat, ln_weight,
ln_bias, eps, zero_centered_gamma), None, None
return ln_out.view_as(inp)
@staticmethod
def backward(
ctx, grad_output: torch.Tensor
) -> Tuple[Union[torch.Tensor, None], ...]:
inputmat, ln_weight, mu, rsigma = ctx.saved_tensors
grad_output = grad_output.contiguous()
d_ln_out = grad_output.view(inputmat.shape)
dxmat, dgamma, dbeta = tex.layernorm_bwd(
d_ln_out, inputmat, mu, rsigma, ln_weight,
ctx.bwd_ln_sm_margin, ctx.zero_centered_gamma
)
return dxmat.view(ctx.inp_shape), dgamma, dbeta, None, None, None, None, None, None
class LayerNorm(torch.nn.Module):
r"""
Applies Layer Normalization over a mini-batch of inputs as described in
the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`__
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \varepsilon}} * \gamma + \beta
:math:`\gamma` and :math:`\beta` are learnable affine transform parameters of
size :attr:`hidden_size`
Parameters
----------
hidden_size : int
size of each input sample.
eps : float, default = 1e-5
a value added to the denominator of layer normalization for numerical stability.
sequence_parallel : bool, default = `False`
if set to `True`, uses sequence parallelism.
params_dtype : torch.dtype, default = `torch.get_default_dtype()`
it controls the type used to allocate the initial parameters. Useful when
the model is trained with lower precision and the original FP32 parameters
would not fit in GPU memory.
zero_centered_gamma : bool, default = 'False'
if set to 'True', gamma parameter in LayerNorm is initialized to 0 and
the LayerNorm formula changes to
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \varepsilon}} *
(1 + \gamma) + \beta
device : Union[torch.device, str], default = "cuda"
                The device on which the parameters of the model will be allocated. It is the user's
responsibility to ensure all parameters are moved to the GPU before running the
forward pass.
"""
def __init__(
self,
hidden_size: int,
eps: float = 1e-5,
sequence_parallel: bool = False,
params_dtype: Optional[torch.dtype] = None,
zero_centered_gamma: bool = False,
device: Union[torch.device, str] = "cuda",
) -> None:
super().__init__()
params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
self.eps = eps
self.zero_centered_gamma = zero_centered_gamma
self.weight = Parameter(
torch.empty(
hidden_size,
device=device,
dtype=params_dtype,
)
)
self.bias = Parameter(
torch.empty(
hidden_size,
device=device,
dtype=params_dtype,
)
)
setattr(self.weight, "sequence_parallel", sequence_parallel)
setattr(self.bias, "sequence_parallel", sequence_parallel)
self.reset_layer_norm_parameters()
# These many SMs are subtracted from the total SM count when calling forward
# and backward LayerNorm C APIs. These envvars can be used to prevent the LN
# kernels from using all SMs in the device. This is useful for cases such as
# communication overlap with LN.
self.fwd_ln_sm_margin = int(os.getenv("NVTE_FWD_LAYERNORM_SM_MARGIN", "0"))
self.bwd_ln_sm_margin = int(os.getenv("NVTE_BWD_LAYERNORM_SM_MARGIN", "0"))
def load_state_dict(
self,
state_dict: Mapping[str, Any],
strict: bool = True,
) -> None:
"""Override PyTorch loader to maintain backward compatibility
with previous version of LayerNorm parameter names.
"""
if "layer_norm_weight" in state_dict:
state_dict["weight"] = state_dict["layer_norm_weight"]
del state_dict["layer_norm_weight"]
if "layer_norm_bias" in state_dict:
state_dict["bias"] = state_dict["layer_norm_bias"]
del state_dict["layer_norm_bias"]
super().load_state_dict(state_dict, strict)
def reset_layer_norm_parameters(self) -> None:
"""Init LN params"""
if not self.zero_centered_gamma:
init.ones_(self.weight)
else:
init.zeros_(self.weight)
init.zeros_(self.bias)
@no_torch_dynamo
def forward(self, inp: torch.Tensor) -> torch.Tensor:
"""LayerNorm FWD"""
# Maintain backward compatibility.
if hasattr(self, "layer_norm_weight"):
setattr(self, "weight", self.layer_norm_weight)
if hasattr(self, "layer_norm_bias"):
setattr(self, "bias", self.layer_norm_bias)
# Set the activation type for AMP.
TransformerEngineBaseModule.set_activation_dtype(self, inp)
if torch.is_grad_enabled():
fwd_fn = _LayerNorm.apply
args = []
else:
fwd_fn = _LayerNorm.forward
args = [None]
args += (
inp,
self.weight,
self.bias,
self.eps,
self.fwd_ln_sm_margin,
self.bwd_ln_sm_margin,
self.zero_centered_gamma,
torch.is_grad_enabled(),
self.activation_dtype,
)
return fwd_fn(*args)
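if __name__ == "__main__":
    # Editor's usage sketch (hedged, not part of the original source): a
    # minimal smoke test of the LayerNorm module above, assuming a CUDA device
    # and the compiled transformer_engine extensions are available. Shapes
    # are illustrative.
    layer = LayerNorm(1024)
    x = torch.randn(8, 32, 1024, device="cuda")
    y = layer(x)
    print(y.shape)  # expected: torch.Size([8, 32, 1024])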
| TransformerEngine-main | transformer_engine/pytorch/module/layernorm.py |