# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.

import pytest
import torch

from megatron.core.transformer.module import Float16Module, MegatronModule
from megatron.core.transformer.transformer_config import TransformerConfig

DEVICE_CAPABILITY = None
if torch.cuda.is_available():
    DEVICE_CAPABILITY = torch.cuda.get_device_capability()


class DummyModule(MegatronModule):
    """Minimal MegatronModule with a single Linear layer, used to exercise module wrapping."""

    # def __init__(self, config: TransformerConfig, share_embeddings_and_output_weights=True):
    def __init__(self, config: TransformerConfig):
        super().__init__(config)
        self.linear = torch.nn.modules.Linear(in_features=2, out_features=1)

    def forward(self, x):
        return self.linear(x)
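

# NOTE: `transformer_config` below is not defined in this file; it is assumed to be
# provided by a shared conftest.py fixture (the hidden_size == 12 and
# ffn_hidden_size == 48 assertions in the tests reflect that assumed configuration).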
@pytest.fixture
def megatron_module(transformer_config):
    return DummyModule(config=transformer_config).cuda()


class TestMegatronModule:
    def test_megatron_module(self, megatron_module):
        assert megatron_module
        assert megatron_module.config.hidden_size == 12
        assert megatron_module.config.ffn_hidden_size == 48
        assert megatron_module.linear.weight.dtype == torch.float32

        x = torch.ones((2, 2)).cuda()
        assert megatron_module(x).dtype == torch.float32

        # TODO: test bad configs actually fail
        # failed_module = megatron_module
        # failed_module.fp16 = True
        # failed_module.bf16 = True


class TestFloat16Module:
    def test_fp16_module(self, transformer_config, megatron_module):
        transformer_config.fp16 = True
        fp16_module = Float16Module(config=transformer_config, module=megatron_module)

        assert fp16_module
        assert fp16_module.config.hidden_size == 12
        assert fp16_module.config.ffn_hidden_size == 48
        assert fp16_module.module.linear.weight.dtype == torch.float16

        x = torch.ones((2, 2)).cuda()
        # inputs are converted to fp16 then outputs are converted to fp32
        assert fp16_module(x).dtype == torch.float32

    @pytest.mark.skipif(
        not DEVICE_CAPABILITY or DEVICE_CAPABILITY[0] < 8,
        reason='bfloat16 is not supported on this device',
    )
    def test_bf16_module(self, transformer_config, megatron_module):
        transformer_config.bf16 = True
        bf16_module = Float16Module(config=transformer_config, module=megatron_module)

        assert bf16_module
        assert bf16_module.config.hidden_size == 12
        assert bf16_module.config.ffn_hidden_size == 48
        assert bf16_module.module.linear.weight.dtype == torch.bfloat16

        x = torch.ones((2, 2)).cuda()
        # inputs are converted to bf16 then outputs are converted to fp32
        assert bf16_module(x).dtype == torch.float32
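

# For reference only: a conftest.py fixture compatible with the assertions above
# might look like the sketch below (hypothetical values; the real fixture lives
# elsewhere in the test suite and may differ):
#
#     @pytest.fixture
#     def transformer_config():
#         return TransformerConfig(
#             num_layers=2, hidden_size=12, num_attention_heads=4, use_cpu_initialization=True
#         )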