# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.

import pytest
import torch

from megatron.core.transformer.parallel_mlp import ParallelMLP
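
# Unit tests for ParallelMLP. To run only this file (the path is an assumption
# about the repo layout):
#   pytest tests/unit_tests/transformer/test_parallel_mlp.py -q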


@pytest.fixture
def mlp(transformer_config):
    return ParallelMLP(transformer_config)
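

# NOTE: the `transformer_config` fixture is expected to come from the test
# suite's conftest.py; its exact values are an assumption here. A small config
# such as TransformerConfig(num_layers=2, hidden_size=12, num_attention_heads=4,
# use_cpu_initialization=True) would be consistent with the parameter count
# asserted below.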


class TestParallelMLP:
    def test_constructor(self, mlp):
        assert isinstance(mlp, ParallelMLP)

        num_weights = sum([p.numel() for p in mlp.parameters()])
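        # Expected count assuming hidden_size=12 and ffn_hidden_size=4*12=48:
        # fc1 (12 -> 48) gives 12*48 + 48, fc2 (48 -> 12) gives 48*12 + 12,
        # for a total of 1212 parameters.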
        assert num_weights == 1212

    def test_cpu_forward(self, mlp):
        # [sequence length, micro batch size, hidden size]
        hidden_states = torch.ones((32, 2, mlp.config.hidden_size))
        output, output_bias = mlp(hidden_states)
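        # ParallelMLP returns the bias of the second linear layer separately
        # (skip_bias_add), so callers can fuse the bias add with downstream ops.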
        assert output.shape[0] == 32
        assert output.shape[1] == 2
        assert output.shape[2] == mlp.config.hidden_size
        assert output_bias.shape[0] == mlp.config.hidden_size
        assert output.dtype == torch.float32

    @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
    def test_gpu_forward(self, mlp):
        mlp.cuda()
        # [sequence length, micro batch size, hidden size]
        hidden_states = torch.ones((32, 2, mlp.config.hidden_size))
        hidden_states = hidden_states.cuda()
        output, output_bias = mlp(hidden_states)
        assert output.shape[0] == 32
        assert output.shape[1] == 2
        assert output.shape[2] == mlp.config.hidden_size
        assert output_bias.shape[0] == mlp.config.hidden_size
        assert output.dtype == torch.float32
        assert output.device.type == 'cuda'
        assert output_bias.device.type == 'cuda'