# Megatron-DeepSpeed/tests/transformer/test_parallel_mlp.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.

import pytest
import torch

from megatron.core.transformer.parallel_mlp import ParallelMLP

@pytest.fixture
def mlp(transformer_config):
    return ParallelMLP(transformer_config)
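
# `transformer_config` is not defined in this file; it is expected to be
# provided by a shared fixture (typically the test suite's conftest.py).
# A minimal sketch of such a fixture, assuming TransformerConfig accepts
# these keyword arguments -- the values are chosen to match the parameter
# count asserted below (hidden_size=12 implies ffn_hidden_size=48 by
# the usual 4x default):
#
#     from megatron.core.transformer.transformer_config import TransformerConfig
#
#     @pytest.fixture
#     def transformer_config():
#         return TransformerConfig(
#             num_layers=2,
#             hidden_size=12,
#             num_attention_heads=4,
#             use_cpu_initialization=True,
#         )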

class TestParallelMLP:
    def test_constructor(self, mlp):
        assert isinstance(mlp, ParallelMLP)

        num_weights = sum([p.numel() for p in mlp.parameters()])
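        # The expected count below is an assumption tied to the config
        # sketched above: with hidden_size=12 and ffn_hidden_size=48,
        # dense_h_to_4h holds 12*48 + 48 = 624 params and dense_4h_to_h
        # holds 48*12 + 12 = 588 params, giving 624 + 588 = 1212 in total.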
        assert num_weights == 1212

    def test_cpu_forward(self, mlp):
        # [sequence length, micro batch size, hidden size]
        hidden_states = torch.ones((32, 2, mlp.config.hidden_size))
        output, output_bias = mlp(hidden_states)
        assert output.shape[0] == 32
        assert output.shape[1] == 2
        assert output.shape[2] == mlp.config.hidden_size
        assert output_bias.shape[0] == mlp.config.hidden_size
        assert output.dtype == torch.float32
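
    # Note on the separate `output_bias` checked above: Megatron's
    # ParallelMLP returns the final bias alongside the output instead of
    # adding it in, so that callers can fuse the bias add with a following
    # elementwise op (e.g. dropout + residual add).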

    @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
    def test_gpu_forward(self, mlp):
        mlp.cuda()
        # [sequence length, micro batch size, hidden size]
        hidden_states = torch.ones((32, 2, mlp.config.hidden_size))
        hidden_states = hidden_states.cuda()
        output, output_bias = mlp(hidden_states)
        assert output.shape[0] == 32
        assert output.shape[1] == 2
        assert output.shape[2] == mlp.config.hidden_size
        assert output_bias.shape[0] == mlp.config.hidden_size
        assert output.dtype == torch.float32
        assert output.device.type == 'cuda'
        assert output_bias.device.type == 'cuda'
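
# To run these tests locally (the GPU test is skipped without CUDA), e.g.
# from the repository root:
#   pytest tests/transformer/test_parallel_mlp.py -v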