import torch
from colossalai.fx.tracer.meta_patch import patched_function
from functools import partial


def _run(data, patch_fn):
    try:
        output = patch_fn(data)
        return output
    except Exception as e:
        return e


def _assert_output_shape(data, patch_fn, expect_exception, output_shape):
    output = _run(data, patch_fn)

    if expect_exception:
        assert isinstance(output, AssertionError)
    else:
        assert not isinstance(output, Exception)
        assert output.is_meta
        assert output.shape == output_shape


def test_repeat_interleave():
    patch_fn = patched_function.torch_repeat_interleave

    # examples from https://pytorch.org/docs/stable/generated/torch.repeat_interleave.html
    data = torch.tensor([1, 2, 3])
    materialized_output = torch.repeat_interleave(data, repeats=2)
    repeat_interleave = partial(patch_fn, repeats=2)
    meta_data = data.to('meta')
    _assert_output_shape(data=meta_data, patch_fn=repeat_interleave, expect_exception=False, output_shape=materialized_output.shape)

    data = torch.tensor([[1, 2], [3, 4]])
    materialized_output = torch.repeat_interleave(data, repeats=3, dim=1)
    repeat_interleave = partial(patch_fn, repeats=3, dim=1)
    meta_data = data.to('meta')
    _assert_output_shape(data=meta_data, patch_fn=repeat_interleave, expect_exception=False, output_shape=materialized_output.shape)

    data = torch.tensor([[1, 2], [3, 4]])
    materialized_output = torch.repeat_interleave(data, repeats=torch.tensor([1, 2]), dim=-1)
    repeat_interleave = partial(patch_fn, repeats=torch.tensor([1, 2]), dim=-1)
    meta_data = data.to('meta')
    _assert_output_shape(data=meta_data, patch_fn=repeat_interleave, expect_exception=False, output_shape=materialized_output.shape)

    data = torch.tensor([[1, 2], [3, 4]])
    materialized_output = torch.repeat_interleave(data, repeats=torch.tensor([1, 2]), dim=0)
    repeat_interleave = partial(patch_fn, repeats=[1, 2], dim=0)
    meta_data = data.to('meta')
    _assert_output_shape(data=meta_data, patch_fn=repeat_interleave, expect_exception=True, output_shape=materialized_output.shape)


def test_torch_max():
    data = torch.rand(4, 3)
    out = torch.max(data)
    patched_out = patched_function.torch_max(data)
    assert out.shape == patched_out.shape

    data = torch.rand(4, 3, 2)
    out, idx = torch.max(data, dim=1)
    patched_out, patched_idx = patched_function.torch_max(data, dim=1)
    assert out.shape == patched_out.shape
    assert idx.shape == patched_idx.shape

    data = torch.rand(4, 3, 2)
    out, idx = torch.max(data, dim=1, keepdim=True)
    patched_out, patched_idx = patched_function.torch_max(data, dim=1, keepdim=True)
    assert out.shape == patched_out.shape
    assert idx.shape == patched_idx.shape
import torch
import torch.nn as nn
from torch.fx import GraphModule

from colossalai.fx import ColoTracer as Tracer


class ControlFlowModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(10, 10)
        self.linear2 = nn.Linear(10, 10)

    def forward(self, x, y):
        x1 = self.linear1(x)
        y1 = self.linear2(y)

        if x1.dim() == 2:
            return x1 + y1
        else:
            return x1 - y1


def test_control_flow():
    model = ControlFlowModel()
    tracer = Tracer()
    graph_branch_true = tracer.trace(model,
                                     meta_args={
                                         'x': torch.rand(4, 10, device='meta'),
                                         'y': torch.rand(4, 10, device='meta')
                                     })
    graph_branch_false = tracer.trace(model,
                                      meta_args={
                                          'x': torch.rand(10, device='meta'),
                                          'y': torch.rand(4, 10, device='meta')
                                      })

    gm_branch_true = GraphModule(model, graph_branch_true, model.__class__.__name__)
    gm_branch_false = GraphModule(model, graph_branch_false, model.__class__.__name__)
    gm_branch_true.recompile()
    gm_branch_false.recompile()

    # test the true branch
    x = torch.rand(4, 10)
    y = torch.rand(4, 10)
    assert torch.all(model(x, y) == gm_branch_true(x, y))
    assert torch.all(gm_branch_false(x, y) != gm_branch_true(x, y))

    # test the false branch
    x = torch.rand(10)
    y = torch.rand(4, 10)
    assert torch.all(model(x, y) == gm_branch_false(x, y))
    assert torch.all(gm_branch_false(x, y) != gm_branch_true(x, y))


if __name__ == '__main__':
    test_control_flow()
import torch from colossalai.fx.tracer.meta_patch import patched_module def _run(data, module, patch_fn): try: if isinstance(data, dict): output = patch_fn(module, **data) if isinstance(data, tuple) or isinstance(data, list): output = patch_fn(module, *data) else: output = patch_fn(module, data) return output except Exception as e: return e def _assert_output_shape(data, module, patch_fn, expect_exception, output_shape): output = _run(data, module, patch_fn) if expect_exception: assert isinstance(output, AssertionError) else: assert not isinstance(output, Exception) if isinstance(output, tuple): for item, shape in zip(output, output_shape): assert item.is_meta assert item.shape == shape else: assert output.is_meta assert output.shape == output_shape def test_linear(): # test linear patch can produce the meta output with correct shape data = torch.rand(2, 4, device='meta') module = torch.nn.Linear(4, 2) _assert_output_shape(data, module, patched_module.torch_nn_linear, False, torch.Size([2, 2])) # test if the linear patch can catch exception when dimension does not match data = torch.rand(2, 2, device='meta') _assert_output_shape(data, module, patched_module.torch_nn_linear, True, None) def test_rnn(): # test rnn patch can produce the meta output with correct shape data = (torch.randn(5, 3, 10), torch.randn(2, 3, 20)) module = torch.nn.RNN(10, 20, 2) output, hn = module(*data) meta_data = (torch.randn(5, 3, 10).to('meta'), torch.randn(2, 3, 20).to('meta')) _assert_output_shape(meta_data, module, patched_module.torch_nn_rnn, False, (output.shape, hn.shape)) # test if the rnn patch can catch exception when dimension does not match data = (torch.randn(5, 3, 10), torch.randn(2, 3, 20)) module = torch.nn.RNN(10, 20, 2) output, hn = module(*data) meta_data = (torch.randn(5, 3, 1).to('meta'), torch.randn(2, 3, 20).to('meta')) _assert_output_shape(meta_data, module, patched_module.torch_nn_rnn, True, None) def test_embedding(): data = torch.rand(2, 4, device='meta') # test layernorm ln = torch.nn.LayerNorm(4) _assert_output_shape(data, ln, patched_module.torch_nn_normalize, False, data.shape) # test group norm gn = torch.nn.GroupNorm(4, num_channels=8) _assert_output_shape(data, gn, patched_module.torch_nn_normalize, False, data.shape) # test batch norm 1d bn1d = torch.nn.BatchNorm1d(4) data = torch.rand(2, 4, device='meta') _assert_output_shape(data=data, module=bn1d, patch_fn=patched_module.torch_nn_normalize, expect_exception=False, output_shape=data.shape) data = torch.rand(2, 4, device='meta') _assert_output_shape(data=data, module=bn1d, patch_fn=patched_module.torch_nn_normalize, expect_exception=False, output_shape=data.shape) data = torch.rand(2, 3, 4, device='meta') _assert_output_shape(data=data, module=bn1d, patch_fn=patched_module.torch_nn_normalize, expect_exception=False, output_shape=data.shape) data = torch.rand(1, 2, 3, 4, device='meta') _assert_output_shape(data=data, module=bn1d, patch_fn=patched_module.torch_nn_normalize, expect_exception=True, output_shape=None) # test batch norm 2d bn2d = torch.nn.BatchNorm2d(4) data = torch.rand(1, 2, 3, 4, device='meta') _assert_output_shape(data=data, module=bn2d, patch_fn=patched_module.torch_nn_normalize, expect_exception=False, output_shape=data.shape) data = torch.rand(2, 3, 4, device='meta') _assert_output_shape(data=data, module=bn2d, patch_fn=patched_module.torch_nn_normalize, expect_exception=True, output_shape=None) # # test batch size 3d bn3d = torch.nn.BatchNorm3d(4) data = torch.rand(1, 1, 2, 3, 4, device='meta') 
_assert_output_shape(data=data, module=bn3d, patch_fn=patched_module.torch_nn_normalize, expect_exception=False, output_shape=data.shape) data = torch.rand(1, 2, 3, 4, device='meta') _assert_output_shape(data=data, module=bn3d, patch_fn=patched_module.torch_nn_normalize, expect_exception=True, output_shape=None) def test_conv1d(): # test conv 1d data = torch.rand(2, 3, 4) conv1d = torch.nn.Conv1d(in_channels=3, out_channels=4, kernel_size=2) materialized_output = conv1d(data) meta_data = data.to('meta') _assert_output_shape(data=meta_data, module=conv1d, patch_fn=patched_module.torch_nn_conv1d, expect_exception=False, output_shape=materialized_output.shape) conv1d = torch.nn.Conv1d(in_channels=3, out_channels=4, kernel_size=2, padding=1) materialized_output = conv1d(data) meta_data = data.to('meta') _assert_output_shape(data=meta_data, module=conv1d, patch_fn=patched_module.torch_nn_conv1d, expect_exception=False, output_shape=materialized_output.shape) conv1d = torch.nn.Conv1d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2, padding_mode='reflect') materialized_output = conv1d(data) meta_data = data.to('meta') _assert_output_shape(data=meta_data, module=conv1d, patch_fn=patched_module.torch_nn_conv1d, expect_exception=False, output_shape=materialized_output.shape) def test_conv2d(): # test conv 2d data = torch.rand(2, 3, 4, 4) conv2d = torch.nn.Conv2d(in_channels=3, out_channels=4, kernel_size=2) materialized_output = conv2d(data) _assert_output_shape(data=data, module=conv2d, patch_fn=patched_module.torch_nn_conv2d, expect_exception=False, output_shape=materialized_output.shape) conv2d = torch.nn.Conv2d(in_channels=3, out_channels=4, kernel_size=2, padding=1) materialized_output = conv2d(data) _assert_output_shape(data=data, module=conv2d, patch_fn=patched_module.torch_nn_conv2d, expect_exception=False, output_shape=materialized_output.shape) conv2d = torch.nn.Conv2d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2) materialized_output = conv2d(data) _assert_output_shape(data=data, module=conv2d, patch_fn=patched_module.torch_nn_conv2d, expect_exception=False, output_shape=materialized_output.shape) conv2d = torch.nn.Conv2d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2, padding_mode='reflect') materialized_output = conv2d(data) _assert_output_shape(data=data, module=conv2d, patch_fn=patched_module.torch_nn_conv2d, expect_exception=False, output_shape=materialized_output.shape) def test_conv3d(): # test conv 3d data = torch.rand(2, 3, 4, 4, 4) conv3d = torch.nn.Conv3d(in_channels=3, out_channels=4, kernel_size=2) materialized_output = conv3d(data) _assert_output_shape(data=data, module=conv3d, patch_fn=patched_module.torch_nn_conv3d, expect_exception=False, output_shape=materialized_output.shape) conv3d = torch.nn.Conv3d(in_channels=3, out_channels=4, kernel_size=2, padding=1) materialized_output = conv3d(data) _assert_output_shape(data=data, module=conv3d, patch_fn=patched_module.torch_nn_conv3d, expect_exception=False, output_shape=materialized_output.shape) conv3d = torch.nn.Conv3d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2) materialized_output = conv3d(data) _assert_output_shape(data=data, module=conv3d, patch_fn=patched_module.torch_nn_conv3d, expect_exception=False, output_shape=materialized_output.shape) conv3d = torch.nn.Conv3d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2, padding_mode='reflect') materialized_output = conv3d(data) _assert_output_shape(data=data, 
module=conv3d, patch_fn=patched_module.torch_nn_conv3d, expect_exception=False, output_shape=materialized_output.shape) def test_conv_transpose1d(): # test conv transpose1d data = torch.rand(2, 3, 4) convtrans1d = torch.nn.ConvTranspose1d(in_channels=3, out_channels=4, kernel_size=2) materialized_output = convtrans1d(data) meta_data = data.to('meta') _assert_output_shape(data=meta_data, module=convtrans1d, patch_fn=patched_module.torch_nn_convtranspose1d, expect_exception=False, output_shape=materialized_output.shape) convtrans1d = torch.nn.ConvTranspose1d(in_channels=3, out_channels=4, kernel_size=2, padding=1) materialized_output = convtrans1d(data) meta_data = data.to('meta') _assert_output_shape(data=meta_data, module=convtrans1d, patch_fn=patched_module.torch_nn_convtranspose1d, expect_exception=False, output_shape=materialized_output.shape) def test_conv_transpose2d(): # test conv transpose2d data = torch.rand(2, 3, 4, 4) convtrans2d = torch.nn.ConvTranspose2d(in_channels=3, out_channels=4, kernel_size=2) materialized_output = convtrans2d(data) meta_data = data.to('meta') _assert_output_shape(data=meta_data, module=convtrans2d, patch_fn=patched_module.torch_nn_convtranspose2d, expect_exception=False, output_shape=materialized_output.shape) convtrans2d = torch.nn.ConvTranspose2d(in_channels=3, out_channels=4, kernel_size=2, padding=1) materialized_output = convtrans2d(data) meta_data = data.to('meta') _assert_output_shape(data=meta_data, module=convtrans2d, patch_fn=patched_module.torch_nn_convtranspose2d, expect_exception=False, output_shape=materialized_output.shape) def test_conv_transpose3d(): # test conv transpose2d data = torch.rand(2, 3, 4, 4, 4) convtrans3d = torch.nn.ConvTranspose3d(in_channels=3, out_channels=4, kernel_size=2) materialized_output = convtrans3d(data) meta_data = data.to('meta') _assert_output_shape(data=meta_data, module=convtrans3d, patch_fn=patched_module.torch_nn_convtranspose3d, expect_exception=False, output_shape=materialized_output.shape) convtrans3d = torch.nn.ConvTranspose3d(in_channels=3, out_channels=4, kernel_size=2, padding=1) materialized_output = convtrans3d(data) meta_data = data.to('meta') _assert_output_shape(data=meta_data, module=convtrans3d, patch_fn=patched_module.torch_nn_convtranspose3d, expect_exception=False, output_shape=materialized_output.shape) def test_pool1d(): combinations = [[torch.nn.MaxPool1d, patched_module.torch_nn_maxpool1d], [torch.nn.AvgPool1d, patched_module.torch_nn_avgpool1d]] for (layer_cls, patch_func) in combinations: pooler = layer_cls(kernel_size=3) data = torch.rand(2, 3, 4) materialized_output = pooler(data) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=materialized_output.shape) data = torch.rand(2, 4) materialized_output = pooler(data) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=materialized_output.shape) data = torch.rand(2, 3, 4, 4) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=True, output_shape=None) def test_pool2d(): combinations = [[torch.nn.MaxPool2d, patched_module.torch_nn_maxpool2d], [torch.nn.AvgPool2d, patched_module.torch_nn_avgpool2d]] for (layer_cls, patch_func) in combinations: pooler = layer_cls(kernel_size=3) # test max pool 3d data = torch.rand(2, 3, 4, 4) materialized_output = pooler(data) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=materialized_output.shape) # test max 
pool 3d data = torch.rand(2, 4, 4) materialized_output = pooler(data) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=materialized_output.shape) # test max pool 3d data = torch.rand(2, 3, 4, 4, 4) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=True, output_shape=None) def test_pool3d(): combinations = [[torch.nn.MaxPool3d, patched_module.torch_nn_maxpool3d], [torch.nn.AvgPool3d, patched_module.torch_nn_avgpool3d]] for (layer_cls, patch_func) in combinations: pooler = layer_cls(kernel_size=3) # test max pool 3d data = torch.rand(2, 3, 4, 4, 4) materialized_output = pooler(data) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=materialized_output.shape) # test max pool 3d data = torch.rand(2, 4, 4, 4) materialized_output = pooler(data) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=materialized_output.shape) # test max pool 3d data = torch.rand(2, 3, 4) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=True, output_shape=None) # adapative pooling is different from other pooling, so test it individually def test_adaptive_pooling_1d(): pooler = torch.nn.AdaptiveAvgPool1d(output_size=3) patch_func = patched_module.torch_nn_adapative_pooling_1d data = torch.rand(3, 4) output = pooler(data) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=output.shape) data = torch.rand(2, 3, 4) output = pooler(data) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=output.shape) data = torch.rand(2, 3, 4, 5) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=True, output_shape=None) def test_adaptive_pooling_2d(): pooler = torch.nn.AdaptiveAvgPool2d(output_size=3) patch_func = patched_module.torch_nn_adapative_pooling_2d data = torch.rand(3, 4) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=True, output_shape=None) data = torch.rand(2, 3, 4) output = pooler(data) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=output.shape) data = torch.rand(2, 3, 4, 5) output = pooler(data) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=output.shape) def test_adaptive_pooling_3d(): pooler = torch.nn.AdaptiveAvgPool3d(output_size=3) patch_func = patched_module.torch_nn_adapative_pooling_3d data = torch.rand(3, 4, 5) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=True, output_shape=None) data = torch.rand(2, 3, 4, 5) output = pooler(data) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=output.shape) data = torch.rand(2, 3, 4, 5, 6) output = pooler(data) _assert_output_shape(data=data, module=pooler, patch_fn=patch_func, expect_exception=False, output_shape=output.shape)
import pytest
import timm.models as tm
import torch

from colossalai.fx import symbolic_trace


def trace_and_compare(model_cls, data, meta_args=None):
    # trace
    model = model_cls()

    # convert to eval for inference
    # it is important to set it to eval mode before tracing,
    # otherwise torch.nn.functional.batch_norm will always be in training mode
    model.eval()

    gm = symbolic_trace(model, meta_args=meta_args)

    # run forward
    with torch.no_grad():
        fx_out = gm(data)
        non_fx_out = model(data)

    # compare output
    if isinstance(fx_out, tuple):
        # some models produce tuple as output
        for v1, v2 in zip(fx_out, non_fx_out):
            assert torch.allclose(v1, v2), f'{model.__class__.__name__} has inconsistent outputs, {v1} vs {v2}'
    else:
        assert torch.allclose(
            fx_out, non_fx_out,
            atol=1e-5), f'{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}'


def test_timm_models_without_control_flow():
    torch.backends.cudnn.deterministic = True

    MODEL_LIST = [
        tm.resnest.resnest50d,
        tm.beit.beit_base_patch16_224,
        tm.cait.cait_s24_224,
        tm.convmixer.convmixer_768_32,
        tm.efficientnet.efficientnetv2_m,
        tm.resmlp_12_224,
        tm.vision_transformer.vit_base_patch16_224,
        tm.deit_base_distilled_patch16_224,
    ]

    data = torch.rand(2, 3, 224, 224)

    for model_cls in MODEL_LIST:
        trace_and_compare(model_cls, data)


def test_timm_models_with_control_flow():
    torch.backends.cudnn.deterministic = True

    MODEL_LIST_WITH_CONTROL_FLOW = [
        tm.convnext.convnext_base, tm.vgg.vgg11, tm.dpn.dpn68, tm.densenet.densenet121, tm.rexnet.rexnet_100,
        tm.swin_transformer.swin_base_patch4_window7_224
    ]

    data = torch.rand(2, 3, 224, 224)
    meta_args = {'x': data.to('meta')}

    for model_cls in MODEL_LIST_WITH_CONTROL_FLOW:
        trace_and_compare(model_cls, data, meta_args)


if __name__ == '__main__':
    test_timm_models_with_control_flow()
    test_timm_models_without_control_flow()
import torch
from torchaudio_utils import trace_and_compare
from torchaudio.models import ConvTasNet, DeepSpeech, Wav2Letter, WaveRNN
from torchaudio.models.wavernn import MelResNet, UpsampleNetwork
import pytest


def test_wave2letter_waveform():
    batch_size = 2
    num_features = 1
    num_classes = 40
    input_length = 320

    model = Wav2Letter(num_classes=num_classes, num_features=num_features)

    def data_gen():
        x = torch.rand(batch_size, num_features, input_length)
        return dict(x=x)

    trace_and_compare(model, data_gen, need_meta=False, need_concrete=False)


def test_wave2letter_mfcc():
    batch_size = 2
    num_features = 13
    num_classes = 40
    input_length = 2

    model = Wav2Letter(num_classes=num_classes, input_type="mfcc", num_features=num_features)

    def data_gen():
        x = torch.rand(batch_size, num_features, input_length)
        return dict(x=x)

    trace_and_compare(model, data_gen, need_meta=False, need_concrete=False)


def test_melresnet_waveform():
    n_batch = 2
    n_time = 200
    n_freq = 100
    n_output = 128
    n_res_block = 10
    n_hidden = 128
    kernel_size = 5

    model = MelResNet(n_res_block, n_freq, n_hidden, n_output, kernel_size)

    def data_gen():
        x = torch.rand(n_batch, n_freq, n_time)
        return dict(specgram=x)

    trace_and_compare(model, data_gen, need_meta=False, need_concrete=False)


def test_upsample_network_waveform():
    upsample_scales = [5, 5, 8]
    n_batch = 2
    n_time = 200
    n_freq = 100
    n_output = 64
    n_res_block = 10
    n_hidden = 32
    kernel_size = 5

    total_scale = 1
    for upsample_scale in upsample_scales:
        total_scale *= upsample_scale

    model = UpsampleNetwork(upsample_scales, n_res_block, n_freq, n_hidden, n_output, kernel_size)

    def data_gen():
        x = torch.rand(n_batch, n_freq, n_time)
        return dict(specgram=x)

    trace_and_compare(model, data_gen, need_meta=False, need_concrete=False)


def test_wavernn_waveform():
    upsample_scales = [2, 2, 5]
    n_rnn = 16
    n_fc = 16
    n_classes = 10
    hop_length = 20
    n_batch = 2
    n_time = 20
    n_freq = 10
    n_output = 16
    n_res_block = 3
    n_hidden = 16
    kernel_size = 5

    model = WaveRNN(upsample_scales, n_classes, hop_length, n_res_block, n_rnn, n_fc, kernel_size, n_freq, n_hidden,
                    n_output)

    def data_gen():
        x = torch.rand(n_batch, 1, hop_length * (n_time - kernel_size + 1))
        mels = torch.rand(n_batch, 1, n_freq, n_time)
        return dict(waveform=x, specgram=mels)

    trace_and_compare(model, data_gen, need_meta=True, need_concrete=False)


def test_convtasnet_config():
    batch_size = 32
    num_frames = 800

    model = ConvTasNet()

    def data_gen():
        tensor = torch.rand(batch_size, 1, num_frames)
        return dict(input=tensor)

    trace_and_compare(model, data_gen, need_meta=True, need_concrete=False)


def test_deepspeech():
    n_batch = 2
    n_feature = 1
    n_channel = 1
    n_class = 40
    n_time = 32

    model = DeepSpeech(n_feature=n_feature, n_class=n_class)

    def data_gen():
        x = torch.rand(n_batch, n_channel, n_time, n_feature)
        return dict(x=x)

    trace_and_compare(model, data_gen, need_meta=False, need_concrete=False)


if __name__ == '__main__':
    TEST_LIST = [
        test_wave2letter_waveform,
        test_wave2letter_mfcc,
        test_melresnet_waveform,
        test_upsample_network_waveform,
        test_wavernn_waveform,
        test_convtasnet_config,
        test_deepspeech,
    ]

    for test_fn in TEST_LIST:
        test_fn()
import torch

from colossalai.fx import symbolic_trace


def trace_and_compare(model, data_gen, need_meta=False, need_concrete=False, kwargs_transform=False):
    data = data_gen()
    concrete_args = data if need_concrete else {}
    meta_args = {k: v.to('meta') for k, v in data.items()} if need_meta else {}

    model.eval()

    gm = symbolic_trace(model, concrete_args=concrete_args, meta_args=meta_args)

    with torch.no_grad():
        non_fx_out = model(**data)

        if kwargs_transform:
            data = kwargs_transform(data)

        fx_out = gm(**data)

    if isinstance(fx_out, tuple):
        for non_fx, fx in zip(non_fx_out, fx_out):
            assert torch.allclose(
                non_fx, fx,
                atol=1e-5), f'{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}'
    else:
        assert torch.allclose(
            fx_out, non_fx_out,
            atol=1e-5), f'{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}'
import torch
from torchaudio.models import Tacotron2
from torchaudio_utils import trace_and_compare
import pytest


def _get_tacotron2_model(n_mels, decoder_max_step=2000, gate_threshold=0.5):
    return Tacotron2(
        mask_padding=False,
        n_mels=n_mels,
        n_symbol=20,
        n_frames_per_step=1,
        symbol_embedding_dim=32,
        encoder_embedding_dim=32,
        encoder_n_convolution=3,
        encoder_kernel_size=5,
        decoder_rnn_dim=32,
        decoder_max_step=decoder_max_step,
        decoder_dropout=0.1,
        decoder_early_stopping=True,
        attention_rnn_dim=32,
        attention_hidden_dim=32,
        attention_location_n_filter=32,
        attention_location_kernel_size=31,
        attention_dropout=0.1,
        prenet_dim=32,
        postnet_n_convolution=5,
        postnet_kernel_size=5,
        postnet_embedding_dim=512,
        gate_threshold=gate_threshold,
    )


@pytest.mark.skip("Tracing failed")
def test_tacotron_model():
    n_mels = 80
    n_batch = 3
    max_mel_specgram_length = 300
    max_text_length = 100

    model = _get_tacotron2_model(n_mels)

    def data_gen():
        text = torch.randint(0, 148, (n_batch, max_text_length))
        text_lengths = max_text_length * torch.ones((n_batch,))
        mel_specgram = torch.rand(n_batch, n_mels, max_mel_specgram_length)
        mel_specgram_lengths = max_mel_specgram_length * torch.ones((n_batch,))
        return dict(tokens=text,
                    token_lengths=text_lengths,
                    mel_specgram=mel_specgram,
                    mel_specgram_lengths=mel_specgram_lengths)

    trace_and_compare(model, data_gen, need_meta=True, need_concrete=False)


if __name__ == "__main__":
    test_tacotron_model()
import torch
from torchaudio.models.wav2vec2 import (
    hubert_base,
    hubert_large,
    hubert_xlarge,
    wav2vec2_base,
    wav2vec2_large,
    wav2vec2_large_lv60k,
)
from torchaudio_utils import trace_and_compare
import pytest

MODEL_LIST = [
    hubert_base,
    hubert_large,
    hubert_xlarge,
    wav2vec2_base,
    wav2vec2_large,
    wav2vec2_large_lv60k,
]


def _smoke_test(model, device):
    model = model.to(device=device)

    batch_size, num_frames = 3, 1024

    def data_gen():
        waveforms = torch.randn(batch_size, num_frames, device=device)
        lengths = torch.randint(
            low=0,
            high=num_frames,
            size=[
                batch_size,
            ],
            device=device,
        )
        return dict(waveforms=waveforms, lengths=lengths)

    trace_and_compare(model, data_gen, need_meta=True, need_concrete=False)


@pytest.mark.skip("Tracing failed")
def test_wav2vec():
    for model_fn in MODEL_LIST:
        _smoke_test(model_fn(), 'cpu')


if __name__ == "__main__":
    test_wav2vec()
import torch
from torchaudio_utils import trace_and_compare
from torchaudio.models import Emformer, Conformer
import pytest


def test_conformer():
    input_dim = 80
    batch_size = 10
    num_frames = 400
    num_heads = 4
    ffn_dim = 128
    num_layers = 4
    depthwise_conv_kernel_size = 31

    model = Conformer(
        input_dim=input_dim,
        num_heads=num_heads,
        ffn_dim=ffn_dim,
        num_layers=num_layers,
        depthwise_conv_kernel_size=depthwise_conv_kernel_size,
    )

    def data_gen():
        lengths = torch.randint(1, num_frames, (batch_size,))
        input = torch.rand(batch_size, int(lengths.max()), input_dim)
        return dict(input=input, lengths=lengths)

    def kwargs_transform(data):
        new_data = {}

        for k, v in data.items():
            new_data[f'{k}_1'] = v
        return new_data

    trace_and_compare(model, data_gen, need_meta=False, need_concrete=True, kwargs_transform=kwargs_transform)


@pytest.mark.skip("Tracing failed")
def test_emformer():
    input_dim = 128
    batch_size = 10
    num_heads = 8
    ffn_dim = 256
    num_layers = 3
    segment_length = 4
    num_frames = 400
    right_context_length = 1

    model = Emformer(input_dim, num_heads, ffn_dim, num_layers, segment_length, right_context_length)

    def data_gen():
        lengths = torch.randint(1, num_frames, (batch_size,))
        input = torch.rand(batch_size, num_frames, input_dim)
        return dict(input=input, lengths=lengths)

    trace_and_compare(model, data_gen, need_meta=True, need_concrete=False)


@pytest.mark.skip
def test_torchaudio_transformers():
    test_conformer()
    test_emformer()


if __name__ == "__main__":
    test_torchaudio_transformers()
import torch
import torchvision
import torchvision.models as tm
from packaging import version

from colossalai.fx import symbolic_trace


def test_torchvision_models():
    MODEL_LIST = [
        tm.vgg11, tm.resnet18, tm.densenet121, tm.mobilenet_v3_small, tm.resnext50_32x4d, tm.wide_resnet50_2,
        tm.regnet_x_16gf, tm.mnasnet0_5, tm.efficientnet_b0
    ]

    RANDOMIZED_MODELS = [tm.efficientnet_b0]

    if version.parse(torchvision.__version__) >= version.parse('0.12.0'):
        MODEL_LIST.extend([tm.vit_b_16, tm.convnext_small])
        RANDOMIZED_MODELS.append(tm.convnext_small)

    torch.backends.cudnn.deterministic = True

    data = torch.rand(2, 3, 224, 224)

    for model_cls in MODEL_LIST:
        if model_cls in RANDOMIZED_MODELS:
            # remove the impact of randomness
            model = model_cls(stochastic_depth_prob=0)
        else:
            model = model_cls()

        gm = symbolic_trace(model)

        model.eval()
        gm.eval()

        with torch.no_grad():
            fx_out = gm(data)
            non_fx_out = model(data)
        assert torch.allclose(
            fx_out, non_fx_out), f'{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}'


if __name__ == '__main__':
    test_torchvision_models()
import pytest
import torch
import transformers
from hf_tracer_utils import trace_model_and_compare_output

BATCH_SIZE = 1
SEQ_LENGTH = 16


def test_t5():
    MODEL_LIST = [
        transformers.T5Model,
        transformers.T5ForConditionalGeneration,
        transformers.T5EncoderModel,
    ]

    config = transformers.T5Config(d_model=128, num_layers=2)

    def data_gen():
        input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        decoder_input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        kwargs = dict(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        return kwargs

    def data_gen_for_encoder_only():
        input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        kwargs = dict(input_ids=input_ids)
        return kwargs

    for model_cls in MODEL_LIST:
        model = model_cls(config=config)

        if isinstance(model, transformers.T5EncoderModel):
            data_gen_func = data_gen_for_encoder_only
        else:
            data_gen_func = data_gen

        trace_model_and_compare_output(model, data_gen_func)


if __name__ == '__main__':
    test_t5()
import pytest
import torch
import transformers
from hf_tracer_utils import trace_model_and_compare_output

BATCH_SIZE = 1
SEQ_LENGTH = 16


# TODO: remove this skip once we handle the latest gpt model
@pytest.mark.skip
def test_gpt():
    MODEL_LIST = [
        transformers.GPT2Model,
        transformers.GPT2LMHeadModel,
        transformers.GPT2DoubleHeadsModel,
        transformers.GPT2ForTokenClassification,
        # transformers.GPT2ForSequenceClassification,    # not supported yet
    ]

    config = transformers.GPT2Config(n_position=64, n_layer=2, n_head=4)

    def data_gen():
        input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        token_type_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        kwargs = dict(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
        return kwargs

    for model_cls in MODEL_LIST:
        model = model_cls(config=config)
        trace_model_and_compare_output(model, data_gen)


if __name__ == '__main__':
    test_gpt()
import pytest
import torch
import transformers
from hf_tracer_utils import trace_model_and_compare_output

BATCH_SIZE = 2
SEQ_LENGTH = 16


def test_single_sentence_bert():
    MODEL_LIST = [
        transformers.BertModel,
        transformers.BertForPreTraining,
        transformers.BertLMHeadModel,
        transformers.BertForMaskedLM,
        transformers.BertForSequenceClassification,
        transformers.BertForTokenClassification,
    ]

    config = transformers.BertConfig(hidden_size=128,
                                     num_hidden_layers=2,
                                     num_attention_heads=4,
                                     intermediate_size=256)

    def data_gen():
        input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        token_type_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        meta_args = dict(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
        return meta_args

    for model_cls in MODEL_LIST:
        model = model_cls(config=config)
        trace_model_and_compare_output(model, data_gen)


def test_multi_sentence_bert():
    config = transformers.BertConfig(hidden_size=128,
                                     num_hidden_layers=2,
                                     num_attention_heads=4,
                                     intermediate_size=256)
    tokenizer = transformers.BertTokenizer.from_pretrained("bert-base-uncased")

    def data_gen_for_next_sentence():
        prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
        return encoding

    model = transformers.BertForNextSentencePrediction(config)
    trace_model_and_compare_output(model, data_gen_for_next_sentence)

    def data_gen_for_qa():
        question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
        inputs = tokenizer(question, text, return_tensors="pt")
        return inputs

    model = transformers.BertForQuestionAnswering(config)
    trace_model_and_compare_output(model, data_gen_for_qa)

    def data_gen_for_mcq():
        prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        choice0 = "It is eaten with a fork and a knife."
        choice1 = "It is eaten while held in the hand."
        encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True)
        encoding = {k: v.unsqueeze(0) for k, v in encoding.items()}
        return encoding

    model = transformers.BertForMultipleChoice(config)
    trace_model_and_compare_output(model, data_gen_for_mcq)


if __name__ == '__main__':
    test_single_sentence_bert()
    test_multi_sentence_bert()
import pytest
import torch
import transformers
from hf_tracer_utils import trace_model_and_compare_output

BATCH_SIZE = 2
SEQ_LENGTH = 16


def test_single_sentence_albert():
    MODEL_LIST = [
        transformers.AlbertModel,
        transformers.AlbertForPreTraining,
        transformers.AlbertForMaskedLM,
        transformers.AlbertForSequenceClassification,
        transformers.AlbertForTokenClassification,
    ]

    config = transformers.AlbertConfig(embedding_size=128,
                                       hidden_size=128,
                                       num_hidden_layers=2,
                                       num_attention_heads=4,
                                       intermediate_size=256)

    def data_gen():
        input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        token_type_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        meta_args = dict(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
        return meta_args

    for model_cls in MODEL_LIST:
        model = model_cls(config=config)
        trace_model_and_compare_output(model, data_gen)


def test_multi_sentence_albert():
    config = transformers.AlbertConfig(hidden_size=128,
                                       num_hidden_layers=2,
                                       num_attention_heads=4,
                                       intermediate_size=256)
    tokenizer = transformers.BertTokenizer.from_pretrained("bert-base-uncased")

    def data_gen_for_qa():
        question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
        inputs = tokenizer(question, text, return_tensors="pt")
        return inputs

    model = transformers.AlbertForQuestionAnswering(config)
    trace_model_and_compare_output(model, data_gen_for_qa)

    def data_gen_for_mcq():
        prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        choice0 = "It is eaten with a fork and a knife."
        choice1 = "It is eaten while held in the hand."
        encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True)
        encoding = {k: v.unsqueeze(0) for k, v in encoding.items()}
        return encoding

    model = transformers.AlbertForMultipleChoice(config)
    trace_model_and_compare_output(model, data_gen_for_mcq)


if __name__ == '__main__':
    test_single_sentence_albert()
    test_multi_sentence_albert()
import torch

from colossalai.fx import symbolic_trace


def trace_model_and_compare_output(model, data_gen):
    # must turn on eval mode to ensure the output is consistent
    model.eval()

    try:
        kwargs = data_gen()
        meta_args = {k: v.to('meta') for k, v in kwargs.items()}
        gm = symbolic_trace(model, meta_args=meta_args)
    except Exception as e:
        raise RuntimeError(f"Failed to trace {model.__class__.__name__}, error: {e}")

    # run forward
    inputs = data_gen()
    non_fx_out = model(**inputs)
    fx_out = gm(**inputs)

    # check output
    for k in non_fx_out.keys():
        if torch.is_tensor(fx_out[k]):
            assert torch.equal(
                fx_out[k], non_fx_out[k]
            ), f'{model.__class__.__name__} has incorrect output {k}, expect {non_fx_out[k]}, but got {fx_out[k]}'
import pytest
import torch
import transformers
from hf_tracer_utils import trace_model_and_compare_output

BATCH_SIZE = 1
SEQ_LENGTH = 16


def test_opt():
    MODEL_LIST = [
        transformers.OPTModel,
        transformers.OPTForCausalLM,
    ]

    config = transformers.OPTConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)

    def data_gen():
        input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        kwargs = dict(input_ids=input_ids, attention_mask=attention_mask)
        return kwargs

    for model_cls in MODEL_LIST:
        model = model_cls(config=config)
        trace_model_and_compare_output(model, data_gen)


if __name__ == '__main__':
    test_opt()
import pytest import torch import transformers from hf_tracer_utils import trace_model_and_compare_output from colossalai.fx import symbolic_trace try: import diffusers HAS_DIFFUSERS = True except ImportError: HAS_DIFFUSERS = False BATCH_SIZE = 2 SEQ_LENGTH = 5 HEIGHT = 224 WIDTH = 224 IN_CHANNELS = 3 LATENTS_SHAPE = (BATCH_SIZE, IN_CHANNELS, HEIGHT // 8, WIDTH // 8) TIME_STEP = 2 @pytest.mark.skipif(not HAS_DIFFUSERS, reason="diffusers has not been installed") def test_vae(): MODEL_LIST = [ diffusers.AutoencoderKL, diffusers.VQModel, ] for model_cls in MODEL_LIST: model = model_cls() sample = torch.zeros(LATENTS_SHAPE) gm = symbolic_trace(model) model.eval() gm.eval() with torch.no_grad(): fx_out = gm(sample) non_fx_out = model(sample) assert torch.allclose( fx_out['sample'], non_fx_out['sample']), f'{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}' def test_clip(): MODEL_LIST = [ transformers.CLIPModel, transformers.CLIPTextModel, transformers.CLIPVisionModel, ] CONFIG_LIST = [ transformers.CLIPConfig, transformers.CLIPTextConfig, transformers.CLIPVisionConfig, ] def data_gen(): if isinstance(model, transformers.CLIPModel): input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64) attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64) position_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64) pixel_values = torch.zeros((BATCH_SIZE, IN_CHANNELS, HEIGHT, WIDTH), dtype=torch.float32) kwargs = dict(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, pixel_values=pixel_values) elif isinstance(model, transformers.CLIPTextModel): input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64) attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64) kwargs = dict(input_ids=input_ids, attention_mask=attention_mask) elif isinstance(model, transformers.CLIPVisionModel): pixel_values = torch.zeros((BATCH_SIZE, IN_CHANNELS, HEIGHT, WIDTH), dtype=torch.float32) kwargs = dict(pixel_values=pixel_values) return kwargs for model_cls, config in zip(MODEL_LIST, CONFIG_LIST): model = model_cls(config=config()) trace_model_and_compare_output(model, data_gen) @pytest.mark.skipif(not HAS_DIFFUSERS, reason="diffusers has not been installed") @pytest.mark.skip(reason='cannot pass the test yet') def test_unet(): MODEL_LIST = [ diffusers.UNet2DModel, diffusers.UNet2DConditionModel, ] for model_cls in MODEL_LIST: model = model_cls() sample = torch.zeros(LATENTS_SHAPE) gm = symbolic_trace(model) model.eval() gm.eval() with torch.no_grad(): fx_out = gm(sample, TIME_STEP) non_fx_out = model(sample, TIME_STEP) assert torch.allclose( fx_out['sample'], non_fx_out['sample']), f'{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}' if __name__ == "__main__": test_vae() test_clip() # skip because of failure # test_unet()
import torch from colossalai.fx import symbolic_trace try: from torchrec.models import dlrm from torchrec.modules.embedding_configs import EmbeddingBagConfig from torchrec.modules.embedding_modules import EmbeddingBagCollection from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor NOT_TORCHREC = False except ImportError: NOT_TORCHREC = True import pytest BATCH = 2 SHAPE = 10 @pytest.mark.skipif(NOT_TORCHREC, reason='torchrec is not installed') def test_torchrec_dlrm_models(): MODEL_LIST = [ dlrm.DLRM, dlrm.DenseArch, dlrm.InteractionArch, dlrm.InteractionV2Arch, dlrm.OverArch, dlrm.SparseArch, # dlrm.DLRMV2 ] # Data Preparation # EmbeddingBagCollection eb1_config = EmbeddingBagConfig(name="t1", embedding_dim=SHAPE, num_embeddings=SHAPE, feature_names=["f1"]) eb2_config = EmbeddingBagConfig(name="t2", embedding_dim=SHAPE, num_embeddings=SHAPE, feature_names=["f2"]) ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config]) keys = ["f1", "f2"] # KeyedTensor KT = KeyedTensor(keys=keys, length_per_key=[SHAPE, SHAPE], values=torch.rand((BATCH, 2 * SHAPE))) # KeyedJaggedTensor KJT = KeyedJaggedTensor.from_offsets_sync(keys=keys, values=torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]), offsets=torch.tensor([0, 2, 4, 6, 8])) # Dense Features dense_features = torch.rand((BATCH, SHAPE)) # Sparse Features sparse_features = torch.rand((BATCH, len(keys), SHAPE)) for model_cls in MODEL_LIST: # Initializing model if model_cls == dlrm.DLRM: model = model_cls(ebc, SHAPE, [SHAPE, SHAPE], [5, 1]) elif model_cls == dlrm.DenseArch: model = model_cls(SHAPE, [SHAPE, SHAPE]) elif model_cls == dlrm.InteractionArch: model = model_cls(len(keys)) elif model_cls == dlrm.InteractionV2Arch: I1 = dlrm.DenseArch(3 * SHAPE, [3 * SHAPE, 3 * SHAPE]) I2 = dlrm.DenseArch(3 * SHAPE, [3 * SHAPE, 3 * SHAPE]) model = model_cls(len(keys), I1, I2) elif model_cls == dlrm.OverArch: model = model_cls(SHAPE, [5, 1]) elif model_cls == dlrm.SparseArch: model = model_cls(ebc) elif model_cls == dlrm.DLRMV2: # Currently DLRMV2 cannot be traced model = model_cls(ebc, SHAPE, [SHAPE, SHAPE], [5, 1], [4 * SHAPE, 4 * SHAPE], [4 * SHAPE, 4 * SHAPE]) # Setup GraphModule if model_cls == dlrm.InteractionV2Arch: concrete_args = {"dense_features": dense_features, "sparse_features": sparse_features} gm = symbolic_trace(model, concrete_args=concrete_args) else: gm = symbolic_trace(model) model.eval() gm.eval() # Aligned Test with torch.no_grad(): if model_cls == dlrm.DLRM or model_cls == dlrm.DLRMV2: fx_out = gm(dense_features, KJT) non_fx_out = model(dense_features, KJT) elif model_cls == dlrm.DenseArch: fx_out = gm(dense_features) non_fx_out = model(dense_features) elif model_cls == dlrm.InteractionArch or model_cls == dlrm.InteractionV2Arch: fx_out = gm(dense_features, sparse_features) non_fx_out = model(dense_features, sparse_features) elif model_cls == dlrm.OverArch: fx_out = gm(dense_features) non_fx_out = model(dense_features) elif model_cls == dlrm.SparseArch: fx_out = gm(KJT) non_fx_out = model(KJT) if torch.is_tensor(fx_out): assert torch.allclose( fx_out, non_fx_out), f'{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}' else: assert torch.allclose( fx_out.values(), non_fx_out.values()), f'{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}' if __name__ == "__main__": test_torchrec_dlrm_models()
import pytest import torch from colossalai.fx import symbolic_trace try: from torchrec.models import deepfm from torchrec.modules.embedding_configs import EmbeddingBagConfig from torchrec.modules.embedding_modules import EmbeddingBagCollection from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor NOT_TORCHREC = False except ImportError: NOT_TORCHREC = True BATCH = 2 SHAPE = 10 @pytest.mark.skipif(NOT_TORCHREC, reason='torchrec is not installed') def test_torchrec_deepfm_models(): MODEL_LIST = [deepfm.DenseArch, deepfm.FMInteractionArch, deepfm.OverArch, deepfm.SimpleDeepFMNN, deepfm.SparseArch] # Data Preparation # EmbeddingBagCollection eb1_config = EmbeddingBagConfig(name="t1", embedding_dim=SHAPE, num_embeddings=SHAPE, feature_names=["f1"]) eb2_config = EmbeddingBagConfig(name="t2", embedding_dim=SHAPE, num_embeddings=SHAPE, feature_names=["f2"]) ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config]) keys = ["f1", "f2"] # KeyedTensor KT = KeyedTensor(keys=keys, length_per_key=[SHAPE, SHAPE], values=torch.rand((BATCH, 2 * SHAPE))) # KeyedJaggedTensor KJT = KeyedJaggedTensor.from_offsets_sync(keys=keys, values=torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]), offsets=torch.tensor([0, 2, 4, 6, 8])) # Dense Features features = torch.rand((BATCH, SHAPE)) for model_cls in MODEL_LIST: # Initializing model if model_cls == deepfm.DenseArch: model = model_cls(SHAPE, SHAPE, SHAPE) elif model_cls == deepfm.FMInteractionArch: model = model_cls(SHAPE * 3, keys, SHAPE) elif model_cls == deepfm.OverArch: model = model_cls(SHAPE) elif model_cls == deepfm.SimpleDeepFMNN: model = model_cls(SHAPE, ebc, SHAPE, SHAPE) elif model_cls == deepfm.SparseArch: model = model_cls(ebc) # Setup GraphModule gm = symbolic_trace(model) model.eval() gm.eval() # Aligned Test with torch.no_grad(): if model_cls == deepfm.DenseArch or model_cls == deepfm.OverArch: fx_out = gm(features) non_fx_out = model(features) elif model_cls == deepfm.FMInteractionArch: fx_out = gm(features, KT) non_fx_out = model(features, KT) elif model_cls == deepfm.SimpleDeepFMNN: fx_out = gm(features, KJT) non_fx_out = model(features, KJT) elif model_cls == deepfm.SparseArch: fx_out = gm(KJT) non_fx_out = model(KJT) if torch.is_tensor(fx_out): assert torch.allclose( fx_out, non_fx_out), f'{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}' else: assert torch.allclose( fx_out.values(), non_fx_out.values()), f'{model.__class__.__name__} has inconsistent outputs, {fx_out} vs {non_fx_out}' if __name__ == "__main__": test_torchrec_deepfm_models()
import pytest import torch import torch.multiprocessing as mp import torch.nn.functional as F from torch.fx import GraphModule from torch.utils.checkpoint import checkpoint import colossalai from colossalai.core import global_context as gpc from colossalai.fx import ColoTracer from colossalai.fx.graph_module import ColoGraphModule from colossalai.utils import free_port try: from colossalai.fx.codegen import ActivationCheckpointCodeGen with_codegen = True except: # fall back to older pytorch version from colossalai.fx.codegen import python_code_with_activation_checkpoint with_codegen = False class MLP(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(4, 4) self.linear2 = torch.nn.Linear(4, 4) def forward(self, x): return self.linear1(x), self.linear2(x) class relu(torch.nn.Module): def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU(inplace=True) def forward(self, x): return self.relu(x) class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.mlp1 = MLP() self.relu = relu() self.linear2 = torch.nn.Linear(4, 4) def ckpt2(self, x): return F.relu(x, inplace=True) def ckpt3(self, x, y): return self.linear2(x) + self.linear2(y) def forward(self, x, y): y1, y2 = checkpoint(self.mlp1, x) y3 = checkpoint(self.relu, x) y4 = checkpoint(self.ckpt2, y) y5 = checkpoint(self.ckpt3, y, y4) y6 = self.linear2(y4) return y1 + y2 + y3 + y4 + y5 + y6 def _run_act_ckpt_codegen(rank): # launch colossalai to make sure we could execute colossalai.utils.checkpoint currectly colossalai.launch(config={}, rank=rank, world_size=1, host='localhost', port=free_port(), backend='nccl') # build model and run forward model = MyModule() data1 = torch.rand(4, 4) data2 = torch.rand(4, 4) # copy model to cuda model = model.to(device="cuda") data1 = data1.to(device="cuda") data2 = data2.to(device="cuda") non_fx_out = model(data1, data2) # trace the module and replace codegen tracer = ColoTracer(trace_act_ckpt=True) graph = tracer.trace(model) codegen = ActivationCheckpointCodeGen() graph.set_codegen(codegen) # check ops are annotated with ckpt # also annotate the selected node for offloading ckpt_nodes = ['mlp1_linear1', 'mlp1_linear2', 'relu_relu', 'relu'] offload_starts = ['mlp1_linear1'] for node in graph.nodes: if node.name in ckpt_nodes: assert 'activation_checkpoint' in node.meta # annotate the selected node for offload if node.name in offload_starts: node.meta['activation_offload'] = True gm = ColoGraphModule(model, graph) gm.recompile() # assert checkpoint function will be generated and # the offload option is correct code = graph.python_code('self').src assert 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, True, x, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_1, False, x, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_2, False, y, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_3, False, y, relu, use_reentrant=True)' in code # recompile and verify the outputs are consistent fx_out = gm(data1, data2) assert torch.equal(non_fx_out, fx_out) gpc.destroy() @pytest.mark.skipif(not with_codegen, reason='torch version is lower than 1.12.0') def test_act_ckpt_codegen(): mp.spawn(_run_act_ckpt_codegen, nprocs=1) def _run_act_ckpt_python_code_torch11(rank): # launch colossalai to make sure we could execute colossalai.utils.checkpoint currectly 
colossalai.launch(config={}, rank=rank, world_size=1, host='localhost', port=free_port(), backend='nccl') # build model and run forward model = MyModule() data1 = torch.rand(4, 4) data2 = torch.rand(4, 4) # copy model to cuda data1 = data1.to(device="cuda") data2 = data2.to(device="cuda") non_fx_out = model(data1, data2) # trace the module and replace codegen tracer = ColoTracer(trace_act_ckpt=True) graph = tracer.trace(model) # replace a bound method of an object graph._python_code = python_code_with_activation_checkpoint.__get__(graph) # check ops are annotated with ckpt ckpt_nodes = ['mlp1_linear1', 'mlp1_linear2', 'relu_relu', 'relu'] offload_starts = ['mlp1_linear1'] for node in graph.nodes: if node.name in ckpt_nodes: assert 'activation_checkpoint' in node.meta # annotate the selected node for offload if node.name in offload_starts: node.meta['activation_offload'] = True gm = ColoGraphModule(model, graph) gm.recompile() # assert checkpoint function will be generated and # the offload option is correct code = graph.python_code('self').src assert 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, True, x, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_1, False, x, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_2, False, y, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_3, False, y, relu, use_reentrant=True)' in code # recompile and verify the outputs are consistent fx_out = gm(data1, data2) assert torch.equal(non_fx_out, fx_out) gpc.destroy() @pytest.mark.skipif(with_codegen, reason='torch version is equal to or higher than 1.12.0') @pytest.mark.skip(reason="currently torch11 ColoGraphModule is not done") def test_act_ckpt_python_code_torch11(): mp.spawn(_run_act_ckpt_python_code_torch11, nprocs=1) if __name__ == '__main__': _run_act_ckpt_codegen(rank=0)
import copy import pytest import torch import torch.multiprocessing as mp import torch.nn.functional as F from torch.fx import GraphModule import colossalai from colossalai.core import global_context as gpc from colossalai.fx import ColoTracer from colossalai.fx.graph_module import ColoGraphModule from colossalai.utils import free_port try: from colossalai.fx.codegen import ActivationCheckpointCodeGen with_codegen = True except: # fall back to older pytorch version from colossalai.fx.codegen import python_code_with_activation_checkpoint with_codegen = False class MyNet(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear0 = torch.nn.Linear(4, 4) self.linear1 = torch.nn.Linear(4, 4) self.linear2 = torch.nn.Linear(4, 4) self.linear3 = torch.nn.Linear(4, 4) self.linear4 = torch.nn.Linear(4, 4) self.linear5 = torch.nn.Linear(4, 4) self.linear6 = torch.nn.Linear(4, 4) def forward(self, x): x = self.linear0(x) x = self.linear1(x) x = self.linear2(x) x = self.linear3(x) x = self.linear4(x) x = self.linear5(x) x = self.linear6(x) return x def _is_all_gradient_close(m: torch.nn.Module, gm: GraphModule) -> bool: for m_p, gm_p in zip(m.parameters(), gm.parameters()): if not torch.allclose(m_p.grad, gm_p.grad): return False return True def _test_fwd_and_bwd(model: torch.nn.Module, gm: ColoGraphModule, data: torch.Tensor): # test forward non_fx_out = model(data) fx_out = gm(data) assert torch.equal(non_fx_out, fx_out), "fx_out doesn't comply with original output" # test barckward loss0 = non_fx_out.sum() loss0.backward() loss1 = fx_out.sum() loss1.backward() assert _is_all_gradient_close(model, gm), "gm doesn't have the same gradient as original one" def _run_offload_codegen(rank): # launch colossalai to make sure we could execute colossalai.utils.checkpoint currectly colossalai.launch(config={}, rank=rank, world_size=1, host='localhost', port=free_port(), backend='nccl') # build model and input model = MyNet().cuda() data = torch.rand(4, 4).cuda() # trace the module and replace codegen tracer = ColoTracer(trace_act_ckpt=True) graph = tracer.trace(model) codegen = ActivationCheckpointCodeGen() graph.set_codegen(codegen) # annotate the activation offload part # also annotate the activation_checkpoint so we could test both types # of input offload for node in graph.nodes: if node.name == "linear0": node.meta['activation_offload'] = [0, True, False] if node.name == "linear1": node.meta['activation_offload'] = [0, True, False] if node.name == "linear2": node.meta['activation_offload'] = [1, True, True] if node.name == "linear4": node.meta['activation_offload'] = [2, False, True] if node.name == "linear5": node.meta['activation_checkpoint'] = [0] node.meta['activation_offload'] = True gm = ColoGraphModule(copy.deepcopy(model), graph) gm.recompile() # assert we have all the components code = graph.python_code("self").src assert "def pack_hook_input(self, x):" in code and \ "def unpack_hook(self, packed):" in code and \ "def pack_hook_no_input(self, x):" in code and \ "setattr(x, 'offload', True)" in code and \ "setattr(linear3, 'offload', False)" in code and \ "with torch.autograd.graph.saved_tensors_hooks(self.pack_hook_input, self.unpack_hook):" in code and \ "with torch.autograd.graph.save_on_cpu(pin_memory=True):" in code and \ "with torch.autograd.graph.saved_tensors_hooks(self.pack_hook_no_input, self.unpack_hook):" in code and \ "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, True, linear4, use_reentrant=False)" in code _test_fwd_and_bwd(model, gm, data) 
gpc.destroy() @pytest.mark.skipif(not with_codegen, reason='torch version is lower than 1.12.0') def test_act_ckpt_codegen(): mp.spawn(_run_offload_codegen, nprocs=1) def _run_offload_codegen_torch11(rank): # launch colossalai to make sure we could execute colossalai.utils.checkpoint currectly colossalai.launch(config={}, rank=rank, world_size=1, host='localhost', port=free_port(), backend='nccl') # build model and input model = MyNet().cuda() data = torch.rand(4, 4).cuda() # trace the module and replace codegen tracer = ColoTracer(trace_act_ckpt=True) graph = tracer.trace(model) # replace a bound method of an object graph._python_code = python_code_with_activation_checkpoint.__get__(graph) # annotate the activation offload part # also annotate the activation_checkpoint so we could test both types # of input offload for node in graph.nodes: if node.name == "linear0": node.meta['activation_offload'] = [0, True, False] if node.name == "linear1": node.meta['activation_offload'] = [0, True, False] if node.name == "linear2": node.meta['activation_offload'] = [1, True, True] if node.name == "linear4": node.meta['activation_offload'] = [2, False, True] if node.name == "linear5": node.meta['activation_checkpoint'] = [0] node.meta['activation_offload'] = True gm = ColoGraphModule(copy.deepcopy(model), graph) gm.recompile() # assert we have all the components code = graph.python_code("self").src assert "def pack_hook_input(self, x):" in code and \ "def unpack_hook(self, packed):" in code and \ "def pack_hook_no_input(self, x):" in code and \ "setattr(x, 'offload', True)" in code and \ "setattr(linear3, 'offload', False)" in code and \ "with torch.autograd.graph.saved_tensors_hooks(self.pack_hook_input, self.unpack_hook):" in code and \ "with torch.autograd.graph.save_on_cpu(pin_memory=True):" in code and \ "with torch.autograd.graph.saved_tensors_hooks(self.pack_hook_no_input, self.unpack_hook):" in code and \ "colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, True, linear4, use_reentrant=False)" in code _test_fwd_and_bwd(model, gm, data) gpc.destroy() @pytest.mark.skip(reason="currently torch11 ColoGraphModule is not implemented") def test_act_ckpt_python_code_torch11(): mp.spawn(_run_offload_codegen_torch11, nprocs=1) if __name__ == "__main__": _run_offload_codegen(0)
import pytest import torch import torch.multiprocessing as mp import torch.nn.functional as F from torch.fx import GraphModule from torch.utils.checkpoint import checkpoint import colossalai from colossalai.core import global_context as gpc from colossalai.fx import ColoTracer from colossalai.fx.graph_module import ColoGraphModule from colossalai.utils import free_port try: from colossalai.fx.codegen import ActivationCheckpointCodeGen with_codegen = True except: # fall back to older pytorch version from colossalai.fx.codegen import python_code_with_activation_checkpoint with_codegen = False class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(4, 4) self.linear2 = torch.nn.Linear(4, 4) self.linear3 = torch.nn.Linear(4, 4) self.linear4 = torch.nn.Linear(4, 4) self.linear5 = torch.nn.Linear(4, 4) self.linear6 = torch.nn.Linear(4, 4) def forward(self, x): return self.linear6(self.linear5(self.linear4(self.linear3(self.linear2(self.linear1(x)))))) def _run_act_ckpt_codegen(rank): # launch colossalai to make sure we could execute colossalai.utils.checkpoint currectly colossalai.launch(config={}, rank=rank, world_size=1, host='localhost', port=free_port(), backend='nccl') # build model and run forward model = MyModule() data1 = torch.rand(4, 4) # copy model to cuda model = model.to(device="cuda") data1 = data1.to(device="cuda") non_fx_out = model(data1) # trace the module and replace codegen tracer = ColoTracer(trace_act_ckpt=True) graph = tracer.trace(model) codegen = ActivationCheckpointCodeGen() graph.set_codegen(codegen) # annotate nested checkpoint for node in graph.nodes: if node.name == "linear1": node.meta['activation_checkpoint'] = [0, 0, 0] continue if node.name == "linear2": node.meta['activation_checkpoint'] = [0, 0, None] if node.name == "linear3": node.meta['activation_checkpoint'] = [0, 0, 1] if node.name == "linear4": node.meta['activation_checkpoint'] = [0, 1, None] if node.name == "linear5": node.meta['activation_checkpoint'] = 1 gm = ColoGraphModule(model, graph) gm.recompile() # assert checkpoint function will be generated and code = graph.python_code('self').src assert 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_0, False, x, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_1, False, linear3, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_0_0, False, x, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_0_1, False, linear2, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, False, x, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_1, False, linear4, use_reentrant=False)' in code # recompile and verify the outputs are consistent fx_out = gm(data1) assert torch.equal(non_fx_out, fx_out) gpc.destroy() @pytest.mark.skipif(not with_codegen, reason='torch version is lower than 1.12.0') def test_act_ckpt_codegen(): mp.spawn(_run_act_ckpt_codegen, nprocs=1) def _run_act_ckpt_python_code_torch11(rank): # launch colossalai to make sure we could execute colossalai.utils.checkpoint currectly colossalai.launch(config={}, rank=rank, world_size=1, host='localhost', port=free_port(), backend='nccl') # build model and run forward model = MyModule() data1 = torch.rand(4, 4) # copy model to cuda model = model.to(device="cuda") data1 = 
data1.to(device="cuda") non_fx_out = model(data1) # trace the module and replace codegen tracer = ColoTracer(trace_act_ckpt=True) graph = tracer.trace(model) codegen = ActivationCheckpointCodeGen() graph.set_codegen(codegen) # annotate nested checkpoint for node in graph.nodes: if node.name == "linear1": node.meta['activation_checkpoint'] = [0, 0, 0] continue if node.name == "linear2": node.meta['activation_checkpoint'] = [0, 0, None] if node.name == "linear3": node.meta['activation_checkpoint'] = [0, 0, 1] if node.name == "linear4": node.meta['activation_checkpoint'] = [0, 1, None] if node.name == "linear5": node.meta['activation_checkpoint'] = 1 gm = ColoGraphModule(model, graph) gm.recompile() # assert checkpoint function will be generated and code = graph.python_code('self').src assert 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_0, False, x, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_1, False, linear3, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_0_0, False, x, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0_0_1, False, linear2, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_0, False, x, use_reentrant=False)' in code and \ 'colossalai.utils.activation_checkpoint.checkpoint(self.checkpoint_1, False, linear4, use_reentrant=False)' in code # recompile and verify the outputs are consistent fx_out = gm(data1) assert torch.equal(non_fx_out, fx_out) gpc.destroy() @pytest.mark.skipif(with_codegen, reason='torch version is equal to or higher than 1.12.0') @pytest.mark.skip(reason="currently torch11 ColoGraphModule is not done") def test_act_ckpt_python_code_torch11(): mp.spawn(_run_act_ckpt_python_code_torch11, nprocs=1) if __name__ == '__main__': _run_act_ckpt_codegen(rank=0)
from typing import Optional, Tuple, Union import torch import torch.fx import torchvision.models as tm from gpt_utils import gpt2_medium, gpt2_xl from torch.fx import symbolic_trace from colossalai.fx.passes.meta_info_prop import MetaInfoProp from colossalai.fx.profiler import calculate_fwd_out, calculate_fwd_tmp, is_compatible_with_meta, parameter_size from colossalai.fx.tracer.tracer import ColoTracer from colossalai.testing.pytest_wrapper import run_on_environment_flag if is_compatible_with_meta(): from colossalai.fx.profiler import MetaTensor TM_BATCH_SIZE = 64 GPT_BATCH_SIZE = 8 NUM_STEPS = 5 def extract_forward_mem(gm: torch.fx.GraphModule): node_size = 0 param_size = 0 for node in gm.graph.nodes: node_size += calculate_fwd_tmp(node) node_size += calculate_fwd_out(node) param_size = parameter_size(gm) return (node_size + param_size) / 1024**2, param_size / 1024**2 def extract_forward_flops(gm: torch.fx.GraphModule): fwd_flop = 0 bwd_flop = 0 for node in gm.graph.nodes: fwd_flop += node.meta.get('fwd_flop', 0) bwd_flop += node.meta.get('bwd_flop', 0) return fwd_flop, bwd_flop def gen_tm_data(batch_size: int, shape: Tuple[int, int, int], device='cuda'): data = torch.rand(batch_size, *shape, device=device) label = torch.empty(batch_size, dtype=torch.long, device=device).random_(1000) return data, label def gen_gpt_data(batch_size, seq_len, vocab_size, device='cpu'): input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=device) attention_mask = torch.ones_like(input_ids, device=device) return input_ids, attention_mask def run_tm_forward(gm: torch.fx.GraphModule): torch.cuda.reset_peak_memory_stats() forward_mem = -torch.cuda.memory_allocated(device="cuda:0") / 1024**2 param_mem = -torch.cuda.memory_allocated(device="cuda:0") / 1024**2 gm.cuda() param_mem += torch.cuda.memory_allocated(device="cuda:0") / 1024**2 gm.train() for n in range(NUM_STEPS): torch.cuda.reset_peak_memory_stats() data, _ = gen_tm_data(TM_BATCH_SIZE, (3, 224, 224)) # If we need to dive deep into the memory usage by # inspecting `saved_tensor_hooks` # ===================================================== # fwd_mem = 0 # cache = set() # def pack(x): # if isinstance(x, torch.Tensor): # nonlocal fwd_mem, cache # if x.data_ptr() not in cache: # fwd_mem += activation_size(x) # cache.add(x.data_ptr()) # return x # def unpack(x): # return x # # with torch.autograd.graph.saved_tensors_hooks(pack, unpack): # output = gm(data) # print(f'Memory estimation by saved_tensor_hooks: {fwd_mem / 1024**2}') # ===================================================== output = gm(data) forward_mem += torch.cuda.memory_allocated(device="cuda:0") / 1024**2 / NUM_STEPS del output return forward_mem, param_mem def run_gpt_forward(gm: torch.fx.GraphModule): torch.cuda.reset_peak_memory_stats() forward_mem = -torch.cuda.memory_allocated(device="cuda:0") / 1024**2 param_mem = -torch.cuda.memory_allocated(device="cuda:0") / 1024**2 gm.cuda() param_mem += torch.cuda.memory_allocated(device="cuda:0") / 1024**2 for n in range(NUM_STEPS): torch.cuda.reset_peak_memory_stats() data, mask = gen_gpt_data(GPT_BATCH_SIZE, 1024, 50257, device='cuda:0') # If we need to dive deep into the memory usage by # inspecting `saved_tensor_hooks` # ===================================================== # fwd_mem = 0 # cache = set() # def pack(x): # if isinstance(x, torch.Tensor): # nonlocal fwd_mem, cache # if x.data_ptr() not in cache: # fwd_mem += activation_size(x) # cache.add(x.data_ptr()) # return x # def unpack(x): # return x # # with 
torch.autograd.graph.saved_tensors_hooks(pack, unpack): # output = gm(data, mask) # print(f'Memory estimation by saved_tensor_hooks: {fwd_mem / 1024**2}') # ===================================================== output = gm(data, mask) forward_mem += torch.cuda.memory_allocated(device="cuda:0") / 1024**2 / NUM_STEPS del output return forward_mem, param_mem @run_on_environment_flag(name='FX_PROFILER') def test_meta_info_prop(): for m in [ tm.alexnet, tm.resnet18, tm.resnet34, tm.resnet50, tm.resnet101, tm.resnet152, tm.densenet121, tm.densenet161, tm.densenet169, tm.densenet201, tm.convnext_tiny, tm.convnext_small, tm.convnext_base, tm.convnext_large, tm.wide_resnet50_2, tm.wide_resnet101_2, tm.regnet_x_16gf, tm.mnasnet0_5, tm.efficientnet_b0, tm.shufflenet_v2_x0_5, tm.shufflenet_v2_x1_0, tm.shufflenet_v2_x1_5, tm.shufflenet_v2_x2_0, tm.mobilenet_v2, tm.mobilenet_v3_small, tm.mobilenet_v3_large, tm.resnext50_32x4d, tm.resnext101_32x8d, tm.resnext101_64x4d, tm.vit_b_16, tm.vit_b_32, tm.vit_h_14, tm.vit_l_16, tm.vit_l_32, tm.vgg11, tm.vgg11_bn, tm.vgg13, tm.vgg13_bn, tm.vgg16, tm.vgg16_bn, tm.vgg19, tm.vgg19_bn ]: model = m().cuda() model.train() data = MetaTensor(torch.rand(int(TM_BATCH_SIZE), 3, 224, 224, device='meta'), fake_device='cuda:0') gm = symbolic_trace(model) interp = MetaInfoProp(gm) interp.propagate(data) gm.cpu() meta_forward_mem, meta_param_mem = extract_forward_mem(gm) fwd_flop, bwd_flop = extract_forward_flops(gm) concrete_forward_mem, concrete_param_mem = run_tm_forward(gm) print( f'|{m.__name__}|{meta_forward_mem:.3f} MB|{meta_param_mem:.3f} MB|{concrete_forward_mem:.3f} MB|{concrete_param_mem:.3f} MB|fwd_flop={fwd_flop / 1e9:.3f}GFLOPs|bwd_flop={bwd_flop / 1e9:.3f}GFLOPs|' ) del model, gm @run_on_environment_flag(name='FX_PROFILER') def test_gpt_meta_info_prop(): for m in [gpt2_medium]: model = m().cuda() model.train() data, mask = gen_gpt_data(GPT_BATCH_SIZE, 1024, 50257, device='meta') graph = ColoTracer().trace(model, meta_args={'input_ids': data, 'attention_mask': mask}) gm = torch.fx.GraphModule(model, graph) interp = MetaInfoProp(gm) interp.propagate(MetaTensor(data, fake_device='cuda:0'), MetaTensor(mask, fake_device='cuda:0')) model.cpu() fwd_flop, bwd_flop = extract_forward_flops(gm) concrete_forward_mem, concrete_param_mem = run_gpt_forward(gm) meta_forward_mem, meta_param_mem = extract_forward_mem(gm) print( f'|{m.__name__}|{meta_forward_mem:.3f} MB|{meta_param_mem:.3f} MB|{concrete_forward_mem:.3f} MB|{concrete_param_mem:.3f} MB|fwd_flop={fwd_flop / 1e9:.3f}GFLOPs|bwd_flop={bwd_flop / 1e9:.3f}GFLOPs|' ) del model, gm if __name__ == '__main__': test_meta_info_prop() test_gpt_meta_info_prop()
import torch import torch.nn as nn from transformers import GPT2Config, GPT2LMHeadModel class GPTLMModel(nn.Module): def __init__(self, hidden_size=768, num_layers=12, num_attention_heads=12, max_seq_len=1024, vocab_size=50257, checkpoint=False): super().__init__() self.checkpoint = checkpoint self.model = GPT2LMHeadModel( GPT2Config(n_embd=hidden_size, n_layer=num_layers, n_head=num_attention_heads, n_positions=max_seq_len, n_ctx=max_seq_len, vocab_size=vocab_size)) if checkpoint: self.model.gradient_checkpointing_enable() def forward(self, input_ids, attention_mask): # Only return lm_logits return self.model(input_ids=input_ids, attention_mask=attention_mask, use_cache=not self.checkpoint)[0] class GPTLMLoss(nn.Module): def __init__(self): super().__init__() self.loss_fn = nn.CrossEntropyLoss() def forward(self, logits, labels): shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens return self.loss_fn(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) def gpt2_medium(checkpoint=False): return GPTLMModel(hidden_size=1024, num_layers=24, num_attention_heads=16, checkpoint=checkpoint) def gpt2_xl(checkpoint=False): return GPTLMModel(hidden_size=1600, num_layers=48, num_attention_heads=32, checkpoint=checkpoint)
import pytest import timm.models as tm import torch from timm_utils import split_model_and_compare_output @pytest.mark.skip('balance split v2 is not ready') def test_timm_models_without_control_flow(): MODEL_LIST = [ tm.resnest.resnest50d, tm.beit.beit_base_patch16_224, tm.cait.cait_s24_224, tm.convmixer.convmixer_768_32, tm.efficientnet.efficientnetv2_m, tm.resmlp_12_224, tm.vision_transformer.vit_base_patch16_224, tm.deit_base_distilled_patch16_224, ] data = torch.rand(2, 3, 224, 224) for model_cls in MODEL_LIST: model = model_cls() split_model_and_compare_output(model, data) @pytest.mark.skip('balance split v2 is not ready') def test_timm_models_with_control_flow(): torch.backends.cudnn.deterministic = True MODEL_LIST_WITH_CONTROL_FLOW = [ tm.convnext.convnext_base, tm.vgg.vgg11, tm.dpn.dpn68, tm.densenet.densenet121, tm.rexnet.rexnet_100, tm.swin_transformer.swin_base_patch4_window7_224 ] data = torch.rand(2, 3, 224, 224) meta_args = {'x': data.to('meta')} for model_cls in MODEL_LIST_WITH_CONTROL_FLOW: model = model_cls() split_model_and_compare_output(model, data, meta_args) if __name__ == '__main__': test_timm_models_without_control_flow() test_timm_models_with_control_flow()
import torch from torch.fx import symbolic_trace from torch.fx import GraphModule from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, balanced_split_pass from colossalai.fx import ColoTracer import inspect import random import numpy as np MANUAL_SEED = 0 random.seed(MANUAL_SEED) np.random.seed(MANUAL_SEED) torch.manual_seed(MANUAL_SEED) torch.backends.cudnn.deterministic = True def split_model_and_compare_output(model, data, meta_args=None): model.eval() # get origin output and rng state cpu_rng_state = torch.get_rng_state() output = model(data) # tracing model tracer = ColoTracer() try: graph = tracer.trace(root=model, meta_args=meta_args) except Exception as e: raise RuntimeError(f"Failed to trace {model.__class__.__name__}, error: {e}") gm = GraphModule(model, graph, model.__class__.__name__) gm.recompile() # apply transform passes annotated_model = balanced_split_pass(gm, 2) split_model, split_submodules = split_with_split_nodes_pass(annotated_model) # get split model model_part0 = list(split_model.children())[0] model_part1 = list(split_model.children())[1] # set rng state and compute output of split model torch.set_rng_state(cpu_rng_state) output_part0 = model_part0(data) sig = inspect.signature(model_part1.forward) if isinstance(output_part0, torch.Tensor): output_part1 = model_part1(output_part0) else: if len(output_part0) > len(sig.parameters): output_part0 = output_part0[:len(sig.parameters)] output_part1 = model_part1(*output_part0) assert output.equal(output_part1)
import inspect import random import numpy as np import pytest import torch import torchvision import torchvision.models as tm from packaging import version from torch.fx import GraphModule from colossalai.fx import ColoTracer from colossalai.fx.passes.adding_split_node_pass import balanced_split_pass, split_with_split_nodes_pass MANUAL_SEED = 0 random.seed(MANUAL_SEED) np.random.seed(MANUAL_SEED) torch.manual_seed(MANUAL_SEED) torch.backends.cudnn.deterministic = True @pytest.mark.skip('balance split v2 is not ready') def test_torchvision_models(): MODEL_LIST = [ tm.vgg11, tm.resnet18, tm.densenet121, tm.mobilenet_v3_small, tm.resnext50_32x4d, tm.wide_resnet50_2, tm.regnet_x_16gf, tm.efficientnet_b0, tm.mnasnet0_5 ] if version.parse(torchvision.__version__) >= version.parse('0.12.0'): MODEL_LIST.extend([tm.vit_b_16, tm.convnext_small]) tracer = ColoTracer() data = torch.rand(2, 3, 224, 224) for model_cls in MODEL_LIST: model = model_cls() model.eval() cpu_rng_state = torch.get_rng_state() output = model(data) graph = tracer.trace(root=model) gm = GraphModule(model, graph, model.__class__.__name__) gm.recompile() # apply transform passes annotated_model = balanced_split_pass(gm, 2) split_model, split_submodules = split_with_split_nodes_pass(annotated_model) # get split model model_part0 = list(split_model.children())[0] model_part1 = list(split_model.children())[1] # set rng state and compute output of split model torch.set_rng_state(cpu_rng_state) output_part0 = model_part0(data) sig = inspect.signature(model_part1.forward) if isinstance(output_part0, torch.Tensor): output_part1 = model_part1(output_part0) else: if len(output_part0) > len(sig.parameters): output_part0 = output_part0[:len(sig.parameters)] output_part1 = model_part1(*output_part0) assert output.equal(output_part1) if __name__ == '__main__': test_torchvision_models()
import torch from torch.fx import GraphModule from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, balanced_split_pass from colossalai.fx import ColoTracer from colossalai.pipeline.middleware import Partition, PartitionInputVal, PartitionOutputVal, Topo from colossalai.pipeline.middleware.adaptor import get_fx_topology import random import numpy as np MANUAL_SEED = 0 random.seed(MANUAL_SEED) np.random.seed(MANUAL_SEED) torch.manual_seed(MANUAL_SEED) class MLP(torch.nn.Module): def __init__(self, config={}): super().__init__() dim = config['dim'] layers = config['layers'] self.layers = torch.nn.ModuleList() for _ in range(layers): self.layers.append(torch.nn.Linear(dim, dim, bias=False)) def forward(self, x): for layer in self.layers: x = layer(x) return x def split_model_and_get_DAG(model, data_gen): model.eval() # generate input sample kwargs = data_gen() # tracing model tracer = ColoTracer() try: meta_args = {k: v.to('meta') for k, v in kwargs.items()} graph = tracer.trace(root=model, meta_args=meta_args) except Exception as e: raise RuntimeError(f"Failed to trace {model.__class__.__name__}, error: {e}") gm = GraphModule(model, graph, model.__class__.__name__) gm.recompile() # apply transform passes annotated_model = balanced_split_pass(gm, 2) top_module, split_submodules = split_with_split_nodes_pass(annotated_model) topo = get_fx_topology(top_module) for submodule in split_submodules: if isinstance(submodule, torch.fx.GraphModule): setattr(submodule, '_topo', topo) return top_module, split_submodules[0]._topo def check_input(top_module, input_partition: Partition): partition_output = input_partition.get_output_vals() arg_pos = 0 for node in top_module.graph.nodes: if node.op == 'placeholder': cur_checkee = partition_output[arg_pos] to_partition_and_offset = cur_checkee.get() assert len(to_partition_and_offset) == len(node.users.keys()) arg_pos += 1 assert arg_pos == len(partition_output) def check_submod(top_module, part_id, mid_partition: Partition): partition_input = mid_partition.get_input_vals() partition_output = mid_partition.get_output_vals() cnt = 1 cur_node = None for node in top_module.graph.nodes: if node.name.startswith('submod'): cnt += 1 if cnt == part_id: cur_node = node break assert len(partition_input) == len(cur_node.args) assert len(partition_output) == len(cur_node.users) def check_topo(top_module, topo: Topo): input_partition = topo.get_input_partition() mid_partitions = topo.get_mid_partitions() check_input(top_module, input_partition) for part_id, submod in mid_partitions.items(): check_submod(top_module, part_id, submod)
import pytest import torch import transformers from topo_utils import split_model_and_get_DAG, check_topo, MLP BATCH_SIZE = 1 SEQ_LENGHT = 16 def test_opt(): MODEL_LIST = [ MLP, transformers.OPTModel, ] CONFIGS = [ {'dim': 10, 'layers': 12}, transformers.OPTConfig(vocab_size=100, hidden_size=128, num_hidden_layers=4, num_attention_heads=4), ] def data_gen_MLP(): x = torch.zeros((16, 10)) kwargs = dict(x=x) return kwargs def data_gen_OPT(): input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) kwargs = dict(input_ids=input_ids, attention_mask=attention_mask) return kwargs DATAGEN = [ data_gen_MLP, data_gen_OPT, ] for i, model_cls in enumerate(MODEL_LIST): model = model_cls(config=CONFIGS[i]) top_mod, topo = split_model_and_get_DAG(model, DATAGEN[i]) # print(f'{top_mod=}\n----\n{topo=}') check_topo(top_mod, topo) if __name__ == '__main__': test_opt()
import pytest import torch import transformers from hf_utils import split_model_and_compare_output BATCH_SIZE = 1 SEQ_LENGHT = 16 @pytest.mark.skip('balance split v2 is not ready') def test_t5(): MODEL_LIST = [ transformers.T5Model, transformers.T5ForConditionalGeneration, transformers.T5EncoderModel, ] config = transformers.T5Config(vocab_size=100, d_model=128, num_layers=2) def data_gen(): input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) decoder_input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) kwargs = dict(input_ids=input_ids, decoder_input_ids=decoder_input_ids) return kwargs def data_gen_for_encoder_only(): input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) kwargs = dict(input_ids=input_ids) return kwargs for model_cls in MODEL_LIST: model = model_cls(config=config) if isinstance(model, transformers.T5EncoderModel): data_gen_func = data_gen_for_encoder_only else: data_gen_func = data_gen split_model_and_compare_output(model, data_gen_func) if __name__ == '__main__': test_t5()
import pytest
import torch
import transformers

from hf_utils import split_model_and_compare_output

BATCH_SIZE = 1
SEQ_LENGTH = 16


@pytest.mark.skip('balance split v2 is not ready')
def test_opt():
    MODEL_LIST = [
        transformers.OPTModel,
        transformers.OPTForCausalLM,
    ]

    config = transformers.OPTConfig(vocab_size=100, hidden_size=128, num_hidden_layers=4, num_attention_heads=4)

    def data_gen():
        input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGTH), dtype=torch.int64)
        kwargs = dict(input_ids=input_ids, attention_mask=attention_mask)
        return kwargs

    for model_cls in MODEL_LIST:
        model = model_cls(config=config)
        split_model_and_compare_output(model, data_gen)


if __name__ == '__main__':
    test_opt()
import torch from torch.fx import symbolic_trace from torch.fx import GraphModule from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, balanced_split_pass from colossalai.fx import ColoTracer import inspect import random import numpy as np MANUAL_SEED = 0 random.seed(MANUAL_SEED) np.random.seed(MANUAL_SEED) torch.manual_seed(MANUAL_SEED) def split_model_and_compare_output(model, data_gen): model.eval() # generate input sample kwargs = data_gen() # get origin output and rng state cpu_rng_state = torch.get_rng_state() output = model(**kwargs) # tracing model tracer = ColoTracer() try: meta_args = {k: v.to('meta') for k, v in kwargs.items()} graph = tracer.trace(root=model, meta_args=meta_args) except Exception as e: raise RuntimeError(f"Failed to trace {model.__class__.__name__}, error: {e}") gm = GraphModule(model, graph, model.__class__.__name__) gm.recompile() # apply transform passes annotated_model = balanced_split_pass(gm, 2) split_model, split_submodules = split_with_split_nodes_pass(annotated_model) # get split model model_part0 = list(split_model.children())[0] model_part1 = list(split_model.children())[1] # set rng state and compute output of split model torch.set_rng_state(cpu_rng_state) output_part0 = model_part0(**kwargs) sig = inspect.signature(model_part1.forward) if isinstance(output_part0, torch.Tensor): output_part1 = model_part1(output_part0) else: if len(output_part0) > len(sig.parameters): output_part0 = output_part0[:len(sig.parameters)] output_part1 = model_part1(*output_part0) # get output tensor from HFOutput datastructure if 'logits' in output: output_to_compare = output['logits'] elif 'prediction_logits' in output: output_to_compare = output['prediction_logits'] else: output_to_compare = output['last_hidden_state'] # compare output if isinstance(output_part1, torch.Tensor): assert output_to_compare.equal(output_part1) elif isinstance(output_part1, (tuple, list)): assert output_to_compare.equal(output_part1[0]) else: assert False
import pytest import torch import transformers from hf_utils import split_model_and_compare_output BATCH_SIZE = 64 SEQ_LENGHT = 16 NUM_EPOCHS = 2 NUM_CHUNKS = 1 @pytest.mark.skip('balance split v2 is not ready') def test_gpt(): MODEL_LIST = [ transformers.GPT2Model, transformers.GPT2LMHeadModel, transformers.GPT2DoubleHeadsModel, transformers.GPT2ForTokenClassification, # transformers.GPT2ForSequenceClassification, # not supported yet ] config = transformers.GPT2Config(n_position=64, n_layer=4, n_head=8) def data_gen(): input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) token_type_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) kwargs = dict(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) return kwargs for model_cls in MODEL_LIST: model = model_cls(config=config) split_model_and_compare_output(model, data_gen) if __name__ == '__main__': test_gpt()
import pytest import torch import transformers from hf_utils import split_model_and_compare_output BATCH_SIZE = 2 SEQ_LENGHT = 16 @pytest.mark.skip('balance split v2 is not ready') def test_single_sentence_albert(): MODEL_LIST = [ transformers.AlbertModel, transformers.AlbertForPreTraining, transformers.AlbertForMaskedLM, transformers.AlbertForSequenceClassification, transformers.AlbertForTokenClassification, ] config = transformers.AlbertConfig(vocab_size=100, embedding_size=128, hidden_size=128, num_hidden_layers=2, num_attention_heads=4, intermediate_size=256) def data_gen(): input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) token_type_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) meta_args = dict(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) return meta_args for model_cls in MODEL_LIST: model = model_cls(config=config) split_model_and_compare_output(model, data_gen) if __name__ == '__main__': test_single_sentence_albert()
import pytest import torch import transformers from hf_utils import split_model_and_compare_output BATCH_SIZE = 2 SEQ_LENGHT = 16 @pytest.mark.skip('balance split v2 is not ready') def test_single_sentence_bert(): MODEL_LIST = [ transformers.BertModel, transformers.BertForPreTraining, transformers.BertLMHeadModel, transformers.BertForMaskedLM, transformers.BertForSequenceClassification, transformers.BertForTokenClassification, ] config = transformers.BertConfig(vocab_size=100, hidden_size=128, num_hidden_layers=4, num_attention_heads=4, intermediate_size=256) def data_gen(): input_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) token_type_ids = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) attention_mask = torch.zeros((BATCH_SIZE, SEQ_LENGHT), dtype=torch.int64) meta_args = dict(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) return meta_args for model_cls in MODEL_LIST: model = model_cls(config=config) split_model_and_compare_output(model, data_gen) if __name__ == '__main__': test_single_sentence_bert()
import pytest import timm.models as tmm import torch import torchvision.models as tm from colossalai.fx._compatibility import is_compatible_with_meta if is_compatible_with_meta(): from colossalai.fx import meta_trace tm_models = [ tm.vgg11, tm.resnet18, tm.densenet121, tm.mobilenet_v3_small, tm.resnext50_32x4d, tm.wide_resnet50_2, tm.regnet_x_16gf, tm.mnasnet0_5, tm.efficientnet_b0, ] tmm_models = [ tmm.resnest.resnest50d, tmm.beit.beit_base_patch16_224, tmm.cait.cait_s24_224, tmm.efficientnet.efficientnetv2_m, tmm.resmlp_12_224, tmm.vision_transformer.vit_base_patch16_224, tmm.deit_base_distilled_patch16_224, tmm.convnext.convnext_base, tmm.vgg.vgg11, tmm.dpn.dpn68, tmm.densenet.densenet121, tmm.rexnet.rexnet_100, tmm.swin_transformer.swin_base_patch4_window7_224 ] @pytest.mark.skipif(not is_compatible_with_meta(), reason='torch version is lower than 1.12.0') def test_torchvision_models_trace(): for m in tm_models: model = m() data = torch.rand(1000, 3, 224, 224, device='meta') graph = meta_trace(model, torch.device('cpu'), data) @pytest.mark.skipif(not is_compatible_with_meta(), reason='torch version is lower than 1.12.0') def test_timm_models_trace(): for m in tmm_models: model = m() data = torch.rand(1000, 3, 224, 224, device='meta') graph = meta_trace(model, torch.device('cpu'), data) if __name__ == '__main__': test_torchvision_models_trace() test_timm_models_trace()
import pytest import timm.models as tmm import torch import torchvision.models as tm from colossalai.fx._compatibility import is_compatible_with_meta if is_compatible_with_meta(): from colossalai.fx.profiler import MetaTensor tm_models = [ tm.vgg11, tm.resnet18, tm.densenet121, tm.mobilenet_v3_small, tm.resnext50_32x4d, tm.wide_resnet50_2, tm.regnet_x_16gf, tm.mnasnet0_5, tm.efficientnet_b0, ] tmm_models = [ tmm.resnest.resnest50d, tmm.beit.beit_base_patch16_224, tmm.cait.cait_s24_224, tmm.efficientnet.efficientnetv2_m, tmm.resmlp_12_224, tmm.vision_transformer.vit_base_patch16_224, tmm.deit_base_distilled_patch16_224, tmm.convnext.convnext_base, tmm.vgg.vgg11, tmm.dpn.dpn68, tmm.densenet.densenet121, tmm.rexnet.rexnet_100, tmm.swin_transformer.swin_base_patch4_window7_224 ] @pytest.mark.skipif(not is_compatible_with_meta(), reason='torch version is lower than 1.12.0') def test_torchvision_models(): for m in tm_models: model = m() data = torch.rand(100000, 3, 224, 224, device='meta') model(MetaTensor(data, fake_device=torch.device('cpu'))).sum().backward() @pytest.mark.skipif(not is_compatible_with_meta(), reason='torch version is lower than 1.12.0') def test_timm_models(): for m in tmm_models: model = m() data = torch.rand(100000, 3, 224, 224, device='meta') model(MetaTensor(data, fake_device=torch.device('cpu'))).sum().backward() if __name__ == '__main__': test_torchvision_models() test_timm_models()
from typing import Any, Callable, Union import pytest import torch import torch.nn as nn from colossalai.fx._compatibility import is_compatible_with_meta if is_compatible_with_meta(): from colossalai.fx.profiler import MetaTensor aten = torch.ops.aten registered_meta = { ('aten.convolution.default', True): [ # (aten ops, requires_backward) (nn.Conv1d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4)), (nn.Conv2d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4, 4)), (nn.Conv3d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4, 4, 4)), (nn.ConvTranspose1d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4)), (nn.ConvTranspose2d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4, 4)), (nn.ConvTranspose3d(in_channels=3, out_channels=4, kernel_size=2, padding=1, dilation=2), torch.rand(2, 3, 4, 4, 4)), ], ('aten.native_batch_norm.default', True): [ (nn.BatchNorm1d(4), torch.rand(2, 4)), (nn.BatchNorm2d(4), torch.rand(1, 4, 4, 4)), (nn.BatchNorm3d(4), torch.rand(1, 4, 4, 4, 4)), ], ('aten.native_layer_norm.default', True): [(nn.LayerNorm(4), torch.rand(1, 2, 3, 4)),], ('aten.avg_pool1d.default', True): [ (nn.MaxPool1d(3, stride=2), torch.rand(4, 5, 5)), (nn.AvgPool1d(3, stride=2), torch.rand(4, 5, 5)), (nn.AdaptiveMaxPool1d(3), torch.rand(4, 5, 5)), (nn.AdaptiveAvgPool1d(3), torch.rand(4, 5, 5)), ], ('aten.avg_pool2d.default', True): [ (nn.MaxPool2d((3, 2), stride=(2, 1)), torch.rand(2, 4, 5, 5)), (nn.AvgPool2d((3, 2), stride=(2, 1)), torch.rand(2, 4, 5, 5)), (nn.AdaptiveMaxPool2d((3, 2)), torch.rand(2, 4, 5, 5)), (nn.AdaptiveAvgPool2d((3, 2)), torch.rand(2, 4, 5, 5)), ], ('aten.relu.default', True): [ (nn.ReLU(), torch.rand(4, 3, 1, 2)), (nn.LeakyReLU(), torch.rand(4, 3, 1, 2)), (nn.SiLU(), torch.rand(4, 3, 1, 2)), (nn.GELU(), torch.rand(4, 3, 1, 2)), (nn.ELU(), torch.rand(4, 3, 1, 2)), (nn.Sigmoid(), torch.rand(4, 3, 1, 2)), (nn.Tanh(), torch.rand(4, 3, 1, 2)), (nn.Hardswish(), torch.rand(4, 3, 1, 2)), ] } def compare_all(tensor: torch.Tensor, meta_tensor: torch.Tensor) -> Any: assert tensor.shape == meta_tensor.shape, f'the shape of tensor ({tensor.shape}) and meta tensor ({meta_tensor.shape}) does not match.' assert tensor.dtype == meta_tensor.dtype, f'the dtype of tensor ({tensor.dtype}) and meta tensor ({meta_tensor.dtype}) does not match.' assert tensor.stride() == meta_tensor.stride( ), f'the stride of tensor ({tensor.stride()}) and meta tensor ({meta_tensor.stride()}) does not match.' def run_and_compare(f: Union[nn.Module, Callable], x: torch.Tensor, requires_backward=False) -> Any: x.requires_grad = requires_backward meta_x = MetaTensor(x) x_out, meta_out = f(x), f(meta_x) compare_all(x_out, meta_out) if requires_backward: x_out.sum().backward() meta_out.sum().backward() compare_all(x.grad, meta_x.grad) @pytest.mark.skipif(not is_compatible_with_meta(), reason='torch version is lower than 1.12.0') def test_meta_aten(): for (aten_op, requires_backward), v in registered_meta.items(): for f, x in v: run_and_compare(f, x, requires_backward) if __name__ == '__main__': test_meta_aten()
from functools import partial import colossalai import pytest import torch.multiprocessing as mp from colossalai.amp import AMP_TYPE from colossalai.core import global_context as gpc from colossalai.utils import free_port from tests.components_to_test.registry import non_distributed_component_funcs from colossalai.testing import parameterize, rerun_if_address_is_in_use CONFIG = dict(parallel=dict(pipeline=dict(size=1), tensor=dict(size=1, mode=None)), fp16=dict(mode=None), clip_grad_norm=1.0) @parameterize('model_name', ['repeated_computed_layers', 'resnet18', 'repeated_computed_layers']) @parameterize('amp_mode', [AMP_TYPE.APEX, AMP_TYPE.TORCH, AMP_TYPE.NAIVE, None]) def run_train(model_name, amp_mode): # FIXME: test bert get_components_func = non_distributed_component_funcs.get_callable(model_name) gpc.config.fp16['mode'] = amp_mode model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func() model = model_builder(checkpoint=False) engine, train_dataloader, *args = colossalai.initialize(model=model, optimizer=optimizer_class(model.parameters(), lr=1e-3), criterion=criterion, train_dataloader=train_dataloader) try: engine.train() for data, label in train_dataloader: engine.zero_grad() data = data.cuda() label = label.cuda() if criterion: output = engine(data) loss = engine.criterion(output, label) else: loss = engine(data, label) engine.backward(loss) engine.step() break except IndexError: # if using apex amp, NetWithRepeatedlyComputedLayers will raise an index out of range issue # the following check fails in apex # if cached_x.grad_fn.next_functions[1][0].variable is not x: pass def run_engine(rank, world_size, port): # init dist env colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') run_train() @pytest.mark.dist @rerun_if_address_is_in_use() def test_engine(): world_size = 2 run_func = partial(run_engine, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_engine()
import os from functools import partial from pathlib import Path import colossalai from colossalai.testing.utils import rerun_if_address_is_in_use import pytest import torch import torch.multiprocessing as mp import torch.nn as nn from colossalai.core import global_context as gpc from colossalai.logging import get_dist_logger from colossalai.utils import free_port, get_dataloader from colossalai.testing import rerun_if_address_is_in_use from torch.optim import Adam from torchvision import transforms from torchvision.datasets import CIFAR10 from torchvision.models import resnet18 # Config BATCH_SIZE = 2 NUM_CLASSES = 10 CONFIG = dict(parallel=dict(pipeline=dict(size=1), tensor=dict(size=1, mode=None)), clip_grad_norm=1.0, gradient_accumulation=4) def run_no_pipeline(rank, world_size, port): # init dist env colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') # build model model = resnet18(num_classes=10) # build dataloaders train_dataset = CIFAR10(root=Path(os.environ['DATA']), download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) ])) train_dataloader = get_dataloader(dataset=train_dataset, shuffle=True, batch_size=BATCH_SIZE, pin_memory=True, drop_last=True) # build optimizer optimizer = Adam(model.parameters(), lr=0.001) criterion = nn.CrossEntropyLoss() engine, train_dataloader, *args = colossalai.initialize(model=model, optimizer=optimizer, criterion=criterion, train_dataloader=train_dataloader) logger = get_dist_logger() rank = torch.distributed.get_rank() param_track = [] grad_track = [] next(model.parameters()).retain_grad() engine.train() step = 0 for img, label in train_dataloader: engine.zero_grad() img = img.cuda() label = label.cuda() output = engine(img) loss = engine.criterion(output, label) engine.backward(loss) engine.step() # check param_track.append(next(model.parameters())[0].clone()) grad_track.append(next(model.parameters()).grad[0].clone()) step += 1 if step == CONFIG['gradient_accumulation']: break assert not torch.all(grad_track[0] == grad_track[-1]), 'grad should be different in different iterations' assert torch.all(param_track[0] == param_track[1]) and not torch.all(param_track[0] == param_track[-1]), \ 'param should be the same in the first few iterations and only changed in the last iteration' gpc.destroy() torch.cuda.empty_cache() @pytest.mark.dist @rerun_if_address_is_in_use() def test_engine(): world_size = 4 func = partial(run_no_pipeline, world_size=world_size, port=free_port()) mp.spawn(func, nprocs=world_size) if __name__ == '__main__': test_engine()
import torch
from timm.models.beit import Beit

from colossalai.utils.cuda import get_current_device

from .registry import non_distributed_component_funcs
from .utils.dummy_data_generator import DummyDataGenerator


class DummyDataLoader(DummyDataGenerator):
    img_size = 64
    num_channel = 3
    num_class = 10
    batch_size = 4

    def generate(self):
        data = torch.randn((DummyDataLoader.batch_size, DummyDataLoader.num_channel, DummyDataLoader.img_size,
                            DummyDataLoader.img_size),
                           device=get_current_device())
        label = torch.randint(low=0,
                              high=DummyDataLoader.num_class,
                              size=(DummyDataLoader.batch_size,),
                              device=get_current_device())
        return data, label


@non_distributed_component_funcs.register(name='beit')
def get_training_components():

    def model_builder(checkpoint=False):
        model = Beit(img_size=DummyDataLoader.img_size,
                     num_classes=DummyDataLoader.num_class,
                     embed_dim=32,
                     depth=2,
                     num_heads=4)
        return model

    trainloader = DummyDataLoader()
    testloader = DummyDataLoader()
    criterion = torch.nn.CrossEntropyLoss()
    return model_builder, trainloader, testloader, torch.optim.Adam, criterion
#!/usr/bin/env python import torch import torch.nn as nn from colossalai.nn import CheckpointModule from .registry import non_distributed_component_funcs from .utils.dummy_data_generator import DummyDataGenerator class NetWithRepeatedlyComputedLayers(CheckpointModule): """ This model is to test with layers which go through forward pass multiple times. In this model, the fc1 and fc2 call forward twice """ def __init__(self, checkpoint=False) -> None: super().__init__(checkpoint=checkpoint) self.fc1 = nn.Linear(5, 5) self.fc2 = nn.Linear(5, 5) self.fc3 = nn.Linear(5, 2) self.layers = [self.fc1, self.fc2, self.fc1, self.fc2, self.fc3] def forward(self, x): for layer in self.layers: x = layer(x) return x class DummyDataLoader(DummyDataGenerator): def generate(self): data = torch.rand(16, 5) label = torch.randint(low=0, high=2, size=(16,)) return data, label @non_distributed_component_funcs.register(name='repeated_computed_layers') def get_training_components(): def model_builder(checkpoint=False): return NetWithRepeatedlyComputedLayers(checkpoint) trainloader = DummyDataLoader() testloader = DummyDataLoader() criterion = torch.nn.CrossEntropyLoss() return model_builder, trainloader, testloader, torch.optim.Adam, criterion
import torch import torch.nn as nn from transformers import GPT2Config, GPT2LMHeadModel from colossalai.utils.cuda import get_current_device from .registry import non_distributed_component_funcs from .utils.dummy_data_generator import DummyDataGenerator class DummyDataLoader(DummyDataGenerator): vocab_size = 128 batch_size = 4 seq_len = 64 def generate(self): input_ids = torch.randint(0, DummyDataLoader.vocab_size, (DummyDataLoader.batch_size, DummyDataLoader.seq_len), device=get_current_device()) return input_ids, input_ids class GPTLMModel(nn.Module): def __init__(self, hidden_size=768, num_layers=12, num_attention_heads=12, max_seq_len=1024, vocab_size=50304, checkpoint=False): super().__init__() self.checkpoint = checkpoint self.model = GPT2LMHeadModel( GPT2Config(n_embd=hidden_size, n_layer=num_layers, n_head=num_attention_heads, n_positions=max_seq_len, n_ctx=max_seq_len, vocab_size=vocab_size, resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0)) if checkpoint: self.model.gradient_checkpointing_enable() def forward(self, input_ids): # Only return lm_logits attention_mask = torch.ones_like(input_ids) return self.model(input_ids=input_ids, attention_mask=attention_mask, use_cache=not self.checkpoint)[0] def gpt2_micro(checkpoint=True): return GPTLMModel(checkpoint=checkpoint, hidden_size=32, num_layers=2, num_attention_heads=4, max_seq_len=64, vocab_size=128) def gpt2_s(checkpoint=True): return GPTLMModel(checkpoint=checkpoint) def gpt2_m(checkpoint=True): return GPTLMModel(hidden_size=1024, num_layers=24, num_attention_heads=16, checkpoint=checkpoint) class GPTLMLoss(nn.Module): def __init__(self): super().__init__() self.loss_fn = nn.CrossEntropyLoss() def forward(self, logits, labels): shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens return self.loss_fn(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) @non_distributed_component_funcs.register(name='gpt2') def get_training_components(): trainloader = DummyDataLoader() testloader = DummyDataLoader() criterion = GPTLMLoss() return gpt2_micro, trainloader, testloader, torch.optim.Adam, criterion
#!/usr/bin/env python


class Registry:

    def __init__(self):
        self._registry = dict()

    def register(self, name):
        assert name not in self._registry

        def _register(callable_):
            self._registry[name] = callable_
            # return the callable so the decorated function also stays usable
            # at its definition site
            return callable_

        return _register

    def get_callable(self, name: str):
        return self._registry[name]

    def __iter__(self):
        self._idx = 0
        self._len = len(self._registry)
        self._names = list(self._registry.keys())
        return self

    def __next__(self):
        if self._idx < self._len:
            key = self._names[self._idx]
            callable_ = self._registry[key]
            self._idx += 1
            return callable_
        else:
            raise StopIteration


non_distributed_component_funcs = Registry()
model_paralle_component_funcs = Registry()

__all__ = ['non_distributed_component_funcs', 'model_paralle_component_funcs']
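# --- A minimal usage sketch (not part of the original test suite) ---
# It assumes the Registry class defined above is in scope and shows how a
# component builder is registered under a name and later looked up by name or
# iterated over. The registry instance and the 'toy_net' builder below are
# illustrative only.
if __name__ == '__main__':
    demo_registry = Registry()

    @demo_registry.register(name='toy_net')
    def get_toy_components():
        # the real component functions return
        # (model_builder, trainloader, testloader, optimizer_class, criterion)
        return 'toy components'

    # look up a registered component by name ...
    assert demo_registry.get_callable('toy_net')() == 'toy components'

    # ... or iterate over everything that has been registered
    for component_func in demo_registry:
        print(component_func())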
import torch
import torch.nn as nn

from colossalai.nn import CheckpointModule
from colossalai.utils.cuda import get_current_device

from .registry import non_distributed_component_funcs
from .utils.dummy_data_generator import DummyDataGenerator


class SimpleNet(CheckpointModule):
    """
    A small network made of an embedding layer followed by linear and layer-norm layers,
    used as a lightweight test component.
    """

    def __init__(self, checkpoint=False) -> None:
        super().__init__(checkpoint=checkpoint)
        self.embed = nn.Embedding(20, 4)
        self.proj1 = nn.Linear(4, 8)
        self.ln1 = nn.LayerNorm(8)
        self.proj2 = nn.Linear(8, 4)
        self.ln2 = nn.LayerNorm(4)
        self.classifier = nn.Linear(4, 4)

    def forward(self, x):
        x = self.embed(x)
        x = self.proj1(x)
        x = self.ln1(x)
        x = self.proj2(x)
        x = self.ln2(x)
        x = self.classifier(x)
        return x


class DummyDataLoader(DummyDataGenerator):

    def generate(self):
        data = torch.randint(low=0, high=20, size=(16,), device=get_current_device())
        label = torch.randint(low=0, high=2, size=(16,), device=get_current_device())
        return data, label


@non_distributed_component_funcs.register(name='simple_net')
def get_training_components():

    def model_builder(checkpoint=False):
        return SimpleNet(checkpoint)

    trainloader = DummyDataLoader()
    testloader = DummyDataLoader()
    criterion = torch.nn.CrossEntropyLoss()
    from colossalai.nn.optimizer import HybridAdam
    return model_builder, trainloader, testloader, HybridAdam, criterion
import torch import torch.nn as nn import torch.nn.functional as F from colossalai.nn import CheckpointModule from .registry import non_distributed_component_funcs from .utils import DummyDataGenerator class SubNet(nn.Module): def __init__(self, out_features) -> None: super().__init__() self.bias = nn.Parameter(torch.zeros(out_features)) def forward(self, x, weight): return F.linear(x, weight, self.bias) class NestedNet(CheckpointModule): def __init__(self, checkpoint=False) -> None: super().__init__(checkpoint) self.fc1 = nn.Linear(5, 5) self.sub_fc = SubNet(5) self.fc2 = nn.Linear(5, 2) def forward(self, x): x = self.fc1(x) x = self.sub_fc(x, self.fc1.weight) x = self.fc1(x) x = self.fc2(x) return x class DummyDataLoader(DummyDataGenerator): def generate(self): data = torch.rand(16, 5) label = torch.randint(low=0, high=2, size=(16,)) return data, label @non_distributed_component_funcs.register(name='nested_model') def get_training_components(): def model_builder(checkpoint=False): return NestedNet(checkpoint) trainloader = DummyDataLoader() testloader = DummyDataLoader() criterion = torch.nn.CrossEntropyLoss() return model_builder, trainloader, testloader, torch.optim.Adam, criterion
import torch import transformers from packaging import version from transformers import AlbertConfig, AlbertForSequenceClassification from .bert import get_bert_data_loader from .registry import non_distributed_component_funcs @non_distributed_component_funcs.register(name='albert') def get_training_components(): hidden_dim = 8 num_head = 4 sequence_length = 12 num_layer = 2 vocab_size = 32 def bert_model_builder(checkpoint: bool = False): config = AlbertConfig(vocab_size=vocab_size, gradient_checkpointing=checkpoint, hidden_size=hidden_dim, intermediate_size=hidden_dim * 4, num_attention_heads=num_head, max_position_embeddings=sequence_length, num_hidden_layers=num_layer, hidden_dropout_prob=0., attention_probs_dropout_prob=0.) print('building AlbertForSequenceClassification model') # adapting huggingface BertForSequenceClassification for single unitest calling interface class ModelAaptor(AlbertForSequenceClassification): def forward(self, input_ids, labels): """ inputs: data, label outputs: loss """ return super().forward(input_ids=input_ids, labels=labels)[0] model = ModelAaptor(config) # if checkpoint and version.parse(transformers.__version__) >= version.parse("4.11.0"): # model.gradient_checkpointing_enable() return model is_distrbuted = torch.distributed.is_initialized() trainloader = get_bert_data_loader(n_class=vocab_size, batch_size=2, total_samples=10000, sequence_length=sequence_length, is_distrbuted=is_distrbuted) testloader = get_bert_data_loader(n_class=vocab_size, batch_size=2, total_samples=10000, sequence_length=sequence_length, is_distrbuted=is_distrbuted) criterion = None return bert_model_builder, trainloader, testloader, torch.optim.Adam, criterion
from . import ( beit, bert, gpt2, hanging_param_model, inline_op_model, nested_model, repeated_computed_layers, resnet, simple_net, ) from .utils import run_fwd_bwd from . import albert # isort:skip __all__ = [ 'bert', 'gpt2', 'hanging_param_model', 'inline_op_model', 'nested_model', 'repeated_computed_layers', 'resnet', 'simple_net', 'run_fwd_bwd', 'albert', 'beit' ]
from torchvision.models import resnet18 from .registry import non_distributed_component_funcs from pathlib import Path import os import torch from torchvision.transforms import transforms from torchvision.datasets import CIFAR10 from colossalai.utils import get_dataloader def get_cifar10_dataloader(train): # build dataloaders dataset = CIFAR10(root=Path(os.environ['DATA']), download=True, train=train, transform=transforms.Compose( [transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])) dataloader = get_dataloader(dataset=dataset, shuffle=True, batch_size=16, drop_last=True) return dataloader @non_distributed_component_funcs.register(name='resnet18') def get_resnet_training_components(): def model_builder(checkpoint=False): return resnet18(num_classes=10) trainloader = get_cifar10_dataloader(train=True) testloader = get_cifar10_dataloader(train=False) criterion = torch.nn.CrossEntropyLoss() return model_builder, trainloader, testloader, torch.optim.Adam, criterion
import torch import torch.nn as nn import torch.nn.functional as F from colossalai.nn import CheckpointModule from .registry import non_distributed_component_funcs from .utils.dummy_data_generator import DummyDataGenerator class InlineOpModule(CheckpointModule): """ a module with inline Ops """ def __init__(self, checkpoint=False) -> None: super().__init__(checkpoint=checkpoint) self.proj1 = nn.Linear(4, 8) self.proj2 = nn.Linear(8, 8) def forward(self, x): x = self.proj1(x) # inline add_ x.add_(10) x = self.proj2(x) # inline relu_ x = torch.relu_(x) x = self.proj2(x) return x class DummyDataLoader(DummyDataGenerator): def generate(self): data = torch.rand(16, 4) label = torch.randint(low=0, high=2, size=(16,)) return data, label @non_distributed_component_funcs.register(name='inline_op_model') def get_training_components(): def model_builder(checkpoint=False): return InlineOpModule(checkpoint) trainloader = DummyDataLoader() testloader = DummyDataLoader() criterion = torch.nn.CrossEntropyLoss() from colossalai.nn.optimizer import HybridAdam return model_builder, trainloader, testloader, HybridAdam, criterion
import torch
import torch.nn as nn
import torch.nn.functional as F

from colossalai.nn import CheckpointModule

from .registry import non_distributed_component_funcs
from .utils.dummy_data_generator import DummyDataGenerator


class HangingParamModule(CheckpointModule):
    """
    Hanging Parameter: a parameter that does not belong to a leaf Module.
    This module has subordinate nn.Module members and a standalone nn.Parameter.
    """

    def __init__(self, checkpoint=False) -> None:
        super().__init__(checkpoint=checkpoint)
        self.proj1 = nn.Linear(4, 8)
        self.weight = nn.Parameter(torch.randn(8, 8))
        self.proj2 = nn.Linear(8, 4)

    def forward(self, x):
        x = self.proj1(x)
        x = F.linear(x, self.weight)
        x = self.proj2(x)
        return x


class DummyDataLoader(DummyDataGenerator):

    def generate(self):
        data = torch.rand(16, 4)
        label = torch.randint(low=0, high=2, size=(16,))
        return data, label


@non_distributed_component_funcs.register(name='hanging_param_model')
def get_training_components():

    def model_builder(checkpoint=False):
        return HangingParamModule(checkpoint)

    trainloader = DummyDataLoader()
    testloader = DummyDataLoader()
    criterion = torch.nn.CrossEntropyLoss()
    from colossalai.nn.optimizer import HybridAdam
    return model_builder, trainloader, testloader, HybridAdam, criterion
import torch import transformers from packaging import version from torch.utils.data import SequentialSampler from transformers import BertConfig, BertForSequenceClassification from .registry import non_distributed_component_funcs def get_bert_data_loader( n_class, batch_size, total_samples, sequence_length, device=torch.device('cpu:0'), is_distrbuted=False, ): train_data = torch.randint( low=0, high=n_class, size=(total_samples, sequence_length), device=device, dtype=torch.long, ) train_label = torch.randint(low=0, high=2, size=(total_samples,), device=device, dtype=torch.long) train_dataset = torch.utils.data.TensorDataset(train_data, train_label) if is_distrbuted: sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) else: sampler = SequentialSampler(train_dataset) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=sampler) return train_loader @non_distributed_component_funcs.register(name='bert') def get_training_components(): hidden_dim = 8 num_head = 4 sequence_length = 12 num_layer = 2 vocab_size = 32 def bert_model_builder(checkpoint: bool = False): config = BertConfig(vocab_size=vocab_size, gradient_checkpointing=checkpoint, hidden_size=hidden_dim, intermediate_size=hidden_dim * 4, num_attention_heads=num_head, max_position_embeddings=sequence_length, num_hidden_layers=num_layer, hidden_dropout_prob=0., attention_probs_dropout_prob=0.) print('building BertForSequenceClassification model') # adapting huggingface BertForSequenceClassification for single unitest calling interface class ModelAaptor(BertForSequenceClassification): def forward(self, input_ids, labels): """ inputs: data, label outputs: loss """ return super().forward(input_ids=input_ids, labels=labels)[0] model = ModelAaptor(config) if checkpoint and version.parse(transformers.__version__) >= version.parse("4.11.0"): model.gradient_checkpointing_enable() return model is_distrbuted = torch.distributed.is_initialized() trainloader = get_bert_data_loader(n_class=vocab_size, batch_size=2, total_samples=10000, sequence_length=sequence_length, is_distrbuted=is_distrbuted) testloader = get_bert_data_loader(n_class=vocab_size, batch_size=2, total_samples=10000, sequence_length=sequence_length, is_distrbuted=is_distrbuted) criterion = None return bert_model_builder, trainloader, testloader, torch.optim.Adam, criterion
from abc import ABC, abstractmethod


class DummyDataGenerator(ABC):

    def __init__(self, length=10):
        self.length = length

    @abstractmethod
    def generate(self):
        pass

    def __iter__(self):
        self.step = 0
        return self

    def __next__(self):
        if self.step < self.length:
            self.step += 1
            return self.generate()
        else:
            raise StopIteration

    def __len__(self):
        return self.length
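# --- A minimal usage sketch (not part of the original test suite) ---
# It assumes the DummyDataGenerator ABC defined above is in scope. Subclasses
# only implement generate(); iterating the instance then yields `length`
# freshly generated (data, label) pairs, mimicking a finite dataloader.
# The RandomPairGenerator below is illustrative only.
import torch


class RandomPairGenerator(DummyDataGenerator):

    def generate(self):
        data = torch.rand(4, 8)
        label = torch.randint(low=0, high=2, size=(4,))
        return data, label


if __name__ == '__main__':
    loader = RandomPairGenerator(length=3)
    assert len(loader) == 3
    for data, label in loader:
        print(data.shape, label.shape)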
from .dummy_data_generator import DummyDataGenerator
from .executor import run_fwd_bwd
import torch


def run_fwd_bwd(model, data, label, criterion, optimizer=None) -> torch.Tensor:
    """Run a forward and a backward pass for the model.

    Args:
        model (torch.nn.Module): a PyTorch model
        data (torch.Tensor): input data
        label (torch.Tensor): label
        criterion (Optional[Callable]): loss function; if None, the model is expected to
            compute the loss from (data, label) itself
        optimizer (Optional): an optimizer exposing a ``backward(loss)`` method; if None,
            ``loss.backward()`` is called directly

    Returns:
        torch.Tensor: loss of the forward pass
    """
    if criterion:
        y = model(data)
        y = y.float()
        loss = criterion(y, label)
    else:
        loss = model(data, label)
        loss = loss.float()

    if optimizer:
        optimizer.backward(loss)
    else:
        loss.backward()

    return loss
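# --- A minimal usage sketch (not part of the original test suite) ---
# It assumes run_fwd_bwd defined above is in scope and shows the call pattern
# with an explicit criterion and no optimizer, so loss.backward() is used.
# The tiny linear model and shapes below are illustrative only.
import torch
import torch.nn as nn

if __name__ == '__main__':
    model = nn.Linear(8, 2)
    data = torch.rand(4, 8)
    label = torch.randint(low=0, high=2, size=(4,))
    criterion = nn.CrossEntropyLoss()

    loss = run_fwd_bwd(model, data, label, criterion)
    assert model.weight.grad is not None
    print(loss.item())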
import math import torch import torch.nn as nn from numpy import dtype from colossalai.testing import parameterize from colossalai.utils import multi_tensor_applier def torch_adam_update( step, lr, beta1, beta2, eps, weight_decay, param, grad, exp_avg, exp_avg_sq, use_adamw, ): bias_correction1 = 1 - beta1**step bias_correction2 = 1 - beta2**step if weight_decay != 0: if use_adamw: # Perform stepweight decay param.mul_(1 - lr * weight_decay) else: grad = grad.add(param, alpha=weight_decay) # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) step_size = lr / bias_correction1 param.addcdiv_(exp_avg, denom, value=-step_size) @parameterize('adamw', [False, True]) @parameterize('step', [1, 2]) @parameterize('p_dtype', [torch.float, torch.half]) @parameterize('g_dtype', [torch.float, torch.half]) def test_adam(adamw, step, p_dtype, g_dtype): from colossalai.kernel.op_builder import FusedOptimBuilder fused_optim = FusedOptimBuilder().load() fused_adam = fused_optim.multi_tensor_adam dummy_overflow_buf = torch.cuda.IntTensor([0]) count = 0 for i in range(1024): p = torch.rand(64, dtype=p_dtype).cuda() p_copy = p.clone().float() g = torch.rand(p.shape, dtype=g_dtype).cuda() g_copy = g.clone().float() m = torch.rand(p.shape).cuda() m_copy = m.clone() v = torch.rand(p.shape).cuda() v_copy = v.clone() lr = 1e-3 beta1, beta2 = 0.9, 0.999 eps = 1e-8 weight_decay = 0 multi_tensor_applier(fused_adam, dummy_overflow_buf, [[g], [p], [m], [v]], lr, beta1, beta2, eps, step, adamw, True, weight_decay, -1) torch_adam_update( step, lr, beta1, beta2, eps, weight_decay, p_copy, # fp32 data g_copy, # fp32 grad m_copy, v_copy, adamw, ) if torch.isnan(p).any() or torch.isnan(p_copy).any(): count += 1 continue assert count < 200, "too many nans" assert torch.allclose(p.to(torch.float), p_copy.to(torch.float), 1e-5, 1e-5), f"failed check, adamw {adamw}, p_dtype {p_dtype}, g_dtype {g_dtype}"
import pytest import torch from tests.components_to_test.registry import non_distributed_component_funcs from colossalai.nn.optimizer import CPUAdam, HybridAdam def move_some_params_to_cuda(model, torch_model): model.embed.weight.data = model.embed.weight.cuda() torch_model.embed.weight.data = model.embed.weight.cuda() model.ln1.weight.data = model.ln1.weight.cuda() torch_model.ln1.weight.data = model.ln1.weight.cuda() def check_params_equal(model, torch_model): for p, torch_p in zip(model.parameters(), torch_model.parameters()): assert torch.allclose(p, torch_p, atol=1e-3), f'diff: {torch.abs(p - torch_p)}' @pytest.mark.parametrize('nvme_offload_fraction', [0.0, 0.5, 1.0]) @pytest.mark.parametrize('nvme_offload_dir', ['./offload', None]) @pytest.mark.parametrize('adam_cls', [CPUAdam, HybridAdam]) def test_nvme_adam(nvme_offload_fraction, nvme_offload_dir, adam_cls): get_components_func = non_distributed_component_funcs.get_callable('simple_net') model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func() model = model_builder() torch_model = model_builder() move_some_params_to_cuda(model, torch_model) optimizer = adam_cls(model.parameters(), lr=0.1, nvme_offload_fraction=nvme_offload_fraction, nvme_offload_dir=nvme_offload_dir) torch_optimizer = torch.optim.Adam(torch_model.parameters(), lr=0.1) with torch.no_grad(): for p, torch_p in zip(model.parameters(), torch_model.parameters()): torch_p.copy_(p) p.grad = torch.rand_like(p) torch_p.grad = p.grad for _ in range(3): optimizer.step() torch_optimizer.step() check_params_equal(model, torch_model) if __name__ == '__main__': test_nvme_adam(0.5, './offload', CPUAdam)
import torch
import torch.nn as nn
from torch.optim.adam import Adam
from torch.optim import AdamW

from colossalai.nn.optimizer.hybrid_adam import HybridAdam
from colossalai.testing import parameterize

# number of optimizer steps to run for each parameterized case
RE = 1024


@parameterize('adamw', [False, True])
@parameterize('device', ['cpu', 'cuda:0'])
@parameterize('p_dtype', [torch.float])
@parameterize('g_dtype', [torch.float, torch.half])
def test_adam(adamw, device, p_dtype, g_dtype):
    rng_state = torch.get_rng_state()
    p = nn.Parameter(torch.rand(64).to(device, p_dtype))
    torch.set_rng_state(rng_state)
    p_copy = nn.Parameter(torch.rand(64).to(device).float())

    if adamw:
        optim = HybridAdam([p], lr=1e-3, adamw_mode=True)
        torch_optim = AdamW([p_copy], lr=1e-3)
    else:
        optim = HybridAdam([p], lr=1e-3)
        torch_optim = Adam([p_copy], lr=1e-3)

    print(f"adamw mode {adamw}, device {device}, p_dtype {p_dtype}, g_dtype {g_dtype}")
    for i in range(RE):
        p.grad = torch.rand(64).to(device, p_dtype)
        p_copy.grad = p.grad.clone().float()
        p.grad.data = p.grad.data.to(g_dtype)

        optim.step()
        torch_optim.step()

        if torch.isnan(p.data).any() or torch.isnan(p_copy.data).any():
            continue
        assert torch.allclose(p.data, p_copy.data, 1e-4, 1e-2), \
            f"adamw mode {adamw}, device {device}, p_dtype {p_dtype}, g_dtype {g_dtype}"
import math import torch from colossalai.testing import parameterize def torch_adam_update( step, lr, beta1, beta2, eps, weight_decay, param, grad, exp_avg, exp_avg_sq, use_adamw, ): bias_correction1 = 1 - beta1**step bias_correction2 = 1 - beta2**step if weight_decay != 0: if use_adamw: # Perform stepweight decay param.mul_(1 - lr * weight_decay) else: grad = grad.add(param, alpha=weight_decay) # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) step_size = lr / bias_correction1 param.addcdiv_(exp_avg, denom, value=-step_size) def assertLess(data_diff, threshold, msg): assert data_diff < threshold, msg def assertTrue(condition, msg): assert condition, msg @parameterize('adamw', [True, False]) @parameterize('step', [1, 2]) @parameterize('p_dtype', [torch.float, torch.half]) @parameterize('g_dtype', [torch.float, torch.half]) def test_cpu_adam(adamw, step, p_dtype, g_dtype): lr = 1e-3 beta1, beta2 = 0.9, 0.999 eps = 1e-8 weight_decay = 0 for i in range(1024): p_data = torch.rand(64, dtype=p_dtype) p_data_copy = p_data.clone().float() p_grad = torch.rand(64, dtype=g_dtype) p_grad_copy = p_grad.clone().float() exp_avg = torch.rand(p_data.shape) exp_avg_copy = exp_avg.clone() exp_avg_sq = torch.rand(p_data.shape) exp_avg_sq_copy = exp_avg_sq.clone() from colossalai.kernel.op_builder import CPUAdamBuilder cpu_optim = CPUAdamBuilder().load() cpu_adam_op = cpu_optim.CPUAdamOptimizer(lr, beta1, beta2, eps, weight_decay, adamw) cpu_adam_op.step( step, lr, beta1, beta2, eps, weight_decay, True, p_data.view(-1), # fp32 data p_grad.view(-1), # fp32 grad exp_avg.view(-1), exp_avg_sq.view(-1), -1, ) torch_adam_update( step, lr, beta1, beta2, eps, weight_decay, p_data_copy, # fp32 data p_grad_copy, # fp32 grad exp_avg_copy, exp_avg_sq_copy, adamw, ) var = p_data_copy - p_data data_diff = torch.max(torch.abs(var)) threshold = 1e-3 assertLess( data_diff, threshold, f"p_data diff {data_diff}. failed check, step {step}, lr {lr}, eps " f"{eps} beta1 {beta1} beta2 {beta2} weight_decay {weight_decay} p_dtype {p_dtype}, g_dtype {g_dtype}", ) max_grad_diff = torch.max(torch.abs(p_grad_copy - p_grad)) assertTrue(max_grad_diff < threshold, f"diff {max_grad_diff}") max_exp_avg_diff = torch.max(torch.abs(exp_avg_copy - exp_avg)) assertTrue(max_exp_avg_diff < threshold, f"max_exp_avg_diff {max_exp_avg_diff}") max_exp_avg_sq_diff = torch.max(torch.abs(exp_avg_sq_copy - exp_avg_sq)) assertTrue(max_exp_avg_sq_diff < threshold, f"max_exp_avg_sq_diff {max_exp_avg_sq_diff}") if __name__ == '__main__': test_cpu_adam()
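# --- Supplementary sketch (not from the original test suite) ---
# A minimal, single-step sanity check that the hand-written `torch_adam_update`
# math above matches `torch.optim.Adam` when the optimizer state starts from
# zeros (step = 1, weight_decay = 0). The helper name below is hypothetical and
# introduced only for illustration; only plain PyTorch is used.
import math

import torch


def single_step_adam_reference_check() -> bool:
    torch.manual_seed(0)
    lr, beta1, beta2, eps = 1e-3, 0.9, 0.999, 1e-8
    p = torch.randn(8)
    g = torch.randn(8)

    # reference: torch's built-in Adam, one step
    p_torch = torch.nn.Parameter(p.clone())
    opt = torch.optim.Adam([p_torch], lr=lr, betas=(beta1, beta2), eps=eps)
    p_torch.grad = g.clone()
    opt.step()

    # manual update with zero-initialized moments, mirroring torch_adam_update at step 1
    p_manual = p.clone()
    exp_avg = torch.zeros_like(p).mul_(beta1).add_(g, alpha=1 - beta1)
    exp_avg_sq = torch.zeros_like(p).mul_(beta2).addcmul_(g, g, value=1 - beta2)
    denom = (exp_avg_sq.sqrt() / math.sqrt(1 - beta2)).add_(eps)
    p_manual.addcdiv_(exp_avg, denom, value=-(lr / (1 - beta1)))

    return torch.allclose(p_torch.detach(), p_manual, atol=1e-6)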
import torch import torch.nn as nn from torch.optim.adam import Adam from torch.optim import AdamW from colossalai.nn.optimizer.fused_adam import FusedAdam from colossalai.testing import parameterize class FC(nn.Module): def __init__(self) -> None: super().__init__() self.fc = nn.Sequential(nn.Linear(64, 64)) def forward(self, x): return self.fc(x) @parameterize('adamw', [False, True]) @parameterize('p_dtype', [torch.float, torch.half]) @parameterize('g_dtype', [torch.float, torch.half]) def test_adam(adamw, p_dtype, g_dtype): model = FC().cuda().to(p_dtype) state = model.state_dict() model_copy = FC().cuda().to(p_dtype) model_copy.load_state_dict(state.copy()) if adamw: optim = FusedAdam(model.parameters(), lr=1e-3, adamw_mode=True) torch_optim = AdamW(model_copy.parameters(), lr=1e-3) else: optim = FusedAdam(model.parameters(), lr=1e-3) torch_optim = Adam(model_copy.parameters(), lr=1e-3) data = torch.rand(1024, 64).cuda().to(p_dtype) data_copy = data.clone() label = torch.rand(1024, 64).cuda().to(p_dtype) for d, l in zip(data, label): y = model(d) loss = ((l - y)**2).sum() optim.zero_grad() loss.backward() if p_dtype != g_dtype: for i in range(len(optim.param_groups[0]['params'])): optim.param_groups[0]['params'][i].grad.data = optim.param_groups[0]['params'][i].grad.data.to(g_dtype) optim.step() for d, l in zip(data_copy, label): y = model_copy(d) loss = ((l - y)**2).sum() torch_optim.zero_grad() loss.backward() torch_optim.step() assert len(optim.param_groups[0]['params']) == len(torch_optim.param_groups[0]['params']) for i in range(len(optim.param_groups[0]['params'])): if torch.isnan(optim.param_groups[0]['params'][i]).any() \ or torch.isnan(torch_optim.param_groups[0]['params'][i]).any(): continue assert torch.allclose(optim.param_groups[0]['params'][i], torch_optim.param_groups[0]['params'][i], 2e-3, 2e-3)
from functools import partial import colossalai import pytest import torch import torch.multiprocessing as mp import torch.nn.functional as F from colossalai.testing import rerun_if_address_is_in_use from colossalai.utils import free_port from colossalai.tensor import ColoTensorSpec, ProcessGroup, ColoTensor from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d, split_param_row_tp1d def run_with_spec(spec_init_func, split_bias): pg = ProcessGroup(tp_degree=torch.distributed.get_world_size()) model = torch.nn.Linear(4, 8).cuda() weight = ColoTensor(torch.nn.Parameter(model.weight.detach()), ColoTensorSpec(pg)) bias = ColoTensor(torch.nn.Parameter(model.bias.detach()), ColoTensorSpec(pg)) spec_init_func(weight, pg) if split_bias: spec_init_func(bias, pg) x = torch.rand(2, 4).cuda() out = model(x) colo_out = F.linear(x, weight, bias) colo_out = colo_out.to_replicate() assert tensor_equal(out, colo_out) grad = torch.rand_like(out) out.backward(grad) colo_out.backward(grad) assert tensor_shard_equal(model.weight.grad, weight.grad, pg.tp_local_rank(), pg.tp_world_size()) assert tensor_shard_equal(model.bias.grad, bias.grad, pg.tp_local_rank(), pg.tp_world_size()) def run_dist(rank, world_size, port): config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),)) colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') run_with_spec(spec_init_func=split_param_col_tp1d, split_bias=False) run_with_spec(spec_init_func=split_param_row_tp1d, split_bias=True) @pytest.mark.dist @pytest.mark.parametrize('world_size', [1, 4]) @rerun_if_address_is_in_use() def test_linear_1d(world_size): run_func = partial(run_dist, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_linear_1d(4)
import torch import pytest import colossalai import torch.nn.functional as F import torch.multiprocessing as mp from functools import partial from colossalai.tensor import ColoTensor, ProcessGroup, ColoTensorSpec from colossalai.utils import get_current_device from colossalai.testing import rerun_if_address_is_in_use from colossalai.utils import free_port from colossalai.tensor import ShardSpec, ComputeSpec, ComputePattern def check_cross_entropy(): input_t = torch.randn(4, 4, device=get_current_device(), requires_grad=True) input_ct = torch.randn(4, 4, device=get_current_device(), requires_grad=True) with torch.no_grad(): input_ct.copy_(input_t) target = torch.randint(4, (4,), dtype=torch.int64, device=get_current_device()) world_size = torch.distributed.get_world_size() pg = ProcessGroup(tp_degree=world_size) input_t_colo = ColoTensor.from_torch_tensor(tensor=input_ct, spec=ColoTensorSpec(pg)) input_shard = input_t_colo.redistribute(ShardSpec([-1], [pg.tp_world_size()])) input_shard.set_tensor_spec(dist_spec=None, compute_spec=ComputeSpec(ComputePattern.TP1D)) output = F.cross_entropy(input_t, target) output_colo = F.cross_entropy(input_shard, target) assert torch.allclose(output_colo, output) output.backward() output_colo.backward() assert torch.allclose(input_t.grad, input_ct.grad) def run_dist(rank, world_size, port): colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') check_cross_entropy() @pytest.mark.dist @pytest.mark.parametrize('world_size', [1, 2]) @rerun_if_address_is_in_use() def test_loss_func(world_size): run_func = partial(run_dist, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_loss_func(1)
from torch.nn import functional as F from functools import partial import colossalai import pytest import torch import torch.multiprocessing as mp from colossalai.testing import rerun_if_address_is_in_use from colossalai.utils import free_port from colossalai.tensor import ColoParameter, ColoTensorSpec, ProcessGroup from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d def run_with_spec(spec_init_func): pg = ProcessGroup(tp_degree=torch.distributed.get_world_size()) model = torch.nn.EmbeddingBag(10, 4).cuda() weight = ColoParameter(model.weight.clone(), True, ColoTensorSpec(pg)) spec_init_func(weight, pg) inputs = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]).cuda() offsets = torch.tensor([0, 4]).cuda() out = model(inputs, offsets=offsets) colo_out = F.embedding_bag(inputs, weight, offsets=offsets) assert tensor_equal(out, colo_out) grad = torch.rand_like(out) out.backward(grad) colo_out.backward(grad) assert tensor_shard_equal(model.weight.grad, weight.grad, pg.tp_local_rank(), pg.tp_world_size()) def run_dist(rank, world_size, port): config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),)) colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') run_with_spec(split_param_col_tp1d) @pytest.mark.dist @pytest.mark.parametrize('world_size', [1, 4]) @rerun_if_address_is_in_use() def test_embedding_bag_1d(world_size): run_func = partial(run_dist, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_embedding_bag_1d(4)
import colossalai
import torch
import pytest
import torch.nn as nn
import torch.multiprocessing as mp
from colossalai.tensor import ColoTensor, ProcessGroup
from colossalai.tensor import ColoTensorSpec
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port
from functools import partial

from tests.test_tensor.common_utils import tensor_shard_equal, tensor_equal, split_param_row_tp1d, split_param_col_tp1d


class Conv1D(nn.Module):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (`int`): The number of output features.
        nx (`int`): The number of input features.
    """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = nn.Parameter(w)
        self.bias = nn.Parameter(torch.ones(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(size_out)
        return x


def run_with_spec(spec_init_func, split_bias):
    model = Conv1D(4, 16).cuda()
    world_size = torch.distributed.get_world_size()
    pg = ProcessGroup(tp_degree=world_size)
    weight = ColoTensor(torch.nn.Parameter(model.weight.detach()), ColoTensorSpec(pg))
    bias = ColoTensor(torch.nn.Parameter(model.bias.detach()), ColoTensorSpec(pg))

    spec_init_func(weight, pg)
    if split_bias:
        spec_init_func(bias, pg)

    x = torch.rand(2, 16).cuda()
    out = model(x)
    colo_out = torch.addmm(bias, x, weight)
    colo_out = colo_out.to_replicate()
    assert tensor_equal(out, colo_out)
    grad = torch.rand_like(out)
    out.backward(grad)
    colo_out.backward(grad)
    assert tensor_shard_equal(model.weight.grad, weight.grad, pg.tp_local_rank(), pg.tp_world_size())
    assert tensor_shard_equal(model.bias.grad, bias.grad, pg.tp_local_rank(), pg.tp_world_size())


def run_dist(rank, world_size, port):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_with_spec(spec_init_func=split_param_row_tp1d, split_bias=False)
    run_with_spec(spec_init_func=split_param_col_tp1d, split_bias=True)


@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 4])
@rerun_if_address_is_in_use()
def test_addmm_1d(world_size):
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_addmm_1d(4)
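# --- Supplementary sketch (not from the original test suite) ---
# The Conv1D module in the addmm test above is just a linear layer whose weight
# is stored transposed, i.e. torch.addmm(bias, x, weight) equals
# F.linear(x, weight.t(), bias). The helper below is hypothetical and uses only
# plain PyTorch to illustrate that equivalence.
import torch
import torch.nn.functional as F


def conv1d_matches_linear(nf: int = 4, nx: int = 16) -> bool:
    torch.manual_seed(0)
    w = torch.randn(nx, nf)    # Conv1D stores the weight as (nx, nf)
    b = torch.randn(nf)
    x = torch.randn(2, nx)
    conv1d_out = torch.addmm(b, x, w)      # what Conv1D.forward computes
    linear_out = F.linear(x, w.t(), b)     # nn.Linear convention: weight is (out, in)
    return torch.allclose(conv1d_out, linear_out, atol=1e-6)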
from functools import partial import colossalai import pytest import torch import torch.multiprocessing as mp import torch.distributed as dist from colossalai.testing import rerun_if_address_is_in_use from colossalai.utils import free_port, get_current_device from colossalai.tensor import ColoTensorSpec, ProcessGroup, ColoTensor, ShardSpec from colossalai.tensor.distspec import DistPlacementPattern from tests.test_tensor.common_utils import split_param_row_tp1d, split_param_col_tp1d, debug_print def exam_view_core(pg): # the case of replicated ColoTensors x = torch.randn(4, 4).cuda() x_colo = ColoTensor(x, ColoTensorSpec(pg)) y = x.view(2, -1, 2) y_colo = x_colo.view(2, -1, 2) assert torch.all(y == y_colo) assert y_colo.dist_spec.placement == DistPlacementPattern.REPLICATE # the perfect case of col-sliced ColoTensors split_param_col_tp1d(x_colo, pg) z = x.view(torch.Size((2, 1, 2, -1))) z_colo = x_colo.view(torch.Size((2, 1, 2, -1))) if dist.get_rank() == 0: z = z[:, :, :, 0:2] else: z = z[:, :, :, 2:] assert torch.all(z == z_colo) assert z_colo.dist_spec == x_colo.dist_spec # the perfect case of row-sliced ColoTensors split_param_row_tp1d(x_colo, pg) z = x.view(torch.Size((-1, 2, 2))) z_colo = x_colo.view(torch.Size((-1, 2, 2))) if dist.get_rank() == 0: z = z[0:2, :, :] else: z = z[2:, :, :] assert torch.all(z == z_colo) assert z_colo.dist_spec == x_colo.dist_spec # the normal case of row-sliced ColoTensors z = x.view(-1, 2, 2, 2) z_colo = x_colo.view(-1, 2, 2, 2) assert torch.all(z == z_colo) assert y_colo.dist_spec.placement == DistPlacementPattern.REPLICATE def exam_view_autograd(pg): x = torch.randn(8, 2, device=get_current_device(), requires_grad=True) y = torch.randn(8, 2, device=get_current_device(), requires_grad=True) with torch.no_grad(): y.copy_(x) y = ColoTensor(y, ColoTensorSpec(pg)) y_slice = y.redistribute(ShardSpec([-1], [pg.tp_world_size()])) xx = x.view(2, 2, -1) yy_slice = y_slice.view(2, 2, -1) yy = yy_slice.to_replicate() grad = torch.randn(2, 2, 4, device=get_current_device()) xx.backward(grad) yy.backward(grad) assert torch.all(x.grad == y.grad) def exam_view_errors(pg): x = torch.randn(8, 2, device=get_current_device()) x = ColoTensor(x, ColoTensorSpec(pg)) split_param_row_tp1d(x, pg) x.view('a', 'b', 'c') x.view(8, -1) x.view([-2, -2, -2]) x.view((-1, -1, -1)) def run_dist(rank, world_size, port): colossalai.launch(config=dict(), rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') pg = ProcessGroup(tp_degree=torch.distributed.get_world_size()) exam_view_core(pg) exam_view_autograd(pg) # exam_view_errors(pg) @pytest.mark.dist @pytest.mark.parametrize('world_size', [2]) @rerun_if_address_is_in_use() def test_view(world_size): run_func = partial(run_dist, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_view(2)
import torch import pytest import colossalai import torch.nn.functional as F import torch.multiprocessing as mp from functools import partial from colossalai.tensor import ColoTensor, ProcessGroup, ColoTensorSpec, ShardSpec from colossalai.utils import get_current_device from torch.nn import Parameter from colossalai.testing import rerun_if_address_is_in_use from colossalai.utils import free_port def _run_layer_norm(): ln_op = torch.nn.LayerNorm(2, 3, device=get_current_device()) input_t = torch.randn(3, 2, device=get_current_device()) pg = ProcessGroup(tp_degree=torch.distributed.get_world_size()) input_t_colo = ColoTensor.from_torch_tensor(input_t.clone().detach(), ColoTensorSpec(pg)) # prepare colossalai LN weight = ColoTensor(Parameter(ln_op.weight.detach()), ColoTensorSpec(pg)) bias = ColoTensor(Parameter(ln_op.bias.detach()), ColoTensorSpec(pg)) output = ln_op(input_t) output_colo = F.layer_norm(input_t_colo, ln_op.normalized_shape, weight, bias, ln_op.eps) assert torch.allclose(output_colo, output) torch.mean(output).backward() torch.mean(output_colo).backward() assert torch.allclose(ln_op.weight.grad, weight.grad) def check_spec_eq(tensor, other): assert isinstance(tensor, ColoTensor) and isinstance(other, ColoTensor) for k in dir(tensor.dist_spec): if not k.startswith('__'): assert hasattr(other.dist_spec, k), f"{k}" assert getattr(tensor.dist_spec, k) == getattr(other.dist_spec, k) def check_element_wise_ops(): world_size = torch.distributed.get_world_size() pg = ProcessGroup(tp_degree=world_size) t = torch.rand(2, 2) x = ColoTensor(t, spec=ColoTensorSpec(pg, ShardSpec([0], [pg.tp_world_size()]))) check_spec_eq(x, x.cuda()) assert torch.equal(x.cuda(), t.cuda()) check_spec_eq(x, torch.abs(x)) assert torch.equal(torch.abs(x), torch.abs(t)) check_spec_eq(x, F.sigmoid(x)) assert torch.equal(F.sigmoid(x), F.sigmoid(t)) def run_dist(rank, world_size, port): colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') check_element_wise_ops() _run_layer_norm() @pytest.mark.dist @pytest.mark.parametrize('world_size', [2]) @rerun_if_address_is_in_use() def test_element_wise_ops(world_size): run_func = partial(run_dist, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) def run_dist2(rank, world_size, port): colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') _run_layer_norm() @pytest.mark.dist @pytest.mark.parametrize('world_size', [1]) @rerun_if_address_is_in_use() def test_ln(world_size): run_func = partial(run_dist2, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) def check_all(): test_element_wise_ops(2) if __name__ == '__main__': check_all()
from torch.nn import functional as F from functools import partial import colossalai import pytest import torch import torch.multiprocessing as mp from colossalai.testing import rerun_if_address_is_in_use from colossalai.utils import free_port from colossalai.tensor import ColoTensorSpec, ProcessGroup, ColoTensor from tests.test_tensor.common_utils import tensor_equal, tensor_shard_equal, split_param_col_tp1d, split_param_row_tp1d def run_with_spec(spec_init_func, pg: ProcessGroup): model = torch.nn.Embedding(12, 32).cuda() weight = ColoTensor(torch.nn.Parameter(model.weight.detach()), ColoTensorSpec(pg)) spec_init_func(weight, pg) x = torch.tensor((0, 3, 6, 9)).cuda() out = model(x) colo_out = F.embedding(x, weight) assert tensor_equal(out, colo_out) grad = torch.rand_like(out) out.backward(grad) colo_out.backward(grad) # compare grad inside a TP group assert tensor_shard_equal(model.weight.grad, weight.grad, pg.tp_local_rank(), pg.tp_world_size()) def run_dist(rank, world_size, port): # config = dict(parallel=dict(tensor=dict(mode="1d", size=world_size),)) colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') pg = ProcessGroup(tp_degree=world_size) run_with_spec(split_param_row_tp1d, pg) run_with_spec(split_param_col_tp1d, pg) @pytest.mark.dist @pytest.mark.parametrize('world_size', [1, 4]) @rerun_if_address_is_in_use() def test_embedding_1d(world_size): run_func = partial(run_dist, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_embedding_1d(4)
from functools import partial

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.amp.amp_type import AMP_TYPE
from colossalai.logging import get_dist_logger
from colossalai.trainer import Trainer
from colossalai.utils import MultiTimer, free_port
from tests.components_to_test.registry import non_distributed_component_funcs
from colossalai.testing import parameterize, rerun_if_address_is_in_use

BATCH_SIZE = 4
IMG_SIZE = 32
NUM_EPOCHS = 200

CONFIG = dict(fp16=dict(mode=AMP_TYPE.TORCH))


@parameterize('model_name', ['repeated_computed_layers', 'resnet18', 'nested_model'])
def run_trainer(model_name):
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    model = model_builder()
    optimizer = optimizer_class(model.parameters(), lr=1e-3)
    engine, train_dataloader, *_ = colossalai.initialize(model=model,
                                                         optimizer=optimizer,
                                                         criterion=criterion,
                                                         train_dataloader=train_dataloader)

    logger = get_dist_logger()
    logger.info("engine is built", ranks=[0])

    timer = MultiTimer()
    trainer = Trainer(engine=engine, logger=logger, timer=timer)
    logger.info("trainer is built", ranks=[0])

    logger.info("start training", ranks=[0])

    trainer.fit(train_dataloader=train_dataloader,
                test_dataloader=test_dataloader,
                epochs=NUM_EPOCHS,
                max_steps=3,
                display_progress=True,
                test_interval=5)
    torch.cuda.empty_cache()


def run_dist(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_trainer()


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_trainer_no_pipeline():
    world_size = 4
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_trainer_no_pipeline()
import os from functools import partial from pathlib import Path import colossalai import pytest import torch import torch.multiprocessing as mp import torch.nn as nn from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.engine.schedule import PipelineSchedule from colossalai.logging import get_dist_logger from colossalai.trainer import Trainer from colossalai.utils import MultiTimer, free_port, get_dataloader from torch.optim import Adam from torchvision import transforms from torchvision.datasets import CIFAR10 from torchvision.models import resnet18 from colossalai.testing import rerun_if_address_is_in_use BATCH_SIZE = 4 IMG_SIZE = 32 NUM_EPOCHS = 200 CONFIG = dict( NUM_MICRO_BATCHES=2, parallel=dict(pipeline=2), ) def run_trainer_with_pipeline(rank, world_size, port): colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') # build model model = resnet18(num_classes=10) if gpc.get_local_rank(ParallelMode.PIPELINE) == 0: model = nn.Sequential(model.conv1, model.bn1, model.relu, model.maxpool, model.layer1, model.layer2) elif gpc.get_local_rank(ParallelMode.PIPELINE) == 1: class Flatten(nn.Module): def forward(self, x): return torch.flatten(x, 1) model = nn.Sequential(model.layer3, model.layer4, model.avgpool, Flatten(), model.fc) # build dataloaders train_dataset = CIFAR10(root=Path(os.environ['DATA']), download=True, transform=transforms.Compose([ transforms.Resize(size=(IMG_SIZE, IMG_SIZE)), transforms.ToTensor(), transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) ])) train_dataloader = get_dataloader(dataset=train_dataset, shuffle=True, batch_size=BATCH_SIZE, pin_memory=True, drop_last=True) # build optimizer optimizer = Adam(model.parameters(), lr=0.001) criterion = nn.CrossEntropyLoss() engine, train_dataloader, *args = colossalai.initialize(model=model, optimizer=optimizer, criterion=criterion, train_dataloader=train_dataloader) logger = get_dist_logger() logger.info("engine is built", ranks=[0]) timer = MultiTimer() trainer = Trainer(engine=engine, logger=logger, timer=timer) logger.info("trainer is built", ranks=[0]) logger.info("start training", ranks=[0]) trainer.fit(train_dataloader=train_dataloader, epochs=NUM_EPOCHS, max_steps=3, display_progress=True, test_interval=5) gpc.destroy() torch.cuda.empty_cache() @pytest.mark.dist @rerun_if_address_is_in_use() def test_trainer_with_pipeline(): world_size = 4 run_func = partial(run_trainer_with_pipeline, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_trainer_with_pipeline()
#!/usr/bin/env python # -*- encoding: utf-8 -*- from functools import partial import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp from colossalai.communication import (recv_backward, recv_forward, recv_obj_meta, send_backward, send_backward_recv_forward, send_forward, send_forward_recv_backward, send_obj_meta) from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.initialize import launch from colossalai.logging import get_dist_logger from colossalai.utils import free_port, get_current_device from colossalai.testing import rerun_on_exception BATCH_SIZE = 4 SEQ_LENGTH = 2 HIDDEN_SIZE = 16 CONFIG = dict(parallel=dict(pipeline=dict(size=4), tensor=dict(size=1, mode=None)), seed=1024) def check_equal(A, B): return torch.allclose(A, B, rtol=1e-5, atol=1e-3) def check_forward(output_tensor, rank, logger): dist.barrier() if gpc.is_first_rank(ParallelMode.PIPELINE): tensor = output_tensor.clone() else: tensor = recv_forward(output_tensor.shape) logger.info('Rank {} received forward. Correct tensor: {}'.format(rank, check_equal(tensor, output_tensor))) if not gpc.is_last_rank(ParallelMode.PIPELINE): send_forward(tensor) logger.info('Rank {} sent forward.'.format(rank)) def check_backward(output_grad, rank, logger): dist.barrier() if gpc.is_last_rank(ParallelMode.PIPELINE): grad = output_grad.clone() else: grad = recv_backward(output_grad.shape) logger.info('Rank {} received backward. Correct grad: {}'.format(rank, check_equal(grad, output_grad))) if not gpc.is_first_rank(ParallelMode.PIPELINE): send_backward(grad) logger.info('Rank {} sent backward.'.format(rank)) def check_forward_backward(output_tensor, output_grad, rank, logger): dist.barrier() if not gpc.is_first_rank(ParallelMode.PIPELINE): tensor = send_backward_recv_forward(output_grad, output_tensor.shape) logger.info('Rank {} sent backward received forward. Correct tensor: {}'.format( rank, check_equal(tensor, output_tensor))) if not gpc.is_last_rank(ParallelMode.PIPELINE): grad = send_forward_recv_backward(output_tensor, output_grad.shape) logger.info('Rank {} sent forward received backward. 
Correct grad: {}'.format(
            rank, check_equal(grad, output_grad)))


def check_comm(size, rank, prev_rank, next_rank, logger):
    dtype = torch.float32
    device = get_current_device()
    tensor_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
    grad_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
    tensor = torch.randn(tensor_shape, dtype=dtype, device=device)
    dist.all_reduce(tensor)
    grad = torch.randn(grad_shape, dtype=dtype, device=device)
    dist.all_reduce(grad)
    check_forward(tensor, rank, logger)
    check_backward(grad, rank, logger)
    check_forward_backward(tensor, grad, rank, logger)


def run_check(rank, world_size, port):
    launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    logger = get_dist_logger()
    rank = gpc.get_global_rank()
    prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE)
    next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE)
    logger.info('Rank {0}: prev rank {1}, next rank {2}'.format(rank, prev_rank, next_rank))
    logger.info('Distributed environment is initialized.')

    check_comm(world_size, rank, prev_rank, next_rank, logger)
    gpc.destroy()
    torch.cuda.empty_cache()


@pytest.mark.dist
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_p2p():
    world_size = 4
    run_func = partial(run_check, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_p2p()
# referenced from Megatron and used to testify communication import os import os.path as osp from functools import partial from pathlib import Path import colossalai import pytest import torch import torch.nn as nn import torch.multiprocessing as mp from colossalai.core import global_context as gpc from colossalai.context import ParallelMode from colossalai.initialize import launch from colossalai.utils import free_port, get_dataloader, print_rank_0 from colossalai.testing import rerun_on_exception from torchvision import transforms from torchvision.datasets import CIFAR10 from torchvision.models import resnet18 BATCH_SIZE = 8 CONFIG=dict( NUM_MICRO_BATCHES=2, parallel = dict( pipeline=dict(size=2), tensor=dict(size=1, mode=None) ) ) def run_schedule(rank, world_size, port): launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') # build model model = resnet18(num_classes=10) if gpc.get_local_rank(ParallelMode.PIPELINE) == 0: model = nn.Sequential(model.conv1, model.bn1, model.relu, model.maxpool, model.layer1, model.layer2) elif gpc.get_local_rank(ParallelMode.PIPELINE) == 1: class Flatten(nn.Module): def forward(self, x): return torch.flatten(x, 1) model = nn.Sequential(model.layer3, model.layer4, model.avgpool, Flatten(), model.fc) print_rank_0('model is created') train_dataset = CIFAR10(root=Path(os.environ['DATA']), download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]), ])) train_dataloader = get_dataloader( dataset=train_dataset, shuffle=True, add_sampler=True, batch_size=BATCH_SIZE, pin_memory=True, ) # build criterion criterion = torch.nn.CrossEntropyLoss() # optimizer optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0) # initialize engine, train_dataloader, _, _ = colossalai.initialize(model, optimizer, criterion, train_dataloader) # build pipeline schedule schedule = engine.schedule # run schedule data_iter = iter(train_dataloader) schedule.forward_backward_step(engine, data_iter) gpc.destroy() torch.cuda.empty_cache() @pytest.mark.dist @rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*") def test_pipeline_schedule(): world_size = 2 run_func = partial(run_schedule, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_pipeline_schedule()
from functools import partial import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp from colossalai.communication import all_gather, all_reduce, reduce_scatter from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.initialize import launch from colossalai.utils import free_port, get_current_device from colossalai.testing import rerun_if_address_is_in_use CONFIG = dict(parallel=dict(data=8, pipeline=1, tensor=dict(mode=None, size=1))) SIZE = 8 def check_all_gather(): tensor = torch.tensor([dist.get_rank() * SIZE + j for j in range(SIZE)]) tensor = tensor.to(get_current_device()) print('Before: Rank {0} - {1}'.format(dist.get_rank(), tensor)) tensor, op = all_gather(tensor, 0, ParallelMode.GLOBAL, async_op=True) print('After: Rank {0} - {1}'.format(dist.get_rank(), tensor)) op.wait() print('Complete: Rank {0} - {1}'.format(dist.get_rank(), tensor)) torch.cuda.synchronize() def check_reduce_scatter(): tensor = torch.tensor([dist.get_rank() * SIZE + j for j in range(SIZE)]) tensor = tensor.to(get_current_device()) print('Before: Rank {0} - {1}'.format(dist.get_rank(), tensor)) tensor, op = reduce_scatter(tensor, 0, ParallelMode.GLOBAL, async_op=True) print('After: Rank {0} - {1}'.format(dist.get_rank(), tensor)) op.wait() print('Complete: Rank {0} - {1}'.format(dist.get_rank(), tensor)) torch.cuda.synchronize() def check_all_reduce(): tensor = torch.tensor([dist.get_rank() * SIZE + j for j in range(SIZE)]) tensor = tensor.to(get_current_device()) print('Before: Rank {0} - {1}'.format(dist.get_rank(), tensor)) tensor, op = all_reduce(tensor, ParallelMode.GLOBAL, async_op=True) print('After: Rank {0} - {1}'.format(dist.get_rank(), tensor)) op.wait() print('Complete: Rank {0} - {1}'.format(dist.get_rank(), tensor)) torch.cuda.synchronize() def check_layer(rank, world_size, port): launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') assert dist.get_rank() == gpc.get_global_rank() print('Rank {} / {}'.format(dist.get_rank(), dist.get_world_size())) check_all_gather() check_reduce_scatter() check_all_reduce() gpc.destroy() torch.cuda.empty_cache() @pytest.mark.dist @rerun_if_address_is_in_use() def test_comm(): world_size = 4 run_func = partial(check_layer, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_comm()
from functools import partial import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp from colossalai.communication.p2p import send_forward, recv_forward, send_backward, recv_backward, send_forward_recv_backward, send_backward_recv_forward from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.initialize import launch from colossalai.utils import free_port, get_current_device from colossalai.testing import rerun_if_address_is_in_use CONFIG = dict(parallel=dict(pipeline=2)) torch.manual_seed(123) LIST_LENGTH = 3 TENSOR_SIZE = torch.Size((3, 3)) TENSOR_SIZE_LIST = [TENSOR_SIZE for i in range(LIST_LENGTH)] data = torch.rand(3, 3) data_list = [torch.rand(3, 3) for i in range(LIST_LENGTH)] grad = torch.rand(3, 3) grad_list = [torch.rand(3, 3) for i in range(LIST_LENGTH)] def check_send_recv_forward(): if gpc.get_local_rank(ParallelMode.PIPELINE) == 0: device = torch.device('cuda:0') data_to_send = data.to(device) data_list_to_send = [] for data_in_list in data_list: data_list_to_send.append(data_in_list.to(device)) send_forward(data_to_send) send_forward(data_list_to_send) else: device = torch.device('cuda:1') data_recv = recv_forward(TENSOR_SIZE) data_list_recv = recv_forward(TENSOR_SIZE_LIST) data_to_check = data.to(device) assert data_recv.equal(data_to_check) for data_recv, data_send in zip(data_list_recv, data_list): data_to_check = data_send.to(device) assert data_recv.equal(data_to_check) def check_send_recv_backward(): if gpc.get_local_rank(ParallelMode.PIPELINE) == 0: device = torch.device('cuda:0') grad_recv = recv_backward(TENSOR_SIZE) grad_list_recv = recv_backward(TENSOR_SIZE_LIST) grad_to_check = grad.to(device) assert grad_recv.equal(grad_to_check) for grad_recv, grad_send in zip(grad_list_recv, grad_list): grad_to_check = grad_send.to(device) assert grad_recv.equal(grad_to_check) else: device = torch.device('cuda:1') grad_to_send = grad.to(device) grad_list_to_send = [] for grad_in_list in grad_list: grad_list_to_send.append(grad_in_list.to(device)) send_backward(grad_to_send) send_backward(grad_list_to_send) def check_send_recv_forward_backward(): if gpc.get_local_rank(ParallelMode.PIPELINE) == 0: device = torch.device('cuda:0') data_list_to_send = [] for data_in_list in data_list: data_list_to_send.append(data_in_list.to(device)) grad_list_recv = send_forward_recv_backward(data_list_to_send, TENSOR_SIZE_LIST) for grad_recv, grad_send in zip(grad_list_recv, grad_list): grad_to_check = grad_send.to(device) assert grad_recv.equal(grad_to_check) else: device = torch.device('cuda:1') grad_list_to_send = [] for grad_in_list in grad_list: grad_list_to_send.append(grad_in_list.to(device)) data_list_recv = send_backward_recv_forward(grad_list_to_send, TENSOR_SIZE_LIST) for data_recv, data_send in zip(data_list_recv, data_list): data_to_check = data_send.to(device) assert data_recv.equal(data_to_check) def check_layer(rank, world_size, port): launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') check_send_recv_forward() check_send_recv_backward() check_send_recv_forward_backward() gpc.destroy() torch.cuda.empty_cache() @pytest.mark.dist @rerun_if_address_is_in_use() def test_object_list_p2p(): world_size = 2 run_func = partial(check_layer, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_object_list_p2p()
from functools import partial import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp from colossalai.communication.p2p_v2 import send_forward, recv_forward, send_backward, recv_backward, init_process_group from colossalai.context import ParallelMode, Initializer_Pipeline from colossalai.core import global_context as gpc from colossalai.initialize import launch from colossalai.utils import free_port, get_current_device from colossalai.testing import rerun_if_address_is_in_use from colossalai.logging import disable_existing_loggers disable_existing_loggers() # config world_size = 4 CONFIG = dict(parallel=dict(pipeline=4)) torch.manual_seed(123) use_scatter_gather_tensors = False # data torch.manual_seed(123) LIST_LENGTH = 3 TENSOR_SIZE = torch.Size((3, 3)) TENSOR_SIZE_LIST = [TENSOR_SIZE for i in range(LIST_LENGTH)] data = torch.rand(3, 3) data_list = [torch.rand(3, 3) for i in range(LIST_LENGTH)] grad = torch.rand(3, 3) grad_list = [torch.rand(3, 3) for i in range(LIST_LENGTH)] def check_send_recv_forward(): disable_existing_loggers() local_rank = gpc.get_local_rank(ParallelMode.PIPELINE) if local_rank == 0: device = torch.device('cuda:0') data_to_send = data.to(device) data_list_to_send = [] for data_in_list in data_list: data_list_to_send.append(data_in_list.to(device)) send_forward(data_to_send, scatter_gather_tensors=use_scatter_gather_tensors) send_forward(data_list_to_send, scatter_gather_tensors=use_scatter_gather_tensors) elif local_rank == 1: device = torch.device('cuda:1') data_recv = recv_forward(TENSOR_SIZE, scatter_gather_tensors=use_scatter_gather_tensors) data_list_recv = recv_forward(TENSOR_SIZE_LIST, scatter_gather_tensors=use_scatter_gather_tensors) data_to_check = data.to(device) assert data_recv.equal(data_to_check) for data_recv, data_send in zip(data_list_recv, data_list): data_to_check = data_send.to(device) data_recv = data_recv.to(device) assert data_recv.equal(data_to_check) def check_send_recv_backward(): disable_existing_loggers() if gpc.get_local_rank(ParallelMode.PIPELINE) == 0: device = torch.device('cuda:0') grad_recv = recv_backward(TENSOR_SIZE) grad_list_recv = recv_backward(TENSOR_SIZE_LIST) grad_to_check = grad.to(device) grad_recv = grad_recv[0].to(device) assert grad_recv.equal(grad_to_check) for grad_recv, grad_send in zip(grad_list_recv, grad_list): grad_recv = grad_recv.to(device) grad_to_check = grad_send.to(device) assert grad_recv.equal(grad_to_check) else: device = torch.device('cuda:1') grad_to_send = grad.to(device) grad_list_to_send = [] for grad_in_list in grad_list: grad_list_to_send.append(grad_in_list.to(device)) send_backward(grad_to_send) send_backward(grad_list_to_send) def check_small_pipeline(): disable_existing_loggers() # make sure the rank is 4 assert gpc.world_size == 4, "make sure to set world size to 4 to start the training process" local_rank = gpc.get_local_rank(ParallelMode.PIPELINE) if local_rank == 0: obj = [1, torch.randn(2, 2).cuda(), None] send_forward(obj) elif local_rank == 1: obj = recv_forward() send_forward(obj) elif local_rank == 2: obj = recv_forward() send_forward(obj) elif local_rank == 3: obj = recv_forward() else: pass def check_layer(rank, world_size, port): disable_existing_loggers() launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') disable_existing_loggers() # check_send_recv_forward() check_small_pipeline() gpc.destroy() torch.cuda.empty_cache() @pytest.mark.dist @rerun_if_address_is_in_use() def 
test_object_list_p2p(): disable_existing_loggers() run_func = partial(check_layer, world_size=world_size, port=free_port()) disable_existing_loggers() mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': disable_existing_loggers() test_object_list_p2p()
from functools import partial from typing import List import pytest import torch import torch.distributed as dist import torch.multiprocessing as mp from colossalai.communication.p2p_v2 import _send_object, _recv_object, init_process_group from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.initialize import launch from colossalai.utils import free_port, get_current_device from colossalai.testing import rerun_if_address_is_in_use from colossalai.logging import disable_existing_loggers disable_existing_loggers() world_size = 4 CONFIG = dict(parallel=dict(pipeline=world_size)) torch.manual_seed(123) def check_layer(rank, world_size, port): disable_existing_loggers() launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl', verbose=False) rank = gpc.get_local_rank(ParallelMode.PIPELINE) if rank == 0: obj = [torch.randn(3,)] _send_object(obj, 1) if rank == 1: _recv_object(0) if rank == 2: _recv_object(3) if rank == 3: obj = [torch.randn(3,)] _send_object(obj, 2) gpc.destroy() torch.cuda.empty_cache() @pytest.mark.dist @rerun_if_address_is_in_use() def test_object_list_p2p(): disable_existing_loggers() run_func = partial(check_layer, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_object_list_p2p()
from functools import partial import pytest import torch import torch.multiprocessing as mp import colossalai from colossalai.amp import convert_to_apex_amp from colossalai.context import MOE_CONTEXT from colossalai.engine.gradient_handler import MoeGradientHandler from colossalai.nn import MoeLoss from colossalai.nn.optimizer import CPUAdam from colossalai.testing import assert_equal_in_group, parameterize, rerun_if_address_is_in_use from colossalai.utils import free_port, get_current_device from colossalai.zero.init_ctx import ZeroInitContext from colossalai.zero.shard_utils import BucketTensorShardStrategy, TensorShardStrategy from colossalai.zero.sharded_model import ShardedModelV2 from colossalai.zero.sharded_model.utils import col_model_deepcopy from colossalai.zero.sharded_optim import ShardedOptimizerV2 from colossalai.zero.sharded_optim._utils import has_inf_or_nan from tests.components_to_test.registry import non_distributed_component_funcs from tests.test_moe.test_moe_zero_init import MoeModel from tests.test_zero.common import CONFIG, check_sharded_model_params def _run_step(model, optimizer, data, label, criterion, grad_handler): model.train() optimizer.zero_grad() if criterion: y = model(data) loss = criterion(y, label) else: loss = model(data, label) loss = loss.float() if isinstance(model, ShardedModelV2): optimizer.backward(loss) else: loss.backward() if grad_handler is not None: grad_handler.handle_gradient() optimizer.step() @parameterize("cpu_offload", [True]) @parameterize("use_cpuadam", [True]) # We do not use Hybrid Adam right now, since it has a little bug @parameterize("reuse_fp16_shard", [True, False]) @parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy]) def _run_test_sharded_optim_v2(cpu_offload, shard_strategy_class, use_cpuadam, reuse_fp16_shard, gpu_margin_mem_ratio=0.0): shard_strategy = shard_strategy_class() if use_cpuadam and cpu_offload is False: return MOE_CONTEXT.reset_loss() get_components_func = non_distributed_component_funcs.get_callable('hanging_param_model') _, train_dataloader, _, optimizer_class, _ = get_components_func() criterion = MoeLoss(aux_weight=0.01, loss_fn=torch.nn.CrossEntropyLoss) with ZeroInitContext(target_device=torch.device('cpu') if cpu_offload else get_current_device(), shard_strategy=shard_strategy, shard_param=True): zero_model = MoeModel(checkpoint=True) zero_model = ShardedModelV2(zero_model, shard_strategy, tensor_placement_policy='cpu' if cpu_offload else 'cuda', reuse_fp16_shard=reuse_fp16_shard) # check whether parameters are identical in ddp for name, p in zero_model.named_parameters(): if not p.colo_attr.param_is_sharded and p.colo_attr.is_replicated: assert_equal_in_group(p.colo_attr.data_payload.to(get_current_device())) model = MoeModel(checkpoint=True).half() col_model_deepcopy(zero_model, model) model = model.cuda().float() if use_cpuadam: optimizer_class = CPUAdam optim = optimizer_class(model.parameters(), lr=1e-3) sharded_optim = optimizer_class(zero_model.parameters(), lr=1e-3) sharded_optim = ShardedOptimizerV2(zero_model, sharded_optim, initial_scale=2**5, gpu_margin_mem_ratio=gpu_margin_mem_ratio) amp_config = dict(opt_level='O2', keep_batchnorm_fp32=False) apex_model, apex_optimizer = convert_to_apex_amp(model, optim, amp_config) apex_grad_handler = MoeGradientHandler(model) for i, (data, label) in enumerate(train_dataloader): if i > 5: break data, label = data.cuda(), label.cuda() _run_step(apex_model, apex_optimizer, data, label, criterion, apex_grad_handler) 
_run_step(zero_model, sharded_optim, data, label, criterion, None) check_sharded_model_params(model, zero_model, loose=True, reuse_fp16_shard=use_cpuadam) for param in model.parameters(): assert not has_inf_or_nan(param) def _run_dist(rank, world_size, port): colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') MOE_CONTEXT.setup(seed=42) _run_test_sharded_optim_v2() # use_cpuadam = True can be used with cpu_offload = False @pytest.mark.dist @pytest.mark.parametrize("world_size", [2]) @rerun_if_address_is_in_use() def test_moe_zero_optim(world_size): run_func = partial(_run_dist, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_moe_zero_optim(world_size=4)
from functools import partial import pytest import torch import torch.multiprocessing as mp import colossalai from colossalai.context import MOE_CONTEXT from colossalai.engine.gradient_handler import MoeGradientHandler from colossalai.nn import MoeLoss from colossalai.testing import assert_equal_in_group, parameterize, rerun_if_address_is_in_use from colossalai.utils import free_port from colossalai.zero.init_ctx import ZeroInitContext from colossalai.zero.shard_utils import BucketTensorShardStrategy, TensorShardStrategy from colossalai.zero.sharded_model import ShardedModelV2 from colossalai.zero.sharded_model._utils import cast_tensor_to_fp16 from colossalai.zero.sharded_model.utils import col_model_deepcopy from tests.components_to_test.registry import non_distributed_component_funcs from tests.test_moe.test_moe_zero_init import MoeModel from tests.test_zero.common import CONFIG, check_grads_padding, run_fwd_bwd @parameterize("enable_autocast", [False]) @parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy]) def run_model_test(enable_autocast, shard_strategy_class): shard_strategy = shard_strategy_class() get_components_func = non_distributed_component_funcs.get_callable('hanging_param_model') _, train_dataloader, _, optimizer_class, _ = get_components_func() criterion = MoeLoss(aux_weight=0.01, loss_fn=torch.nn.CrossEntropyLoss) with ZeroInitContext(target_device=torch.device('cuda', torch.cuda.current_device()), shard_strategy=shard_strategy, shard_param=True): zero_model = MoeModel(checkpoint=True) zero_model = ShardedModelV2(zero_model, shard_strategy) # check whether parameters are identical in ddp for name, p in zero_model.named_parameters(): if not p.colo_attr.param_is_sharded and p.colo_attr.is_replicated: assert_equal_in_group(p.colo_attr.data_payload) model = MoeModel(checkpoint=True).half() col_model_deepcopy(zero_model, model) model = model.cuda() grad_handler = MoeGradientHandler(model) for i, (data, label) in enumerate(train_dataloader): if i > 5: break data, label = cast_tensor_to_fp16(data).cuda(), label.cuda() run_fwd_bwd(model, data, label, criterion, enable_autocast) run_fwd_bwd(zero_model, data, label, criterion, enable_autocast) grad_handler.handle_gradient() check_grads_padding(model, zero_model, loose=True) def run_dist(rank, world_size, port): colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') MOE_CONTEXT.setup(seed=42) run_model_test() @pytest.mark.dist @pytest.mark.parametrize("world_size", [2]) @rerun_if_address_is_in_use() def test_moe_zero_model(world_size): run_func = partial(run_dist, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_moe_zero_model(world_size=2)
from functools import partial

import pytest
import torch
import torch.nn as nn
import torch.multiprocessing as mp

import colossalai
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import free_port, get_current_device
from colossalai.nn.layer.moe import Top1Router, Top2Router, MoeLayer, Experts
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.testing import rerun_if_address_is_in_use

BATCH_SIZE = 16
NUM_EXPERTS = 4
CONFIG = dict()


def check_equal(tensor_a, tensor_b, atol=1e-06):
    assert torch.allclose(tensor_a, tensor_b, rtol=0, atol=atol)


def run_routing(rank, world_size, port, rs=2, hidden_size=128, data_type=torch.float32, router=Top2Router):
    # Here we do not need TF32, since it brings absolute error on results
    torch.backends.cuda.matmul.allow_tf32 = False

    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    local_rank = gpc.get_local_rank(ParallelMode.GLOBAL)

    MOE_CONTEXT.setup(42)    # MOE environment initialization
    MOE_CONTEXT.reset_loss()
    torch.manual_seed(rs + local_rank)    # set each process has different random seed

    # get randomized data
    tokens = torch.randn(BATCH_SIZE, hidden_size, dtype=data_type, device=get_current_device(), requires_grad=True)

    expert_module = nn.Linear
    expert_factor = dict(in_features=hidden_size, out_features=hidden_size, device=get_current_device())
    expert = Experts(expert_module, NUM_EXPERTS, **expert_factor)
    layer = MoeLayer(hidden_size, NUM_EXPERTS, router(capacity_factor_train=1.0), expert)
    layer = layer.to(get_current_device())
    if data_type == torch.float16:
        layer = layer.half()

    # use matrix multiplication instead of COL_MOE_KERNL in MOE dispatch and combine
    layer.use_kernel = False
    old_out, _ = layer(tokens)
    ech = old_out.shape
    grad = torch.randn(ech, device=get_current_device())
    old_out.backward(grad)    # get gradient

    # save all results
    o_tk_grad = tokens.grad.data.clone()
    o_gt_grad = layer.gate_weight.grad.data.clone()

    # reset all gradients
    tokens.grad.zero_()
    layer.gate_weight.grad.zero_()

    layer.use_kernel = True
    new_out, _ = layer(tokens)    # get outputs through colossal kernel

    if data_type == torch.float32:
        check_equal(old_out, new_out)
    else:
        check_equal(old_out, new_out, 1e-2)
    # forward function passed

    new_out.backward(grad)    # get new type gradient
    n_tk_grad = tokens.grad.data.clone()
    n_gt_grad = layer.gate_weight.grad.data.clone()

    if data_type == torch.float32:
        check_equal(o_tk_grad, n_tk_grad)
    else:
        check_equal(o_tk_grad, n_tk_grad, 1e-2)
    # tokens gradient is correct

    if data_type == torch.float32:
        check_equal(o_gt_grad, n_gt_grad, 5e-05)
    else:
        check_equal(o_gt_grad, n_gt_grad, 2e-01)
    # gate weight gradient is correct


@pytest.mark.dist
@pytest.mark.parametrize("rs", [131])
@pytest.mark.parametrize("hidden_size", [32, 144])
@pytest.mark.parametrize("data_type", [torch.float32, torch.float16])
@pytest.mark.parametrize("router", [Top1Router, Top2Router])
@rerun_if_address_is_in_use()
def test_moe_kernel(rs, hidden_size, data_type, router):
    world_size = 4
    run_func = partial(run_routing,
                       world_size=world_size,
                       port=free_port(),
                       rs=rs,
                       hidden_size=hidden_size,
                       data_type=data_type,
                       router=router)
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_moe_kernel(2, 256, torch.float16, Top2Router)
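# --- Supplementary sketch (not from the original test suite) ---
# A deliberately simplified picture of what top-1 routing computes: each token is
# sent to the expert with the highest gate probability via a one-hot dispatch
# mask. The real colossalai Top1Router/Top2Router additionally handle capacity
# limits, noise and the auxiliary load-balancing loss; `naive_top1_dispatch` is a
# hypothetical helper written in plain PyTorch for illustration only.
import torch
import torch.nn.functional as F


def naive_top1_dispatch(tokens: torch.Tensor, gate_weight: torch.Tensor):
    # tokens: (num_tokens, hidden), gate_weight: (hidden, num_experts)
    logits = tokens @ gate_weight                      # (num_tokens, num_experts)
    probs = torch.softmax(logits, dim=-1)
    expert_idx = probs.argmax(dim=-1)                  # chosen expert per token
    dispatch_mask = F.one_hot(expert_idx, num_classes=gate_weight.shape[1]).to(tokens.dtype)
    gate_score = probs.gather(-1, expert_idx.unsqueeze(-1))    # weight for the expert output
    return dispatch_mask, gate_score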
from functools import partial import pytest import torch.nn as nn import torch.multiprocessing as mp import torch.distributed as dist import colossalai from colossalai.utils import free_port, get_current_device from colossalai.nn.layer.moe import Experts from colossalai.context.moe_context import MOE_CONTEXT from colossalai.utils.moe import sync_moe_model_param from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use D_MODEL = 4 D_FF = 8 CONFIG = dict() def run_test(rank, port): world_size = 4 colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') expert_module = nn.Linear expert_factor = dict(in_features=D_MODEL, out_features=D_FF, device=get_current_device()) MOE_CONTEXT.setup(42) # MOE environment initialization exp0 = Experts(expert_module, 1, **expert_factor) exp1 = Experts(expert_module, 2, **expert_factor) exp2 = Experts(expert_module, 4, **expert_factor) exp3 = Experts(expert_module, 8, **expert_factor) assert exp0.num_local_experts == 1 assert exp1.num_local_experts == 1 assert exp2.num_local_experts == 1 assert exp3.num_local_experts == 2 # experts deployment passed parallel_info_dict = MOE_CONTEXT.parallel_info_dict rank = dist.get_rank() assert len(parallel_info_dict) == 3 assert dist.get_rank(parallel_info_dict[4].ep_group) == rank assert dist.get_rank(parallel_info_dict[2].ep_group) == rank % 2 assert dist.get_rank(parallel_info_dict[1].ep_group) == 0 assert dist.get_rank(parallel_info_dict[4].dp_group) == 0 assert dist.get_rank(parallel_info_dict[2].dp_group) == rank // 2 assert dist.get_rank(parallel_info_dict[1].dp_group) == rank # group creation passed model = nn.ModuleList([exp0, exp1, exp2, exp3]) model = model.to(get_current_device()) sync_moe_model_param(model) assert_equal_in_group(exp0.experts[0].weight.data, parallel_info_dict[1].dp_group) assert_equal_in_group(exp0.experts[0].bias.data, parallel_info_dict[1].dp_group) # MOE experts layout success when ep_size = 1 assert_equal_in_group(exp1.experts[0].weight.data, parallel_info_dict[2].dp_group) assert_equal_in_group(exp1.experts[0].bias.data, parallel_info_dict[2].dp_group) # MOE experts layout success when ep_size = 2 @pytest.mark.dist @rerun_if_address_is_in_use() def test_moe_initialization(): world_size = 4 run_func = partial(run_test, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_moe_initialization()
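# --- Supplementary sketch (not from the original test suite) ---
# The rank assertions above follow a simple layout rule: with an expert-parallel
# group of size `ep_size`, global rank r sits at position r % ep_size inside its
# ep_group and at position r // ep_size inside its dp_group. `moe_rank_layout`
# is a hypothetical helper that makes this rule explicit.
def moe_rank_layout(rank: int, ep_size: int) -> tuple:
    ep_rank = rank % ep_size      # e.g. ep_size=2, rank=3 -> ep_rank 1
    dp_rank = rank // ep_size     # e.g. ep_size=2, rank=3 -> dp_rank 1
    return ep_rank, dp_rank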
from functools import partial import colossalai import pytest import torch import torch.multiprocessing as mp import torch.distributed as dist from colossalai.testing import parameterize from colossalai.utils import free_port from colossalai.context import MOE_CONTEXT from colossalai.tensor import ColoParameter from colossalai.utils.model.colo_init_context import ColoInitContext from colossalai.testing import rerun_if_address_is_in_use from colossalai.utils import get_current_device from tests.test_zero.common import CONFIG from tests.test_moe.test_moe_zero_init import MoeModel from tests.test_tensor.common_utils import debug_print @parameterize("init_device_type", ['cpu', 'cuda']) def exam_moe_colo_init(init_device_type): world_size = dist.get_world_size() if init_device_type == 'cuda': init_device = get_current_device() elif init_device_type == 'cpu': init_device = torch.device("cpu") else: raise NotImplementedError("Unknown device found.") with ColoInitContext(device=init_device): model = MoeModel(checkpoint=True) for name, param in model.named_parameters(): assert isinstance(param, ColoParameter), "parameter `{}` has an init problem".format(name) if hasattr(param, "moe_info"): param.set_process_group(param.moe_info.pg) if hasattr(param, "moe_info"): assert param.process_group.dp_world_size() == param.moe_info.dp_size else: assert param.process_group.dp_world_size() == world_size def _run_dist(rank, world_size, port): colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') MOE_CONTEXT.setup(seed=42) exam_moe_colo_init() @pytest.mark.dist @pytest.mark.parametrize("world_size", [4]) @rerun_if_address_is_in_use() def test_moe_colo_init(world_size): run_func = partial(_run_dist, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_moe_colo_init(world_size=4)
from functools import partial import colossalai import pytest import torch import torch.multiprocessing as mp import torch.nn as nn from colossalai.nn import CheckpointModule from colossalai.logging import get_dist_logger from colossalai.testing import parameterize from colossalai.utils import free_port from colossalai.context import MOE_CONTEXT from colossalai.nn.layer import MoeModule from colossalai.zero.init_ctx import ZeroInitContext from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy) from colossalai.testing import rerun_if_address_is_in_use from colossalai.utils import get_current_device from tests.test_zero.common import CONFIG class MoeModel(nn.Module): def __init__(self, checkpoint: bool = False): class TestSubModule(CheckpointModule): def __init__(self): super().__init__(checkpoint) expert_cls = nn.Linear expert_args_dict = dict(in_features=16, out_features=16) self.moe = MoeModule(dim_model=16, num_experts=8, use_residual=True, expert_cls=expert_cls, **expert_args_dict) self.proj = nn.Linear(16, 4) def _forward(self, x): x, y = self.moe(x) x = self.proj(x) return x, y super().__init__() self.test_embed = nn.Linear(4, 16) self.test_transform = TestSubModule() def forward(self, x): MOE_CONTEXT.reset_loss() x = self.test_embed(x) x, y = self.test_transform(x) MOE_CONTEXT.add_loss(y) return x @parameterize("init_device_type", ['cpu', 'cuda']) @parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy]) def run_moe_zero_init(init_device_type, shard_strategy_class): logger = get_dist_logger("test_moe_zero_init") if init_device_type == 'cuda': init_device = get_current_device() elif init_device_type == 'cpu': init_device = torch.device("cpu") else: raise NotImplementedError("Unknown device found.") model_numel_tensor = torch.zeros(1, dtype=torch.int) with ZeroInitContext(target_device=init_device, shard_strategy=shard_strategy_class(), shard_param=True, model_numel_tensor=model_numel_tensor): model = MoeModel(checkpoint=True) for name, param in model.named_parameters(): assert hasattr(param, 'colo_attr') # the parameters in moe experts and its gate should not be sharded if ('experts' in name) or ('gate' in name) or ('residual_combine' in name): assert not param.colo_attr.sharded_data_tensor.is_sharded, "`{}` parameter has problem".format(name) else: assert param.colo_attr.sharded_data_tensor.is_sharded # the parameters in moe experts is not replicated if 'experts' in name: assert not param.colo_attr.is_replicated else: assert param.colo_attr.is_replicated if param.colo_attr.param_is_sharded: assert param.colo_attr.data_payload.device.type == init_device.type, \ f'{param.colo_attr.data_payload.device.type} vs. {init_device.type}' else: assert param.colo_attr.data_payload.device.type == 'cuda' def _run_dist(rank, world_size, port): colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') MOE_CONTEXT.setup(seed=42) run_moe_zero_init() @pytest.mark.dist @pytest.mark.parametrize("world_size", [2, 4]) @rerun_if_address_is_in_use() def test_moe_zero_init(world_size): run_func = partial(_run_dist, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_moe_zero_init(world_size=2)
from functools import partial import pytest import torch import torch.nn as nn import torch.multiprocessing as mp import torch.distributed as dist import colossalai from colossalai.utils import free_port, get_current_device from colossalai.nn.layer.moe import Top1Router, UniformNoiseGenerator, MoeLayer, Experts from colossalai.context.moe_context import MOE_CONTEXT from colossalai.utils.moe import sync_moe_model_param from colossalai.engine.gradient_handler import MoeGradientHandler from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use BATCH_SIZE = 4 DIM = 16 CONFIG = dict() def run_test(rank, world_size, port): colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') expert_module = nn.Linear expert_factor = dict(in_features=DIM, out_features=DIM, device=get_current_device()) MOE_CONTEXT.setup(42) # MOE initialization noisy_func = UniformNoiseGenerator() router = Top1Router(noisy_func=noisy_func) num_experts_list = [1, 2, 4] layer_list = [] for num_experts in num_experts_list: exp = Experts(expert_module, num_experts, **expert_factor) moe_layer = MoeLayer(DIM, num_experts, router, exp) layer_list.append(moe_layer) model = nn.ModuleList(layer_list) model = model.to(get_current_device()) sync_moe_model_param(model) dist_dict = MOE_CONTEXT.parallel_info_dict assert_equal_in_group(layer_list[0].experts.experts[0].weight.data, dist_dict[1].dp_group) assert_equal_in_group(layer_list[1].experts.experts[0].weight.data, dist_dict[2].dp_group) # MoE model synchronization passed grad_handler = MoeGradientHandler(model, 0) rank = dist.get_rank() torch.cuda.manual_seed(78 + rank) data = torch.randn(BATCH_SIZE, DIM, device=get_current_device()) grad = torch.randn_like(data) MOE_CONTEXT.reset_loss() for layer in layer_list: data, _ = layer(data) data.backward(grad) grad_handler.handle_gradient() assert_equal_in_group(layer_list[0].experts.experts[0].weight.grad, dist_dict[1].dp_group) assert_equal_in_group(layer_list[0].experts.experts[0].bias.grad, dist_dict[1].dp_group) assert_equal_in_group(layer_list[1].experts.experts[0].weight.grad, dist_dict[2].dp_group) assert_equal_in_group(layer_list[1].experts.experts[0].bias.grad, dist_dict[2].dp_group) # MoE grad handler test passed @pytest.mark.dist @rerun_if_address_is_in_use() def test_grad_handler(): world_size = 4 run_func = partial(run_test, world_size=world_size, port=free_port()) mp.spawn(run_func, nprocs=world_size) if __name__ == '__main__': test_grad_handler()
from typing import Any, Dict, List import torch import torch.fx import colossalai from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE from colossalai.core import global_context as gpc from colossalai.fx.graph_module import ColoGraphModule from colossalai.fx.passes.meta_info_prop import MetaInfoProp from colossalai.utils import free_port if AUTOCHUNK_AVAILABLE: from colossalai.autochunk.autochunk_codegen import AutoChunkCodeGen from colossalai.fx.profiler import MetaTensor from colossalai.fx.tracer.experimental import ColoTracer, symbolic_trace def assert_codegen_run( model: Any, meta_args: List, concrete_args: List = None, max_memory: int = None, print_mem: bool = False, print_est_mem: bool = False, print_progress: bool = False, print_code: bool = False, ) -> List[Dict]: if concrete_args is None: concrete_args = [] model = model() # trace the meta graph and setup codegen meta_graph = symbolic_trace( model, meta_args={k: v.to(torch.device("meta")) for k, v in meta_args}, concrete_args={k: v for k, v in concrete_args}, ) model = model.cuda().eval() interp = MetaInfoProp(meta_graph) meta_tensors = [MetaTensor(i[1], fake_device="cuda:0") for i in meta_args] + [i[1] for i in concrete_args] interp.propagate(*meta_tensors) codegen = AutoChunkCodeGen( meta_graph, max_memory=max_memory, print_mem=print_est_mem, print_progress=print_progress, ) chunks = codegen.chunk_infos # trace and recompile # MetaInfoProp requires symbolic_trace but CodeGen requires ColoTracer graph = ColoTracer().trace( model.cuda(), meta_args={k: v.to(torch.device("meta")) for k, v in meta_args}, concrete_args={k: v for k, v in concrete_args}, ) graph.set_codegen(codegen) gm = ColoGraphModule(model, graph, ckpt_codegen=False) gm.recompile() # assert chunk in code code = graph.python_code("self").src if print_code: print(code) assert "chunk_size = None; " in code # assert result inputs = [i[1] for i in meta_args] + [i[1] for i in concrete_args] inputs = [i.cuda() if isinstance(i, torch.Tensor) else i for i in inputs] model.cuda().eval() gm.eval() with torch.no_grad(): if print_mem: torch.cuda.reset_peak_memory_stats() now_mem_gm = torch.cuda.memory_allocated() / 1024**2 out_gm = gm(*[i.clone() if isinstance(i, torch.Tensor) else i for i in inputs]) if print_mem: max_mem_gm = torch.cuda.max_memory_allocated() / 1024**2 torch.cuda.reset_peak_memory_stats() now_mem_ori = torch.cuda.memory_allocated() / 1024**2 out_model = model(*[i.clone() if isinstance(i, torch.Tensor) else i for i in inputs]) if print_mem: max_mem_ori = torch.cuda.max_memory_allocated() / 1024**2 print("origin mem: %.2fMB, autochunk mem: %.2fMB" % (max_mem_ori - now_mem_ori, max_mem_gm - now_mem_gm)) assert torch.allclose(out_gm["sample"], out_model["sample"], atol=1e-3), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean( torch.abs(out_gm["sample"] - out_model["sample"])) return chunks def run_test( rank: int, model: Any, data: tuple, max_memory: int, print_code: bool = False, print_mem: bool = False, print_est_mem: bool = False, print_progress: bool = False, get_chunk_target: Any = None, ) -> None: # launch colossalai colossalai.launch( config={}, rank=rank, world_size=1, host="localhost", port=free_port(), backend="nccl", ) # build model and input meta_args, concrete_args = data chunks = assert_codegen_run( model, meta_args=meta_args, concrete_args=concrete_args, max_memory=max_memory, print_code=print_code, print_mem=print_mem, print_est_mem=print_est_mem, print_progress=print_progress, ) if get_chunk_target is not 
None: chunk_found = [i["region"] for i in chunks] chunk_target = get_chunk_target()[max_memory] assert (chunk_found == chunk_target), "found regions %s doesn't equal target regions %s" % ( str(chunk_found), str(chunk_target), ) gpc.destroy()
from functools import partial
from typing import List, Tuple

import pytest
import torch
import torch.multiprocessing as mp

try:
    from diffusers import UNet2DModel
    MODELS = [UNet2DModel]
    HAS_REPO = True
except Exception:
    MODELS = []
    HAS_REPO = False

from test_autochunk_diffuser_utils import run_test

from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE

BATCH_SIZE = 1
HEIGHT = 448
WIDTH = 448
IN_CHANNELS = 3
LATENTS_SHAPE = (BATCH_SIZE, IN_CHANNELS, HEIGHT // 7, WIDTH // 7)


def get_data(shape: tuple) -> Tuple[List, List]:
    sample = torch.randn(shape)
    meta_args = [
        ("sample", sample),
    ]
    concrete_args = [("timestep", 50)]
    return meta_args, concrete_args


@pytest.mark.skipif(
    not (AUTOCHUNK_AVAILABLE and HAS_REPO),
    reason="requires torch >= 1.12.0 and the diffusers library",
)
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("shape", [LATENTS_SHAPE])
@pytest.mark.parametrize("max_memory", [None])
def test_autochunk_unet(model, shape, max_memory):
    run_func = partial(
        run_test,
        max_memory=max_memory,
        model=model,
        data=get_data(shape),
    )
    mp.spawn(run_func, nprocs=1)


if __name__ == "__main__":
    run_test(
        rank=0,
        data=get_data(LATENTS_SHAPE),
        max_memory=None,
        model=UNet2DModel,
        print_code=False,
        print_mem=False,
        print_est_mem=False,
        print_progress=False,
    )
from functools import partial from typing import Dict, List, Tuple import pytest import torch import torch.fx import torch.multiprocessing as mp try: from fastfold.model.nn.evoformer import EvoformerBlock HAS_REPO = True except: HAS_REPO = False from test_autochunk_alphafold_utils import run_test from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE def get_model(): model = EvoformerBlock( c_m=256, c_z=128, c_hidden_msa_att=32, c_hidden_opm=32, c_hidden_mul=128, c_hidden_pair_att=32, no_heads_msa=8, no_heads_pair=4, transition_n=4, msa_dropout=0.15, pair_dropout=0.15, inf=1e4, eps=1e-4, is_multimer=False, ).eval().cuda() return model def get_data(msa_len: int, pair_len: int) -> Tuple[List, List]: node = torch.randn(1, msa_len, pair_len, 256).cuda() node_mask = torch.randn(1, msa_len, pair_len).cuda() pair = torch.randn(1, pair_len, pair_len, 128).cuda() pair_mask = torch.randn(1, pair_len, pair_len).cuda() meta_args = [ ("m", node), ("z", pair), ("msa_mask", node_mask), ("pair_mask", pair_mask), ] concrete_args = [("chunk_size", None), ("_mask_trans", True)] return meta_args, concrete_args def get_chunk_target() -> Dict: return { None: [(120, 123), (222, 237), (269, 289), (305, 311), (100, 105), (146, 152), (187, 193), (241, 242), (25, 50)], 20: [(120, 123), (232, 237), (277, 282), (305, 306), (100, 101), (34, 39)], 24: [(120, 123)], } @pytest.mark.skipif( not (AUTOCHUNK_AVAILABLE and HAS_REPO), reason="torch version is lower than 1.12.0", ) @pytest.mark.parametrize("max_memory", [None, 20, 24]) @pytest.mark.parametrize("data_args", [(32, 64)]) # (msa_len, pair_len) def test_evoformer_block(data_args, max_memory): run_func = partial( run_test, data_args=data_args, max_memory=max_memory, get_model=get_model, get_data=get_data, get_chunk_target=get_chunk_target, ) mp.spawn(run_func, nprocs=1) if __name__ == "__main__": run_test( rank=0, data_args=(32, 64), max_memory=24, get_model=get_model, get_data=get_data, get_chunk_target=get_chunk_target, print_code=False, print_mem=False, print_est_mem=False, print_progress=False, )
from functools import partial from typing import List, Tuple import pytest import torch import torch.fx import torch.multiprocessing as mp try: from fastfold.model.nn.evoformer import EvoformerStack HAS_REPO = True except: HAS_REPO = False from test_autochunk_alphafold_utils import run_test from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE def get_model(): model = EvoformerStack( c_m=256, c_z=128, c_hidden_msa_att=32, c_hidden_opm=32, c_hidden_mul=128, c_hidden_pair_att=32, c_s=384, no_heads_msa=8, no_heads_pair=4, no_blocks=2, # 48 transition_n=4, msa_dropout=0.15, pair_dropout=0.25, blocks_per_ckpt=None, inf=1000000000.0, eps=1e-08, clear_cache_between_blocks=False, is_multimer=False, ).eval().cuda() return model def get_data(msa_len: int, pair_len: int) -> Tuple[List, List]: node = torch.randn(1, msa_len, pair_len, 256).cuda() node_mask = torch.randn(1, msa_len, pair_len).cuda() pair = torch.randn(1, pair_len, pair_len, 128).cuda() pair_mask = torch.randn(1, pair_len, pair_len).cuda() meta_args = [ ("m", node), ("z", pair), ("msa_mask", node_mask), ("pair_mask", pair_mask), ] concrete_args = [("chunk_size", None), ("_mask_trans", True)] return meta_args, concrete_args @pytest.mark.skipif( not (AUTOCHUNK_AVAILABLE and HAS_REPO), reason="torch version is lower than 1.12.0", ) @pytest.mark.parametrize("max_memory", [None, 20, 24]) @pytest.mark.parametrize("data_args", [(32, 64)]) # (msa_len, pair_len) def test_evoformer_stack(data_args, max_memory): run_func = partial( run_test, data_args=data_args, max_memory=max_memory, get_model=get_model, get_data=get_data, ) mp.spawn(run_func, nprocs=1) if __name__ == "__main__": run_test( rank=0, data_args=(32, 64), max_memory=None, get_model=get_model, get_data=get_data, print_code=False, print_mem=False, print_progress=False, )
import time from typing import Any, Dict, List import torch import torch.fx import colossalai from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE from colossalai.fx.graph_module import ColoGraphModule from colossalai.fx.passes.meta_info_prop import MetaInfoProp from colossalai.utils import free_port if AUTOCHUNK_AVAILABLE: from colossalai.autochunk.autochunk_codegen import AutoChunkCodeGen from colossalai.fx.profiler import MetaTensor from colossalai.fx.tracer.experimental import ColoTracer, symbolic_trace def _benchmark_evoformer_stack_gm( data_args: tuple, max_memory: int, get_model: Any, get_data: Any, ) -> None: # build model and input model = get_model() meta_args, concrete_args = get_data(*data_args) if concrete_args is None: concrete_args = [] # trace the meta graph and setup codegen meta_graph = symbolic_trace( model, meta_args={k: v.to(torch.device("meta")) for k, v in meta_args}, concrete_args={k: v for k, v in concrete_args}, ) interp = MetaInfoProp(meta_graph) meta_tensors = [MetaTensor(i[1], fake_device="cuda:0") for i in meta_args] + [i[1] for i in concrete_args] interp.propagate(*meta_tensors) codegen = AutoChunkCodeGen( meta_graph, max_memory=max_memory, ) # trace and recompile # MetaInfoProp requires symbolic_trace but CodeGen requires ColoTracer graph = ColoTracer().trace( model, meta_args={k: v.to(torch.device("meta")) for k, v in meta_args}, concrete_args={k: v for k, v in concrete_args}, ) graph.set_codegen(codegen) gm = ColoGraphModule(model, graph, ckpt_codegen=False) gm.recompile() # init inputs inputs = [i[1] for i in meta_args] + [i[1] for i in concrete_args] inputs = [i.cuda() if isinstance(i, torch.Tensor) else i for i in inputs] model.cuda() # bench mem = _benchmark_memory(gm, inputs) speed = _benchmark_speed(gm, inputs) print("evoformer stack gm, mem: %.2fMB, time: %.4fs, data_args: %s" % (mem, speed, str(data_args))) def _benchmark_evoformer_stack_origin( data_args: tuple, get_model: Any, get_data: Any, ) -> None: # build model and input model = get_model() meta_args, concrete_args = get_data(*data_args) if concrete_args is None: concrete_args = [] # init inputs inputs = [i[1] for i in meta_args] + [i[1] for i in concrete_args] inputs = [i.cuda() if isinstance(i, torch.Tensor) else i for i in inputs] model.cuda() # bench mem = _benchmark_memory(model, inputs) speed = _benchmark_speed(model, inputs) print("evoformer stack origin, mem: %.2fMB, time: %.4fs, data_args: %s" % (mem, speed, str(data_args))) def _benchmark_memory(model, inputs): with torch.no_grad(): torch.cuda.reset_peak_memory_stats() now_mem = torch.cuda.memory_allocated() / 1024**2 model(*[i.clone() if isinstance(i, torch.Tensor) else i for i in inputs]) new_max_mem = torch.cuda.max_memory_allocated() / 1024**2 return new_max_mem - now_mem def _benchmark_speed(model, inputs, loop=5): with torch.no_grad(): for _ in range(loop // 2 + 1): model(*inputs) torch.cuda.synchronize() time1 = time.time() for _ in range(loop): model(*inputs) torch.cuda.synchronize() time2 = time.time() return (time2 - time1) / loop def benchmark_evoformer_stack(): from test_autochunk_evoformer_stack import get_data, get_model data_args = [128, 256] print("") _benchmark_evoformer_stack_origin(data_args, get_model, get_data) _benchmark_evoformer_stack_gm(data_args, 600, get_model, get_data) _benchmark_evoformer_stack_gm(data_args, 400, get_model, get_data) _benchmark_evoformer_stack_gm(data_args, None, get_model, get_data) if __name__ == "__main__": # launch colossalai colossalai.launch( config={}, rank=0, 
world_size=1, host="localhost", port=free_port(), backend="nccl", ) benchmark_evoformer_stack()
from functools import partial from typing import Dict, List, Tuple import pytest import torch import torch.fx import torch.multiprocessing as mp try: from fastfold.model.nn.evoformer import ExtraMSABlock HAS_REPO = True except: HAS_REPO = False from test_autochunk_alphafold_utils import run_test from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE def get_model(): model = ExtraMSABlock( c_m=256, c_z=128, c_hidden_msa_att=32, c_hidden_opm=32, c_hidden_mul=128, c_hidden_pair_att=32, no_heads_msa=8, no_heads_pair=4, transition_n=4, msa_dropout=0.15, pair_dropout=0.15, inf=1e4, eps=1e-4, ckpt=False, is_multimer=False, ).eval().cuda() return model def get_data(msa_len: int, pair_len: int) -> Tuple[List, List]: node = torch.randn(1, msa_len, pair_len, 256).cuda() node_mask = torch.randn(1, msa_len, pair_len).cuda() pair = torch.randn(1, pair_len, pair_len, 128).cuda() pair_mask = torch.randn(1, pair_len, pair_len).cuda() meta_args = [ ("m", node), ("z", pair), ("msa_mask", node_mask), ("pair_mask", pair_mask), ] concrete_args = [("chunk_size", None), ("_chunk_logits", 1024)] return meta_args, concrete_args def get_chunk_target() -> Dict: return { None: [(128, 131), (230, 245), (277, 297), (313, 319), (108, 113), (154, 160), (195, 201), (249, 250), (36, 46)], 20: [(128, 131), (240, 245), (285, 290), (313, 314), (108, 109), (41, 46)], 24: [(128, 131)], } @pytest.mark.skipif( not (AUTOCHUNK_AVAILABLE and HAS_REPO), reason="torch version is lower than 1.12.0", ) @pytest.mark.parametrize("max_memory", [None, 20, 24]) @pytest.mark.parametrize("data_args", [(32, 64)]) # (msa_len, pair_len) def test_extramsa_block(data_args, max_memory): run_func = partial( run_test, data_args=data_args, max_memory=max_memory, get_model=get_model, get_data=get_data, get_chunk_target=get_chunk_target, ) mp.spawn(run_func, nprocs=1) if __name__ == "__main__": run_test( rank=0, data_args=(32, 64), max_memory=None, get_model=get_model, get_data=get_data, get_chunk_target=get_chunk_target, print_code=False, print_mem=False, print_progress=False, )
from typing import Any, Dict, List import torch import torch.fx import colossalai from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE from colossalai.autochunk.utils import flat_list from colossalai.core import global_context as gpc from colossalai.fx.graph_module import ColoGraphModule from colossalai.fx.passes.meta_info_prop import MetaInfoProp from colossalai.utils import free_port if AUTOCHUNK_AVAILABLE: from colossalai.autochunk.autochunk_codegen import AutoChunkCodeGen from colossalai.fx.profiler import MetaTensor from colossalai.fx.tracer.experimental import ColoTracer, symbolic_trace def assert_codegen_run( model: Any, meta_args: List, concrete_args: List = None, max_memory: int = None, print_mem: bool = False, print_est_mem: bool = False, print_progress: bool = False, print_code: bool = False, ) -> List[Dict]: if concrete_args is None: concrete_args = [] # trace the meta graph and setup codegen meta_graph = symbolic_trace( model, meta_args={k: v.to(torch.device("meta")) for k, v in meta_args}, concrete_args={k: v for k, v in concrete_args}, ) interp = MetaInfoProp(meta_graph) meta_tensors = [MetaTensor(i[1], fake_device="cuda:0") for i in meta_args] + [i[1] for i in concrete_args] interp.propagate(*meta_tensors) codegen = AutoChunkCodeGen( meta_graph, max_memory=max_memory, print_mem=print_est_mem, print_progress=print_progress, ) chunks = codegen.chunk_infos # trace and recompile # MetaInfoProp requires symbolic_trace but CodeGen requires ColoTracer graph = ColoTracer().trace( model, meta_args={k: v.to(torch.device("meta")) for k, v in meta_args}, concrete_args={k: v for k, v in concrete_args}, ) graph.set_codegen(codegen) gm = ColoGraphModule(model, graph, ckpt_codegen=False) gm.recompile() # assert chunk in code code = graph.python_code("self").src if print_code: print(code) assert "chunk_size = None; " in code # assert result inputs = [i[1] for i in meta_args] + [i[1] for i in concrete_args] inputs = [i.cuda() if isinstance(i, torch.Tensor) else i for i in inputs] model.cuda() with torch.no_grad(): if print_mem: torch.cuda.reset_peak_memory_stats() now_mem = torch.cuda.memory_allocated() / 1024**2 out_gm = gm(*[i.clone() if isinstance(i, torch.Tensor) else i for i in inputs]) if print_mem: new_max_mem = torch.cuda.max_memory_allocated() / 1024**2 print("mem: %.2fMB" % (new_max_mem - now_mem)) out_model = model(*inputs) out_gm = flat_list(out_gm) out_model = flat_list(out_model) for out_gm_i, out_model_i in zip(out_gm, out_model): assert torch.allclose(out_gm_i, out_model_i, atol=1e-4), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean( torch.abs(out_gm_i - out_model_i)) return chunks def run_test( rank: int, data_args: tuple, max_memory: int, get_model: Any, get_data: Any, print_code: bool = False, print_mem: bool = False, print_est_mem: bool = False, print_progress: bool = False, get_chunk_target: Any = None, ) -> None: # launch colossalai colossalai.launch( config={}, rank=rank, world_size=1, host="localhost", port=free_port(), backend="nccl", ) # build model and input model = get_model() meta_args, concrete_args = get_data(*data_args) chunks = assert_codegen_run( model, meta_args=meta_args, concrete_args=concrete_args, max_memory=max_memory, print_code=print_code, print_mem=print_mem, print_est_mem=print_est_mem, print_progress=print_progress, ) if get_chunk_target is not None: chunk_found = [i["region"] for i in chunks] chunk_target = get_chunk_target()[max_memory] assert chunk_found == chunk_target, "found regions %s doesn't equal 
target regions %s" % ( str(chunk_found), str(chunk_target), )
from typing import Any, Dict, List import torch import torch.fx import colossalai from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE from colossalai.core import global_context as gpc from colossalai.fx.graph_module import ColoGraphModule from colossalai.fx.passes.meta_info_prop import MetaInfoProp from colossalai.utils import free_port if AUTOCHUNK_AVAILABLE: from colossalai.autochunk.autochunk_codegen import AutoChunkCodeGen from colossalai.fx.profiler import MetaTensor from colossalai.fx.tracer.experimental import ColoTracer, symbolic_trace def assert_codegen_run( model: Any, data: tuple, max_memory: int = None, print_est_mem: bool = False, print_mem: bool = False, print_progress: bool = False, print_code: bool = False, ) -> List[Dict]: meta_args, concrete_args, sequence = data if concrete_args is None: concrete_args = {} # trace the meta graph and setup codegen meta_graph = symbolic_trace( model, meta_args={k: v.to(torch.device("meta")) for k, v in meta_args.items()}, concrete_args={k: v for k, v in concrete_args.items()}, ) interp = MetaInfoProp(meta_graph) meta_tensors = [meta_args[i] if i in meta_args else concrete_args[i] for i in sequence] meta_tensors = [MetaTensor(i, fake_device="cuda:0") if isinstance(i, torch.Tensor) else i for i in meta_tensors] interp.propagate(*meta_tensors) codegen = AutoChunkCodeGen( meta_graph, max_memory=max_memory, print_mem=print_est_mem, print_progress=print_progress, ) chunks = codegen.chunk_infos # trace and recompile # MetaInfoProp requires symbolic_trace but CodeGen requires ColoTracer graph = ColoTracer().trace( model.cuda(), meta_args={k: v.to(torch.device("meta")) for k, v in meta_args.items()}, concrete_args={k: v for k, v in concrete_args.items()}, ) graph.set_codegen(codegen) gm = ColoGraphModule(model, graph, ckpt_codegen=False) gm.recompile() # assert chunk in code code = graph.python_code("self").src if print_code: print(code) assert "chunk_size = None; " in code # assert result inputs = [meta_args[i] if i in meta_args else concrete_args[i] for i in sequence] inputs = [i.cuda() if isinstance(i, torch.Tensor) else i for i in inputs] model.cuda().eval() gm.eval() with torch.no_grad(): if print_mem: torch.cuda.reset_peak_memory_stats() now_mem = torch.cuda.memory_allocated() / 1024**2 out_gm = gm(*[i.clone() if isinstance(i, torch.Tensor) else i for i in inputs]) if print_mem: new_max_mem = torch.cuda.max_memory_allocated() / 1024**2 print("mem: %.2fMB" % (new_max_mem - now_mem)) out_model = model(*inputs) assert_allclose(out_model, out_gm) return chunks def assert_allclose(out_model: Any, out_gm: Any) -> None: """ assert allclose for out """ if isinstance(out_model, torch.Tensor): assert torch.allclose(out_model, out_gm, atol=1e-4), "fx_out doesn't comply with original output, diff is %.2e" % torch.mean( torch.abs(out_model - out_gm)) elif isinstance(out_model, dict): for k in out_model.keys(): assert_allclose(out_model[k], out_gm[k]) elif isinstance(out_model, tuple) or isinstance(out_model, list) or isinstance(out_model, set): for i, j in zip(out_model, out_gm): assert_allclose(i, j) def run_test( rank: int, model: Any, config: Any, data: tuple, max_memory: int, print_code: bool = False, print_est_mem: bool = False, print_mem: bool = False, print_progress: bool = False, get_chunk_target: Any = None, ) -> None: model = model(config=config) # launch colossalai colossalai.launch( config={}, rank=rank, world_size=1, host="localhost", port=free_port(), backend="nccl", ) # build model and input chunks = assert_codegen_run( 
model, data=data, max_memory=max_memory, print_code=print_code, print_est_mem=print_est_mem, print_mem=print_mem, print_progress=print_progress, ) if get_chunk_target is not None: chunk_found = [i["region"] for i in chunks] chunk_target = get_chunk_target()[max_memory] assert (chunk_found == chunk_target), "found regions %s doesn't equal target regions %s" % ( str(chunk_found), str(chunk_target), )
import time from typing import Any, Dict, List import torch import torch.fx import colossalai from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE from colossalai.fx.graph_module import ColoGraphModule from colossalai.fx.passes.meta_info_prop import MetaInfoProp from colossalai.fx.profiler import parameter_size from colossalai.utils import free_port if AUTOCHUNK_AVAILABLE: from colossalai.autochunk.autochunk_codegen import AutoChunkCodeGen from colossalai.fx.profiler import MetaTensor from colossalai.fx.tracer.experimental import ColoTracer, symbolic_trace def _benchmark_autochunk_gpt_gm( model: Any, data: tuple, max_memory: int = None, ) -> None: model = model.cuda().eval() # build model and input meta_args, concrete_args, sequence = data if concrete_args is None: concrete_args = {} # trace the meta graph and setup codegen meta_graph = symbolic_trace( model, meta_args={k: v.to(torch.device("meta")) for k, v in meta_args.items()}, concrete_args={k: v for k, v in concrete_args.items()}, ) interp = MetaInfoProp(meta_graph) meta_tensors = [meta_args[i] if i in meta_args else concrete_args[i] for i in sequence] meta_tensors = [MetaTensor(i, fake_device="cuda:0") if isinstance(i, torch.Tensor) else i for i in meta_tensors] interp.propagate(*meta_tensors) codegen = AutoChunkCodeGen( meta_graph, max_memory=max_memory, ) # trace and recompile # MetaInfoProp requires symbolic_trace but CodeGen requires ColoTracer graph = ColoTracer().trace( model.cuda().eval(), meta_args={k: v.to(torch.device("meta")) for k, v in meta_args.items()}, concrete_args={k: v for k, v in concrete_args.items()}, ) graph.set_codegen(codegen) gm = ColoGraphModule(model, graph, ckpt_codegen=False) gm.recompile() # init inputs inputs = [meta_args[i] if i in meta_args else concrete_args[i] for i in sequence] inputs = [i.cuda() if isinstance(i, torch.Tensor) else i for i in inputs] model.cuda().eval() # bench para_mem = float(parameter_size(model)) / 1024**2 * 6 act_mem = _benchmark_memory(gm, inputs) speed = _benchmark_speed(gm, inputs) print("gpt autochunk, time: %.4fs, act mem: %.2fMB, para mem: %.2fMB, all mem: %.2fMB" % (speed, act_mem, para_mem, act_mem + para_mem)) def _benchmark_autochunk_gpt_origin( model: Any, data: tuple, ) -> None: # build model and input meta_args, concrete_args, sequence = data if concrete_args is None: concrete_args = {} # init inputs inputs = [meta_args[i] if i in meta_args else concrete_args[i] for i in sequence] inputs = [i.cuda() if isinstance(i, torch.Tensor) else i for i in inputs] model.cuda().eval() # bench para_mem = float(parameter_size(model)) / 1024**2 * 6 act_mem = _benchmark_memory(model, inputs) speed = _benchmark_speed(model, inputs) print("gpt origin, time: %.4fs, act mem: %.2fMB, para mem: %.2fMB, all mem: %.2fMB" % (speed, act_mem, para_mem, act_mem + para_mem)) return act_mem def _benchmark_memory(model, inputs): with torch.no_grad(): torch.cuda.reset_peak_memory_stats() now_mem = float(torch.cuda.memory_allocated()) / 1024**2 model(*[i.clone() if isinstance(i, torch.Tensor) else i for i in inputs]) new_max_mem = float(torch.cuda.max_memory_allocated()) / 1024**2 return new_max_mem - now_mem def _benchmark_speed(model, inputs, loop=5): with torch.no_grad(): for _ in range(loop // 2 + 1): model(*inputs) torch.cuda.synchronize() time1 = time.time() for _ in range(loop): model(*inputs) torch.cuda.synchronize() time2 = time.time() return (time2 - time1) / loop def benchmark_autochunk_gpt(batch=1, seq=512, n_embd=768, n_head=12): from test_autochunk_gpt import 
GPT2Config, GPT2Model, get_data model = GPT2Model config = GPT2Config(n_embd=n_embd, n_position=seq, n_layer=2, n_head=n_head) config.max_position_embeddings = seq model = model(config=config) shape = [batch, seq] print("\nbatch: %d, seq: %d, n_embd: %d, n_head: %d" % (batch, seq, n_embd, n_head)) max_mem = _benchmark_autochunk_gpt_origin(model, get_data(shape)) for ratio in [0.5, 0.4, 0.3, 0.2]: try: _benchmark_autochunk_gpt_gm(model, get_data(shape), max_mem * ratio) except RuntimeError as e: if e.args[0] == 'Search failed. Try a larger memory threshold.': break except Exception as e: raise e _benchmark_autochunk_gpt_gm(model, get_data(shape), None) if __name__ == "__main__": # launch colossalai colossalai.launch( config={}, rank=0, world_size=1, host="localhost", port=free_port(), backend="nccl", ) benchmark_autochunk_gpt(batch=1, seq=1024, n_embd=768, n_head=12) benchmark_autochunk_gpt(batch=1, seq=2048, n_embd=768, n_head=12) benchmark_autochunk_gpt(batch=1, seq=4096, n_embd=768, n_head=12) benchmark_autochunk_gpt(batch=1, seq=6144, n_embd=768, n_head=12) benchmark_autochunk_gpt(batch=1, seq=8192, n_embd=768, n_head=12)
from functools import partial from typing import List, Tuple import pytest import torch import torch.multiprocessing as mp try: from transformers import GPT2Config, GPT2Model MODELS = [GPT2Model] HAS_REPO = True except: MODELS = [] HAS_REPO = False from test_autochunk_transformer_utils import run_test from colossalai.autochunk.autochunk_codegen import AUTOCHUNK_AVAILABLE BATCH_SIZE = 1 SEQ_LENGTH = 512 def get_data(shape: tuple) -> Tuple[List, List]: input_ids = torch.zeros(shape, dtype=torch.int64) token_type_ids = torch.zeros(shape, dtype=torch.int64) attention_mask = torch.ones(shape, dtype=torch.int64) meta_args = dict(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) concrete_args = {"past_key_values": None} sequence = ["input_ids", "past_key_values", "attention_mask", "token_type_ids"] return meta_args, concrete_args, sequence @pytest.mark.skipif( not (AUTOCHUNK_AVAILABLE and HAS_REPO), reason="torch version is lower than 1.12.0", ) @pytest.mark.parametrize("model", MODELS) @pytest.mark.parametrize("shape", [(BATCH_SIZE, SEQ_LENGTH)]) @pytest.mark.parametrize("max_memory", [None, 6, 8]) def test_autochunk_gpt(model, shape, max_memory): run_func = partial( run_test, data=get_data(shape), max_memory=max_memory, model=model, config=GPT2Config(n_embd=96, n_position=shape[1], n_layer=2, n_head=4), ) mp.spawn(run_func, nprocs=1) if __name__ == "__main__": run_test( rank=0, data=get_data((BATCH_SIZE, SEQ_LENGTH)), max_memory=None, model=GPT2Model, config=GPT2Config(n_embd=96, n_position=SEQ_LENGTH, n_layer=2, n_head=4), print_code=False, print_est_mem=False, print_mem=False, print_progress=False, )
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- import datetime # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('..')) # -- Project information ----------------------------------------------------- project = 'Colossal-AI' copyright = f'{datetime.datetime.now().year}, HPC-AI Tech' author = 'HPC-AI Technology Inc.' # The full version, including alpha/beta/rc tags # release = '0.0.1' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon', 'sphinx.ext.linkcode', 'myst_parser', ] # Disable docstring inheritance autodoc_inherit_docstrings = False # Disable displaying type annotations, these can be very verbose autodoc_typehints = 'none' # Enable overriding of function signatures in the first line of the docstring. autodoc_docstring_signature = True autodoc_default_options = { 'member-order': 'bysource', } # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['.build', 'Thumbs.db', '.DS_Store'] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_book_theme' html_show_sourcelink = False html_theme_options = { 'navigation_depth': 3, } html_context = { 'display_github': True, 'github_user': 'hpcaitech', 'github_repo': 'ColossalAI', # 'github_version': 'master/docs/', } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] html_css_files = [ 'css/rtd_theme.css', ] # -- Extension configuration ------------------------------------------------- source_suffix = ['.rst', '.md', '.MD'] import inspect import colossalai def linkcode_resolve(domain, info): """ Determine the URL corresponding to Python object """ if domain != 'py': return None modname = info['module'] fullname = info['fullname'] submod = sys.modules.get(modname) if submod is None: return None obj = submod for part in fullname.split('.'): try: obj = getattr(obj, part) except Exception: return None try: fn = inspect.getsourcefile(obj) except Exception: fn = None if not fn: return None try: source, lineno = inspect.findsource(obj) except Exception: lineno = None if lineno: linespec = "#L%d" % (lineno + 1) else: linespec = "" fn = os.path.relpath(fn, start=os.path.dirname(colossalai.__file__)) github = "https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/{}{}" return github.format(fn, linespec)
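# Illustrative note (an assumption added here, not part of the Sphinx build itself):
# for an object documented as module 'colossalai.initialize' with fullname 'launch',
# linkcode_resolve above is expected to return a GitHub URL of the form
#
#     https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/initialize.py#L<lineno>
#
# where <lineno> is whatever inspect.findsource reports for the installed source tree.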
from typing import Optional


class TensorParallelEnv(object):
    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            # object.__new__ accepts only the class; extra arguments are consumed by __init__/load
            cls._instance = object.__new__(cls)
        return cls._instance

    def __init__(self, *args, **kwargs):
        self.load(*args, **kwargs)

    def load(self,
             mode: Optional[str] = None,
             vocab_parallel: bool = False,
             parallel_input_1d: bool = False,
             summa_dim: int = None,
             tesseract_dim: int = None,
             tesseract_dep: int = None,
             depth_3d: int = None,
             input_group_3d=None,
             weight_group_3d=None,
             output_group_3d=None,
             input_x_weight_group_3d=None,
             output_x_weight_group_3d=None):
        self.mode = mode
        self.vocab_parallel = vocab_parallel
        self.parallel_input_1d = parallel_input_1d
        self.summa_dim = summa_dim
        self.tesseract_dim = tesseract_dim
        self.tesseract_dep = tesseract_dep
        self.depth_3d = depth_3d
        self.input_group_3d = input_group_3d
        self.weight_group_3d = weight_group_3d
        self.output_group_3d = output_group_3d
        self.input_x_weight_group_3d = input_x_weight_group_3d
        self.output_x_weight_group_3d = output_x_weight_group_3d

    def save(self):
        return dict(mode=self.mode,
                    vocab_parallel=self.vocab_parallel,
                    parallel_input_1d=self.parallel_input_1d,
                    summa_dim=self.summa_dim,
                    tesseract_dim=self.tesseract_dim,
                    tesseract_dep=self.tesseract_dep,
                    depth_3d=self.depth_3d,
                    input_group_3d=self.input_group_3d,
                    weight_group_3d=self.weight_group_3d,
                    output_group_3d=self.output_group_3d,
                    input_x_weight_group_3d=self.input_x_weight_group_3d,
                    output_x_weight_group_3d=self.output_x_weight_group_3d)


tensor_parallel_env = TensorParallelEnv()
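# A minimal sketch of the singleton behaviour defined above; the '1d' mode and the
# parallel_input_1d flag are only example values, not a required configuration.
if __name__ == '__main__':
    env = TensorParallelEnv('1d', parallel_input_1d=True)
    # every construction returns the same instance, re-initialized via load()
    assert env is tensor_parallel_env
    print(env.save())    # {'mode': '1d', 'vocab_parallel': False, 'parallel_input_1d': True, ...}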
#!/usr/bin/env python # -*- encoding: utf-8 -*- import argparse import os import pprint from pathlib import Path from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union import torch import torch.nn as nn from torch.nn.modules.loss import _Loss from torch.nn.parallel import DistributedDataParallel as DDP from torch.optim.lr_scheduler import _LRScheduler from torch.optim.optimizer import Optimizer from torch.utils.data import DataLoader from colossalai.amp import AMP_TYPE, convert_to_amp from colossalai.amp.naive_amp import NaiveAMPModel from colossalai.builder.builder import build_gradient_handler from colossalai.context import Config, ConfigException, ParallelMode from colossalai.context.moe_context import MOE_CONTEXT from colossalai.core import global_context as gpc from colossalai.engine import Engine from colossalai.engine.gradient_accumulation import accumulate_gradient from colossalai.engine.schedule import ( InterleavedPipelineSchedule, NonPipelineSchedule, PipelineSchedule, get_tensor_shape, ) from colossalai.gemini.ophooks import BaseOpHook from colossalai.logging import get_dist_logger from colossalai.nn.optimizer.colossalai_optimizer import ColossalaiOptimizer from colossalai.utils import get_current_device, is_using_ddp, is_using_pp, is_using_sequence, sync_model_param from colossalai.utils.moe import sync_moe_model_param from colossalai.zero import convert_to_zero_v2 from colossalai.zero.sharded_optim.sharded_optim_v2 import ShardedOptimizerV2 def get_default_parser(): """Reads user command line and uses an argument parser to parse the input arguments. Input arguments include configuration, host, port, world size, local rank, backend for torch.distributed. Returns: Namespace: Returns the parser with the default arguments, the user may add customized arguments into this parser. """ parser = argparse.ArgumentParser() parser.add_argument('--config', type=str, help='path to the config file') parser.add_argument('--host', type=str, help='the master address for distributed training') parser.add_argument('--port', type=int, help='the master port for distributed training') parser.add_argument('--world_size', type=int, help='world size for distributed training') parser.add_argument('--rank', type=int, help='rank for the default process group') parser.add_argument('--local_rank', type=int, help='local rank on the node') parser.add_argument('--backend', type=str, default='nccl', help='backend for distributed communication') return parser def launch(config: Union[str, Path, Config, Dict], rank: int, world_size: int, host: str, port: int, backend: str = 'nccl', local_rank: int = None, seed: int = 1024, verbose: bool = True): """This function first parses the configuration arguments, using :func:`parse_args()` in case one of the input arguments are not given. Then initialize and set distributed environment by calling global_context's functions. Args: config (Union[str, dict, Config]): Config file or config file path are both acceptable rank (int): Rank for the default process group world_size (int): World size of the default process group host (str): The master address for distributed training port (str): The master port for distributed training backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl`` local_rank (int, optional): Rank for the process on the node and is used to set the default CUDA device, defaults to None. If local_rank = None, the default device ordinal will be calculated automatically. 
seed (int, optional): Specified random seed for every process. Defaults to 1024. verbose (bool, optional): Whether to print logs. Defaults to True. Raises: Exception: Raise exception when config type is wrong """ gpc.verbose = verbose # set config assert isinstance(config, (Config, str, Path, dict)), \ f'expected argument config to be Config, str or Path, but got {type(config)}' if not isinstance(config, Config) and isinstance(config, dict): config = Config(config) if isinstance(config, (str, Path)): config = Config.from_file(config) gpc.load_config(config) # init default process group gpc.init_global_dist(rank, world_size, backend, host, port) # init process groups for different parallel modes from config gpc.init_parallel_groups() # set cuda device if torch.cuda.is_available(): # if local rank is not given, calculate automatically gpc.set_device(local_rank) # set the number of processes running on the same node gpc.detect_num_processes_on_current_node() gpc.set_seed(seed) if verbose: logger = get_dist_logger() logger.info( f'Distributed environment is initialized, ' f'data parallel size: {gpc.data_parallel_size}, pipeline parallel size: {gpc.pipeline_parallel_size}, ' f'tensor parallel size: {gpc.tensor_parallel_size}', ranks=[0]) def launch_from_slurm(config: Union[str, Path, Config, Dict], host: str, port: int, backend: str = 'nccl', seed: int = 1024, verbose: bool = True): """A wrapper for colossalai.launch for SLURM launcher by reading rank and world size from the environment variables set by SLURM Args: config (Union[str, dict, Config]): Config file or config file path are both acceptable host (str): The master address for distributed training port (str): The master port for distributed training backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl`` seed (int, optional): Specified random seed for every process. Defaults to 1024. verbose (bool, optional): Whether to print logs. Defaults to True. """ try: rank = int(os.environ['SLURM_PROCID']) world_size = int(os.environ['SLURM_NPROCS']) except KeyError as e: raise RuntimeError( f"Could not find {e} in the SLURM environment, visit https://www.colossalai.org/ for more information on launching with SLURM" ) launch(config=config, rank=rank, world_size=world_size, host=host, port=port, backend=backend, seed=seed, verbose=verbose) def launch_from_openmpi(config: Union[str, Path, Config, Dict], host: str, port: int, backend: str = 'nccl', seed: int = 1024, verbose: bool = True): """A wrapper for colossalai.launch for OpenMPI launcher by reading rank and world size from the environment variables set by OpenMPI Args: config (Union[str, dict, Config]): Config file or config file path are both acceptable host (str): The master address for distributed training port (str): The master port for distributed training backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl`` seed (int, optional): Specified random seed for every process. Defaults to 1024. verbose (bool, optional): Whether to print logs. Defaults to True. 
""" try: rank = int(os.environ['OMPI_COMM_WORLD_RANK']) local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) except KeyError as e: raise RuntimeError( f"Could not find {e} in the OpenMPI environment, visit https://www.colossalai.org/ for more information on launching with OpenMPI" ) launch(config=config, local_rank=local_rank, rank=rank, world_size=world_size, host=host, port=port, backend=backend, seed=seed, verbose=verbose) def launch_from_torch(config: Union[str, Path, Config, Dict], backend: str = 'nccl', seed: int = 1024, verbose: bool = True): """A wrapper for colossalai.launch for torchrun or torch.distributed.launch by reading rank and world size from the environment variables set by PyTorch Args: config (Union[str, dict, Config]): Config file or config file path are both acceptable backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl`` seed (int, optional): Specified random seed for every process. Defaults to 1024. verbose (bool, optional): Whether to print logs. Defaults to True. """ try: rank = int(os.environ['RANK']) local_rank = int(os.environ['LOCAL_RANK']) world_size = int(os.environ['WORLD_SIZE']) host = os.environ['MASTER_ADDR'] port = int(os.environ['MASTER_PORT']) except KeyError as e: raise RuntimeError( f"Could not find {e} in the torch environment, visit https://www.colossalai.org/ for more information on launching with torch" ) launch(config=config, local_rank=local_rank, rank=rank, world_size=world_size, host=host, port=port, backend=backend, seed=seed, verbose=verbose) def initialize(model: nn.Module, optimizer: Optimizer, criterion: Optional[_Loss] = None, train_dataloader: Optional[Iterable] = None, test_dataloader: Optional[Iterable] = None, lr_scheduler: Optional[_LRScheduler] = None, ophooks: Optional[List[BaseOpHook]] = None, verbose: bool = True) -> Tuple[Engine, DataLoader, DataLoader, _LRScheduler]: """Core function to wrap the essential training components with our functionality based on the config which is loaded into gpc.config. Args: model (:class:`torch.nn.Module` or Callbale): Your model instance or a function to build the model. optimizer (:class:`torch.optim.optimizer.Optimizer` or :class:`Type[torch.optim.optimizer]`): Your optimizer instance. criterion (:class:`torch.nn.modules.loss._Loss`, optional): Your criterion instance. train_dataloader (:class:`torch.utils.data.DataLoader`, optional): Dataloader for training. test_dataloader (:class:`torch.utils.data.DataLoader`, optional): Dataloader for testing. lr_scheduler (:class:`torch.nn.lr_scheduler._LRScheduler`, optional): Your lr scheduler instance, optional. verbose (bool, optional): Whether to print logs. Returns: Tuple (engine, train_dataloader, test_dataloader, lr_scheduler): A tuple of ``(engine, train_dataloader, test_dataloader, lr_scheduler)`` where only ``engine`` could not be None. 
""" # get logger logger = get_dist_logger() gpc.verbose = verbose # get config from gpc config = gpc.config # print config if verbose: logger.info( f"\n========== Your Config ========\n" f"{pprint.pformat(gpc.config)}\n" f"================================\n", ranks=[0]) # cudnn cudnn_benchmark = config.get('cudnn_benchmark', False) cudnn_deterministic = config.get('cudnn_deterministic', False) torch.backends.cudnn.benchmark = cudnn_benchmark torch.backends.cudnn.deterministic = cudnn_deterministic if verbose: logger.info(f"cuDNN benchmark = {cudnn_benchmark}, deterministic = {cudnn_deterministic}", ranks=[0]) # zero use_zero = hasattr(gpc.config, 'zero') if use_zero: zero_cfg = gpc.config.get('zero', None) if zero_cfg is not None: cfg_ = zero_cfg.copy() else: cfg_ = {} optimizer_config = zero_cfg.get('optimizer_config', None) model_config = zero_cfg.get('model_config', None) model, optimizer = convert_to_zero_v2(model, optimizer, model_config=model_config, optimizer_config=optimizer_config) logger.info("Initializing ZeRO model and optimizer finished!", ranks=[0]) else: if isinstance(model, nn.Module): # first sync model across dp ranks model.to(get_current_device()) elif isinstance(model, Callable): model = model().to(get_current_device()) # optimizer maybe a optimizer_cls if isinstance(optimizer, Callable): optimizer = optimizer(model.parameters()) logger.warning("Initializing an non ZeRO model with optimizer class") if not use_zero: if is_using_sequence(): sync_model_param(model, ParallelMode.SEQUENCE_DP) elif MOE_CONTEXT.is_initialized: sync_moe_model_param(model) elif is_using_ddp(): sync_model_param(model, ParallelMode.DATA) else: logger.warning( "The parameters of models is not automatically synchronized.\n" "Please make sure that all parameters are the same in data parallel group.", ranks=[0]) # check amp and zero fp16_cfg = gpc.config.get('fp16', None) if fp16_cfg is not None and fp16_cfg.mode is not None and use_zero: raise ConfigException( "It is not allowed to set fp16 and zero configuration in your config file at the same time") # clip grad norm clip_grad_norm = gpc.config.get('clip_grad_norm', 0.0) # initialize amp amp_mode = None if fp16_cfg is not None and fp16_cfg.mode is not None: cfg_ = fp16_cfg.copy() amp_mode = cfg_.pop('mode') if is_using_pp(): assert amp_mode == AMP_TYPE.NAIVE, 'Pipeline only support NaiveAMP currently' if amp_mode == AMP_TYPE.NAIVE: cfg_['clip_grad_norm'] = clip_grad_norm model, optimizer, criterion = convert_to_amp(model=model, optimizer=optimizer, criterion=criterion, mode=amp_mode, amp_config=cfg_) # get torch ddp config torch_ddp_cfg = gpc.config.get('torch_ddp', dict()) # gradient handler gradient_handler_cfg = gpc.config.get('gradient_handler', None) if gradient_handler_cfg is None: # if gradient handler is not specified in the configuration file, # check in the following order # 1. if optimizer is ZERO, then use zero grad handler # 2. if dp size is larger than 1 and pipeline is not used, use pytorch ddp # 3. 
if using pipeline and dp size larger than 1, use data parallel grad handler if isinstance(optimizer, ShardedOptimizerV2): gradient_handler_cfg = [dict(type='ZeROGradientHandler')] if verbose: logger.info( "Training with zero is detected, ZeROGradientHandler is automatically " "added even though not specified in the configuration", ranks=[0]) elif is_using_ddp() and MOE_CONTEXT.is_initialized: gradient_handler_cfg = [dict(type='MoeGradientHandler')] if verbose: logger.info( "Data parallel training is detected with moe parallel, MoeGradientHandler is automatically " "added even though not specified in the configuration", ranks=[0]) elif is_using_sequence(): model = DDP(model, process_group=gpc.get_group(ParallelMode.SEQUENCE_DP), device_ids=[torch.cuda.current_device()], **torch_ddp_cfg) if verbose: logger.info('Model is using torch.nn.parallel.DistributedDataParallel for Sequence Parallelism', ranks=[0]) elif is_using_ddp() and not is_using_pp() and amp_mode != AMP_TYPE.NAIVE: model = DDP(model, process_group=gpc.get_group(ParallelMode.DATA), device_ids=[torch.cuda.current_device()], **torch_ddp_cfg) if verbose: logger.info('Model is using torch.nn.parallel.DistributedDataParallel for Data Parallelism', ranks=[0]) elif is_using_ddp(): gradient_handler_cfg = [dict(type='DataParallelGradientHandler')] if verbose: logger.info( "Data parallel training is detected when using pipeline parallel, " "DataParallelGradientHandler is automatically " "added even though not specified in the configuration", ranks=[0]) # add pipeline parallel gradient handler, if pipeline shared module is detected for param in model.parameters(): if getattr(param, 'pipeline_shared_module_pg', None) is not None: if gradient_handler_cfg is None: gradient_handler_cfg = [dict(type='PipelineSharedModuleGradientHandler')] else: gradient_handler_cfg.append(dict(type='PipelineSharedModuleGradientHandler')) if verbose: logger.info( "pipeline_shared_module is detected, PipelineSharedModuleGradientHandler is automatically " "added even though not specified in the configuration", ranks=[0]) break else: if not isinstance(gradient_handler_cfg, list): raise ConfigException( f"expected gradient_handler in the configuration file to be a list but got {type(gradient_handler_cfg)}" ) # turn off sync buffer for NaiveAMPModel if using torch DDP and NaiveAMPModel at the same time # to avoid duplicated buffer synchronization if isinstance(model, DDP) and isinstance(model.module, NaiveAMPModel): model.module.sync_buffer = False # initialize schedule for engine if is_using_pp(): tensor_shape = get_tensor_shape() use_interleaved = hasattr(gpc.config, 'model') and hasattr(gpc.config.model, 'num_chunks') if gpc.is_initialized(ParallelMode.PARALLEL_1D): scatter_gather = True else: scatter_gather = False if use_interleaved: if isinstance(model, nn.Sequential): model = nn.ModuleList([model]) schedule = InterleavedPipelineSchedule(gpc.config.NUM_MICRO_BATCHES, gpc.config.model.num_chunks, tensor_shape=tensor_shape, scatter_gather_tensors=scatter_gather) else: schedule = PipelineSchedule(gpc.config.NUM_MICRO_BATCHES, tensor_shape=tensor_shape, scatter_gather_tensors=scatter_gather) else: schedule = NonPipelineSchedule() if gradient_handler_cfg is None: gradient_handlers = None if verbose and not isinstance(model, DDP): logger.warning( "No PyTorch DDP or gradient handler is set up, please make sure you do not need " "to all-reduce the gradients after a training step.", ranks=[0]) else: gradient_handlers = [build_gradient_handler(cfg, model, optimizer) for 
cfg in gradient_handler_cfg] # check if optimizer is ColossalaiOptimizer if not isinstance(optimizer, (ColossalaiOptimizer, ShardedOptimizerV2)): optimizer = ColossalaiOptimizer(optim=optimizer) # gradient accumulation grad_accum_size = gpc.config.get('gradient_accumulation', None) if grad_accum_size is not None: optimizer, train_dataloader, gradient_handlers, lr_scheduler = accumulate_gradient( model=model, optimizer=optimizer, dataloader=train_dataloader, accumulate_size=grad_accum_size, gradient_handlers=gradient_handlers, lr_scheduler=lr_scheduler) engine = Engine(model=model, optimizer=optimizer, criterion=criterion, gradient_handlers=gradient_handlers, clip_grad_norm=clip_grad_norm, ophook_list=ophooks, schedule=schedule) return engine, train_dataloader, test_dataloader, lr_scheduler
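# A minimal usage sketch of the entry points above, assuming the script is started with
# `torchrun` so that launch_from_torch() can read RANK / WORLD_SIZE / MASTER_ADDR /
# MASTER_PORT from the environment; the toy model, optimizer, criterion and the empty
# config are placeholders, not values prescribed by this module.
if __name__ == '__main__':
    launch_from_torch(config=dict())
    toy_model = nn.Linear(16, 4)
    toy_optimizer = torch.optim.Adam(toy_model.parameters())
    toy_criterion = nn.CrossEntropyLoss()
    # initialize() wraps the components into an Engine according to gpc.config
    engine, *_ = initialize(toy_model, toy_optimizer, toy_criterion)
    get_dist_logger().info(f'engine ready on rank {gpc.get_global_rank()}', ranks=[0])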
#!/usr/bin/env python # -*- encoding: utf-8 -*- ALLOWED_MODES = [None, '1d', '2d', '2.5d', '3d', 'sequence'] TENSOR_PARALLEL_MODE = 'tensor_parallel_mode' # initializer INITIALIZER_MAPPING = { 'data': 'Initializer_Data', 'tensor': 'Initializer_Tensor', 'pipeline': 'Initializer_Pipeline', 'embedding': 'Initializer_Embedding', '1d': 'Initializer_1D', '2d': 'Initializer_2D', '2.5d': 'Initializer_2p5D', '3d': 'Initializer_3D', 'sequence': 'Initializer_Sequence', 'model': 'Initializer_Model', 'moe': 'Initializer_Moe' } # 3D parallelism groups INPUT_GROUP_3D = 'input_group_3d' WEIGHT_GROUP_3D = 'weight_group_3d' OUTPUT_GROUP_3D = 'output_group_3d' INPUT_X_WEIGHT_3D = 'input_x_weight_group_3d' OUTPUT_X_WEIGHT_3D = 'output_x_weight_group_3d' # Attributes of tensor parallel parameters IS_TENSOR_PARALLEL = 'is_tensor_parallel' NUM_PARTITIONS = 'num_partitions' TENSOR_PARALLEL_ATTRIBUTES = [IS_TENSOR_PARALLEL, NUM_PARTITIONS]
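# A minimal sketch (an assumption about typical usage, not taken from this module)
# showing how the attribute names above can be attached to a parameter so that other
# utilities can recognize it through TENSOR_PARALLEL_ATTRIBUTES.
if __name__ == '__main__':
    import torch

    weight = torch.nn.Parameter(torch.empty(16, 16))
    # tag the parameter with the attribute names defined above
    setattr(weight, IS_TENSOR_PARALLEL, True)
    setattr(weight, NUM_PARTITIONS, 4)
    assert all(hasattr(weight, attr) for attr in TENSOR_PARALLEL_ATTRIBUTES)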
from .initialize import ( get_default_parser, initialize, launch, launch_from_openmpi, launch_from_slurm, launch_from_torch, ) try: # .version will be created by setup.py from .version import __version__ except ModuleNotFoundError: # this will only happen if the user did not run `pip install` # and directly set PYTHONPATH to use Colossal-AI which is a bad practice __version__ = '0.0.0' print('please install Colossal-AI from https://www.colossalai.org/download or from source')
#!/usr/bin/env python # -*- encoding: utf-8 -*- from colossalai.context.parallel_context import global_context __all__ = ['global_context']
from typing import List, Dict, Tuple import os import threading from torch.distributed import rpc import torch.distributed as dist from colossalai.tensor import ProcessGroup class PipelineProcessGroup: # TODO : flexible API for DP size and TP size # In the future design mode, dp_degree and tp_degree should be removed def __init__(self) -> None: self.is_initialize = False def set_global_info(self, rank: int, world_size: int, dp_degree: int = 1, tp_degree: int = 1, num_worker_threads: int = 1, device: str = "cuda") -> None: device_mesh_size = dp_degree * tp_degree assert world_size % device_mesh_size == 0, "world_size must be the multiple of dp_degree * tp_degree !!!" self._num_worker_threads = num_worker_threads self._device_mesh_size = device_mesh_size self._rank = rank self._world_size = world_size self._dp_degree = dp_degree self._tp_degree = tp_degree self.device = device self._stage_num = world_size // device_mesh_size self._pp_rank = rank // device_mesh_size self._pp_ranks = [(rank % device_mesh_size) + i * device_mesh_size for i in range(self._stage_num)] self._local_stage_ranks = [(rank // device_mesh_size * device_mesh_size) + i for i in range(device_mesh_size)] # pp_ranks self._initialize_pp_process_group() # initialise tp dp process groups self._initialize_tp_dp_process_group() # status self._is_first_pp_rank = self._pp_rank == 0 self._is_last_pp_rank = self._pp_rank == self._stage_num - 1 self.is_initialize = True # lock self.initialise_lock = threading.Lock() self.chimera_lock = threading.Lock() def _initialize_process_group(self): stage_num = self.get_stage_num() if stage_num == 1: return device = self.device world_size = self.get_world_size() rank = self.get_global_rank() backend = 'nccl' if device == 'cuda' else 'gloo' dist.init_process_group(backend, world_size=world_size, rank=rank, group_name='main_group') def _initialize_pp_process_group(self) -> None: rank = self.get_global_rank() world_size = self.get_world_size() # build rpc connection options = rpc.TensorPipeRpcBackendOptions(num_worker_threads=self._num_worker_threads) for pp_rank in self._pp_ranks: options.set_device_map(f'work{pp_rank}', {rank: pp_rank}) rpc.init_rpc(name=f'work{rank}', rank=rank, world_size=world_size, rpc_backend_options=options) def _initialize_tp_dp_process_group(self) -> None: rank = self.get_global_rank() local_stage_ranks = self.get_local_stage_global_ranks() dp_degree = self.get_dp_degree() tp_degree = self.get_tp_degree() self._tp_dp_process_group = ProcessGroup(rank, local_stage_ranks, tp_degree, dp_degree) def get_global_rank(self): return self._rank def get_world_size(self): return self._world_size def get_dp_degree(self) -> int: return self._dp_degree def get_tp_degree(self) -> int: return self._tp_degree def get_local_device_mesh_size(self) -> int: return self._device_mesh_size def get_device_mesh_num(self) -> int: pass def get_stage_num(self) -> int: return self._stage_num def is_first_stage(self) -> bool: return self._is_first_pp_rank def is_last_stage(self) -> bool: return self._is_last_pp_rank def check_pp_rank_valid(self, pp_rank: int) -> bool: return -1 < pp_rank < self._stage_num def get_local_pp_rank(self) -> int: return self._pp_rank def get_prev_pp_rank(self) -> int: prev_pp_rank = self._pp_rank - 1 if not self.check_pp_rank_valid(prev_pp_rank): assert ValueError(f"current rank's pp_rank: {self._pp_rank} doesn't have a previous stage!") return prev_pp_rank def get_next_pp_rank(self) -> int: next_pp_rank = self._pp_rank + 1 if not self.check_pp_rank_valid(next_pp_rank): 
assert ValueError(f"current rank's pp_rank: {self._pp_rank} doesn't have a next stage!") return next_pp_rank def get_local_stage_global_ranks(self) -> List[int]: return self._local_stage_ranks def local_dp_rank(self) -> int: return self._tp_dp_process_group.dp_local_rank() def local_tp_rank(self) -> int: return self._tp_dp_process_group.tp_local_rank() def get_pp_global_ranks(self) -> int: return self._pp_ranks def get_dp_global_ranks(self): pass def get_tp_global_ranks(self): pass def get_chimera_all_reduce_group(self, pp_rank: int): with self.chimera_lock: if not hasattr(self, 'chimera_groups'): world_size = self.get_world_size() stage_num = self.get_stage_num() assert world_size % 2 == 0, 'world_size must be even in chimera!' self.chimera_groups = {} for rank in range(world_size // 2): pair = [rank, world_size - 1 - rank] group = dist.new_group(pair) self.chimera_groups[pair[0]] = group self.chimera_groups[pair[1]] = group self.chimera_groups[pair[0] + stage_num] = group self.chimera_groups[pair[1] + stage_num] = group self.chimera_step_lock = threading.Lock() self.chimera_step_lock.acquire() return self.chimera_groups[pp_rank] ppg = PipelineProcessGroup()
import torch
import inspect
from colossalai.utils.model.utils import InsertPostInitMethodToModuleSubClasses
from .utils import partition_uniform, partition_balanced, build_kwargs_for_function, \
    build_kwargs_for_module, exec_func_with_kwargs, exec_funcs_with_kwargs, \
    call_module, customized_partition
from colossalai.nn.layer.utils import CheckpointModule
from colossalai.tensor import ColoParameter
from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode
from .layer_spec import LayerSpec


class PipelinableContext(InsertPostInitMethodToModuleSubClasses):
    """
    A context manager to split the model into pipeline stages.
    """

    def __init__(self, policy: str = "balanced"):
        super().__init__()
        self._layer_spec_dict = {}
        self._root_children = None
        self._model = None
        self._layer_spec_list = []
        self._func_dict = {}
        self._policy = policy

    @property
    def policy(self):
        return self._policy

    @policy.setter
    def policy(self, policy: str):
        self._policy = policy

    @property
    def layers_count(self):
        return len(self._layer_spec_list)

    @property
    def funcs_count(self):
        return len(self._func_dict)

    def _pre_context_exec(self):
        """
        The callback function when entering the context.
        """
        # reserve rng states
        self.cpu_rng_state = torch.get_rng_state()
        self.cuda_rng_state = torch.cuda.get_rng_state()

    def _post_context_exec(self):
        """
        The callback function when exiting the context.
        """
        # reset rng states
        torch.set_rng_state(self.cpu_rng_state)
        torch.cuda.set_rng_state(self.cuda_rng_state)

    def _post_init_method(self, module: torch.nn.Module, *args, **kwargs):
        """
        The function to call at the end of the constructor of each module.
        NOTE() The module may be passed to this function multiple times.
        """
        # iterate over the positional arguments
        # to check if an argument is a torch Module
        # if found any torch Module, replace it with its layer spec
        # for storage purpose
        modified_args = []
        for arg in args:
            if isinstance(arg, torch.nn.Module):
                # if nn.Module is an argument of a non-root module, convert it to a layer spec,
                # which makes sure the correct init method is used in the real build.
                # if nn.Module is an argument of the root module, just record the module instance itself,
                # because those instances have been built outside of the context.
                if id(arg) in self._layer_spec_dict:
                    arg = self._layer_spec_dict[id(arg)]
            modified_args.append(arg)

        # do the same for the keyword arguments
        modified_kwargs = {}
        for k, v in kwargs.items():
            if isinstance(v, torch.nn.Module):
                v = self._layer_spec_dict[id(v)]
            # (lyl)TODO: analyse ColoTensor as well
            modified_kwargs[k] = v

        # keep track of the module children
        # as torch.nn.Module.__init__ is called from inner module to outer module,
        # the final value of self._model will be the outermost model
        # e.g. if the model is torchvision.models.resnet18, then the final value of self._model
        # will be the ``ResNet`` object.
        self._root_children = list(module.children())
        self._model = module

        # store the children to keep the module hierarchy
        layer_spec = LayerSpec(module.__class__, *modified_args, **modified_kwargs)
        layer_spec.set_children(module.children())

        # store the layer spec in this context
        module_id = id(module)
        self._layer_spec_dict[module_id] = layer_spec

        # convert all torch.nn.Parameter to colossalai.tensor.ColoParameter
        name_list = []
        for name, param in module.named_parameters():
            if isinstance(param, ColoParameter):
                continue
            name_list.append((name, param))

        for name, param in name_list:
            if hasattr(module, name):
                delattr(module, name)
            setattr(module, name, ColoParameter.from_torch_tensor(tensor=param.data, requires_grad=param.requires_grad))

    def to_layer_list(self, exec_seq=None):
        """
        Create a layer spec list and func list with the execution sequence given by the user.
        If exec_seq is None, the module initialization order is taken as the execution order.
        """
        self._exec_seq = exec_seq
        if exec_seq is None:
            # if the user does not provide an execution sequence, use the initialization order as the execution order
            children_name = []
            for child in self._root_children:
                layer_spec = self._layer_spec_dict[id(child)]
                if layer_spec.typename in (torch.nn.modules.container.ModuleList,
                                           torch.nn.modules.container.Sequential):
                    for child_in_container in layer_spec.children:
                        self._layer_spec_list.append(self._layer_spec_dict[id(child_in_container)])
                        for name, module in self._model.named_modules():
                            if id(module) == id(child_in_container):
                                children_name.append(name)
                                break
                else:
                    self._layer_spec_list.append(layer_spec)
                    for name, module in self._model.named_modules():
                        if id(module) == id(child):
                            children_name.append(name)
                            break

        else:
            front_funcs_list = []
            named_modules = dict(self._model.named_modules())
            for index, element in enumerate(exec_seq):
                if isinstance(element, str):
                    if element == 'SPLIT_NODE':
                        continue

                    assert element in named_modules, \
                        f'Found invalid module name {element}, please check if you spell the module name correctly.'

                    # get the layer spec based on the module ID
                    module = named_modules[element]
                    layer_spec = self._layer_spec_dict[id(module)]

                    # check whether there are functions which should be executed before this module
                    if len(front_funcs_list) != 0:
                        func_key = (layer_spec, "front")
                        if func_key not in self._func_dict:
                            self._func_dict[func_key] = []
                        for f in front_funcs_list:
                            self._func_dict[func_key].append(f)
                        front_funcs_list = []

                    func_key = (layer_spec, "behind")
                    self._layer_spec_list.append(layer_spec)
                elif isinstance(element, tuple) and element[1] == "front":
                    front_funcs_list.append(element[0])
                else:
                    if func_key not in self._func_dict:
                        self._func_dict[func_key] = []
                    if isinstance(element, tuple):
                        self._func_dict[func_key].append(element[0])
                    else:
                        self._func_dict[func_key].append(element)

    def partition(self, num_chunks, pipeline_size, rank):
        """
        The partitioned model will be built with respect to the partition policy.
        The real module instances are built in this method.
        """
        if isinstance(self._policy, str):
            if self._policy == "uniform":
                parts = partition_uniform(len(self._layer_spec_list), pipeline_size, num_chunks)[rank]
            elif self._policy == "balanced":
                param_counts = []
                for layer_spec in self._layer_spec_list:
                    param_counts.append(layer_spec.count_params())
                parts = partition_balanced(param_counts, pipeline_size, num_chunks)[rank]
            elif self._policy == "customized":
                assert self._exec_seq is not None, \
                    'An explicit exec_seq must be defined by the user in customized policy mode.'
                self.customized_parts = customized_partition(self._exec_seq)
                assert len(self.customized_parts) == gpc.get_world_size(
                    ParallelMode.PIPELINE
                ), f'World size is {gpc.get_world_size(ParallelMode.PIPELINE)}, but the number of partitions is {len(self.customized_parts)}'
                parts = self.customized_parts[rank]
            else:
                raise ValueError("A string partition policy should be one of ['uniform', 'balanced', 'customized'].")
        elif isinstance(self._policy, dict):
            parts = self._policy[rank]
        else:
            raise ValueError("A partition policy should be either a string or a dictionary.")

        layers_to_build = []
        for start, end in parts:
            layers_to_build += self._layer_spec_list[start:end]

        behind_func_dict_in_partition = {}
        front_func_dict_in_partition = {}
        module_list_in_partition = []
        for layer in layers_to_build:
            module = layer.build()
            module_list_in_partition.append(module)
            if (layer, "front") in self._func_dict:
                front_func_dict_in_partition[id(module)] = self._func_dict[(layer, "front")]
            elif (layer, "behind") in self._func_dict:
                behind_func_dict_in_partition[id(module)] = self._func_dict[(layer, "behind")]
        module_list_in_partition = torch.nn.ModuleList(module_list_in_partition)
        pipeline_model = PipelinableModel(module_list_in_partition, front_func_dict_in_partition,
                                          behind_func_dict_in_partition)

        return pipeline_model


class PipelinableModel(torch.nn.Module):

    def __init__(self, module_list, front_func_dict, behind_func_dict):
        super().__init__()
        self._module_list = module_list
        self._front_func_dict = front_func_dict
        self._behind_func_dict = behind_func_dict

    def forward(self, *input_tensor, **kwargs):
        for module in self._module_list:

            if id(module) in self._front_func_dict:
                input_tensor = exec_funcs_with_kwargs(self._front_func_dict, id(module), input_tensor, kwargs)

            if isinstance(module, CheckpointModule):
                forward_func = module._forward
            else:
                forward_func = module.forward
            module_kwargs = build_kwargs_for_module(forward_func, input_tensor, kwargs)
            if input_tensor is None:
                input_tensor = call_module(module, kwargs=module_kwargs)
            elif isinstance(input_tensor, torch.Tensor):
                input_tensor = call_module(module, args=(input_tensor,), kwargs=module_kwargs)
            else:
                input_tensor = call_module(module, args=input_tensor, kwargs=module_kwargs)

            if id(module) in self._behind_func_dict:
                input_tensor = exec_funcs_with_kwargs(self._behind_func_dict, id(module), input_tensor, kwargs)

        return input_tensor
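# --- Hedged usage sketch (illustrative, not part of the library source) ---
# A minimal sketch of how ``PipelinableContext`` might be used to split a toy model into
# pipeline stages. It assumes the distributed environment and the pipeline parallel context
# (ParallelMode.PIPELINE) have already been initialized, e.g. via ``colossalai.launch``;
# the toy model, stage count and chunk count below are illustrative placeholders only.
import torch

from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode
from colossalai.pipeline.pipelinable import PipelinableContext


def build_pipeline_stage():
    pipelinable = PipelinableContext(policy="uniform")
    with pipelinable:
        # layers constructed inside the context are recorded as LayerSpecs instead of
        # being materialized immediately
        model = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU(), torch.nn.Linear(16, 16))

    # no exec_seq given, so the module initialization order becomes the execution order
    pipelinable.to_layer_list()

    # build only the layers that belong to this rank's pipeline stage
    pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
    pp_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
    stage_model = pipelinable.partition(num_chunks=1, pipeline_size=pipeline_size, rank=pp_rank)
    return stage_model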