python_code
stringlengths
0
108k
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Correctness checks for colossalai 3-D tensor-parallel layers.

Each ``check_*`` function builds a parallel layer and a single-device master
layer with identical (broadcast) parameters, shards the master tensors along
the 3-D parallel cube, runs forward and backward on both, logs elementwise
equality of outputs/gradients per rank, and returns
``(forward_time, backward_time)``.
"""

import time

import torch

from colossalai.constants import INPUT_GROUP_3D, OUTPUT_GROUP_3D, WEIGHT_GROUP_3D
from colossalai.core import global_context
from colossalai.logging import get_dist_logger
from colossalai.nn import (Classifier3D, CrossEntropyLoss3D, Embedding3D, LayerNorm3D, Linear3D, PatchEmbedding3D,
                           VanillaClassifier, VanillaPatchEmbedding, VocabParallelClassifier3D,
                           VocabParallelCrossEntropyLoss3D, VocabParallelEmbedding3D)
from colossalai.nn.layer.parallel_3d._utils import get_parallel_mode_from_env
from colossalai.utils import get_current_device, print_rank_0

from .common import BATCH_SIZE, DEPTH, HIDDEN_SIZE, IMG_SIZE, NUM_CLASSES, SEQ_LENGTH, VOCAB_SIZE, check_equal


def check_linear():
    """Compare Linear3D against torch.nn.Linear with identically-sharded params."""
    rank = torch.distributed.get_rank()
    logger = get_dist_logger()
    device = get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    OUTPUT_SIZE = 2 * HIDDEN_SIZE
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
    # j / i / k: this rank's local coordinate in the input / weight / output
    # dimension of the DEPTH x DEPTH x DEPTH parallel cube.
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    k = global_context.get_local_rank(output_parallel_mode)
    layer = Linear3D(INPUT_SIZE, OUTPUT_SIZE, dtype=dtype, bias=True)
    layer = layer.to(device)
    layer_master = torch.nn.Linear(INPUT_SIZE, OUTPUT_SIZE)
    layer_master = layer_master.to(device)
    # Broadcast the master parameters so every rank shards the same tensor,
    # then copy this rank's shard into the parallel layer.
    weight_master = layer_master.weight.data.transpose(0, 1)
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=0)[k]
    weight = torch.chunk(weight, DEPTH, dim=-1)[j]
    weight = torch.chunk(weight, DEPTH, dim=-1)[i]
    layer.weight.data.copy_(weight)
    bias_master = layer_master.bias.data
    torch.distributed.broadcast(bias_master, src=0)
    bias = torch.chunk(bias_master, DEPTH)[j]
    layer.bias.data.copy_(bias)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[k]
    A = torch.chunk(A, DEPTH, dim=0)[j]
    A = A.clone()
    A.requires_grad = True
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    print_rank_0(
        'linear forward: {0} --> {1} | {2:.3f} s'.format(tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger)
    A_master = A_master.clone()
    A_master.requires_grad = True
    C_master = layer_master(A_master)
    # Shard the master output the same way the parallel output is laid out.
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    C = torch.chunk(C, DEPTH, dim=0)[k]
    logger.info('Rank {} linear forward: {}'.format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = torch.chunk(grad, DEPTH, dim=0)[k]
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    print_rank_0('linear backward: {:.3f} s'.format(bwd_end - bwd_start), logger)
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]
    logger.info('Rank {} linear backward (input_grad): {}'.format(rank, check_equal(A_grad, A.grad)))
    B_grad = layer_master.weight.grad.transpose(0, 1)
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[i]
    logger.info('Rank {} linear backward (weight_grad): {}'.format(rank, check_equal(B_grad, layer.weight.grad)))
    bias_grad = layer_master.bias.grad
    bias_grad = torch.chunk(bias_grad, DEPTH)[j]
    logger.info('Rank {} linear backward (bias_grad): {}'.format(rank, check_equal(bias_grad, layer.bias.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start


def check_layernorm():
    """Compare LayerNorm3D against torch.nn.LayerNorm with shared parameters."""
    rank = torch.distributed.get_rank()
    logger = get_dist_logger()
    device = get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    k = global_context.get_local_rank(output_parallel_mode)
    norm = LayerNorm3D(INPUT_SIZE, eps=1e-6, dtype=dtype)
    norm = norm.to(device)
    norm_master = torch.nn.LayerNorm(INPUT_SIZE, eps=1e-6)
    norm_master = norm_master.to(device)
    # LayerNorm affine params are 1-D: sharded only along the output coordinate k.
    weight_master = norm_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH)[k]
    norm.weight.data.copy_(weight)
    bias_master = norm_master.bias.data
    torch.distributed.broadcast(bias_master, src=0)
    bias = torch.chunk(bias_master, DEPTH)[k]
    norm.bias.data.copy_(bias)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[k]
    A = torch.chunk(A, DEPTH, dim=0)[j]
    A = A.clone()
    A.requires_grad = True
    fwd_start = time.time()
    out = norm(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    print_rank_0(
        'layer norm forward: pass | {0} --> {1} | {2:.3f} s'.format(tuple(A.shape), tuple(out.shape),
                                                                    fwd_end - fwd_start), logger)
    A_master = A_master.clone()
    A_master.requires_grad = True
    C_master = norm_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[k]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info('Rank {} layernorm forward: {}'.format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[k]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    print_rank_0('layer norm backward: pass | {:.3f} s'.format(bwd_end - bwd_start), logger)
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]
    logger.info('Rank {} layernorm backward (input_grad): {}'.format(rank, check_equal(A_grad, A.grad)))
    bias_grad = norm_master.weight.grad
    bias_grad = torch.chunk(bias_grad, DEPTH)[k]
    logger.info('Rank {} layernorm backward (weight_grad): {}'.format(rank, check_equal(bias_grad, norm.weight.grad)))
    bias_grad = norm_master.bias.grad
    bias_grad = torch.chunk(bias_grad, DEPTH)[k]
    logger.info('Rank {} layernorm backward (bias_grad): {}'.format(rank, check_equal(bias_grad, norm.bias.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start


def check_classifier_no_given_weight():
    """Compare Classifier3D (own weight) against VanillaClassifier."""
    rank = torch.distributed.get_rank()
    logger = get_dist_logger()
    device = get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    k = global_context.get_local_rank(output_parallel_mode)
    layer = Classifier3D(INPUT_SIZE, NUM_CLASSES, dtype=dtype, bias=True)
    layer = layer.to(device)
    layer_master = VanillaClassifier(INPUT_SIZE, NUM_CLASSES, bias=True, dtype=dtype)
    layer_master = layer_master.to(device)
    weight_master = layer_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=-1)[k]
    layer.weight.data.copy_(weight)
    # Classifier bias is replicated (copied whole) rather than sharded.
    bias_master = layer_master.bias.data
    torch.distributed.broadcast(bias_master, src=0)
    layer.bias.data.copy_(bias_master)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[k]
    A = torch.chunk(A, DEPTH, dim=0)[j]
    A = A.clone()
    A.requires_grad = True
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    print_rank_0(
        'classifier (no given weight) forward: pass | {0} --> {1} | {2:.3f} s'.format(
            tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger)
    A_master = A_master.clone()
    A_master.requires_grad = True
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info('Rank {} classifier (no given weight) forward: {}'.format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    print_rank_0('classifier (no given weight) backward: pass | {:.3f} s'.format(bwd_end - bwd_start), logger)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]
    logger.info('Rank {} classifier (no given weight) backward (input_grad): {}'.format(
        rank, check_equal(A_grad, A.grad)))
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    # Only ranks on the cube diagonal (j == k) hold a weight gradient; elsewhere
    # the parallel layer's weight grad is expected to be None.
    if j == k:
        logger.info('Rank {} classifier (no given weight) backward (weight_grad): {}'.format(
            rank, check_equal(B_grad, layer.weight.grad)))
    else:
        logger.info('Rank {} classifier (no given weight) backward (weight_grad): {}'.format(
            rank, layer.weight.grad is None))
    bias_grad = layer_master.bias.grad
    logger.info('Rank {} classifier (no given weight) backward (bias_grad): {}'.format(
        rank, check_equal(bias_grad, layer.bias.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start


def check_vocab_parallel_classifier_no_given_weight():
    """Compare VocabParallelClassifier3D (own weight) against VanillaClassifier."""
    rank = torch.distributed.get_rank()
    logger = get_dist_logger()
    device = get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    k = global_context.get_local_rank(output_parallel_mode)
    layer = VocabParallelClassifier3D(INPUT_SIZE, VOCAB_SIZE, bias=True)
    layer = layer.to(dtype).to(device)
    layer_master = VanillaClassifier(INPUT_SIZE, VOCAB_SIZE, bias=True)
    layer_master = layer_master.to(dtype).to(device)
    weight_master = layer_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=0)[j]
    weight = torch.chunk(weight, DEPTH, dim=0)[i]
    weight = torch.chunk(weight, DEPTH, dim=-1)[k]
    layer.weight.data.copy_(weight)
    bias_master = layer_master.bias.data
    torch.distributed.broadcast(bias_master, src=0)
    bias = torch.chunk(bias_master, DEPTH)[j]
    layer.bias.data.copy_(bias)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[k]
    A = torch.chunk(A, DEPTH, dim=0)[j]
    A = A.clone()
    A.requires_grad = True
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    print_rank_0(
        'vocab parallel classifier (no given weight) forward: pass | {0} --> {1} | {2:.3f} s'.format(
            tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger)
    A_master = A_master.clone()
    A_master.requires_grad = True
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    C = torch.chunk(C, DEPTH, dim=0)[k]
    logger.info('Rank {} vocab parallel classifier (no given weight) forward: {}'.format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = torch.chunk(grad, DEPTH, dim=0)[k]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    print_rank_0('vocab parallel classifier (no given weight) backward: pass | {:.3f} s'.format(bwd_end - bwd_start),
                 logger)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]
    logger.info('Rank {} vocab parallel classifier (no given weight) backward (input_grad): {}'.format(
        rank, check_equal(A_grad, A.grad)))
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    logger.info('Rank {} vocab parallel classifier (no given weight) backward (weight_grad): {}'.format(
        rank, check_equal(B_grad, layer.weight.grad)))
    bias_grad = layer_master.bias.grad
    bias_grad = torch.chunk(bias_grad, DEPTH)[j]
    logger.info('Rank {} vocab parallel classifier (no given weight) backward (bias_grad): {}'.format(
        rank, check_equal(bias_grad, layer.bias.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start


def check_classifier_given_embed_weight():
    """Check Classifier3D tied to an Embedding3D weight against the vanilla pair."""
    rank = torch.distributed.get_rank()
    logger = get_dist_logger()
    device = get_current_device()
    dtype = torch.float32
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    k = global_context.get_local_rank(output_parallel_mode)
    embed = Embedding3D(VOCAB_SIZE, HIDDEN_SIZE)
    embed = embed.to(dtype).to(device)
    embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    embed_master = embed_master.to(dtype).to(device)
    weight_master = embed_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=-1)[k]
    embed.weight.data.copy_(weight)
    # Classifier shares (ties) the embedding weight on both sides.
    layer = Classifier3D(HIDDEN_SIZE, VOCAB_SIZE, weight=embed.weight, bias=False)
    layer = layer.to(dtype).to(device)
    layer_master = VanillaClassifier(HIDDEN_SIZE, VOCAB_SIZE, weight=embed_master.weight, bias=False)
    layer_master = layer_master.to(dtype).to(device)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    fwd_start = time.time()
    out = layer(embed(A))
    torch.cuda.synchronize()
    fwd_end = time.time()
    print_rank_0(
        'classifier (given embed weight) forward: pass | {0} --> {1} | {2:.3f} s'.format(
            tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger)
    A_master = A_master.clone()
    C_master = layer_master(embed_master(A_master))
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info('Rank {} classifier (given embed weight) forward: {}'.format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    print_rank_0('classifier (given embed weight) backward: pass | {:.3f} s'.format(bwd_end - bwd_start), logger)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    B_grad = embed_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    # Gradient for the tied weight only materializes on diagonal ranks (j == k).
    if j == k:
        logger.info('Rank {} classifier (given embed weight) backward (weight_grad): {}'.format(
            rank, check_equal(B_grad, embed.weight.grad)))
    else:
        logger.info('Rank {} classifier (given embed weight) backward (weight_grad): {}'.format(
            rank, embed.weight.grad is None))
    return fwd_end - fwd_start, bwd_end - bwd_start


def check_vocab_parallel_classifier_given_embed_weight():
    """Check VocabParallelClassifier3D tied to a VocabParallelEmbedding3D weight."""
    rank = torch.distributed.get_rank()
    logger = get_dist_logger()
    device = get_current_device()
    dtype = torch.float32
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    k = global_context.get_local_rank(output_parallel_mode)
    embed = VocabParallelEmbedding3D(VOCAB_SIZE, HIDDEN_SIZE)
    embed = embed.to(dtype).to(device)
    embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    embed_master = embed_master.to(dtype).to(device)
    weight_master = embed_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=0)[j]
    weight = torch.chunk(weight, DEPTH, dim=0)[i]
    weight = torch.chunk(weight, DEPTH, dim=-1)[k]
    embed.weight.data.copy_(weight)
    layer = VocabParallelClassifier3D(HIDDEN_SIZE, VOCAB_SIZE, weight=embed.weight, bias=False)
    layer = layer.to(dtype).to(device)
    layer_master = VanillaClassifier(HIDDEN_SIZE, VOCAB_SIZE, weight=embed_master.weight, bias=False)
    layer_master = layer_master.to(dtype).to(device)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    fwd_start = time.time()
    out = layer(embed(A))
    torch.cuda.synchronize()
    fwd_end = time.time()
    print_rank_0(
        'vocab parallel classifier (given embed weight) forward: pass | {0} --> {1} | {2:.3f} s'.format(
            tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger)
    A_master = A_master.clone()
    C_master = layer_master(embed_master(A_master))
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    C = torch.chunk(C, DEPTH, dim=0)[k]
    logger.info('Rank {} vocab parallel classifier (given embed weight) forward: {}'.format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = torch.chunk(grad, DEPTH, dim=0)[k]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    print_rank_0('vocab parallel classifier (given embed weight) backward: pass | {:.3f} s'.format(bwd_end - bwd_start),
                 logger)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    B_grad = embed_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    logger.info('Rank {} vocab parallel embed backward (weight_grad): {}'.format(rank,
                                                                                 check_equal(B_grad,
                                                                                             embed.weight.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start


def check_patch_embed():
    """Compare PatchEmbedding3D against VanillaPatchEmbedding (proj, cls token, pos embed)."""
    rank = torch.distributed.get_rank()
    device = get_current_device()
    logger = get_dist_logger()
    dtype = torch.float32
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    k = global_context.get_local_rank(output_parallel_mode)
    layer = PatchEmbedding3D(IMG_SIZE, 4, 3, HIDDEN_SIZE, dtype=dtype)
    # Force deterministic cls token / positional embedding on both sides.
    torch.nn.init.ones_(layer.cls_token)
    torch.nn.init.ones_(layer.pos_embed)
    layer = layer.to(device)
    layer_master = VanillaPatchEmbedding(IMG_SIZE, 4, 3, HIDDEN_SIZE, dtype=dtype)
    torch.nn.init.ones_(layer_master.cls_token)
    torch.nn.init.ones_(layer_master.pos_embed)
    layer_master = layer_master.to(device)
    proj_weight_master = layer_master.weight.data
    torch.distributed.broadcast(proj_weight_master, src=0)
    proj_weight = torch.chunk(proj_weight_master, DEPTH, dim=0)[k]
    layer.weight.data.copy_(proj_weight)
    proj_bias_master = layer_master.bias.data
    torch.distributed.broadcast(proj_bias_master, src=0)
    proj_bias = torch.chunk(proj_bias_master, DEPTH)[k]
    layer.bias.data.copy_(proj_bias)
    A_shape = (BATCH_SIZE, 3, IMG_SIZE, IMG_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    print_rank_0(
        'patch embed forward: pass | {0} --> {1} | {2:.3f} s'.format(tuple(A.shape), tuple(out.shape),
                                                                     fwd_end - fwd_start), logger)
    A_master = A_master.clone()
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[k]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info('Rank {} patch embed forward: {}'.format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[k]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    print_rank_0('patch embed backward: pass | {:.3f} s'.format(bwd_end - bwd_start), logger)
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    cls_grad_master = layer_master.cls_token.grad
    cls_grad = torch.chunk(cls_grad_master, DEPTH, dim=-1)[k]
    logger.info('Rank {} patch embed backward (cls_grad): {}'.format(rank, check_equal(cls_grad, layer.cls_token.grad)))
    pos_grad_master = layer_master.pos_embed.grad
    pos_grad = torch.chunk(pos_grad_master, DEPTH, dim=-1)[k]
    logger.info('Rank {} patch embed backward (pos_embed_grad): {}'.format(rank,
                                                                           check_equal(pos_grad,
                                                                                       layer.pos_embed.grad)))
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k]
    logger.info('Rank {} patch embed backward (proj_weight_grad): {}'.format(rank,
                                                                             check_equal(B_grad, layer.weight.grad)))
    bias_grad = layer_master.bias.grad
    bias_grad = torch.chunk(bias_grad, DEPTH)[k]
    logger.info('Rank {} patch embed backward (proj_bias_grad): {}'.format(rank,
                                                                           check_equal(bias_grad, layer.bias.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start


def check_embed():
    """Compare Embedding3D against torch.nn.Embedding."""
    rank = torch.distributed.get_rank()
    device = get_current_device()
    logger = get_dist_logger()
    dtype = torch.float32
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    k = global_context.get_local_rank(output_parallel_mode)
    layer = Embedding3D(VOCAB_SIZE, HIDDEN_SIZE)
    layer = layer.to(dtype).to(device)
    layer_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    layer_master = layer_master.to(dtype).to(device)
    weight_master = layer_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=-1)[k]
    layer.weight.data.copy_(weight)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info('embed forward: pass | {0} --> {1} | {2:.3f} s'.format(tuple(A.shape), tuple(out.shape),
                                                                       fwd_end - fwd_start),
                ranks=[0])
    A_master = A_master.clone()
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[k]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info('Rank {} embed forward: {}'.format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[k]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info('embed backward: pass | {:.3f} s'.format(bwd_end - bwd_start), ranks=[0])
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    # Weight gradient only exists on diagonal ranks (j == k); None elsewhere.
    if j == k:
        logger.info('Rank {} embed backward (weight_grad): {}'.format(rank, check_equal(B_grad, layer.weight.grad)))
    else:
        logger.info('Rank {} embed backward (weight_grad): {}'.format(rank, layer.weight.grad is None))
    return fwd_end - fwd_start, bwd_end - bwd_start


def check_vocab_parallel_embed():
    """Compare VocabParallelEmbedding3D against torch.nn.Embedding."""
    rank = torch.distributed.get_rank()
    device = get_current_device()
    logger = get_dist_logger()
    dtype = torch.float32
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    k = global_context.get_local_rank(output_parallel_mode)
    layer = VocabParallelEmbedding3D(VOCAB_SIZE, HIDDEN_SIZE)
    layer = layer.to(dtype).to(device)
    layer_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    layer_master = layer_master.to(dtype).to(device)
    weight_master = layer_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=0)[j]
    weight = torch.chunk(weight, DEPTH, dim=0)[i]
    weight = torch.chunk(weight, DEPTH, dim=-1)[k]
    layer.weight.data.copy_(weight)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info('vocab parallel embed forward: pass | {0} --> {1} | {2:.3f} s'.format(
        tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), ranks=[0])
    A_master = A_master.clone()
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[k]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info('Rank {} vocab parallel embed forward: {}'.format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[k]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info('vocab parallel embed backward: pass | {:.3f} s'.format(bwd_end - bwd_start), ranks=[0])
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    logger.info('Rank {} vocab parallel embed backward (weight_grad): {}'.format(rank,
                                                                                 check_equal(B_grad,
                                                                                             layer.weight.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start


def check_loss():
    """Compare CrossEntropyLoss3D against torch.nn.CrossEntropyLoss."""
    rank = torch.distributed.get_rank()
    logger = get_dist_logger()
    device = get_current_device()
    dtype = torch.float32
    # NOTE: only the input and weight modes are needed here; logits are not
    # sharded along the output (class) dimension for this loss.
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    criterion = CrossEntropyLoss3D()
    criterion_master = torch.nn.CrossEntropyLoss()
    out_shape = (BATCH_SIZE, NUM_CLASSES)
    out_master = torch.randn(out_shape, dtype=dtype, device=device)
    target_master = torch.randint(NUM_CLASSES, (BATCH_SIZE, ), dtype=torch.long, device=device)
    torch.distributed.broadcast(out_master, src=0)
    torch.distributed.broadcast(target_master, src=0)
    out = torch.chunk(out_master, DEPTH, dim=0)[i]
    out = torch.chunk(out, DEPTH, dim=0)[j]
    out = out.clone()
    out.requires_grad = True
    fwd_start = time.time()
    loss = criterion(out, target_master)
    fwd_end = time.time()
    logger.info('cross entropy loss forward: pass | {0} --> {1} | {2:.3f} s'.format(tuple(out.shape), tuple(loss.shape),
                                                                                    fwd_end - fwd_start),
                ranks=[0])
    out_master = out_master.clone()
    out_master.requires_grad = True
    loss_master = criterion_master(out_master, target_master)
    logger.info('Rank {} cross entropy loss forward: {}'.format(rank, check_equal(loss, loss_master)))
    bwd_start = time.time()
    loss.backward()
    bwd_end = time.time()
    logger.info('cross entropy loss backward: pass | {:.3f} s'.format(bwd_end - bwd_start), ranks=[0])
    loss_master.backward()
    out_grad = out_master.grad
    out_grad = torch.chunk(out_grad, DEPTH, dim=0)[i]
    out_grad = torch.chunk(out_grad, DEPTH, dim=0)[j]
    logger.info('Rank {} cross entropy loss backward: {}'.format(rank, check_equal(out_grad, out.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start


def check_vocab_parallel_loss():
    """Compare VocabParallelCrossEntropyLoss3D against torch.nn.CrossEntropyLoss."""
    rank = torch.distributed.get_rank()
    logger = get_dist_logger()
    device = get_current_device()
    dtype = torch.float32
    input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
    weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
    output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
    j = global_context.get_local_rank(input_parallel_mode)
    i = global_context.get_local_rank(weight_parallel_mode)
    k = global_context.get_local_rank(output_parallel_mode)
    criterion = VocabParallelCrossEntropyLoss3D()
    criterion_master = torch.nn.CrossEntropyLoss()
    out_shape = (BATCH_SIZE, NUM_CLASSES)
    out_master = torch.randn(out_shape, dtype=dtype, device=device)
    target_master = torch.randint(NUM_CLASSES, (BATCH_SIZE, ), dtype=torch.long, device=device)
    torch.distributed.broadcast(out_master, src=0)
    torch.distributed.broadcast(target_master, src=0)
    # Unlike check_loss, logits are additionally sharded along the class dim (k).
    out = torch.chunk(out_master, DEPTH, dim=0)[i]
    out = torch.chunk(out, DEPTH, dim=-1)[k]
    out = torch.chunk(out, DEPTH, dim=0)[j]
    out = out.clone()
    out.requires_grad = True
    fwd_start = time.time()
    loss = criterion(out, target_master)
    fwd_end = time.time()
    logger.info('vocab parallel cross entropy loss forward: pass | {0} --> {1} | {2:.3f} s'.format(
        tuple(out.shape), tuple(loss.shape), fwd_end - fwd_start), ranks=[0])
    out_master = out_master.clone()
    out_master.requires_grad = True
    loss_master = criterion_master(out_master, target_master)
    logger.info('Rank {} vocab parallel cross entropy loss forward: {}'.format(rank, check_equal(loss, loss_master)))
    bwd_start = time.time()
    loss.backward()
    bwd_end = time.time()
    logger.info('vocab parallel cross entropy loss backward: pass | {:.3f} s'.format(bwd_end - bwd_start), ranks=[0])
    loss_master.backward()
    out_grad = out_master.grad
    out_grad = torch.chunk(out_grad, DEPTH, dim=0)[i]
    out_grad = torch.chunk(out_grad, DEPTH, dim=-1)[k]
    out_grad = torch.chunk(out_grad, DEPTH, dim=0)[j]
    logger.info('Rank {} vocab parallel cross entropy loss backward: {}'.format(rank, check_equal(out_grad, out.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

# Sample training-data configuration: a CIFAR10 dataset with a standard
# augmentation pipeline, plus dataloader settings.
train_data = {
    'dataset': {
        'type': 'CIFAR10Dataset',
        'root': '/path/to/data',
        'download': True,
        'transform_pipeline': [
            {'type': 'RandomResizedCrop', 'size': 224},
            {'type': 'RandomHorizontalFlip'},
            {'type': 'ToTensor'},
            {'type': 'Normalize', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5)},
        ],
    },
    'dataloader': {
        'batch_size': 64,
        'pin_memory': True,
        'num_workers': 4,
        'sampler': {
            'type': 'DataParallelSampler',
            'shuffle': True,
        },
    },
}
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from pathlib import Path

import pytest

from colossalai.context.config import Config
from colossalai.builder import build_ophooks


@pytest.mark.cpu
def test_load_config():
    """Load ``sample_config.py`` via Config.from_file and verify nested attribute-style access."""
    config_path = Path(__file__).parent / 'sample_config.py'
    config = Config.from_file(config_path)
    assert config.train_data, 'cannot access train data as attribute'
    assert config.train_data.dataset, 'cannot access grandchild attribute'
    first_transform = config.train_data.dataset.transform_pipeline[0]
    assert isinstance(first_transform, dict), \
        f'expected attribute transform_pipeline elements to be a dict, but found {type(config.train_data.dataset.transform_pipeline)}'
import torch
import colossalai
import torch.multiprocessing as mp
from colossalai.amp import convert_to_naive_amp, convert_to_apex_amp
from tests.components_to_test.registry import non_distributed_component_funcs
from colossalai.testing import assert_close_loose, rerun_if_address_is_in_use
from colossalai.utils import free_port
# NOTE(review): the two imports below duplicate the ones above; harmless, but
# candidates for removal in a cleanup pass.
from colossalai.amp import convert_to_naive_amp, convert_to_apex_amp
from tests.components_to_test.registry import non_distributed_component_funcs
import copy
import pytest
from functools import partial


def check_equal(a, b):
    """
    This function checks if two tensors are equal within tolerance
    """
    assert torch.allclose(a.float(), b.float(), rtol=1e-4, atol=1e-3), f'a = {a}, b = {b}'


def run_naive_amp():
    """
    In this test, we compare the naive fp16 optimizer implemented in colossalai
    and fp32 torch optimizer
    """
    # make cudnn deterministic so both model copies see identical numerics
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    # create layer
    test_models = ['repeated_computed_layers', 'nested_model', 'resnet18']
    for test_name in test_models:
        get_component_func = non_distributed_component_funcs.get_callable(test_name)
        model_builder, train_dataloader, _, optim_class, _ = get_component_func()

        # create model: the apex model is a deep copy so both runs start from
        # identical weights
        naive_amp_model = model_builder(checkpoint=True).cuda()
        apex_amp_model = copy.deepcopy(naive_amp_model)

        # create optimizer
        naive_amp_optimizer = optim_class(naive_amp_model.parameters(), lr=1e-3)
        apex_amp_optimizer = optim_class(apex_amp_model.parameters(), lr=1e-3)

        # inject naive and apex amp; the same loss scale (128) on both sides
        # keeps their numerics comparable
        naive_amp_config = dict(initial_scale=128)
        naive_amp_model, naive_amp_optimizer = convert_to_naive_amp(naive_amp_model, naive_amp_optimizer,
                                                                    naive_amp_config)
        apex_amp_config = dict(opt_level='O2', loss_scale=128, keep_batchnorm_fp32=False)
        apex_amp_model, apex_amp_optimizer = convert_to_apex_amp(apex_amp_model, apex_amp_optimizer, apex_amp_config)

        # create data: a single batch is enough for this comparison
        data_iter = iter(train_dataloader)
        data, label = next(data_iter)
        data = data.cuda()

        # forward pass
        naive_amp_output = naive_amp_model(data)
        apex_amp_output = apex_amp_model(data)
        assert_close_loose(naive_amp_output, apex_amp_output)

        # backward
        naive_amp_optimizer.backward(naive_amp_output.mean())
        apex_amp_optimizer.backward(apex_amp_output.mean())

        # check grad
        for naive_amp_param, apex_amp_param in zip(naive_amp_model.parameters(), apex_amp_model.parameters()):
            assert_close_loose(naive_amp_param.grad, apex_amp_param.grad)

        # step
        naive_amp_optimizer.step()
        apex_amp_optimizer.step()

        # check updated param
        for naive_amp_param, apex_amp_param in zip(naive_amp_model.parameters(), apex_amp_model.parameters()):
            assert_close_loose(naive_amp_param, apex_amp_param)


def run_dist(rank, world_size, port):
    # single-process "distributed" launch so colossalai's global context exists
    colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')
    run_naive_amp()


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_naive_amp():
    world_size = 1
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_naive_amp()
import os
from functools import partial
from pathlib import Path

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.amp.amp_type import AMP_TYPE
from colossalai.builder import build_pipeline_model
from colossalai.engine.schedule import PipelineSchedule
from colossalai.logging import get_dist_logger
from colossalai.nn import LinearWarmupLR
from colossalai.nn.loss import CrossEntropyLoss
from colossalai.trainer import Trainer, hooks
from colossalai.utils import free_port, get_dataloader
from colossalai.utils.gradient_accumulation import GradAccumLrSchedulerByStep
from colossalai.testing import rerun_if_address_is_in_use
from model_zoo.vit import vit_tiny_patch4_32
from torchvision import transforms
from torchvision.datasets import CIFAR10

# hyper-parameters of the smoke-test run
BATCH_SIZE = 4
NUM_EPOCHS = 60
WARMUP_EPOCHS = 5
# 8-way hybrid parallel: 2 pipeline stages x 2-way 1D tensor parallel
# (x2 data parallel), naive fp16, 2-step gradient accumulation
CONFIG = dict(NUM_MICRO_BATCHES=2,
              parallel=dict(pipeline=2, tensor=dict(size=2, mode='1d')),
              fp16=dict(mode=AMP_TYPE.NAIVE),
              gradient_accumulation=2)


def run_trainer(rank, world_size, port):
    """Worker entry: launch colossalai, build a pipelined ViT on CIFAR10 and fit a few steps."""
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    logger = get_dist_logger()

    # split the ViT layers across pipeline stages (one chunk per stage)
    model = vit_tiny_patch4_32()
    pipe_model = build_pipeline_model(model.layers, num_chunks=1)

    # build dataloaders
    transform_train = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    # dataset root comes from the DATA environment variable
    train_dataset = CIFAR10(root=Path(os.environ['DATA']), train=True, download=True, transform=transform_train)
    train_dataloader = get_dataloader(dataset=train_dataset, shuffle=True, batch_size=BATCH_SIZE, pin_memory=True)

    # build criterion
    criterion = CrossEntropyLoss()

    # optimizer
    optimizer = torch.optim.Adam(pipe_model.parameters(), lr=0.001, weight_decay=0)

    # lr_scheduler: step counts must account for the accumulation factor
    steps_per_epoch = GradAccumLrSchedulerByStep.compute_effective_steps_per_epoch(train_dataloader, accumulate_size=2)
    total_steps = steps_per_epoch * NUM_EPOCHS
    warmup_steps = steps_per_epoch * WARMUP_EPOCHS
    lr_scheduler = LinearWarmupLR(optimizer, total_steps=total_steps, warmup_steps=warmup_steps)

    engine, train_dataloader, _, lr_scheduler = colossalai.initialize(pipe_model,
                                                                      optimizer,
                                                                      criterion,
                                                                      train_dataloader,
                                                                      lr_scheduler=lr_scheduler)

    logger = get_dist_logger()

    trainer = Trainer(engine=engine, logger=logger)

    hook_list = [
        hooks.LRSchedulerHook(lr_scheduler=lr_scheduler, by_epoch=False),
    ]

    # max_steps=2 keeps this a quick smoke test despite NUM_EPOCHS=60
    trainer.fit(train_dataloader=train_dataloader,
                epochs=NUM_EPOCHS,
                max_steps=2,
                hooks=hook_list,
                display_progress=True)


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_hybrid_parallel():
    world_size = 8
    run_func = partial(run_trainer, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_hybrid_parallel()
from functools import partial

import colossalai
import pytest
import torch.multiprocessing as mp
from colossalai.amp import AMP_TYPE
from colossalai.core import global_context as gpc
from colossalai.utils import free_port
from tests.components_to_test.registry import non_distributed_component_funcs
from colossalai.testing import parameterize, rerun_if_address_is_in_use

# pure data-parallel config; the fp16 mode is overwritten per run below
CONFIG = dict(parallel=dict(pipeline=dict(size=1), tensor=dict(size=1, mode=None)),
              fp16=dict(mode=None),
              clip_grad_norm=1.0)


# NOTE(review): 'repeated_computed_layers' appears twice in this list —
# possibly one entry was meant to be a different model name; confirm intent.
@parameterize('model_name', ['repeated_computed_layers', 'resnet18', 'repeated_computed_layers'])
@parameterize('amp_mode', [AMP_TYPE.APEX, AMP_TYPE.TORCH, AMP_TYPE.NAIVE, None])
def run_train(model_name, amp_mode):
    """Run a single training step of the named test model under the given amp mode."""
    # FIXME: test bert
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    # mutate the live global config so colossalai.initialize picks up this amp mode
    gpc.config.fp16['mode'] = amp_mode
    model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()

    model = model_builder(checkpoint=False)
    engine, train_dataloader, *args = colossalai.initialize(model=model,
                                                            optimizer=optimizer_class(model.parameters(), lr=1e-3),
                                                            criterion=criterion,
                                                            train_dataloader=train_dataloader)

    try:
        engine.train()
        for data, label in train_dataloader:
            engine.zero_grad()
            data = data.cuda()
            label = label.cuda()
            if criterion:
                output = engine(data)
                loss = engine.criterion(output, label)
            else:
                # criterion=None means the model computes its own loss
                loss = engine(data, label)
            engine.backward(loss)
            engine.step()
            # one step is enough for this smoke test
            break
    except IndexError:
        # if using apex amp, NetWithRepeatedlyComputedLayers will raise an index out of range issue
        # the following check fails in apex
        # if cached_x.grad_fn.next_functions[1][0].variable is not x:
        pass


def run_engine(rank, world_size, port):
    # init dist env
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_train()


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_engine():
    world_size = 2
    run_func = partial(run_engine, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_engine()
import pytest
from colossalai.engine.paramhooks import BaseParamHookMgr
from torch import nn
import torch
import torch.nn.functional as F
import copy


class SubNet(nn.Module):
    # Owns only a bias parameter; the linear weight is passed in at forward
    # time so Net can share fc1's weight with this submodule.
    def __init__(self, out_features) -> None:
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(out_features))

    def forward(self, x, weight):
        return F.linear(x, weight, self.bias)


class Net(nn.Module):
    # fc1 runs twice and its weight is reused inside sub_fc, so some
    # parameters receive gradients through multiple graph paths — exactly the
    # sharing scenario the param hook must fire only once for.
    def __init__(self, checkpoint=False) -> None:
        super().__init__()
        self.fc1 = nn.Linear(5, 5)
        self.sub_fc = SubNet(5)
        self.fc2 = nn.Linear(5, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = self.sub_fc(x, self.fc1.weight)
        x = self.fc1(x)
        x = self.fc2(x)
        return x


def net_data():
    # single fixed-shape CUDA input, wrapped in a tuple for model(*inputs)
    return (torch.randn(2, 5, dtype=torch.float, device='cuda'),)


def allclose(tensor_a: torch.Tensor, tensor_b: torch.Tensor, loose=False) -> bool:
    # loose=True relaxes tolerances for fp16/amp comparisons
    if loose:
        return torch.allclose(tensor_a, tensor_b, atol=1e-3, rtol=1e-3)
    return torch.allclose(tensor_a, tensor_b)


def test_base_param_hook():
    """Run the same model with and without backward param hooks registered.

    Verifies (a) each parameter's hook fires exactly once even with shared
    parameters, and (b) registering hooks does not perturb the gradients.
    """
    torch.manual_seed(0)
    model = Net(checkpoint=True).cuda()
    model.train()
    inputs = net_data()

    def run_model(model, inputs, use_param_hook = False):
        # Returns the hook-fire count when hooks are used, else None.
        if use_param_hook:

            class HooKWrapper:

                def __init__(self) -> None:
                    self.hook_triggered_times = 0

                def wrapper_func(self):

                    # grad may legitimately be None for params without grads
                    def hook(param, grad) -> 'torch.Tensor | None':
                        self.hook_triggered_times += 1
                        return grad

                    return hook

            hookwrapper = HooKWrapper()
            param_list = [p for p in model.parameters()]
            hook_mgr = BaseParamHookMgr(param_list)
            hook_mgr.register_backward_hooks(hookwrapper.wrapper_func())

        model.zero_grad(set_to_none=True)

        with torch.cuda.amp.autocast():
            y = model(*inputs)
            loss = y.sum()
        loss.backward()

        if use_param_hook:
            hook_mgr.remove_hooks()
            return hookwrapper.hook_triggered_times

    model_copy = copy.deepcopy(model)

    run_model(model, inputs, False)
    ret2 = run_model(model_copy, inputs, True)

    # Make sure param hook has only be fired once in case of parameter sharing
    assert ret2 == len(list(model.parameters()))

    # hooks must not have altered the gradients
    for p, p_copy in zip(model.parameters(), model_copy.parameters()):
        assert allclose(p.grad, p_copy.grad), f"{p.grad} vs {p_copy.grad}"


if __name__ == '__main__':
    test_base_param_hook()
#!/usr/bin/env python class Registry: def __init__(self): self._registry = dict() def register(self, name): assert name not in self._registry def _regsiter(callable_): self._registry[name] = callable_ return _regsiter def get_callable(self, name: str): return self._registry[name] def __iter__(self): self._idx = 0 self._len = len(self._registry) self._names = list(self._registry.keys()) return self def __next__(self): if self._idx < self._len: key = self._names[self._idx] callable_ = self._registry[key] self._idx += 1 return callable_ else: raise StopIteration non_distributed_component_funcs = Registry() model_paralle_component_funcs = Registry() __all__ = ['non_distributed_component_funcs', 'model_paralle_component_funcs']
import torch
import torch.nn as nn
import torch.nn.functional as F
from colossalai.nn import CheckpointModule
from .utils import DummyDataGenerator
from .registry import non_distributed_component_funcs


class SubNet(nn.Module):
    """Holds only a bias; the linear weight is supplied by the caller at forward time."""

    def __init__(self, out_features) -> None:
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(out_features))

    def forward(self, x, weight):
        return F.linear(x, weight, self.bias)


class NestedNet(CheckpointModule):
    """Small net whose submodule reuses fc1's weight, exercising nested sharing."""

    def __init__(self, checkpoint=False) -> None:
        super().__init__(checkpoint)
        self.fc1 = nn.Linear(5, 5)
        self.sub_fc = SubNet(5)
        self.fc2 = nn.Linear(5, 2)

    def forward(self, x):
        # fc1 runs twice; its weight is also shared with sub_fc
        out = self.fc1(x)
        out = self.sub_fc(out, self.fc1.weight)
        out = self.fc1(out)
        return self.fc2(out)


class DummyDataLoader(DummyDataGenerator):
    """Yields random (16, 5) inputs paired with binary labels."""

    def generate(self):
        return torch.rand(16, 5), torch.randint(low=0, high=2, size=(16,))


@non_distributed_component_funcs.register(name='nested_model')
def get_training_components():
    """Component factory: (model_builder, train/test loaders, optimizer class, criterion)."""

    def model_builder(checkpoint=True):
        return NestedNet(checkpoint)

    trainloader = DummyDataLoader()
    testloader = DummyDataLoader()
    return model_builder, trainloader, testloader, torch.optim.Adam, torch.nn.CrossEntropyLoss()
from . import repeated_computed_layer, resnet, nested_model, bert, no_leaf_module
from torchvision.models import resnet18
from .registry import non_distributed_component_funcs
from pathlib import Path
import os
import torch
from torchvision.transforms import transforms
from torchvision.datasets import CIFAR10
from colossalai.utils import get_dataloader


def get_cifar10_dataloader(train):
    """Build a shuffled CIFAR10 dataloader (dataset root read from the DATA env var)."""
    pipeline = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ])
    dataset = CIFAR10(root=Path(os.environ['DATA']), download=True, train=train, transform=pipeline)
    return get_dataloader(dataset=dataset, shuffle=True, batch_size=16, drop_last=True)


@non_distributed_component_funcs.register(name='resnet18')
def get_resnet_training_components():
    """Component factory: (model_builder, train/test loaders, optimizer class, criterion)."""

    def model_builder(checkpoint=False):
        return resnet18(num_classes=10)

    trainloader = get_cifar10_dataloader(train=True)
    testloader = get_cifar10_dataloader(train=False)
    return model_builder, trainloader, testloader, torch.optim.Adam, torch.nn.CrossEntropyLoss()
import torch
import torch.nn as nn
import torch.nn.functional as F
from colossalai.nn import CheckpointModule
from .utils.dummy_data_generator import DummyDataGenerator
from .registry import non_distributed_component_funcs


class NoLeafModule(CheckpointModule):
    """
    In this no-leaf module, it has subordinate nn.modules and a nn.Parameter.
    """

    def __init__(self, checkpoint=False) -> None:
        super().__init__(checkpoint=checkpoint)
        self.proj1 = nn.Linear(4, 8)
        self.weight = nn.Parameter(torch.randn(8, 8))
        self.proj2 = nn.Linear(8, 4)

    def forward(self, x):
        # project up, apply the free-standing weight, project back down
        hidden = self.proj1(x)
        hidden = F.linear(hidden, self.weight)
        return self.proj2(hidden)


class DummyDataLoader(DummyDataGenerator):
    """Yields random (16, 4) inputs paired with binary labels."""

    def generate(self):
        return torch.rand(16, 4), torch.randint(low=0, high=2, size=(16,))


@non_distributed_component_funcs.register(name='no_leaf_module')
def get_training_components():
    """Component factory: (model_builder, train/test loaders, optimizer class, criterion)."""
    from colossalai.nn.optimizer import HybridAdam

    def model_builder(checkpoint=True):
        return NoLeafModule(checkpoint)

    trainloader = DummyDataLoader()
    testloader = DummyDataLoader()
    criterion = torch.nn.CrossEntropyLoss()
    return model_builder, trainloader, testloader, HybridAdam, criterion
#!/usr/bin/env python

import torch
import torch.nn as nn
from colossalai.nn import CheckpointModule
from .utils.dummy_data_generator import DummyDataGenerator
from .registry import non_distributed_component_funcs


class NetWithRepeatedlyComputedLayers(CheckpointModule):
    """
    This model is to test with layers which go through forward pass multiple times.
    In this model, the fc1 and fc2 call forward twice
    """

    def __init__(self, checkpoint=False) -> None:
        super().__init__(checkpoint=checkpoint)
        self.fc1 = nn.Linear(5, 5)
        self.fc2 = nn.Linear(5, 5)
        self.fc3 = nn.Linear(5, 2)
        # fc1 and fc2 appear twice on purpose so each runs two forward passes
        self.layers = [self.fc1, self.fc2, self.fc1, self.fc2, self.fc3]

    def forward(self, x):
        out = x
        for layer in self.layers:
            out = layer(out)
        return out


class DummyDataLoader(DummyDataGenerator):
    """Yields random (16, 5) inputs paired with binary labels."""

    def generate(self):
        return torch.rand(16, 5), torch.randint(low=0, high=2, size=(16,))


@non_distributed_component_funcs.register(name='repeated_computed_layers')
def get_training_components():
    """Component factory: (model_builder, train/test loaders, optimizer class, criterion)."""

    def model_builder(checkpoint=True):
        return NetWithRepeatedlyComputedLayers(checkpoint)

    trainloader = DummyDataLoader()
    testloader = DummyDataLoader()
    criterion = torch.nn.CrossEntropyLoss()
    return model_builder, trainloader, testloader, torch.optim.Adam, criterion
import torch
import transformers
from packaging import version
from torch.utils.data import SequentialSampler
from transformers import BertConfig, BertForSequenceClassification
from .registry import non_distributed_component_funcs


def get_bert_data_loader(
    batch_size,
    total_samples,
    sequence_length,
    device=torch.device('cpu:0'),
    is_distrbuted=False,
):
    """Build a DataLoader over random token ids and binary labels.

    The parameter name 'is_distrbuted' keeps its original (misspelled)
    spelling for caller compatibility.
    """
    token_ids = torch.randint(
        low=0,
        high=1000,
        size=(total_samples, sequence_length),
        device=device,
        dtype=torch.long,
    )
    labels = torch.randint(low=0, high=2, size=(total_samples,), device=device, dtype=torch.long)
    dataset = torch.utils.data.TensorDataset(token_ids, labels)
    if is_distrbuted:
        sampler = torch.utils.data.distributed.DistributedSampler(dataset)
    else:
        sampler = SequentialSampler(dataset)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=sampler)


@non_distributed_component_funcs.register(name='bert')
def get_training_components():
    """Component factory for a tiny BERT classifier (criterion is None: the model returns its own loss)."""
    hidden_dim = 8
    num_head = 4
    sequence_length = 12
    num_layer = 2

    def bert_model_builder(checkpoint):
        config = BertConfig(gradient_checkpointing=checkpoint,
                            hidden_size=hidden_dim,
                            intermediate_size=hidden_dim * 4,
                            num_attention_heads=num_head,
                            max_position_embeddings=sequence_length,
                            num_hidden_layers=num_layer,
                            hidden_dropout_prob=0.,
                            attention_probs_dropout_prob=0.)
        print('building BertForSequenceClassification model')

        # adapting huggingface BertForSequenceClassification for single unitest calling interface
        class ModelAdaptor(BertForSequenceClassification):

            def forward(self, input_ids, labels):
                """
                inputs: data, label
                outputs: loss
                """
                return super().forward(input_ids=input_ids, labels=labels)[0]

        model = ModelAdaptor(config)
        # gradient_checkpointing_enable only exists on transformers >= 4.11
        if checkpoint and version.parse(transformers.__version__) >= version.parse("4.11.0"):
            model.gradient_checkpointing_enable()

        return model

    trainloader = get_bert_data_loader(batch_size=2,
                                       total_samples=10000,
                                       sequence_length=sequence_length,
                                       is_distrbuted=True)
    testloader = get_bert_data_loader(batch_size=2,
                                      total_samples=10000,
                                      sequence_length=sequence_length,
                                      is_distrbuted=True)
    criterion = None
    return bert_model_builder, trainloader, testloader, torch.optim.Adam, criterion
from abc import ABC, abstractmethod


class DummyDataGenerator(ABC):
    """Re-iterable fake dataloader that emits ``length`` synthetic batches.

    Subclasses implement ``generate`` to produce one batch; iterating the
    instance calls it ``length`` times.
    """

    def __init__(self, length=10):
        self.length = length

    @abstractmethod
    def generate(self):
        """Produce a single synthetic batch."""
        pass

    def __iter__(self):
        # restart the counter so the generator can be iterated repeatedly
        self.step = 0
        return self

    def __next__(self):
        if self.step >= self.length:
            raise StopIteration
        self.step += 1
        return self.generate()

    def __len__(self):
        return self.length
from .dummy_data_generator import DummyDataGenerator
import torch
import torch.nn as nn
from torch.optim.adam import Adam
from torch.optim import AdamW
from colossalai.nn.optimizer.fused_adam import FusedAdam
from colossalai.testing import parameterize


class FC(nn.Module):
    # trivial 64->64 linear wrapped in a Sequential
    def __init__(self) -> None:
        super().__init__()
        self.fc = nn.Sequential(nn.Linear(64, 64))

    def forward(self, x):
        return self.fc(x)


@parameterize('adamw', [False, True])
@parameterize('p_dtype', [torch.float, torch.half])
@parameterize('g_dtype', [torch.float, torch.half])
def test_adam(adamw, p_dtype, g_dtype):
    """Compare FusedAdam against torch Adam/AdamW across param/grad dtype combos.

    Two identically initialised FC models are stepped sample-by-sample; the
    fused optimizer's final parameters must stay close to the reference ones.
    """
    model = FC().cuda().to(p_dtype)
    state = model.state_dict()
    # second model loads the same initial state so both runs start identically
    model_copy = FC().cuda().to(p_dtype)
    model_copy.load_state_dict(state.copy())

    if adamw:
        optim = FusedAdam(model.parameters(), lr=1e-3, adamw_mode=True)
        torch_optim = AdamW(model_copy.parameters(), lr=1e-3)
    else:
        optim = FusedAdam(model.parameters(), lr=1e-3)
        torch_optim = Adam(model_copy.parameters(), lr=1e-3)

    data = torch.rand(1024, 64).cuda().to(p_dtype)
    data_copy = data.clone()
    label = torch.rand(1024, 64).cuda().to(p_dtype)

    # drive the fused optimizer one sample at a time
    for d, l in zip(data, label):
        y = model(d)
        loss = ((l - y) ** 2).sum()
        optim.zero_grad()
        loss.backward()
        if p_dtype != g_dtype:
            # emulate mixed param/grad precision by casting grads in place
            for i in range(len(optim.param_groups[0]['params'])):
                optim.param_groups[0]['params'][i].grad.data = optim.param_groups[0]['params'][i].grad.data.to(g_dtype)
        optim.step()

    # reference run with the torch optimizer on the copied model
    for d, l in zip(data_copy, label):
        y = model_copy(d)
        loss = ((l - y) ** 2).sum()
        torch_optim.zero_grad()
        loss.backward()
        torch_optim.step()

    assert len(optim.param_groups[0]['params']) == len(torch_optim.param_groups[0]['params'])
    for i in range(len(optim.param_groups[0]['params'])):
        # skip params where fp16 overflow produced NaNs in either run
        if torch.isnan(optim.param_groups[0]['params'][i]).any() \
                or torch.isnan(torch_optim.param_groups[0]['params'][i]).any():
            continue
        assert torch.allclose(optim.param_groups[0]['params'][i], torch_optim.param_groups[0]['params'][i], 2e-3, 2e-3)
import math

import torch

from colossalai.testing import parameterize


def torch_adam_update(
    step,
    lr,
    beta1,
    beta2,
    eps,
    weight_decay,
    param,
    grad,
    exp_avg,
    exp_avg_sq,
    use_adamw,
):
    """Reference Adam/AdamW update applied in place to ``param``.

    Mirrors torch.optim single-tensor math: bias-corrected first/second
    moments, with decoupled weight decay when ``use_adamw`` is True.
    """
    bias_correction1 = 1 - beta1**step
    bias_correction2 = 1 - beta2**step

    if weight_decay != 0:
        if use_adamw:
            # Perform stepweight decay (decoupled, AdamW-style)
            param.mul_(1 - lr * weight_decay)
        else:
            # classic Adam folds weight decay into the gradient
            grad = grad.add(param, alpha=weight_decay)

    # Decay the first and second moment running average coefficient
    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)

    step_size = lr / bias_correction1

    param.addcdiv_(exp_avg, denom, value=-step_size)


def assertLess(data_diff, threshold, msg):
    """unittest-style helper: fail with ``msg`` unless data_diff < threshold."""
    assert data_diff < threshold, msg


def assertTrue(condition, msg):
    """unittest-style helper: fail with ``msg`` unless condition holds."""
    assert condition, msg


@parameterize('adamw', [True, False])
@parameterize('step', [1, 2])
@parameterize('p_dtype', [torch.float, torch.half])
@parameterize('g_dtype', [torch.float, torch.half])
def test_cpu_adam(adamw, step, p_dtype, g_dtype):
    """Check the cpu_adam C extension against the pure-torch reference update."""
    lr = 1e-3
    beta1, beta2 = 0.9, 0.999
    eps = 1e-8
    weight_decay = 0

    # Import once, outside the loop (previously re-imported on every one of
    # the 1024 iterations), and catch only ImportError instead of a bare
    # except that would also swallow KeyboardInterrupt.
    try:
        import cpu_adam
        cpu_adam_op = cpu_adam
    except ImportError as err:
        raise ImportError("Import cpu adam error, please install colossal from source code") from err

    for i in range(1024):
        p_data = torch.rand(64, dtype=p_dtype)
        p_data_copy = p_data.clone().float()
        p_grad = torch.rand(64, dtype=g_dtype)
        p_grad_copy = p_grad.clone().float()
        exp_avg = torch.rand(p_data.shape)
        exp_avg_copy = exp_avg.clone()
        exp_avg_sq = torch.rand(p_data.shape)
        exp_avg_sq_copy = exp_avg_sq.clone()

        cpu_adam_op.create_adam(0, lr, beta1, beta2, eps, weight_decay, adamw, False)
        cpu_adam_op.adam_update(
            0,
            step,
            lr,
            beta1,
            beta2,
            eps,
            weight_decay,
            True,
            p_data.view(-1),    # p_dtype data (fp32 or fp16 per parameterize)
            p_grad.view(-1),    # g_dtype grad (fp32 or fp16 per parameterize)
            exp_avg.view(-1),
            exp_avg_sq.view(-1),
            -1,
        )

        torch_adam_update(
            step,
            lr,
            beta1,
            beta2,
            eps,
            weight_decay,
            p_data_copy,    # fp32 data
            p_grad_copy,    # fp32 grad
            exp_avg_copy,
            exp_avg_sq_copy,
            adamw,
        )
        var = p_data_copy - p_data
        data_diff = torch.max(torch.abs(var))
        threshold = 1e-3
        assertLess(
            data_diff,
            threshold,
            f"p_data diff {data_diff}. failed check, step {step}, lr {lr}, eps "
            f"{eps} beta1 {beta1} beta2 {beta2} weight_decay {weight_decay} p_dtype {p_dtype}, g_dtype {g_dtype}",
        )
        max_grad_diff = torch.max(torch.abs(p_grad_copy - p_grad))
        assertTrue(max_grad_diff < threshold, f"diff {max_grad_diff}")
        max_exp_avg_diff = torch.max(torch.abs(exp_avg_copy - exp_avg))
        assertTrue(max_exp_avg_diff < threshold, f"max_exp_avg_diff {max_exp_avg_diff}")
        max_exp_avg_sq_diff = torch.max(torch.abs(exp_avg_sq_copy - exp_avg_sq))
        assertTrue(max_exp_avg_sq_diff < threshold, f"max_exp_avg_sq_diff {max_exp_avg_sq_diff}")
import torch
import torch.nn as nn
from torch.optim.adam import Adam
from torch.optim import AdamW
from colossalai.nn.optimizer.hybrid_adam import HybridAdam
from colossalai.testing import parameterize

# number of optimizer steps to run per configuration
RE = 1024


@parameterize('adamw', [False, True])
@parameterize('device', ['cpu', 'cuda:0'])
@parameterize('p_dtype', [torch.float])
@parameterize('g_dtype', [torch.float, torch.half])
def test_adam(adamw, device, p_dtype, g_dtype):
    """Compare HybridAdam against torch Adam/AdamW on a single 64-element parameter.

    The RNG state is restored before creating the reference parameter so both
    optimizers start from identical initial values.
    """
    rng_state = torch.get_rng_state()
    p = nn.Parameter(torch.rand(64).to(device, p_dtype))
    torch.set_rng_state(rng_state)
    p_copy = nn.Parameter(torch.rand(64).to(device).float())

    if adamw:
        optim = HybridAdam([p], lr=1e-3, adamw_mode=True)
        torch_optim = AdamW([p_copy], lr=1e-3)
    else:
        optim = HybridAdam([p], lr=1e-3)
        torch_optim = Adam([p_copy], lr=1e-3)

    print(f"adaw mode {adamw}, device {device}, p_dtype {p_dtype}, g_dtype {g_dtype}")
    for i in range(RE):
        # same gradient for both; the fp32 reference copy is taken before the
        # grad is cast down to the tested grad dtype
        p.grad = torch.rand(64).to(device, p_dtype)
        p_copy.grad = p.grad.clone().float()
        p.grad.data = p.grad.data.to(g_dtype)

        optim.step()
        torch_optim.step()

        # fp16 grads can overflow to NaN; skip the comparison on such steps
        if torch.isnan(p.data).any() or torch.isnan(p_copy.data).any():
            continue
        assert torch.allclose(p.data, p_copy.data, 1e-4, 1e-2), \
            f"adaw mode {adamw}, device {device}, p_dtype {p_dtype}, g_dtype {g_dtype}"
from numpy import dtype    # NOTE(review): unused import; kept as-is
import torch
import torch.nn as nn
import math
from colossalai.testing import parameterize
from colossalai.utils import multi_tensor_applier


def torch_adam_update(
    step,
    lr,
    beta1,
    beta2,
    eps,
    weight_decay,
    param,
    grad,
    exp_avg,
    exp_avg_sq,
    use_adamw,
):
    """Reference Adam/AdamW update applied in place to ``param``.

    Mirrors torch.optim single-tensor math: bias-corrected first/second
    moments, with decoupled weight decay when ``use_adamw`` is True.
    """
    bias_correction1 = 1 - beta1**step
    bias_correction2 = 1 - beta2**step

    if weight_decay != 0:
        if use_adamw:
            # Perform stepweight decay
            param.mul_(1 - lr * weight_decay)
        else:
            # classic Adam folds weight decay into the gradient
            grad = grad.add(param, alpha=weight_decay)

    # Decay the first and second moment running average coefficient
    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)

    step_size = lr / bias_correction1

    param.addcdiv_(exp_avg, denom, value=-step_size)


@parameterize('adamw', [False, True])
@parameterize('step', [1, 2])
@parameterize('p_dtype', [torch.float, torch.half])
@parameterize('g_dtype', [torch.float, torch.half])
def test_adam(adamw, step, p_dtype, g_dtype):
    """Check the fused colossal_C multi_tensor_adam kernel against the torch reference."""
    # NOTE(review): bare except here also swallows non-import errors; a
    # narrower `except ImportError` would be safer.
    try:
        import colossal_C
        fused_adam = colossal_C.multi_tensor_adam
        dummy_overflow_buf = torch.cuda.IntTensor([0])
    except:
        raise ImportError("No colossal_C kernel installed.")

    count = 0
    for i in range(1024):
        p = torch.rand(64, dtype=p_dtype).cuda()
        p_copy = p.clone().float()
        g = torch.rand(p.shape, dtype=g_dtype).cuda()
        g_copy = g.clone().float()
        m = torch.rand(p.shape).cuda()
        m_copy = m.clone()
        v = torch.rand(p.shape).cuda()
        v_copy = v.clone()

        lr = 1e-3
        beta1, beta2 = 0.9, 0.999
        eps = 1e-8
        weight_decay = 0

        multi_tensor_applier(fused_adam, dummy_overflow_buf, [[g], [p], [m], [v]], lr, beta1, beta2, eps, step, adamw,
                             True, weight_decay)

        torch_adam_update(
            step,
            lr,
            beta1,
            beta2,
            eps,
            weight_decay,
            p_copy,    # fp32 data
            g_copy,    # fp32 grad
            m_copy,
            v_copy,
            adamw,
        )

        # fp16 runs can overflow; tolerate some NaN iterations but not too many
        if torch.isnan(p).any() or torch.isnan(p_copy).any():
            count += 1
            continue
        assert count < 200, "too many nans"
        assert torch.allclose(p.to(torch.float), p_copy.to(torch.float), 1e-5, 1e-5), \
            f"failed check, adamw {adamw}, p_dtype {p_dtype}, g_dtype {g_dtype}"
from functools import partial

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.amp.amp_type import AMP_TYPE
from colossalai.logging import get_dist_logger
from colossalai.trainer import Trainer
from colossalai.utils import MultiTimer, free_port
from tests.components_to_test.registry import non_distributed_component_funcs
from colossalai.testing import parameterize, rerun_if_address_is_in_use

BATCH_SIZE = 4
IMG_SIZE = 32
NUM_EPOCHS = 200

CONFIG = dict(fp16=dict(mode=AMP_TYPE.TORCH))


@parameterize('model_name', ['repeated_computed_layers', 'resnet18', 'nested_model'])
def run_trainer(model_name):
    """Build engine + trainer for the named test model and fit a few steps."""
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    model_builder, train_dataloader, test_dataloader, optimizer_class, criterion = get_components_func()
    model = model_builder()
    optimizer = optimizer_class(model.parameters(), lr=1e-3)
    engine, train_dataloader, *_ = colossalai.initialize(model=model,
                                                         optimizer=optimizer,
                                                         criterion=criterion,
                                                         train_dataloader=train_dataloader)

    logger = get_dist_logger()
    logger.info("engine is built", ranks=[0])

    timer = MultiTimer()
    trainer = Trainer(engine=engine, logger=logger, timer=timer)
    logger.info("trainer is built", ranks=[0])

    logger.info("start training", ranks=[0])
    # max_steps=3 keeps this a fast smoke test despite NUM_EPOCHS=200
    trainer.fit(train_dataloader=train_dataloader,
                test_dataloader=test_dataloader,
                epochs=NUM_EPOCHS,
                max_steps=3,
                display_progress=True,
                test_interval=5)
    torch.cuda.empty_cache()


def run_dist(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    # Bug fix: the training routine was never invoked here, so the test
    # previously passed without exercising the trainer at all.
    run_trainer()


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_trainer_no_pipeline():
    world_size = 4
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_trainer_no_pipeline()
import os
from functools import partial
from pathlib import Path

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.engine.schedule import PipelineSchedule
from colossalai.logging import get_dist_logger
from colossalai.trainer import Trainer
from colossalai.utils import MultiTimer, free_port, get_dataloader
from torch.optim import Adam
from torchvision import transforms
from torchvision.datasets import CIFAR10
from torchvision.models import resnet18
from colossalai.testing import rerun_if_address_is_in_use

BATCH_SIZE = 4
IMG_SIZE = 32
NUM_EPOCHS = 200

# Two-stage pipeline parallelism; micro-batch count for the pipeline schedule.
CONFIG = dict(
    NUM_MICRO_BATCHES=2,
    parallel=dict(pipeline=2),
)


def run_trainer_with_pipeline(rank, world_size, port):
    """Per-process entry: split ResNet-18 into two pipeline stages and run a short fit.

    Stage 0 gets the stem through layer2; stage 1 gets layer3 through fc
    (with an explicit Flatten between avgpool and fc, since the functional
    torch.flatten from ResNet.forward is lost when re-wrapping in Sequential).
    """
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    # build model
    model = resnet18(num_classes=10)

    if gpc.get_local_rank(ParallelMode.PIPELINE) == 0:
        model = nn.Sequential(model.conv1, model.bn1, model.relu, model.maxpool, model.layer1, model.layer2)
    elif gpc.get_local_rank(ParallelMode.PIPELINE) == 1:

        class Flatten(nn.Module):

            def forward(self, x):
                return torch.flatten(x, 1)

        model = nn.Sequential(model.layer3, model.layer4, model.avgpool, Flatten(), model.fc)

    # build dataloaders
    # NOTE(review): requires the DATA environment variable to point at the dataset root.
    train_dataset = CIFAR10(root=Path(os.environ['DATA']),
                            download=True,
                            transform=transforms.Compose([
                                transforms.Resize(size=(IMG_SIZE, IMG_SIZE)),
                                transforms.ToTensor(),
                                transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
                            ]))
    train_dataloader = get_dataloader(dataset=train_dataset,
                                      shuffle=True,
                                      batch_size=BATCH_SIZE,
                                      pin_memory=True,
                                      drop_last=True)

    # build optimizer
    optimizer = Adam(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()

    engine, train_dataloader, *args = colossalai.initialize(model=model,
                                                            optimizer=optimizer,
                                                            criterion=criterion,
                                                            train_dataloader=train_dataloader)

    logger = get_dist_logger()
    logger.info("engine is built", ranks=[0])

    timer = MultiTimer()
    trainer = Trainer(engine=engine, logger=logger, timer=timer)
    logger.info("trainer is built", ranks=[0])

    logger.info("start training", ranks=[0])
    # max_steps=3 keeps the smoke test short regardless of NUM_EPOCHS.
    trainer.fit(train_dataloader=train_dataloader,
                epochs=NUM_EPOCHS,
                max_steps=3,
                display_progress=True,
                test_interval=5)
    gpc.destroy()
    torch.cuda.empty_cache()


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_trainer_with_pipeline():
    world_size = 4
    run_func = partial(run_trainer_with_pipeline, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_trainer_with_pipeline()
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Point-to-point pipeline communication test.

All ranks build identical tensors via all_reduce, then relay them
forward/backward along the pipeline and check each hop received the
expected values.
"""

from functools import partial

import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.communication import (recv_backward, recv_forward, recv_tensor_meta, send_backward,
                                      send_backward_recv_forward, send_forward, send_forward_recv_backward,
                                      send_tensor_meta)
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.logging import get_dist_logger
from colossalai.utils import free_port, get_current_device
from colossalai.testing import rerun_on_exception

BATCH_SIZE = 4
SEQ_LENGTH = 2
HIDDEN_SIZE = 16

CONFIG = dict(parallel=dict(pipeline=dict(size=4), tensor=dict(size=1, mode=None)), seed=1024)


def check_equal(A, B):
    # Returns a bool (does not assert) — callers log the result instead of failing hard.
    return torch.allclose(A, B, rtol=1e-5, atol=1e-3)


def check_forward(output_tensor, rank, logger):
    """Relay `output_tensor` stage-by-stage toward the last pipeline rank."""
    dist.barrier()
    if gpc.is_first_rank(ParallelMode.PIPELINE):
        tensor = output_tensor.clone()
    else:
        tensor = recv_forward(output_tensor.shape)
        logger.info('Rank {} received forward. Correct tensor: {}'.format(rank, check_equal(tensor, output_tensor)))
    if not gpc.is_last_rank(ParallelMode.PIPELINE):
        send_forward(tensor)
        logger.info('Rank {} sent forward.'.format(rank))


def check_backward(output_grad, rank, logger):
    """Relay `output_grad` stage-by-stage toward the first pipeline rank."""
    dist.barrier()
    if gpc.is_last_rank(ParallelMode.PIPELINE):
        grad = output_grad.clone()
    else:
        grad = recv_backward(output_grad.shape)
        logger.info('Rank {} received backward. Correct grad: {}'.format(rank, check_equal(grad, output_grad)))
    if not gpc.is_first_rank(ParallelMode.PIPELINE):
        send_backward(grad)
        logger.info('Rank {} sent backward.'.format(rank))


def check_forward_backward(output_tensor, output_grad, rank, logger):
    """Exercise the combined send+recv primitives in both directions."""
    dist.barrier()
    if not gpc.is_first_rank(ParallelMode.PIPELINE):
        tensor = send_backward_recv_forward(output_grad, output_tensor.shape)
        logger.info('Rank {} sent backward received forward. Correct tensor: {}'.format(
            rank, check_equal(tensor, output_tensor)))
    if not gpc.is_last_rank(ParallelMode.PIPELINE):
        grad = send_forward_recv_backward(output_tensor, output_grad.shape)
        logger.info('Rank {} sent forward received backward. Correct grad: {}'.format(
            rank, check_equal(grad, output_grad)))


def check_comm(size, rank, prev_rank, next_rank, logger):
    # NOTE(review): size/prev_rank/next_rank are currently unused; kept for signature stability.
    dtype = torch.float32
    device = get_current_device()
    tensor_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
    grad_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
    # all_reduce makes the randomly-initialized tensors identical on every rank,
    # giving each stage a known reference value to compare received data against.
    tensor = torch.randn(tensor_shape, dtype=dtype, device=device)
    dist.all_reduce(tensor)
    grad = torch.randn(grad_shape, dtype=dtype, device=device)
    dist.all_reduce(grad)
    check_forward(tensor, rank, logger)
    check_backward(grad, rank, logger)
    check_forward_backward(tensor, grad, rank, logger)


def run_check(rank, world_size, port):
    """Per-process entry point: init the 4-stage pipeline env and run the comm checks."""
    launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    logger = get_dist_logger()
    rank = gpc.get_global_rank()
    prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE)
    next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE)
    logger.info('Rank {0}: prev rank {1}, next rank {2}'.format(rank, prev_rank, next_rank))
    logger.info('Distributed environment is initialzied.')

    check_comm(world_size, rank, prev_rank, next_rank, logger)
    gpc.destroy()
    torch.cuda.empty_cache()


@pytest.mark.dist
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_p2p():
    world_size = 4
    run_func = partial(run_check, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_p2p()
# referenced from Megatron and used to testify communication

import os
import os.path as osp
from functools import partial
from pathlib import Path

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.builder import build_pipeline_model_from_cfg
from colossalai.core import global_context as gpc
from colossalai.engine.schedule import PipelineSchedule
from colossalai.initialize import launch
from colossalai.utils import free_port, get_dataloader, print_rank_0
from colossalai.testing import rerun_on_exception
from torchvision import transforms
from torchvision.datasets import CIFAR10

BATCH_SIZE = 4

DIR_PATH = osp.dirname(osp.realpath(__file__))
# Pipeline ResNet config living next to this test file.
CONFIG_PATH = osp.join(DIR_PATH, './resnet_config.py')


def run_schedule(rank, world_size, port):
    """Per-process entry: build a pipeline-partitioned ResNet and run one
    forward/backward pass through the engine's pipeline schedule.
    """
    launch(config=CONFIG_PATH, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    # build model — partitioned across pipeline stages from the config.
    model = build_pipeline_model_from_cfg(gpc.config.model, 1)
    print_rank_0('model is created')

    # NOTE(review): requires the DATA environment variable to point at the dataset root.
    train_dataset = CIFAR10(root=Path(os.environ['DATA']),
                            download=True,
                            transform=transforms.Compose([
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
                                                     std=[0.2023, 0.1994, 0.2010]),
                            ]))

    train_dataloader = get_dataloader(
        dataset=train_dataset,
        shuffle=True,
        add_sampler=True,
        batch_size=BATCH_SIZE,
        pin_memory=True,
    )

    # build criterion
    criterion = torch.nn.CrossEntropyLoss()

    # optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0)

    # initialize
    engine, train_dataloader, _, _ = colossalai.initialize(model, optimizer, criterion, train_dataloader)

    # build pipeline schedule
    schedule = engine.schedule

    # run schedule — a single scheduled step is enough to exercise p2p comm.
    data_iter = iter(train_dataloader)
    schedule.forward_backward_step(engine, data_iter)

    gpc.destroy()
    torch.cuda.empty_cache()


@pytest.mark.dist
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_pipeline_schedule():
    world_size = 4
    run_func = partial(run_schedule, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_pipeline_schedule()
import os.path as osp
from functools import partial

import pytest
import torch
import torch.multiprocessing as mp

from colossalai.builder.pipeline import build_pipeline_model_from_cfg
from colossalai.core import global_context
from colossalai.initialize import launch
from colossalai.logging import get_dist_logger
from colossalai.utils import free_port
from colossalai.testing import rerun_on_exception

DIR_PATH = osp.dirname(osp.realpath(__file__))
CONFIG_PATH = osp.join(DIR_PATH, 'resnet_config.py')


def run_partition(rank, world_size, port):
    """Worker entry: initialize the runtime, build the pipeline-partitioned
    model from the ResNet config, and verify it is a torch Module."""
    launch(config=CONFIG_PATH,
           rank=rank,
           world_size=world_size,
           host='localhost',
           port=port,
           backend='nccl')
    logger = get_dist_logger()
    logger.info('finished initialization')

    # Partition the configured model into pipeline stages (verbose build log).
    partitioned = build_pipeline_model_from_cfg(global_context.config.model, 1, verbose=True)
    assert isinstance(partitioned, torch.nn.Module)
    logger.info('model is created')

    global_context.destroy()
    logger.info('training finished')
    torch.cuda.empty_cache()


@pytest.mark.dist
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_partition():
    """Spawn four workers, each building its own pipeline partition."""
    world_size = 4
    worker = partial(run_partition, world_size=world_size, port=free_port())
    mp.spawn(worker, nprocs=world_size)


if __name__ == '__main__':
    test_partition()
# Config exec'd by colossalai.launch (via CONFIG_PATH) for the 4-stage
# pipeline ResNet tests in this directory.
import os
import model    # NOTE(review): immediately shadowed by `model = dict(...)` below — looks like a stale leftover; confirm a `model` module even exists on the path
from pathlib import Path

BATCH_SIZE = 128
IMG_SIZE = 224
DIM = 768
NUM_CLASSES = 10
NUM_ATTN_HEADS = 12
# micro-batch count consumed by the pipeline schedule
NUM_MICRO_BATCHES = 2

# resnet 18
model = dict(type='VanillaResNet',
             block_type='ResNetBasicBlock',
             layers=[2, 2, 2, 2],
             num_cls=10)

# 4-stage pipeline, no tensor parallelism
parallel = dict(
    pipeline=dict(size=4),
    tensor=dict(size=1, mode=None)
)
from .layers import * from .resnet import VanillaResNet
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from typing import List, Optional

import torch
import torch.nn as nn
from torch import Tensor

from colossalai.registry import LAYERS
from colossalai.registry import MODELS
from colossalai.nn.model import ModelFromConfig


@MODELS.register_module
class VanillaResNet(ModelFromConfig):
    """ResNet from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Builds the network declaratively: ``self.layers_cfg`` is a list of layer
    config dicts which the ``ModelFromConfig`` base class materializes
    (presumably into ``self.layers`` — consumed by ``forward``; confirm in base).
    """

    def __init__(
            self,
            num_cls: int,
            block_type: str,
            layers: List[int],
            norm_layer_type: str = 'BatchNorm2d',
            in_channels: int = 3,
            groups: int = 1,
            width_per_group: int = 64,
            zero_init_residual: bool = False,
            replace_stride_with_dilation: Optional[List[bool]] = None,
            dilations=(1, 1, 1, 1)
    ) -> None:
        super().__init__()

        self.inplanes = 64
        self.zero_init_residual = zero_init_residual
        self.blocks = layers
        # expansion factor of the chosen residual block (1 for basic, 4 for bottleneck)
        self.block_expansion = LAYERS.get_module(block_type).expansion
        self.dilations = dilations
        # kwargs shared by all four ResLayer configs below
        self.reslayer_common_cfg = dict(
            type='ResLayer',
            block_type=block_type,
            norm_layer_type=norm_layer_type,
            groups=groups,
            base_width=width_per_group
        )

        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]

        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))

        self.layers_cfg = [
            # conv1
            dict(type='Conv2d',
                 in_channels=in_channels,
                 out_channels=self.inplanes,
                 kernel_size=7,
                 stride=2,
                 padding=3,
                 bias=False),
            # bn1
            dict(
                type=norm_layer_type,
                num_features=self.inplanes
            ),
            # relu
            dict(
                type='ReLU',
                inplace=True
            ),
            # maxpool
            dict(
                type='MaxPool2d',
                kernel_size=3,
                stride=2,
                padding=1
            ),
            # layer 1 (stride 1, never dilated)
            dict(
                inplanes=self.inplanes,
                planes=64,
                blocks=self.blocks[0],
                dilation=self.dilations[0],
                **self.reslayer_common_cfg
            ),
            # layer 2
            dict(
                inplanes=64 * self.block_expansion,
                planes=128,
                blocks=self.blocks[1],
                stride=2,
                dilate=replace_stride_with_dilation[0],
                dilation=self.dilations[1],
                **self.reslayer_common_cfg
            ),
            # layer 3
            # NOTE(review): uses `layers[2]`/`layers[3]` directly while layers 1-2
            # use `self.blocks[...]` — same values, inconsistent spelling.
            dict(
                inplanes=128 * self.block_expansion,
                planes=256,
                blocks=layers[2],
                stride=2,
                dilate=replace_stride_with_dilation[1],
                dilation=self.dilations[2],
                **self.reslayer_common_cfg
            ),
            # layer 4
            dict(
                inplanes=256 * self.block_expansion,
                planes=512,
                blocks=layers[3],
                stride=2,
                dilate=replace_stride_with_dilation[2],
                dilation=self.dilations[3],
                **self.reslayer_common_cfg
            ),
            # avg pool
            dict(
                type='AdaptiveAvgPool2d',
                output_size=(1, 1)
            ),
            # flatten
            dict(
                type='LambdaWrapper',
                func=lambda mod, x: torch.flatten(x, 1)
            ),
            # linear
            dict(
                type='Linear',
                in_features=512 * self.block_expansion,
                out_features=num_cls
            )
        ]

    def forward(self, x: Tensor):
        # self.layers is expected to be populated by ModelFromConfig from
        # self.layers_cfg — TODO confirm against the base class.
        for layer in self.layers:
            x = layer(x)
        return x

    def init_weights(self):
        """Kaiming-init convolutions, unit-init norm layers; optionally
        zero-init the last BN of each residual branch."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if self.zero_init_residual:
            for m in self.modules():
                if isinstance(m, LAYERS.get_module('ResNetBottleneck')):
                    # type: ignore[arg-type]
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, LAYERS.get_module('ResNetBasicBlock')):
                    # type: ignore[arg-type]
                    nn.init.constant_(m.bn2.weight, 0)
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Optional, Callable import torch.nn as nn from torch import Tensor from colossalai.registry import LAYERS from .conv import conv3x3 @LAYERS.register_module class ResNetBasicBlock(nn.Module): """Basic ResNet block """ expansion: int = 1 def __init__( self, inplanes: int, planes: int, stride: int = 1, downsample: Optional[nn.Module] = None, groups: int = 1, base_width: int = 64, dilation: int = 1, norm_layer: Optional[Callable[..., nn.Module]] = None ) -> None: super().__init__() if norm_layer is None: norm_layer = nn.BatchNorm2d if groups != 1 or base_width != 64: raise ValueError( 'BasicBlock only supports groups=1 and base_width=64') if dilation > 1: raise NotImplementedError( "Dilation > 1 not supported in BasicBlock") # Both self.conv1 and self.downsample layers downsample the input when stride != 1 self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = norm_layer(planes) self.downsample = downsample self.stride = stride def forward(self, x: Tensor) -> Tensor: identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.nn as nn from colossalai.registry import LAYERS from .conv import conv1x1 @LAYERS.register_module class ResLayer(nn.Module): def __init__(self, block_type: str, norm_layer_type: str, inplanes: int, planes: int, blocks: int, groups: int, base_width: int, stride: int = 1, dilation: int = 1, dilate: bool = False, ): super().__init__() self.block = LAYERS.get_module(block_type) self.norm_layer = LAYERS.get_module(norm_layer_type) self.inplanes = inplanes self.planes = planes self.blocks = blocks self.groups = groups self.dilation = dilation self.base_width = base_width self.dilate = dilate self.stride = stride self.layer = self._make_layer() def _make_layer(self): norm_layer = self.norm_layer downsample = None previous_dilation = self.dilation if self.dilate: self.dilation *= self.stride self.stride = 1 if self.stride != 1 or self.inplanes != self.planes * self.block.expansion: downsample = nn.Sequential( conv1x1(self.inplanes, self.planes * self.block.expansion, self.stride), norm_layer(self.planes * self.block.expansion), ) layers = [] layers.append(self.block(self.inplanes, self.planes, self.stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer)) self.inplanes = self.planes * self.block.expansion for _ in range(1, self.blocks): layers.append(self.block(self.inplanes, self.planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer)) return nn.Sequential(*layers) def forward(self, x): return self.layer(x)
from .basic_block import ResNetBasicBlock from .bottleneck import ResNetBottleneck from .reslayer import ResLayer
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from typing import Optional, Callable

import torch.nn as nn
from torch import Tensor

from colossalai.registry import LAYERS

from .conv import conv3x3, conv1x1


@LAYERS.register_module
class ResNetBottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.

    # A bottleneck block widens its output to planes * expansion channels.
    expansion: int = 4

    def __init__(
            self,
            inplanes: int,
            planes: int,
            stride: int = 1,
            downsample: Optional[nn.Module] = None,
            groups: int = 1,
            base_width: int = 64,
            dilation: int = 1,
            norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        """1x1 reduce -> 3x3 (strided) -> 1x1 expand, with a residual shortcut.

        `downsample` projects the shortcut whenever the main branch changes
        the spatial size or channel count.
        """
        super().__init__()
        norm_layer = nn.BatchNorm2d if norm_layer is None else norm_layer
        # inner width of the bottleneck, scaled by base_width and groups
        mid_channels = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, mid_channels)
        self.bn1 = norm_layer(mid_channels)
        self.conv2 = conv3x3(mid_channels, mid_channels, stride, groups, dilation)
        self.bn2 = norm_layer(mid_channels)
        self.conv3 = conv1x1(mid_channels, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        y += shortcut
        return self.relu(y)
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch.nn as nn


def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """Build a bias-free 3x3 convolution whose padding equals its dilation,
    so spatial size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )


def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Build a bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
from functools import partial

import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.communication import all_gather, all_reduce, reduce_scatter
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.utils import free_port, get_current_device
from colossalai.testing import rerun_if_address_is_in_use

CONFIG = dict(parallel=dict(data=8, pipeline=1, tensor=dict(mode=None, size=1)))

SIZE = 8


def check_all_gather():
    """Exercise async all_gather over the global group; each rank contributes
    a distinct [rank*SIZE, rank*SIZE+SIZE) range so results are inspectable."""
    tensor = torch.tensor([dist.get_rank() * SIZE + j for j in range(SIZE)])
    tensor = tensor.to(get_current_device())
    print('Before: Rank {0} - {1}'.format(dist.get_rank(), tensor))
    tensor, op = all_gather(tensor, 0, ParallelMode.GLOBAL, async_op=True)
    print('After: Rank {0} - {1}'.format(dist.get_rank(), tensor))
    op.wait()
    print('Complete: Rank {0} - {1}'.format(dist.get_rank(), tensor))
    torch.cuda.synchronize()


def check_reduce_scatter():
    """Exercise async reduce_scatter over the global group."""
    tensor = torch.tensor([dist.get_rank() * SIZE + j for j in range(SIZE)])
    tensor = tensor.to(get_current_device())
    print('Before: Rank {0} - {1}'.format(dist.get_rank(), tensor))
    tensor, op = reduce_scatter(tensor, 0, ParallelMode.GLOBAL, async_op=True)
    print('After: Rank {0} - {1}'.format(dist.get_rank(), tensor))
    op.wait()
    print('Complete: Rank {0} - {1}'.format(dist.get_rank(), tensor))
    torch.cuda.synchronize()


def check_all_reduce():
    """Exercise async all_reduce over the global group."""
    tensor = torch.tensor([dist.get_rank() * SIZE + j for j in range(SIZE)])
    tensor = tensor.to(get_current_device())
    print('Before: Rank {0} - {1}'.format(dist.get_rank(), tensor))
    tensor, op = all_reduce(tensor, ParallelMode.GLOBAL, async_op=True)
    print('After: Rank {0} - {1}'.format(dist.get_rank(), tensor))
    op.wait()
    print('Complete: Rank {0} - {1}'.format(dist.get_rank(), tensor))
    torch.cuda.synchronize()


def check_layer(rank, world_size, port):
    """Per-process entry: launch the env, sanity-check rank mapping, then run
    the three collective smoke tests."""
    launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')

    assert dist.get_rank() == gpc.get_global_rank()
    print('Rank {} / {}'.format(dist.get_rank(), dist.get_world_size()))

    check_all_gather()
    check_reduce_scatter()
    check_all_reduce()

    gpc.destroy()
    torch.cuda.empty_cache()


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_comm():
    # NOTE(review): CONFIG declares data=8 but only 4 processes are spawned —
    # verify the config/world_size pairing is intentional.
    world_size = 4
    run_func = partial(check_layer, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_comm()
from functools import partial

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.amp import convert_to_apex_amp
from colossalai.nn.optimizer import CPUAdam
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_model.utils import col_model_deepcopy
from colossalai.zero.sharded_optim import ShardedOptimizerV2
from colossalai.zero.sharded_optim._utils import has_inf_or_nan
from colossalai.utils import get_current_device
from tests.components_to_test.registry import non_distributed_component_funcs
from colossalai.engine.gradient_handler import MoeGradientHandler
from colossalai.context import MOE_CONTEXT
from colossalai.testing import assert_equal_in_group

from tests.test_zero.common import CONFIG, check_sharded_model_params
from tests.test_moe.test_moe_zero_init import MoeModel


def _run_step(model, optimizer, data, label, criterion, grad_handler):
    """One optimizer step; routes backward through the sharded optimizer for
    ShardedModelV2 and through plain autograd otherwise."""
    model.train()
    optimizer.zero_grad()

    if criterion:
        y = model(data)
        loss = criterion(y, label)
    else:
        # model computes its own loss when no external criterion is given
        loss = model(data, label)

    loss = loss.float()
    if isinstance(model, ShardedModelV2):
        optimizer.backward(loss)
    else:
        loss.backward()

    if grad_handler is not None:
        # all-reduce MoE expert gradients across the dp group
        grad_handler.handle_gradient()

    optimizer.step()


@parameterize("cpu_offload", [True])
@parameterize("use_cpuadam", [True])    # We do not use Hybrid Adam right now, since it has a little bug
@parameterize("reuse_fp16_shard", [True, False])
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def _run_test_sharded_optim_v2(cpu_offload,
                               shard_strategy_class,
                               use_cpuadam,
                               reuse_fp16_shard,
                               gpu_margin_mem_ratio=0.0):
    """Train a MoE model under ZeRO (ShardedModelV2/ShardedOptimizerV2) and an
    apex-amp reference side by side for a few steps, checking the sharded
    parameters stay close to the reference."""
    shard_strategy = shard_strategy_class()
    if use_cpuadam and cpu_offload is False:
        # CPUAdam requires offloaded params; skip the invalid combination.
        return
    MOE_CONTEXT.reset_loss()
    get_components_func = non_distributed_component_funcs.get_callable('no_leaf_module')
    _, train_dataloader, _, optimizer_class, criterion = get_components_func()

    with ZeroInitContext(target_device=torch.device('cpu') if cpu_offload else get_current_device(),
                         shard_strategy=shard_strategy,
                         shard_param=True):
        zero_model = MoeModel(checkpoint=True)

    zero_model = ShardedModelV2(zero_model,
                                shard_strategy,
                                tensor_placement_policy='cpu' if cpu_offload else 'cuda',
                                reuse_fp16_shard=reuse_fp16_shard)

    # check whether parameters are identical in ddp
    for name, p in zero_model.named_parameters():
        if not p.colo_attr.param_is_sharded and p.colo_attr.is_replicated:
            assert_equal_in_group(p.colo_attr.data_payload.to(get_current_device()))

    # reference model: deep-copied weights, trained through apex amp in fp32
    model = MoeModel(checkpoint=True).half()
    col_model_deepcopy(zero_model, model)
    model = model.cuda().float()

    if use_cpuadam:
        optimizer_class = CPUAdam
    optim = optimizer_class(model.parameters(), lr=1e-3)
    sharded_optim = optimizer_class(zero_model.parameters(), lr=1e-3)
    sharded_optim = ShardedOptimizerV2(zero_model,
                                       sharded_optim,
                                       initial_scale=2**5,
                                       gpu_margin_mem_ratio=gpu_margin_mem_ratio)

    amp_config = dict(opt_level='O2', keep_batchnorm_fp32=False)
    apex_model, apex_optimizer = convert_to_apex_amp(model, optim, amp_config)
    apex_grad_handler = MoeGradientHandler(model)

    # Since MOE is not compatible with apex_amp now, we need to convert gate weight to fp32
    for (n, p), zp in zip(apex_model.named_parameters(), zero_model.parameters()):
        if 'gate' in n:
            p.data = p.float()
            p.data.copy_(zp.colo_attr.data_payload)

    for i, (data, label) in enumerate(train_dataloader):
        if i > 5:
            break
        data, label = data.cuda(), label.cuda()
        _run_step(apex_model, apex_optimizer, data, label, criterion, apex_grad_handler)
        _run_step(zero_model, sharded_optim, data, label, criterion, None)
        # NOTE(review): reuse_fp16_shard is passed as `use_cpuadam`, not the
        # parameterized reuse_fp16_shard flag — confirm this is intentional.
        check_sharded_model_params(model, zero_model, loose=True, reuse_fp16_shard=use_cpuadam)
        for param in model.parameters():
            assert not has_inf_or_nan(param)


def _run_dist(rank, world_size, port):
    """Per-process entry: launch env, init MOE context, run the parameterized test."""
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    MOE_CONTEXT.setup(seed=42)
    _run_test_sharded_optim_v2()


# use_cpuadam = True can be used with cpu_offload = False
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2])
@rerun_if_address_is_in_use()
def test_moe_zero_optim(world_size):
    run_func = partial(_run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_moe_zero_optim(world_size=4)
from functools import partial

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_model._utils import cast_tensor_to_fp16
from colossalai.zero.sharded_model.utils import col_model_deepcopy
from tests.components_to_test.registry import non_distributed_component_funcs
from colossalai.engine.gradient_handler import MoeGradientHandler
from colossalai.context import MOE_CONTEXT
from colossalai.testing import assert_equal_in_group

from tests.test_zero.common import CONFIG, check_grads_padding, run_fwd_bwd
from tests.test_moe.test_moe_zero_init import MoeModel


@parameterize("enable_autocast", [False])
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def run_model_test(enable_autocast, shard_strategy_class):
    """Forward/backward a ZeRO-sharded MoE model and an fp16 replica on the
    same batches, comparing gradients (with padding tolerance) each step."""
    shard_strategy = shard_strategy_class()

    get_components_func = non_distributed_component_funcs.get_callable('no_leaf_module')
    _, train_dataloader, _, _, criterion = get_components_func()

    with ZeroInitContext(target_device=torch.device('cuda', torch.cuda.current_device()),
                         shard_strategy=shard_strategy,
                         shard_param=True):
        zero_model = MoeModel(checkpoint=True)
    zero_model = ShardedModelV2(zero_model, shard_strategy)

    # check whether parameters are identical in ddp
    for name, p in zero_model.named_parameters():
        if not p.colo_attr.param_is_sharded and p.colo_attr.is_replicated:
            assert_equal_in_group(p.colo_attr.data_payload)

    # fp16 reference copy of the sharded model's weights
    model = MoeModel(checkpoint=True).half()
    col_model_deepcopy(zero_model, model)
    model = model.cuda()
    grad_handler = MoeGradientHandler(model)

    for i, (data, label) in enumerate(train_dataloader):
        if i > 5:
            break

        data, label = cast_tensor_to_fp16(data).cuda(), label.cuda()
        run_fwd_bwd(model, data, label, criterion, enable_autocast)
        run_fwd_bwd(zero_model, data, label, criterion, enable_autocast)
        # all-reduce expert grads in the reference model before comparing
        grad_handler.handle_gradient()

        check_grads_padding(model, zero_model, loose=True)


def run_dist(rank, world_size, port):
    """Per-process entry: launch env, init MOE context, run the model test."""
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    MOE_CONTEXT.setup(seed=42)
    MOE_CONTEXT.reset_loss()
    run_model_test()


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2])
@rerun_if_address_is_in_use()
def test_moe_zero_model(world_size):
    run_func = partial(run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_moe_zero_model(world_size=2)
from functools import partial

import pytest
import torch
import torch.nn as nn
import torch.multiprocessing as mp

import colossalai
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import free_port, get_current_device
from colossalai.nn.layer.moe import Top1Router, Top2Router, MoeLayer, Experts
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.testing import rerun_if_address_is_in_use

BATCH_SIZE = 16
NUM_EXPERTS = 4
CONFIG = dict()


def check_equal(tensor_a, tensor_b, atol=1e-06):
    """Assert element-wise closeness with an absolute tolerance only."""
    assert torch.allclose(tensor_a, tensor_b, rtol=0, atol=atol) is True


def run_routing(rank, world_size, port, rs=2, hidden_size=128, data_type=torch.float32, router=Top2Router):
    """Compare MoE dispatch/combine done via plain matmul against the fused
    COL_MOE kernel: same forward outputs and same gradients (within dtype-
    dependent tolerances) for tokens and the gate weight.
    """
    # Here we do not need TF32, since it brings absolute error on results
    torch.backends.cuda.matmul.allow_tf32 = False
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    local_rank = gpc.get_local_rank(ParallelMode.GLOBAL)

    MOE_CONTEXT.setup(42)    # MOE environment initialization
    MOE_CONTEXT.reset_loss()
    torch.manual_seed(rs + local_rank)    # set each process has different random seed

    # get randomized data
    tokens = torch.randn(BATCH_SIZE, hidden_size, dtype=data_type, device=get_current_device(), requires_grad=True)

    expert_module = nn.Linear
    expert_factor = dict(in_features=hidden_size, out_features=hidden_size, device=get_current_device())
    expert = Experts(expert_module, NUM_EXPERTS, **expert_factor)
    layer = MoeLayer(hidden_size, NUM_EXPERTS, router(capacity_factor_train=1.0), expert)
    if data_type == torch.float16:
        layer = layer.half()

    # use matrix multiplication instead of COL_MOE_KERNL in MOE dispatch and combine
    layer.use_kernel = False
    old_out = layer(tokens)
    ech = old_out.shape
    grad = torch.randn(ech, device=get_current_device())
    old_out.backward(grad)    # get gradient

    # save all results
    o_tk_grad = tokens.grad.data.clone()
    o_gt_grad = layer.gate.weight.grad.data.clone()

    # reset all gradients
    tokens.grad.zero_()
    layer.gate.weight.grad.zero_()

    layer.use_kernel = True
    new_out = layer(tokens)    # get ouputs through colossal kernel

    if data_type == torch.float32:
        check_equal(old_out, new_out)
    else:
        check_equal(old_out, new_out, 1e-2)
    # forward function passed

    new_out.backward(grad)    # get new type gradient
    n_tk_grad = tokens.grad.data.clone()
    n_gt_grad = layer.gate.weight.grad.data.clone()

    if data_type == torch.float32:
        check_equal(o_tk_grad, n_tk_grad)
    else:
        # FIX: originally compared o_tk_grad against itself, which always
        # passed and left the fp16 token gradient entirely unchecked.
        check_equal(o_tk_grad, n_tk_grad, 1e-2)
    # tokens gradient is correct

    if data_type == torch.float32:
        check_equal(o_gt_grad, n_gt_grad, 5e-05)
    else:
        check_equal(o_gt_grad, n_gt_grad, 2e-01)
    # gate weight gradient is correct (comment fixed: this is the gate's weight, not a bias)


@pytest.mark.dist
@pytest.mark.parametrize("rs", [131])
@pytest.mark.parametrize("hidden_size", [32, 144])
@pytest.mark.parametrize("data_type", [torch.float32, torch.float16])
@pytest.mark.parametrize("router", [Top1Router, Top2Router])
@rerun_if_address_is_in_use()
def test_moe_kernel(rs, hidden_size, data_type, router):
    world_size = 4
    run_func = partial(run_routing,
                       world_size=world_size,
                       port=free_port(),
                       rs=rs,
                       hidden_size=hidden_size,
                       data_type=data_type,
                       router=router)
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_moe_kernel(2, 256, torch.float16, Top2Router)
from functools import partial

import pytest
import torch.nn as nn
import torch.multiprocessing as mp
import torch.distributed as dist

import colossalai
from colossalai.utils import free_port, get_current_device
from colossalai.nn.layer.moe import Experts
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.utils.moe import sync_moe_model_param
from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use

# Tiny expert dimensions keep the test fast; values are arbitrary.
D_MODEL = 4
D_FF = 8
CONFIG = dict()


def run_test(rank, port):
    """Worker entry for one of 4 spawned processes.

    Verifies, for a fixed world size of 4:
      1. how `Experts` splits expert counts (1/2/4/8) across ranks,
      2. the expert-parallel (ep) / data-parallel (dp) group layout recorded
         in ``MOE_CONTEXT.parallel_info_dict``,
      3. that ``sync_moe_model_param`` makes replicated expert parameters
         equal within each dp group.
    """
    world_size = 4
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    expert_module = nn.Linear
    expert_factor = dict(in_features=D_MODEL, out_features=D_FF, device=get_current_device())

    MOE_CONTEXT.setup(42)    # MOE environment initialization
    exp0 = Experts(expert_module, 1, **expert_factor)
    exp1 = Experts(expert_module, 2, **expert_factor)
    exp2 = Experts(expert_module, 4, **expert_factor)
    exp3 = Experts(expert_module, 8, **expert_factor)

    # With 4 ranks: 1, 2 and 4 total experts mean at most 1 expert per rank;
    # 8 experts mean 2 local experts per rank.
    assert exp0.num_local_experts == 1
    assert exp1.num_local_experts == 1
    assert exp2.num_local_experts == 1
    assert exp3.num_local_experts == 2
    # experts deployment passed

    parallel_info_dict = MOE_CONTEXT.parallel_info_dict
    rank = dist.get_rank()

    # One parallel-info entry per distinct ep size actually used (1, 2, 4).
    assert len(parallel_info_dict) == 3

    # Ranks inside each expert-parallel group.
    assert dist.get_rank(parallel_info_dict[4].ep_group) == rank
    assert dist.get_rank(parallel_info_dict[2].ep_group) == rank % 2
    assert dist.get_rank(parallel_info_dict[1].ep_group) == 0

    # Ranks inside each data-parallel group (complement of the ep layout).
    assert dist.get_rank(parallel_info_dict[4].dp_group) == 0
    assert dist.get_rank(parallel_info_dict[2].dp_group) == rank // 2
    assert dist.get_rank(parallel_info_dict[1].dp_group) == rank
    # group creation passed

    model = nn.ModuleList([exp0, exp1, exp2, exp3])
    model = model.to(get_current_device())
    sync_moe_model_param(model)

    # After sync, expert params must agree across each dp group.
    assert_equal_in_group(exp0.experts[0].weight.data, parallel_info_dict[1].dp_group)
    assert_equal_in_group(exp0.experts[0].bias.data, parallel_info_dict[1].dp_group)
    # MOE experts layout success when ep_size = 1

    assert_equal_in_group(exp1.experts[0].weight.data, parallel_info_dict[2].dp_group)
    assert_equal_in_group(exp1.experts[0].bias.data, parallel_info_dict[2].dp_group)
    # MOE experts layout success when ep_size = 2


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_moe_initialization():
    """Spawn 4 processes and run the MoE initialization checks on each."""
    world_size = 4
    run_func = partial(run_test, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_moe_initialization()
from functools import partial

import colossalai
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from colossalai.nn import CheckpointModule
from colossalai.logging import get_dist_logger
from colossalai.testing import parameterize
from colossalai.utils import free_port
from colossalai.context import MOE_CONTEXT
from colossalai.nn.layer import MoeModule
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import get_current_device
from tests.test_zero.common import CONFIG


class MoeModel(CheckpointModule):
    """Minimal checkpointable model with an MoE layer sandwiched between two Linears.

    Used to probe how ZeroInitContext treats MoE parameters (sharding,
    replication, dtype) versus ordinary dense parameters.
    """

    def __init__(self, checkpoint: bool = False):
        super().__init__(checkpoint)
        self.proj1 = nn.Linear(4, 16)
        expert_cls = nn.Linear
        expert_args_dict = dict(in_features=16, out_features=16)
        self.moe = MoeModule(dim_model=16, num_experts=8, use_residual=True, expert_cls=expert_cls, **expert_args_dict)
        self.proj2 = nn.Linear(16, 4)

    def forward(self, x):
        x = self.proj1(x)
        x = self.moe(x)
        x = self.proj2(x)
        return x


@parameterize("init_device_type", ['cpu', 'cuda'])
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def run_moe_zero_init(init_device_type, shard_strategy_class):
    """Build MoeModel under ZeroInitContext and assert the per-parameter layout.

    Checks that gate weights stay fp32 while everything else is fp16, that
    expert/gate/residual-combine parameters are excluded from sharding, that
    expert parameters are not replicated, and that payloads land on the
    requested init device (sharded) or on CUDA (unsharded).
    """
    logger = get_dist_logger("test_moe_zero_init")

    if init_device_type == 'cuda':
        init_device = get_current_device()
    elif init_device_type == 'cpu':
        init_device = torch.device("cpu")
    else:
        raise NotImplementedError("Unknown device found.")

    model_numel_tensor = torch.zeros(1, dtype=torch.int)
    with ZeroInitContext(target_device=init_device,
                         shard_strategy=shard_strategy_class(),
                         shard_param=True,
                         model_numel_tensor=model_numel_tensor):
        model = MoeModel(checkpoint=True)

    for name, param in model.named_parameters():
        assert hasattr(param, 'colo_attr')

        # the weights in the gate should be fp32
        if 'gate' in name:
            assert param.colo_attr.sharded_data_tensor.dtype == torch.float32
        else:
            assert param.colo_attr.sharded_data_tensor.dtype == torch.half

        # the parameters in moe experts and its gate should not be sharded
        if ('experts' in name) or ('gate' in name) or ('residual_combine' in name):
            assert not param.colo_attr.sharded_data_tensor.is_sharded
        else:
            assert param.colo_attr.sharded_data_tensor.is_sharded

        # the parameters in moe experts is not replicated
        if 'experts' in name:
            assert not param.colo_attr.is_replicated
        else:
            assert param.colo_attr.is_replicated

        # Sharded payloads stay on the init device; unsharded ones must be on CUDA.
        if param.colo_attr.param_is_sharded:
            assert param.colo_attr.data_payload.device.type == init_device.type, \
                f'{param.colo_attr.data_payload.device.type} vs. {init_device.type}'
        else:
            assert param.colo_attr.data_payload.device.type == 'cuda'


def _run_dist(rank, world_size, port):
    """Worker entry: launch distributed env, init MoE context, run all parameterizations."""
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    MOE_CONTEXT.setup(seed=42)
    run_moe_zero_init()


@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2, 4])
@rerun_if_address_is_in_use()
def test_moe_zero_init(world_size):
    run_func = partial(_run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_moe_zero_init(world_size=2)
from functools import partial

import pytest
import torch
import torch.nn as nn
import torch.multiprocessing as mp
import torch.distributed as dist

import colossalai
from colossalai.utils import free_port, get_current_device
from colossalai.nn.layer.moe import Top1Router, UniformNoiseGenerator, MoeLayer, Experts
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.utils.moe import sync_moe_model_param
from colossalai.engine.gradient_handler import MoeGradientHandler
from colossalai.testing import assert_equal_in_group, rerun_if_address_is_in_use

BATCH_SIZE = 4
DIM = 16
CONFIG = dict()


def run_test(rank, world_size, port):
    """Worker entry: verify MoeGradientHandler synchronizes expert gradients.

    Builds a stack of MoE layers with 1/2/4 experts, checks parameters are
    equal within each dp group after ``sync_moe_model_param``, then runs a
    forward/backward with per-rank random data and checks that the gradient
    handler makes expert gradients equal within each dp group as well.
    """
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    expert_module = nn.Linear
    expert_factor = dict(in_features=DIM, out_features=DIM, device=get_current_device())

    MOE_CONTEXT.setup(42)    # MOE initialization
    noisy_func = UniformNoiseGenerator()
    router = Top1Router(noisy_func=noisy_func)
    num_experts_list = [1, 2, 4]
    layer_list = []
    for num_experts in num_experts_list:
        exp = Experts(expert_module, num_experts, **expert_factor)
        moe_layer = MoeLayer(DIM, num_experts, router, exp)
        layer_list.append(moe_layer)

    model = nn.Sequential(*layer_list)
    model = model.to(get_current_device())
    sync_moe_model_param(model)

    dist_dict = MOE_CONTEXT.parallel_info_dict
    assert_equal_in_group(layer_list[0].experts.experts[0].weight.data, dist_dict[1].dp_group)
    assert_equal_in_group(layer_list[1].experts.experts[0].weight.data, dist_dict[2].dp_group)
    # MoE model synchronization passed

    grad_handler = MoeGradientHandler(model, 0)

    rank = dist.get_rank()
    torch.cuda.manual_seed(78 + rank)    # different data on each rank so grads differ pre-sync
    data = torch.randn(BATCH_SIZE, DIM, device=get_current_device())
    grad = torch.randn_like(data)

    MOE_CONTEXT.reset_loss()
    outputs = model(data)
    outputs.backward(grad)
    grad_handler.handle_gradient()

    # After the handler runs, expert grads must agree across each dp group.
    assert_equal_in_group(layer_list[0].experts.experts[0].weight.grad, dist_dict[1].dp_group)
    assert_equal_in_group(layer_list[0].experts.experts[0].bias.grad, dist_dict[1].dp_group)

    assert_equal_in_group(layer_list[1].experts.experts[0].weight.grad, dist_dict[2].dp_group)
    assert_equal_in_group(layer_list[1].experts.experts[0].bias.grad, dist_dict[2].dp_group)
    # MoE grad handler test passed


@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_grad_handler():
    """Spawn 4 processes and run the MoE gradient-handler checks."""
    world_size = 4
    run_func = partial(run_test, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_grad_handler()
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- import datetime # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('..')) # -- Project information ----------------------------------------------------- project = 'Colossal-AI' copyright = f'{datetime.datetime.now().year}, HPC-AI Tech' author = 'HPC-AI Technology Inc.' # The full version, including alpha/beta/rc tags release = '0.0.1' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon', 'sphinx.ext.linkcode', 'myst_parser', ] # Disable docstring inheritance autodoc_inherit_docstrings = False # Disable displaying type annotations, these can be very verbose autodoc_typehints = 'none' # Enable overriding of function signatures in the first line of the docstring. autodoc_docstring_signature = True autodoc_default_options = { 'member-order': 'bysource', } # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. 
exclude_patterns = ['.build', 'Thumbs.db', '.DS_Store'] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' html_show_sourcelink = False html_theme_options = { 'navigation_depth': 3, } html_context = { 'display_github': False, 'github_user': 'hpcaitech', 'github_repo': 'ColossalAI', # 'github_version': 'master/docs/', } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_css_files = [ 'css/rtd_theme.css', ] # -- Extension configuration ------------------------------------------------- source_suffix = ['.rst', '.md', '.MD'] import inspect import colossalai def linkcode_resolve(domain, info): """ Determine the URL corresponding to Python object """ if domain != 'py': return None modname = info['module'] fullname = info['fullname'] submod = sys.modules.get(modname) if submod is None: return None obj = submod for part in fullname.split('.'): try: obj = getattr(obj, part) except Exception: return None try: fn = inspect.getsourcefile(obj) except Exception: fn = None if not fn: return None try: source, lineno = inspect.findsource(obj) except Exception: lineno = None if lineno: linespec = "#L%d" % (lineno + 1) else: linespec = "" fn = os.path.relpath(fn, start=os.path.dirname(colossalai.__file__)) github = "https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/{}{}" return github.format(fn, linespec)
import torch
import torch.nn as nn

from colossalai.nn.layer import WrappedDropPath as DropPath


class TransformerLayer(nn.Module):
    """A pre-norm transformer block assembled from caller-supplied submodules.

    Computes ``x + droppath(att(norm1(x)))`` followed by
    ``x + droppath(ffn(norm2(x)))``.

    Args:
        att: attention submodule.
        ffn: feed-forward submodule.
        norm1: normalization applied before the attention branch.
        norm2: normalization applied before the feed-forward branch.
        droppath: optional stochastic-depth module; when ``None``, a
            ``DropPath`` with rate ``droppath_rate`` is created.
        droppath_rate: drop rate used only when ``droppath`` is ``None``.
    """

    def __init__(self,
                 att: nn.Module,
                 ffn: nn.Module,
                 norm1: nn.Module,
                 norm2: nn.Module,
                 droppath=None,
                 droppath_rate: float = 0):
        super().__init__()
        # Fall back to a freshly built DropPath unless one was provided.
        if droppath is None:
            droppath = DropPath(droppath_rate)
        self.att = att
        self.ffn = ffn
        self.norm1 = norm1
        self.norm2 = norm2
        self.droppath = droppath

    def forward(self, x):
        attn_branch = self.droppath(self.att(self.norm1(x)))
        x = x + attn_branch
        ffn_branch = self.droppath(self.ffn(self.norm2(x)))
        x = x + ffn_branch
        return x
from .gpt import *
import math
from typing import Callable

import torch
from colossalai import nn as col_nn
from colossalai.builder.pipeline import partition_uniform
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.nn.layer.utils import CheckpointModule, divide
from colossalai.nn.layer.wrapper import PipelineSharedModuleWrapper
from colossalai.registry import LAYERS, LOSSES, MODELS
from colossalai.utils import get_current_device
from torch import dtype, nn

__all__ = [
    'GPT', 'GPTLMLoss', 'gpt2_small', 'gpt2_medium', 'gpt2_large', 'gpt2_xl', 'gpt2_8B', 'gpt2_xl_pipeline',
    'gpt2_8B_pipeline', 'gpt3', 'gpt3_pipeline'
]


@LAYERS.register_module
class GPTEmbedding(nn.Module):
    """Word + learned position (+ optional token-type) embeddings with dropout."""

    def __init__(self,
                 embedding_dim: int,
                 vocab_size: int,
                 max_position_embeddings: int,
                 num_tokentypes: int = 0,
                 padding_idx: int = None,
                 dropout: float = 0.,
                 dtype: dtype = None) -> None:
        super().__init__()
        self.word_embeddings = col_nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx, dtype=dtype)
        self.position_embeddings = col_nn.Embedding(max_position_embeddings, embedding_dim, dtype=dtype)
        # Token-type embeddings are optional; omitted entirely when num_tokentypes == 0.
        if num_tokentypes > 0:
            self.tokentype_embeddings = col_nn.Embedding(num_tokentypes, embedding_dim, dtype=dtype)
        else:
            self.tokentype_embeddings = None
        self.dropout = col_nn.Dropout(dropout)

    @property
    def word_embedding_weight(self):
        # Exposed so the LM head can tie its classifier weight to this table.
        return self.word_embeddings.weight

    def forward(self, input_ids, position_ids=None, tokentype_ids=None):
        seq_length = input_ids.size(1)
        # Default position ids: 0..seq_length-1, broadcast over the batch.
        if position_ids is None:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=get_current_device()).unsqueeze(0)
        x = self.word_embeddings(input_ids) + self.position_embeddings(position_ids)
        if self.tokentype_embeddings is not None and tokentype_ids is not None:
            x = x + self.tokentype_embeddings(tokentype_ids)
        x = self.dropout(x)
        return x


@LAYERS.register_module
class GPTSelfAttention(nn.Module):
    """Multi-head causal self-attention with an optional fused scale+mask+softmax kernel.

    When ``fuse_scale_mask_softmax`` is True the scaling, causal masking and
    softmax are delegated to ``FusedScaleMaskSoftmax``; otherwise they are done
    explicitly with an on-the-fly lower-triangular mask.
    """

    def __init__(self,
                 dim: int,
                 num_heads: int,
                 attention_dropout: float,
                 dropout: float,
                 bias: bool = True,
                 fuse_scale_mask_softmax: bool = False,
                 dtype: dtype = None) -> None:
        super().__init__()
        self.fuse_scale_mask_softmax = fuse_scale_mask_softmax
        self.attention_head_size = divide(dim, num_heads)
        # Single projection producing Q, K and V concatenated along the last dim.
        self.query_key_value = col_nn.Linear(dim, 3 * dim, dtype=dtype, bias=bias)
        if fuse_scale_mask_softmax:
            from colossalai.kernel import FusedScaleMaskSoftmax
            from colossalai.kernel.cuda_native.scaled_softmax import \
                AttnMaskType
            self.softmax = FusedScaleMaskSoftmax(input_in_fp16=True,
                                                 input_in_bf16=False,
                                                 attn_mask_type=AttnMaskType.causal,
                                                 scaled_masked_softmax_fusion=True,
                                                 mask_func=None,
                                                 softmax_in_fp32=True,
                                                 scale=math.sqrt(self.attention_head_size))
        else:
            self.softmax = nn.Softmax(dim=-1)
        self.attention_dropout = col_nn.Dropout(attention_dropout)
        self.dense = col_nn.Linear(dim, dim, dtype=dtype, bias=True)
        self.dropout = col_nn.Dropout(dropout)

    def forward(self, x, attention_mask=None):
        qkv = self.query_key_value(x)
        all_head_size = qkv.shape[-1] // 3
        # Head count is derived from the (possibly tensor-parallel-sliced)
        # projection width rather than stored at init.
        num_attention_heads = divide(all_head_size, self.attention_head_size)
        new_qkv_shape = qkv.shape[:-1] + \
            (num_attention_heads, 3 * self.attention_head_size)
        qkv = qkv.view(new_qkv_shape)
        qkv = qkv.permute((0, 2, 1, 3))    # (batch, heads, seq, 3 * head_size)
        q, k, v = torch.chunk(qkv, 3, dim=-1)

        x = torch.matmul(q, k.transpose(-1, -2))

        if self.fuse_scale_mask_softmax:
            # Fused kernel handles scaling and causal masking internally.
            x = self.softmax(x, attention_mask)
        else:
            x = x / math.sqrt(self.attention_head_size)
            # causal mask
            q_len, k_len = q.size(-2), k.size(-2)
            causal_mask = torch.tril(torch.ones((q_len, k_len),
                                                dtype=torch.uint8,
                                                device=get_current_device())).view(1, 1, q_len, k_len).bool()
            # -1e4 (not -inf) keeps fp16 arithmetic finite.
            x = torch.where(causal_mask, x, torch.tensor(-1e4, dtype=x.dtype, device=get_current_device()))
            if attention_mask is not None:
                x = x + attention_mask
            x = self.softmax(x)

        x = self.attention_dropout(x)

        x = torch.matmul(x, v)
        x = x.transpose(1, 2)
        new_context_layer_shape = x.size()[:-2] + (all_head_size,)
        x = x.reshape(new_context_layer_shape)
        x = self.dense(x)
        x = self.dropout(x)

        return x


@LAYERS.register_module
class GPTMLP(nn.Module):
    """Position-wise feed-forward block: Linear -> activation -> Linear -> dropout."""

    def __init__(self,
                 dim: int,
                 mlp_ratio: float,
                 activation: Callable,
                 dropout: float,
                 dtype: dtype = None,
                 bias: bool = True):
        super().__init__()
        intermediate_dim = int(dim * mlp_ratio)
        self.dense_1 = col_nn.Linear(dim, intermediate_dim, dtype=dtype, bias=bias)
        self.activation = activation
        self.dense_2 = col_nn.Linear(intermediate_dim, dim, dtype=dtype, bias=bias)
        self.dropout = col_nn.Dropout(dropout)

    def forward(self, x):
        x = self.dense_1(x)
        x = self.activation(x)
        x = self.dense_2(x)
        x = self.dropout(x)
        return x


@LAYERS.register_module
class GPTBlock(CheckpointModule):
    """One transformer block (attention + MLP) with optional activation checkpointing.

    ``apply_post_layernorm`` switches the residual source: pre-norm takes the
    residual from before the LayerNorm, post-norm from after it.
    """

    def __init__(self,
                 dim: int,
                 num_heads: int,
                 mlp_ratio: float,
                 activation: Callable,
                 attention_dropout: float = 0.,
                 dropout: float = 0.,
                 layernorm_epsilon: float = 1e-5,
                 dtype: dtype = None,
                 bias: bool = True,
                 apply_post_layernorm: bool = False,
                 fuse_scale_mask_softmax: bool = False,
                 checkpoint: bool = False,
                 activation_offload: bool = False):
        super().__init__(checkpoint, activation_offload)
        self.apply_post_layernorm = apply_post_layernorm
        self.norm1 = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
        self.attn = GPTSelfAttention(dim=dim,
                                     num_heads=num_heads,
                                     attention_dropout=attention_dropout,
                                     dropout=dropout,
                                     bias=bias,
                                     fuse_scale_mask_softmax=fuse_scale_mask_softmax,
                                     dtype=dtype)
        self.norm2 = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
        self.mlp = GPTMLP(dim=dim, mlp_ratio=mlp_ratio, activation=activation, dropout=dropout, dtype=dtype, bias=bias)

    def _forward(self, x, attention_mask=None):
        if not self.apply_post_layernorm:
            residual = x
        x = self.norm1(x)
        if self.apply_post_layernorm:
            residual = x
        x = residual + self.attn(x, attention_mask)

        if not self.apply_post_layernorm:
            residual = x
        x = self.norm2(x)
        if self.apply_post_layernorm:
            residual = x
        x = residual + self.mlp(x)

        # attention_mask is passed through so blocks can be chained uniformly.
        return x, attention_mask


@LAYERS.register_module
class GPTLMHead(nn.Module):
    """Final classifier projecting hidden states to vocabulary logits.

    NOTE(review): the parameter name ``word_embeeding_weight`` is a typo kept
    for backward compatibility — callers pass it by keyword.
    """

    def __init__(self,
                 dim: int,
                 vocab_size: int,
                 word_embeeding_weight: nn.Parameter = None,
                 bias: bool = False,
                 dtype: dtype = None) -> None:
        super().__init__()
        # Passing the embedding weight ties input and output embeddings.
        self.dense = col_nn.Classifier(dim, vocab_size, word_embeeding_weight, bias=bias, dtype=dtype)

    @property
    def weight(self):
        return self.dense.weight

    def forward(self, x):
        x = self.dense(x)
        return x


@LOSSES.register_module
class GPTLMLoss(nn.Module):
    """Next-token cross-entropy: logits shifted left, labels shifted right."""

    def __init__(self):
        super().__init__()
        self.loss = col_nn.CrossEntropyLoss()

    def forward(self, logits, labels):
        shift_logits = logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()
        # Flatten the tokens
        return self.loss(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))


@MODELS.register_module
class GPT(nn.Module):
    """Full GPT language model: embedding, a stack of GPTBlocks, final norm and LM head."""

    def __init__(self,
                 vocab_size: int = 50304,
                 max_position_embeddings: int = 1024,
                 dim: int = 768,
                 num_heads: int = 12,
                 depth: int = 12,
                 mlp_ratio: float = 4.0,
                 dropout: float = 0.1,
                 embedding_dropout: float = 0.1,
                 attention_dropout: float = 0.1,
                 layernorm_epsilon: float = 1e-5,
                 activation: Callable = nn.functional.gelu,
                 padding_idx: int = None,
                 dtype: dtype = None,
                 bias: bool = True,
                 apply_post_layernorm: bool = False,
                 fuse_scale_mask_softmax: bool = False,
                 checkpoint: bool = False,
                 activation_offload: bool = False) -> None:
        super().__init__()
        self.embed = GPTEmbedding(embedding_dim=dim,
                                  vocab_size=vocab_size,
                                  max_position_embeddings=max_position_embeddings,
                                  padding_idx=padding_idx,
                                  dropout=embedding_dropout,
                                  dtype=dtype)
        self.blocks = nn.ModuleList([
            GPTBlock(
                dim=dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                activation=activation,
                attention_dropout=attention_dropout,
                dropout=dropout,
                layernorm_epsilon=layernorm_epsilon,
                dtype=dtype,
                bias=bias,
                apply_post_layernorm=apply_post_layernorm,
                fuse_scale_mask_softmax=fuse_scale_mask_softmax,
                checkpoint=checkpoint,
                activation_offload=activation_offload
            ) for _ in range(depth)
        ])
        self.norm = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
        # LM head is weight-tied to the word embedding table.
        self.head = GPTLMHead(dim=dim,
                              vocab_size=vocab_size,
                              word_embeeding_weight=self.embed.word_embedding_weight,
                              dtype=dtype)

    def forward(self, input_ids, attention_mask=None):
        x = self.embed(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # Adapted from huggingface
        if attention_mask is not None:
            batch_size = input_ids.shape[0]
            attention_mask = attention_mask.view(batch_size, -1)
            attention_mask = col_nn.partition_batch(attention_mask)
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = attention_mask.to(dtype=x.dtype)    # fp16 compatibility
            # Convert 1/0 mask to additive 0/-10000 bias.
            attention_mask = (1.0 - attention_mask) * -10000.0

        for block in self.blocks:
            x, attention_mask = block(x, attention_mask)

        x = self.head(self.norm(x))

        return x


class PipelineGPT(nn.Module):
    """Pipeline-parallel GPT stage.

    Only the first stage owns the embedding and only the last stage owns the
    final norm + LM head; intermediate stages hold just a slice of blocks.
    """

    def __init__(self,
                 vocab_size: int = 50304,
                 max_position_embeddings: int = 1024,
                 dim: int = 768,
                 num_heads: int = 12,
                 depth: int = 12,
                 mlp_ratio: float = 4.0,
                 dropout: float = 0.1,
                 embedding_dropout: float = 0.1,
                 attention_dropout: float = 0.1,
                 layernorm_epsilon: float = 1e-5,
                 activation: Callable = nn.functional.gelu,
                 padding_idx: int = None,
                 dtype: dtype = None,
                 bias: bool = True,
                 apply_post_layernorm: bool = False,
                 fuse_scale_mask_softmax: bool = False,
                 checkpoint: bool = False,
                 first: bool = False,
                 last: bool = False):
        super().__init__()
        self.checkpoint = checkpoint
        self.first = first
        self.last = last
        if first:
            self.embed = GPTEmbedding(embedding_dim=dim,
                                      vocab_size=vocab_size,
                                      max_position_embeddings=max_position_embeddings,
                                      padding_idx=padding_idx,
                                      dropout=embedding_dropout,
                                      dtype=dtype)
        self.blocks = nn.ModuleList([
            GPTBlock(
                dim=dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                activation=activation,
                attention_dropout=attention_dropout,
                dropout=dropout,
                layernorm_epsilon=layernorm_epsilon,
                dtype=dtype,
                bias=bias,
                apply_post_layernorm=apply_post_layernorm,
                fuse_scale_mask_softmax=fuse_scale_mask_softmax,
                checkpoint=checkpoint,
            ) for _ in range(depth)
        ])
        if self.last:
            self.norm = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
            self.head = GPTLMHead(dim=dim, vocab_size=vocab_size, dtype=dtype)

    def forward(self, x=None, input_ids=None, attention_mask=None):
        if self.first:
            x = self.embed(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # Adapted from huggingface
        if attention_mask is not None:
            # First stage gets input_ids; later stages get hidden states only.
            if self.first:
                batch_size = input_ids.shape[0]
            else:
                batch_size = x.shape[0]
            attention_mask = attention_mask.view(batch_size, -1)
            attention_mask = col_nn.partition_batch(attention_mask)
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = attention_mask.to(dtype=x.dtype)    # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0

        for block in self.blocks:
            x, attention_mask = block(x, attention_mask)

        if self.last:
            x = self.head(self.norm(x))

        return x


def _create_gpt_model(**model_kwargs):
    """Instantiate a plain (non-pipeline) GPT from keyword config."""
    model = GPT(**model_kwargs)
    return model


def _create_gpt_pipeline_model(depth=48, num_chunks=1, layer_partitions=None, **model_kwargs):
    """Build the pipeline stages owned by this rank.

    Partitions ``depth`` layers uniformly across the pipeline group (unless
    ``layer_partitions`` is given) and registers the tied embedding/head weight
    with a PipelineSharedModuleWrapper spanning the first and last stages.
    """
    logger = get_dist_logger()
    pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE)
    pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE)
    rank = gpc.get_global_rank()
    wrapper = PipelineSharedModuleWrapper([0, pipeline_size - 1])
    parts = partition_uniform(depth, pipeline_size, num_chunks)[pipeline_rank] if layer_partitions is None else layer_partitions
    models = []
    for start, end in parts:
        model_kwargs['first'] = start == 0
        model_kwargs['last'] = end == depth
        model_kwargs['depth'] = end - start
        chunk = PipelineGPT(**model_kwargs).to(get_current_device())
        if start == 0:
            wrapper.register_parameter(chunk.embed.word_embedding_weight)
        elif end == depth:
            wrapper.register_parameter(chunk.head.weight)
        models.append(chunk)
        logger.info(f'==> Rank {rank} built layer {start}-{end} / total {depth}')
    if len(models) == 1:
        model = models[0]
    else:
        model = nn.ModuleList(models)
    return model


@MODELS.register_module
def gpt2_small(**kwargs):
    model_kwargs = dict(dim=768, depth=12, num_heads=12, **kwargs)
    return _create_gpt_model(**model_kwargs)


@MODELS.register_module
def gpt2_medium(**kwargs):
    model_kwargs = dict(dim=1024, depth=24, num_heads=8, **kwargs)
    return _create_gpt_model(**model_kwargs)


@MODELS.register_module
def gpt2_large(**kwargs):
    model_kwargs = dict(dim=1536, depth=36, num_heads=12, **kwargs)
    return _create_gpt_model(**model_kwargs)


@MODELS.register_module
def gpt2_xl(**kwargs):
    model_kwargs = dict(dim=1600, depth=48, num_heads=16, **kwargs)
    return _create_gpt_model(**model_kwargs)


@MODELS.register_module
def gpt2_8B(**kwargs):
    model_kwargs = dict(dim=3072, depth=72, num_heads=24, **kwargs)
    return _create_gpt_model(**model_kwargs)


@MODELS.register_module
def gpt2_xl_pipeline(**kwargs):
    # NOTE(review): uses num_heads=20 while gpt2_xl uses 16 — confirm intended.
    model_kwargs = dict(dim=1600, depth=48, num_heads=20, **kwargs)
    return _create_gpt_pipeline_model(**model_kwargs)


@MODELS.register_module
def gpt2_8B_pipeline(**kwargs):
    model_kwargs = dict(dim=3072, depth=72, num_heads=24, **kwargs)
    return _create_gpt_pipeline_model(**model_kwargs)


@MODELS.register_module
def gpt3(**kwargs):
    model_kwargs = dict(dim=12288, depth=96, num_heads=96, **kwargs)
    return _create_gpt_model(**model_kwargs)


@MODELS.register_module
def gpt3_pipeline(**kwargs):
    model_kwargs = dict(dim=12288, depth=96, num_heads=96, **kwargs)
    return _create_gpt_pipeline_model(**model_kwargs)
from .vit import *
import math
from typing import Callable

import torch
from colossalai import nn as col_nn
from colossalai.nn.layer.utils import CheckpointModule
from colossalai.registry import LAYERS, MODELS
from torch import dtype, nn

__all__ = [
    'VisionTransformer',
    'vit_lite_depth7_patch4_32',
    'vit_tiny_patch4_32',
    'vit_tiny_patch16_224',
    'vit_tiny_patch16_384',
    'vit_small_patch16_224',
    'vit_small_patch16_384',
    'vit_small_patch32_224',
    'vit_small_patch32_384',
    'vit_base_patch16_224',
    'vit_base_patch16_384',
    'vit_base_patch32_224',
    'vit_base_patch32_384',
    'vit_large_patch16_224',
    'vit_large_patch16_384',
    'vit_large_patch32_224',
    'vit_large_patch32_384',
]

# Two weight-init schemes: 'torch' mimics PyTorch defaults, 'jax' mimics the
# original JAX/Flax ViT initialization. Selected per-module via init_method.
_init_rules = dict(
    torch=dict(
        embed=dict(
            weight_initializer=col_nn.init.kaiming_uniform_(a=math.sqrt(5)),
            bias_initializer=col_nn.init.xavier_uniform_(a=1, scale=1),
            position_embed_initializer=col_nn.init.zeros_(),
        ),
        transformer=dict(
            weight_initializer=col_nn.init.kaiming_uniform_(a=math.sqrt(5)),
            bias_initializer=col_nn.init.xavier_uniform_(a=1, scale=1),
        ),
        head=dict(
            weight_initializer=col_nn.init.kaiming_uniform_(a=math.sqrt(5)),
            bias_initializer=col_nn.init.xavier_uniform_(a=1, scale=1),
        ),
    ),
    jax=dict(
        embed=dict(
            weight_initializer=col_nn.init.lecun_normal_(),
            bias_initializer=col_nn.init.zeros_(),
            position_embed_initializer=col_nn.init.trunc_normal_(std=.02),
        ),
        transformer=dict(
            weight_initializer=col_nn.init.xavier_uniform_(),
            bias_initializer=col_nn.init.normal_(std=1e-6),
        ),
        head=dict(
            weight_initializer=col_nn.init.zeros_(),
            bias_initializer=col_nn.init.zeros_(),
        ),
    ),
)


@LAYERS.register_module
class ViTEmbedding(nn.Module):
    """Patch embedding followed by dropout."""

    def __init__(self,
                 img_size: int,
                 patch_size: int,
                 in_chans: int,
                 embedding_dim: int,
                 dropout: float,
                 dtype: dtype = None,
                 flatten: bool = True,
                 init_method: str = 'torch'):
        super().__init__()
        self.patch_embed = col_nn.PatchEmbedding(img_size,
                                                 patch_size,
                                                 in_chans,
                                                 embedding_dim,
                                                 dtype=dtype,
                                                 flatten=flatten,
                                                 **_init_rules[init_method]['embed'])
        self.dropout = col_nn.Dropout(dropout)

    def forward(self, x):
        x = self.patch_embed(x)
        x = self.dropout(x)
        return x


@LAYERS.register_module
class ViTSelfAttention(nn.Module):
    """Standard (non-causal) multi-head self-attention for ViT."""

    def __init__(self,
                 dim: int,
                 num_heads: int,
                 attention_dropout: float,
                 dropout: float,
                 bias: bool = True,
                 dtype: dtype = None,
                 init_method: str = 'torch'):
        super().__init__()
        self.attention_head_size = dim // num_heads
        # Single projection producing Q, K and V concatenated along the last dim.
        self.query_key_value = col_nn.Linear(dim,
                                             3 * dim,
                                             dtype=dtype,
                                             bias=bias,
                                             **_init_rules[init_method]['transformer'])
        self.attention_dropout = col_nn.Dropout(attention_dropout)
        self.dense = col_nn.Linear(dim, dim, dtype=dtype, bias=True, **_init_rules[init_method]['transformer'])
        self.dropout = col_nn.Dropout(dropout)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        qkv = self.query_key_value(x)
        all_head_size = qkv.shape[-1] // 3
        # Head count derived from the (possibly parallel-sliced) projection width.
        num_attention_heads = all_head_size // self.attention_head_size
        new_qkv_shape = qkv.shape[:-1] + \
            (num_attention_heads, 3 * self.attention_head_size)
        qkv = qkv.view(new_qkv_shape)
        qkv = qkv.permute((0, 2, 1, 3))
        q, k, v = torch.chunk(qkv, 3, dim=-1)

        x = torch.matmul(q, k.transpose(-1, -2))
        x = x / math.sqrt(self.attention_head_size)
        x = self.softmax(x)
        x = self.attention_dropout(x)

        x = torch.matmul(x, v)
        x = x.transpose(1, 2)
        new_context_layer_shape = x.size()[:-2] + (all_head_size, )
        x = x.reshape(new_context_layer_shape)
        x = self.dense(x)
        x = self.dropout(x)

        return x


@LAYERS.register_module
class ViTMLP(nn.Module):
    """Feed-forward block: Linear -> activation -> dropout -> Linear -> dropout."""

    def __init__(self,
                 dim: int,
                 mlp_ratio: int,
                 activation: Callable,
                 dropout: float,
                 dtype: dtype = None,
                 bias: bool = True,
                 init_method: str = 'torch'):
        super().__init__()
        self.dense_1 = col_nn.Linear(dim,
                                     mlp_ratio * dim,
                                     dtype=dtype,
                                     bias=bias,
                                     **_init_rules[init_method]['transformer'])
        self.activation = activation
        self.dropout_1 = col_nn.Dropout(dropout)
        self.dense_2 = col_nn.Linear(mlp_ratio * dim,
                                     dim,
                                     dtype=dtype,
                                     bias=bias,
                                     **_init_rules[init_method]['transformer'])
        self.dropout_2 = col_nn.Dropout(dropout)

    def forward(self, x):
        x = self.dense_1(x)
        x = self.activation(x)
        x = self.dropout_1(x)
        x = self.dense_2(x)
        x = self.dropout_2(x)
        return x


@LAYERS.register_module
class ViTHead(nn.Module):
    """Classification head reading the class token, with optional pre-logits layer."""

    def __init__(self,
                 dim: int,
                 num_classes: int,
                 representation_size: int = None,
                 dtype: dtype = None,
                 bias: bool = True,
                 init_method: str = 'torch'):
        super().__init__()
        # Optional "pre-logits" projection, as in the original ViT.
        if representation_size:
            self.representation = col_nn.Linear(dim,
                                                representation_size,
                                                bias=bias,
                                                dtype=dtype,
                                                **_init_rules[init_method]['head'])
        else:
            self.representation = None
            representation_size = dim

        self.dense = col_nn.Classifier(representation_size,
                                       num_classes,
                                       dtype=dtype,
                                       bias=bias,
                                       **_init_rules[init_method]['head'])

    def forward(self, x):
        # Classify from the class token (position 0).
        x = x[:, 0]
        if self.representation is not None:
            x = self.representation(x)
        x = self.dense(x)
        return x


@LAYERS.register_module
class ViTBlock(CheckpointModule):
    """Pre-norm ViT transformer block with stochastic depth and optional checkpointing."""

    def __init__(self,
                 dim: int,
                 num_heads: int,
                 mlp_ratio: int,
                 activation: Callable,
                 attention_dropout: float = 0.,
                 dropout: float = 0.,
                 drop_path: float = 0.,
                 layernorm_epsilon: float = 1e-6,
                 dtype: dtype = None,
                 bias: bool = True,
                 checkpoint: bool = False,
                 init_method: str = 'torch'):
        super().__init__(checkpoint)
        self.norm1 = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
        self.attn = ViTSelfAttention(dim=dim,
                                     num_heads=num_heads,
                                     attention_dropout=attention_dropout,
                                     dropout=dropout,
                                     bias=bias,
                                     dtype=dtype,
                                     init_method=init_method)
        self.drop_path = col_nn.DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
        self.mlp = ViTMLP(dim=dim,
                          mlp_ratio=mlp_ratio,
                          activation=activation,
                          dropout=dropout,
                          dtype=dtype,
                          bias=bias,
                          init_method=init_method)

    def _forward(self, x):
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


@MODELS.register_module
class VisionTransformer(nn.Module):
    """Vision Transformer: patch embedding, ViTBlock stack, final norm and head."""

    def __init__(self,
                 img_size: int = 224,
                 patch_size: int = 16,
                 in_chans: int = 3,
                 num_classes: int = 1000,
                 depth: int = 12,
                 num_heads: int = 12,
                 dim: int = 768,
                 mlp_ratio: int = 4,
                 attention_dropout: float = 0.,
                 dropout: float = 0.1,
                 drop_path: float = 0.,
                 layernorm_epsilon: float = 1e-6,
                 activation: Callable = nn.functional.gelu,
                 representation_size: int = None,
                 dtype: dtype = None,
                 bias: bool = True,
                 checkpoint: bool = False,
                 init_method: str = 'torch'):
        super().__init__()

        embed = ViTEmbedding(img_size=img_size,
                             patch_size=patch_size,
                             in_chans=in_chans,
                             embedding_dim=dim,
                             dropout=dropout,
                             dtype=dtype,
                             init_method=init_method)

        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, drop_path, depth)]
        blocks = [
            ViTBlock(
                dim=dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                attention_dropout=attention_dropout,
                dropout=dropout,
                drop_path=dpr[i],
                activation=activation,
                dtype=dtype,
                bias=bias,
                checkpoint=checkpoint,
                init_method=init_method,
            ) for i in range(depth)
        ]

        norm = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)

        head = ViTHead(dim=dim,
                       num_classes=num_classes,
                       representation_size=representation_size,
                       dtype=dtype,
                       bias=bias,
                       init_method=init_method)

        self.layers = nn.Sequential(
            embed,
            *blocks,
            norm,
            head,
        )

    def forward(self, x):
        x = self.layers(x)
        return x


def _create_vit_model(**model_kwargs):
    """Instantiate a VisionTransformer from keyword config."""
    model = VisionTransformer(**model_kwargs)
    return model


@MODELS.register_module
def vit_lite_depth7_patch4_32(**kwargs):
    model_kwargs = dict(img_size=32, patch_size=4, dim=256, depth=7, num_heads=4, mlp_ratio=2, num_classes=10, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_tiny_patch4_32(**kwargs):
    model_kwargs = dict(img_size=32, patch_size=4, dim=512, depth=6, num_heads=8, mlp_ratio=1, num_classes=10, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_tiny_patch16_224(**kwargs):
    model_kwargs = dict(img_size=224, patch_size=16, dim=192, depth=12, num_heads=3, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_tiny_patch16_384(**kwargs):
    model_kwargs = dict(img_size=384, patch_size=16, dim=192, depth=12, num_heads=3, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_small_patch16_224(**kwargs):
    model_kwargs = dict(img_size=224, patch_size=16, dim=384, depth=12, num_heads=6, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_small_patch16_384(**kwargs):
    model_kwargs = dict(img_size=384, patch_size=16, dim=384, depth=12, num_heads=6, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_small_patch32_224(**kwargs):
    model_kwargs = dict(img_size=224, patch_size=32, dim=384, depth=12, num_heads=6, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_small_patch32_384(**kwargs):
    model_kwargs = dict(img_size=384, patch_size=32, dim=384, depth=12, num_heads=6, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_base_patch16_224(**kwargs):
    model_kwargs = dict(img_size=224, patch_size=16, dim=768, depth=12, num_heads=12, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_base_patch16_384(**kwargs):
    model_kwargs = dict(img_size=384, patch_size=16, dim=768, depth=12, num_heads=12, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_base_patch32_224(**kwargs):
    model_kwargs = dict(img_size=224, patch_size=32, dim=768, depth=12, num_heads=12, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_base_patch32_384(**kwargs):
    model_kwargs = dict(img_size=384, patch_size=32, dim=768, depth=12, num_heads=12, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_large_patch16_224(**kwargs):
    model_kwargs = dict(img_size=224, patch_size=16, dim=1024, depth=24, num_heads=16, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_large_patch16_384(**kwargs):
    model_kwargs = dict(img_size=384, patch_size=16, dim=1024, depth=24, num_heads=16, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_large_patch32_224(**kwargs):
    model_kwargs = dict(img_size=224, patch_size=32, dim=1024, depth=24, num_heads=16, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)


@MODELS.register_module
def vit_large_patch32_384(**kwargs):
    model_kwargs = dict(img_size=384, patch_size=32, dim=1024, depth=24, num_heads=16, mlp_ratio=4, **kwargs)
    return _create_vit_model(**model_kwargs)
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import copy

import torch

from colossalai.registry import MODELS
from colossalai.nn.model.model_from_config import ModelFromConfig


@MODELS.register_module
class VisionTransformerFromConfig(ModelFromConfig):
    """Vision Transformer from
    `"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale"
    <https://arxiv.org/pdf/2010.11929>`_.

    Builds ``self.layers_cfg``, an ordered list of layer config dicts
    (embedding, optional tensor splitting, optional token fusion, the
    transformer blocks, the final norm and the head) that the
    ``ModelFromConfig`` base class materialises into modules.
    """

    def __init__(self,
                 embedding_cfg: dict,
                 norm_cfg: dict,
                 block_cfg: dict,
                 head_cfg: dict,
                 token_fusion_cfg: dict = None,
                 embed_dim=768,
                 depth=12,
                 drop_path_rate=0.,
                 tensor_splitting_cfg: dict = None):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_tokens = 1
        self.tensor_splitting_cfg = tensor_splitting_cfg
        # stochastic depth decay rule: per-block drop-path rate grows linearly
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        if token_fusion_cfg is None:
            token_fusion_cfg = []
        else:
            token_fusion_cfg = [token_fusion_cfg]

        self.layers_cfg = [
            embedding_cfg,

            # input tensor splitting
            *self._generate_tensor_splitting_cfg(),
            *token_fusion_cfg,

            # blocks
            *self._generate_block_cfg(dpr=dpr, block_cfg=block_cfg, depth=depth),

            # norm
            norm_cfg,

            # head
            head_cfg
        ]

    def _fuse_tokens(self, x):
        # NOTE(review): `self.cls_token` is not defined in this class — it is
        # presumably created by a token-fusion layer or subclass; confirm.
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_token, x), dim=1)
        return x

    def _generate_block_cfg(self, dpr, depth, block_cfg):
        """Return one config dict per block, each with its own drop-path rate.

        BUG FIX: the previous shallow ``block_cfg.copy()`` left every block's
        nested ``droppath_cfg`` pointing at the *same* dict, so all blocks
        silently received the last value of ``dpr``.  Deep-copying the
        template gives each block an independent ``drop_path``.
        """
        blocks_cfg = []
        for i in range(depth):
            _cfg = copy.deepcopy(block_cfg)
            _cfg['droppath_cfg']['drop_path'] = dpr[i]
            blocks_cfg.append(_cfg)
        return blocks_cfg

    def _generate_tensor_splitting_cfg(self):
        # Wrap in a list so it can be splat into layers_cfg; empty when unset.
        if self.tensor_splitting_cfg:
            return [self.tensor_splitting_cfg]
        else:
            return []

    def forward(self, x):    # [512, 3, 32, 32]
        for layer in self.layers:
            if isinstance(x, tuple):
                x = layer(*x)
            else:
                x = layer(x)
        return x    # [256, 5]

    def init_weights(self):
        # TODO: add init weights
        pass
import math
import torch
import torch.nn as nn
from colossalai.context import ParallelMode
from colossalai.nn.layer import VanillaPatchEmbedding, VanillaClassifier, \
    WrappedDropout as Dropout, WrappedDropPath as DropPath
from colossalai.nn.layer.moe import build_ffn_experts, MoeLayer, Top2Router, NormalNoiseGenerator, MoeModule
from .util import moe_sa_args, moe_mlp_args
from ..helper import TransformerLayer
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.utils import get_current_device
from typing import List


class VanillaSelfAttention(nn.Module):
    """Standard ViT self attention.
    """

    def __init__(self,
                 d_model: int,
                 n_heads: int,
                 d_kv: int,
                 attention_drop: float = 0,
                 drop_rate: float = 0,
                 bias: bool = True,
                 dropout1=None,
                 dropout2=None):
        super().__init__()
        self.n_heads = n_heads
        self.d_kv = d_kv
        # 1/sqrt(d_kv) scaling applied to the attention scores.
        self.scale = 1.0 / math.sqrt(self.d_kv)

        # Fused QKV projection: a single linear producing 3 * n_heads * d_kv features.
        self.dense1 = nn.Linear(d_model, 3 * n_heads * d_kv, bias, device=get_current_device())
        self.softmax = nn.Softmax(dim=-1)
        # Injected dropout modules (e.g. WrappedDropout) take precedence over plain nn.Dropout.
        self.atten_drop = nn.Dropout(attention_drop) if dropout1 is None else dropout1
        # NOTE(review): `bias` is not forwarded here, so the output projection
        # always uses the default bias=True — confirm this is intended.
        self.dense2 = nn.Linear(n_heads * d_kv, d_model, device=get_current_device())
        self.dropout = nn.Dropout(drop_rate) if dropout2 is None else dropout2

    def forward(self, x):
        qkv = self.dense1(x)
        # Split the fused projection into (3, heads, d_kv) and move the
        # q/k/v axis to the front: (3, batch, heads, seq, d_kv).
        new_shape = qkv.shape[:2] + (3, self.n_heads, self.d_kv)
        qkv = qkv.view(*new_shape)
        qkv = qkv.permute(2, 0, 3, 1, 4)
        q, k, v = qkv[:]

        # Scaled dot-product attention.
        x = torch.matmul(q, k.transpose(-2, -1)) * self.scale
        x = self.atten_drop(self.softmax(x))
        x = torch.matmul(x, v)

        # Merge the heads back: (batch, seq, heads * d_kv).
        x = x.transpose(1, 2)
        new_shape = x.shape[:2] + (self.n_heads * self.d_kv,)
        x = x.reshape(*new_shape)
        x = self.dense2(x)
        x = self.dropout(x)
        return x


class VanillaFFN(nn.Module):
    """FFN composed with two linear layers, also called MLP.
    """

    def __init__(self,
                 d_model: int,
                 d_ff: int,
                 activation=None,
                 drop_rate: float = 0,
                 bias: bool = True,
                 dropout1=None,
                 dropout2=None):
        super().__init__()
        dense1 = nn.Linear(d_model, d_ff, bias, device=get_current_device())
        act = nn.GELU() if activation is None else activation
        dense2 = nn.Linear(d_ff, d_model, bias, device=get_current_device())
        drop1 = nn.Dropout(drop_rate) if dropout1 is None else dropout1
        drop2 = nn.Dropout(drop_rate) if dropout2 is None else dropout2

        # linear -> activation -> dropout -> linear -> dropout
        self.ffn = nn.Sequential(dense1, act, drop1, dense2, drop2)

    def forward(self, x):
        return self.ffn(x)


class Widenet(nn.Module):
    """WideNet-style ViT where *all* blocks share one attention module, one
    router and one set of MoE FFN experts (cross-layer weight sharing).
    """

    def __init__(self,
                 num_experts: int,
                 capacity_factor_train: float = 1.25,
                 capacity_factor_eval: float = 2.0,
                 drop_tks: bool = True,
                 img_size: int = 224,
                 patch_size: int = 16,
                 in_chans: int = 3,
                 num_classes: int = 1000,
                 depth: int = 12,
                 d_model: int = 768,
                 num_heads: int = 12,
                 d_kv: int = 64,
                 d_ff: int = 4096,
                 attention_drop: float = 0.,
                 drop_rate: float = 0.1,
                 drop_path: float = 0.):
        super().__init__()

        embedding = VanillaPatchEmbedding(img_size=img_size,
                                          patch_size=patch_size,
                                          in_chans=in_chans,
                                          embed_size=d_model)
        embed_dropout = Dropout(p=drop_rate, mode=ParallelMode.TENSOR)

        # Shared modules reused by every transformer layer below.
        shared_sa = VanillaSelfAttention(**moe_sa_args(
            d_model=d_model, n_heads=num_heads, d_kv=d_kv, attention_drop=attention_drop, drop_rate=drop_rate))

        noisy_func = NormalNoiseGenerator(num_experts)
        shared_router = Top2Router(capacity_factor_train=capacity_factor_train,
                                   capacity_factor_eval=capacity_factor_eval,
                                   noisy_func=noisy_func,
                                   drop_tks=drop_tks)
        shared_experts = build_ffn_experts(num_experts, d_model, d_ff, drop_rate=drop_rate)

        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, drop_path, depth)]
        blocks = [
            TransformerLayer(att=shared_sa,
                             ffn=MoeLayer(dim_model=d_model,
                                          num_experts=num_experts,
                                          router=shared_router,
                                          experts=shared_experts),
                             norm1=nn.LayerNorm(d_model, eps=1e-6),
                             norm2=nn.LayerNorm(d_model, eps=1e-6),
                             droppath=DropPath(p=dpr[i], mode=ParallelMode.TENSOR)) for i in range(depth)
        ]
        norm = nn.LayerNorm(d_model, eps=1e-6)
        self.linear = VanillaClassifier(in_features=d_model, num_classes=num_classes)
        # Zero-initialised classifier: the model starts as a uniform predictor.
        nn.init.zeros_(self.linear.weight)
        nn.init.zeros_(self.linear.bias)
        self.widenet = nn.Sequential(embedding, embed_dropout, *blocks, norm)

    def forward(self, x):
        # Reset the accumulated MoE auxiliary (load-balancing) loss each step.
        MOE_CONTEXT.reset_loss()
        x = self.widenet(x)
        # Mean-pool over tokens before classification.
        x = torch.mean(x, dim=1)
        x = self.linear(x)
        return x


class ViTMoE(nn.Module):
    """ViT with MoE FFNs on every *odd* layer (dense FFN on even layers),
    following the every-other-layer MoE placement scheme.
    """

    def __init__(self,
                 num_experts: int or List[int],
                 use_residual: bool = False,
                 capacity_factor_train: float = 1.25,
                 capacity_factor_eval: float = 2.0,
                 drop_tks: bool = True,
                 img_size: int = 224,
                 patch_size: int = 16,
                 in_chans: int = 3,
                 num_classes: int = 1000,
                 depth: int = 12,
                 d_model: int = 768,
                 num_heads: int = 12,
                 d_kv: int = 64,
                 d_ff: int = 3072,
                 attention_drop: float = 0.,
                 drop_rate: float = 0.1,
                 drop_path: float = 0.):
        super().__init__()

        assert depth % 2 == 0, "The number of layers should be even right now"

        # One expert count per MoE layer (depth // 2 of them).
        if isinstance(num_experts, list):
            assert len(num_experts) == depth // 2, \
                "The length of num_experts should equal to the number of MOE layers"
            num_experts_list = num_experts
        else:
            num_experts_list = [num_experts] * (depth // 2)

        embedding = VanillaPatchEmbedding(img_size=img_size,
                                          patch_size=patch_size,
                                          in_chans=in_chans,
                                          embed_size=d_model)
        embed_dropout = Dropout(p=drop_rate, mode=ParallelMode.TENSOR)

        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, drop_path, depth)]
        blocks = []
        for i in range(depth):
            sa = VanillaSelfAttention(**moe_sa_args(
                d_model=d_model, n_heads=num_heads, d_kv=d_kv, attention_drop=attention_drop, drop_rate=drop_rate))

            if i % 2 == 0:
                # Even layers: plain dense FFN.
                ffn = VanillaFFN(**moe_mlp_args(d_model=d_model, d_ff=d_ff, drop_rate=drop_rate))
            else:
                # Odd layers: MoE FFN; top-1 + residual expert in PR-MoE mode,
                # top-2 routing otherwise.
                num_experts = num_experts_list[i // 2]
                experts = build_ffn_experts(num_experts, d_model, d_ff, drop_rate=drop_rate)
                ffn = MoeModule(dim_model=d_model,
                                num_experts=num_experts,
                                top_k=1 if use_residual else 2,
                                capacity_factor_train=capacity_factor_train,
                                capacity_factor_eval=capacity_factor_eval,
                                noisy_policy='Jitter' if use_residual else 'Gaussian',
                                drop_tks=drop_tks,
                                use_residual=use_residual,
                                expert_instance=experts,
                                expert_cls=VanillaFFN,
                                **moe_mlp_args(d_model=d_model, d_ff=d_ff, drop_rate=drop_rate))

            layer = TransformerLayer(att=sa,
                                     ffn=ffn,
                                     norm1=nn.LayerNorm(d_model, eps=1e-6),
                                     norm2=nn.LayerNorm(d_model, eps=1e-6),
                                     droppath=DropPath(p=dpr[i], mode=ParallelMode.TENSOR))
            blocks.append(layer)

        norm = nn.LayerNorm(d_model, eps=1e-6)
        self.linear = VanillaClassifier(in_features=d_model, num_classes=num_classes)
        # Zero-initialised classifier head.
        nn.init.zeros_(self.linear.weight)
        nn.init.zeros_(self.linear.bias)
        self.vitmoe = nn.Sequential(embedding, embed_dropout, *blocks, norm)

    def forward(self, x):
        # Reset the accumulated MoE auxiliary loss each step.
        MOE_CONTEXT.reset_loss()
        x = self.vitmoe(x)
        # Mean-pool over tokens before classification.
        x = torch.mean(x, dim=1)
        x = self.linear(x)
        return x
from colossalai.context import ParallelMode
from colossalai.nn.layer import WrappedDropout as Dropout


def moe_sa_args(d_model: int,
                n_heads: int,
                d_kv: int,
                attention_drop: float = 0,
                drop_rate: float = 0,
                bias: bool = True):
    """Build the keyword arguments for a self-attention module used inside MoE
    experts.

    Plain ``nn.Dropout`` is replaced by ``WrappedDropout`` in TENSOR mode,
    since modules need adapting before they are placed in experts.
    """
    return dict(d_model=d_model,
                n_heads=n_heads,
                d_kv=d_kv,
                bias=bias,
                dropout1=Dropout(attention_drop, mode=ParallelMode.TENSOR),
                dropout2=Dropout(drop_rate, mode=ParallelMode.TENSOR))


def moe_mlp_args(d_model: int, d_ff: int, drop_rate: float, bias: bool = True):
    """Build the keyword arguments for an MLP used inside MoE experts.

    Both dropout slots use ``WrappedDropout`` in TENSOR mode, since modules
    need adapting before they are placed in experts.
    """
    shared_dropout_kwargs = dict(p=drop_rate, mode=ParallelMode.TENSOR)
    return dict(d_model=d_model,
                d_ff=d_ff,
                bias=bias,
                dropout1=Dropout(**shared_dropout_kwargs),
                dropout2=Dropout(**shared_dropout_kwargs))
from .models import Widenet, ViTMoE from .gpt import MOEGPT, prmoe_4b, prmoe_31b, prmoe_51b
from typing import Callable, List
from torch import dtype, nn
from colossalai import nn as col_nn
from colossalai.registry import LAYERS, MODELS
from colossalai.nn.layer import MoeModule
from colossalai.context import MOE_CONTEXT
from colossalai.logging import get_dist_logger
from colossalai.nn.layer.utils import CheckpointModule, divide
from model_zoo.gpt.gpt import GPTEmbedding, GPTSelfAttention, GPTMLP, GPTBlock, GPTLMHead


@LAYERS.register_module
class MOEGPTBlock(CheckpointModule):
    """A GPT transformer block whose MLP is replaced by a top-1 MoE module
    (optionally PR-MoE style with a residual expert).
    """

    def __init__(self,
                 num_experts: int,
                 dim: int,
                 num_heads: int,
                 mlp_ratio: float,
                 activation: Callable,
                 capacity_factor_train: float = 1.0,
                 capacity_factor_eval: float = 1.0,
                 use_residual: bool = False,
                 attention_dropout: float = 0.,
                 dropout: float = 0.,
                 layernorm_epsilon: float = 1e-5,
                 dtype: dtype = None,
                 bias: bool = True,
                 apply_post_layernorm: bool = False,
                 fuse_scale_mask_softmax: bool = False,
                 checkpoint: bool = False):
        super().__init__(checkpoint)
        self.apply_post_layernorm = apply_post_layernorm
        self.norm1 = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)
        self.attn = GPTSelfAttention(dim=dim,
                                     num_heads=num_heads,
                                     attention_dropout=attention_dropout,
                                     dropout=dropout,
                                     bias=bias,
                                     fuse_scale_mask_softmax=fuse_scale_mask_softmax,
                                     dtype=dtype)
        self.norm2 = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)

        # NOTE(review): name presumably meant `mlp_factory_dict` (typo kept).
        mpl_factory_dict = dict(dim=dim, mlp_ratio=mlp_ratio, activation=activation, dropout=dropout, dtype=dtype, bias=bias)

        self.mlp = MoeModule(dim_model=dim,
                             num_experts=num_experts,
                             top_k=1,
                             capacity_factor_train=capacity_factor_train,
                             capacity_factor_eval=capacity_factor_eval,
                             noisy_policy='Jitter',
                             use_residual=use_residual,
                             expert_cls=GPTMLP,
                             **mpl_factory_dict)

    def _forward(self, x, attention_mask=None):
        # Pre-layernorm takes the residual *before* the norm; post-layernorm
        # takes it *after*.  Same pattern for both the attention and MLP halves.
        if not self.apply_post_layernorm:
            residual = x
        x = self.norm1(x)
        if self.apply_post_layernorm:
            residual = x
        x = residual + self.attn(x, attention_mask)

        if not self.apply_post_layernorm:
            residual = x
        x = self.norm2(x)
        if self.apply_post_layernorm:
            residual = x
        x = residual + self.mlp(x)

        return x, attention_mask


@MODELS.register_module
class MOEGPT(nn.Module):
    """GPT variant with MoE blocks on every odd layer (dense GPTBlock on even
    layers), plus embedding, final LayerNorm and a weight-tied LM head.
    """

    def __init__(self,
                 num_experts: int or List[int],
                 use_residual: bool = False,
                 capacity_factor_train: float = 1.0,
                 capacity_factor_eval: float = 1.0,
                 vocab_size: int = 50304,
                 max_position_embeddings: int = 1024,
                 dim: int = 768,
                 num_heads: int = 12,
                 depth: int = 12,
                 mlp_ratio: float = 4.0,
                 dropout: float = 0.1,
                 embedding_dropout: float = 0.1,
                 attention_dropout: float = 0.1,
                 layernorm_epsilon: float = 1e-5,
                 activation: Callable = nn.functional.gelu,
                 padding_idx: int = None,
                 dtype: dtype = None,
                 bias: bool = True,
                 apply_post_layernorm: bool = False,
                 fuse_scale_mask_softmax: bool = False,
                 checkpoint: bool = False) -> None:
        super().__init__()

        # One expert count per MoE layer (depth must be divisible by 2).
        half_depth = divide(depth, 2)
        if isinstance(num_experts, list):
            assert len(num_experts) == half_depth, \
                "The length of num_experts should equal to the number of MOE layers"
            num_experts_list = num_experts
        else:
            num_experts_list = [num_experts] * half_depth

        self.embed = GPTEmbedding(embedding_dim=dim,
                                  vocab_size=vocab_size,
                                  max_position_embeddings=max_position_embeddings,
                                  padding_idx=padding_idx,
                                  dropout=embedding_dropout,
                                  dtype=dtype)

        block_list = []
        block_factory_dict = dict(dim=dim,
                                  num_heads=num_heads,
                                  mlp_ratio=mlp_ratio,
                                  activation=activation,
                                  attention_dropout=attention_dropout,
                                  dropout=dropout,
                                  layernorm_epsilon=layernorm_epsilon,
                                  dtype=dtype,
                                  bias=bias,
                                  apply_post_layernorm=apply_post_layernorm,
                                  fuse_scale_mask_softmax=fuse_scale_mask_softmax,
                                  checkpoint=checkpoint)

        for i in range(depth):
            if i % 2 == 0:
                # Even layers: dense GPT block.
                block_module = GPTBlock(**block_factory_dict)
            else:
                # Odd layers: MoE GPT block with this layer's expert count.
                num_experts = num_experts_list[i // 2]
                block_module = MOEGPTBlock(num_experts=num_experts,
                                           capacity_factor_train=capacity_factor_train,
                                           capacity_factor_eval=capacity_factor_eval,
                                           use_residual=use_residual,
                                           **block_factory_dict)

            block_list.append(block_module)

        self.blocks = nn.ModuleList(block_list)

        self.norm = col_nn.LayerNorm(normalized_shape=dim, eps=layernorm_epsilon, dtype=dtype)

        # LM head shares weights with the input word embedding.
        self.head = GPTLMHead(dim=dim,
                              vocab_size=vocab_size,
                              word_embeeding_weight=self.embed.word_embedding_weight,
                              dtype=dtype)

    def forward(self, input_ids, attention_mask=None):
        # Reset the accumulated MoE auxiliary loss each step.
        MOE_CONTEXT.reset_loss()
        x = self.embed(input_ids)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # Adapted from huggingface
        if attention_mask is not None:
            batch_size = input_ids.shape[0]
            attention_mask = attention_mask.view(batch_size, -1)
            attention_mask = col_nn.partition_batch(attention_mask)
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            attention_mask = attention_mask.to(dtype=x.dtype)    # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0

        for block in self.blocks:
            x, attention_mask = block(x, attention_mask)

        x = self.head(self.norm(x))

        return x


def _create_moegpt_model(**model_kwargs):
    # Single construction point for the registered prmoe_* factories below.
    model = MOEGPT(**model_kwargs)
    return model


def _prmoe_check_sanity(kwargs_dict):
    # PR-MoE requires use_residual=True; warn (the factories below then force
    # it by setting use_residual=True in their own kwargs).
    logger = get_dist_logger()
    if not kwargs_dict.pop('use_residual', False):
        logger.warning(
            "If you want to use PR-MOE, please set 'use_residual' to True. "
            "Otherwise, we'll force 'use_residual' to True.",
            ranks=[0])


@MODELS.register_module
def prmoe_4b(**kwargs):
    _prmoe_check_sanity(kwargs)
    model_kwargs = dict(num_experts=[32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 64, 64],
                        use_residual=True,
                        dim=1024,
                        depth=24,
                        num_heads=16,
                        **kwargs)
    return _create_moegpt_model(**model_kwargs)


@MODELS.register_module
def prmoe_31b(**kwargs):
    _prmoe_check_sanity(kwargs)
    model_kwargs = dict(num_experts=[64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 128, 128],
                        use_residual=True,
                        dim=2048,
                        depth=24,
                        num_heads=16,
                        **kwargs)
    return _create_moegpt_model(**model_kwargs)


@MODELS.register_module
def prmoe_51b(**kwargs):
    _prmoe_check_sanity(kwargs)
    model_kwargs = dict(num_experts=[32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 64, 64, 64, 64],
                        use_residual=True,
                        dim=3072,
                        depth=32,
                        num_heads=24,
                        **kwargs)
    return _create_moegpt_model(**model_kwargs)
from typing import Optional


class TensorParallelEnv(object):
    """Process-wide singleton holding the tensor-parallel environment settings
    (parallel mode, vocab parallelism, and the dims/groups used by the
    1D/2D/2.5D/3D tensor-parallel layers).

    Note: constructing ``TensorParallelEnv(...)`` always returns the same
    instance, but ``__init__`` runs on every call, so each construction
    re-``load``s (and thus resets) the stored settings.
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        # FIX: previously this forwarded *args/**kwargs to object.__new__,
        # which is a fragile, deprecated pattern (it raises TypeError the
        # moment __init__ is no longer overridden).  object.__new__ only
        # needs the class; __init__ receives the arguments.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self, *args, **kwargs):
        self.load(*args, **kwargs)

    def load(self,
             mode: Optional[str] = None,
             vocab_parallel: bool = False,
             parallel_input_1d: bool = False,
             summa_dim: int = None,
             tesseract_dim: int = None,
             tesseract_dep: int = None,
             depth_3d: int = None,
             input_group_3d=None,
             weight_group_3d=None,
             output_group_3d=None):
        """Populate the environment; unspecified fields fall back to defaults.

        Args:
            mode: tensor-parallel mode name (e.g. '1d', '2d', '2.5d', '3d').
            vocab_parallel: whether embeddings/classifiers split the vocab dim.
            parallel_input_1d: whether the 1D layers receive parallel input.
            summa_dim: SUMMA grid dimension (2D mode).
            tesseract_dim / tesseract_dep: tesseract dims (2.5D mode).
            depth_3d: cube depth (3D mode).
            input_group_3d / weight_group_3d / output_group_3d: 3D-mode groups.
        """
        self.mode = mode
        self.vocab_parallel = vocab_parallel
        self.parallel_input_1d = parallel_input_1d
        self.summa_dim = summa_dim
        self.tesseract_dim = tesseract_dim
        self.tesseract_dep = tesseract_dep
        self.depth_3d = depth_3d
        self.input_group_3d = input_group_3d
        self.weight_group_3d = weight_group_3d
        self.output_group_3d = output_group_3d

    def save(self):
        """Return the current settings as a plain dict (mirror of ``load``)."""
        return dict(mode=self.mode,
                    vocab_parallel=self.vocab_parallel,
                    parallel_input_1d=self.parallel_input_1d,
                    summa_dim=self.summa_dim,
                    tesseract_dim=self.tesseract_dim,
                    tesseract_dep=self.tesseract_dep,
                    depth_3d=self.depth_3d,
                    input_group_3d=self.input_group_3d,
                    weight_group_3d=self.weight_group_3d,
                    output_group_3d=self.output_group_3d)


tensor_parallel_env = TensorParallelEnv()
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import argparse
import os
import pprint
from pathlib import Path
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union

import torch
import torch.nn as nn
from torch.nn.modules.loss import _Loss
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader

from colossalai.amp import AMP_TYPE, convert_to_amp
from colossalai.amp.naive_amp import NaiveAMPModel
from colossalai.builder.builder import build_gradient_handler
from colossalai.context import Config, ConfigException, ParallelMode
from colossalai.core import global_context as gpc
from colossalai.engine.schedule import NonPipelineSchedule, PipelineSchedule, InterleavedPipelineSchedule, get_tensor_shape
from colossalai.context.moe_context import MOE_CONTEXT
from colossalai.engine import Engine
from colossalai.engine.ophooks import BaseOpHook
from colossalai.logging import get_dist_logger
from colossalai.nn.optimizer.colossalai_optimizer import ColossalaiOptimizer
from colossalai.utils import (accumulate_gradient, get_current_device, is_using_ddp, is_using_pp, is_using_sequence,
                              sync_model_param)
from colossalai.utils.moe import sync_moe_model_param
from colossalai.zero import convert_to_zero_v2
from colossalai.zero.sharded_optim.sharded_optim_v2 import ShardedOptimizerV2


def get_default_parser():
    """Reads user command line and uses an argument parser to parse the input arguments.
    Input arguments include configuration, host, port, world size, local rank, backend for torch.distributed.

    Returns:
       Namespace: Returns the parser with the default arguments, the user may add customized arguments into this parser.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, help='path to the config file')
    parser.add_argument('--host', type=str, help='the master address for distributed training')
    parser.add_argument('--port', type=int, help='the master port for distributed training')
    parser.add_argument('--world_size', type=int, help='world size for distributed training')
    parser.add_argument('--rank', type=int, help='rank for the default process group')
    parser.add_argument('--local_rank', type=int, help='local rank on the node')
    parser.add_argument('--backend', type=str, default='nccl', help='backend for distributed communication')
    return parser


def launch(config: Union[str, Path, Config, Dict],
           rank: int,
           world_size: int,
           host: str,
           port: int,
           backend: str = 'nccl',
           local_rank: int = None,
           seed: int = 1024,
           verbose: bool = True):
    """This function first parses the configuration arguments, using :func:`parse_args()` in case one of the input
    arguments are not given. Then initialize and set distributed environment by calling global_context's functions.

    Args:
        config (Union[str, dict, Config]): Config file or config file path are both acceptable
        rank (int): Rank for the default process group
        world_size (int): World size of the default process group
        host (str): The master address for distributed training
        port (str): The master port for distributed training
        backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl``
        local_rank (int, optional):
            Rank for the process on the node and is used to set the default CUDA device,
            defaults to None. If local_rank = None, the default device ordinal will be calculated automatically.
        seed (int, optional): Specified random seed for every process. Defaults to 1024.
        verbose (bool, optional): Whether to print logs. Defaults to True.

    Raises:
        Exception: Raise exception when config type is wrong
    """
    gpc.verbose = verbose

    # set config
    assert isinstance(config, (Config, str, Path, dict)), \
        f'expected argument config to be Config, str or Path, but got {type(config)}'
    if not isinstance(config, Config) and isinstance(config, dict):
        config = Config(config)
    if isinstance(config, (str, Path)):
        config = Config.from_file(config)
    gpc.load_config(config)

    # init default process group
    gpc.init_global_dist(rank, world_size, backend, host, port)

    # init process groups for different parallel modes from config
    gpc.init_parallel_groups()

    # set cuda device
    if torch.cuda.is_available():
        # if local rank is not given, calculate automatically
        gpc.set_device(local_rank)

    # set the number of processes running on the same node
    gpc.detect_num_processes_on_current_node()

    gpc.set_seed(seed)

    if verbose:
        logger = get_dist_logger()
        logger.info(
            f'Distributed environment is initialized, '
            f'data parallel size: {gpc.data_parallel_size}, pipeline parallel size: {gpc.pipeline_parallel_size}, '
            f'tensor parallel size: {gpc.tensor_parallel_size}',
            ranks=[0])


def launch_from_slurm(config: Union[str, Path, Config, Dict],
                      host: str,
                      port: int,
                      backend: str = 'nccl',
                      seed: int = 1024,
                      verbose: bool = True):
    """A wrapper for colossalai.launch for SLURM launcher by reading rank and world size from the environment variables
    set by SLURM

    Args:
        config (Union[str, dict, Config]): Config file or config file path are both acceptable
        host (str): The master address for distributed training
        port (str): The master port for distributed training
        backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl``
        seed (int, optional): Specified random seed for every process. Defaults to 1024.
        verbose (bool, optional): Whether to print logs. Defaults to True.
    """
    # rank/world size come from the SLURM-provided environment
    rank = int(os.environ['SLURM_PROCID'])
    world_size = int(os.environ['SLURM_NPROCS'])
    launch(config=config,
           rank=rank,
           world_size=world_size,
           host=host,
           port=port,
           backend=backend,
           seed=seed,
           verbose=verbose)


def launch_from_openmpi(config: Union[str, Path, Config, Dict],
                        host: str,
                        port: int,
                        backend: str = 'nccl',
                        seed: int = 1024,
                        verbose: bool = True):
    """A wrapper for colossalai.launch for OpenMPI launcher by reading rank and world size from the environment variables
    set by OpenMPI

    Args:
        config (Union[str, dict, Config]): Config file or config file path are both acceptable
        host (str): The master address for distributed training
        port (str): The master port for distributed training
        backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl``
        seed (int, optional): Specified random seed for every process. Defaults to 1024.
        verbose (bool, optional): Whether to print logs. Defaults to True.
    """
    # rank/local rank/world size come from the OpenMPI-provided environment
    rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
    local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
    world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
    launch(config=config,
           local_rank=local_rank,
           rank=rank,
           world_size=world_size,
           host=host,
           port=port,
           backend=backend,
           seed=seed,
           verbose=verbose)


def launch_from_torch(config: Union[str, Path, Config, Dict],
                      backend: str = 'nccl',
                      seed: int = 1024,
                      verbose: bool = True):
    """A wrapper for colossalai.launch for torchrun or torch.distributed.launch by reading rank and world size
    from the environment variables set by PyTorch

    Args:
        config (Union[str, dict, Config]): Config file or config file path are both acceptable
        backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl``
        seed (int, optional): Specified random seed for every process. Defaults to 1024.
        verbose (bool, optional): Whether to print logs. Defaults to True.
    """
    # rank/local rank/world size/master addr+port come from the torchrun environment
    rank = int(os.environ['RANK'])
    local_rank = int(os.environ['LOCAL_RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    host = os.environ['MASTER_ADDR']
    port = int(os.environ['MASTER_PORT'])
    launch(config=config,
           local_rank=local_rank,
           rank=rank,
           world_size=world_size,
           host=host,
           port=port,
           backend=backend,
           seed=seed,
           verbose=verbose)


def initialize(model: nn.Module,
               optimizer: Optimizer,
               criterion: Optional[_Loss] = None,
               train_dataloader: Optional[Iterable] = None,
               test_dataloader: Optional[Iterable] = None,
               lr_scheduler: Optional[_LRScheduler] = None,
               ophooks: Optional[List[BaseOpHook]] = None,
               verbose: bool = True) -> Tuple[Engine, DataLoader, DataLoader, _LRScheduler]:
    """Core function to wrap the essential training components with our functionality based on the config which is
    loaded into gpc.config.

    Args:
        model (:class:`torch.nn.Module` or Callbale): Your model instance or a function to build the model.
        optimizer (:class:`torch.optim.optimizer.Optimizer` or :class:`Type[torch.optim.optimizer]`):
            Your optimizer instance.
        criterion (:class:`torch.nn.modules.loss._Loss`, optional): Your criterion instance.
        train_dataloader (:class:`torch.utils.data.DataLoader`, optional): Dataloader for training.
        test_dataloader (:class:`torch.utils.data.DataLoader`, optional): Dataloader for testing.
        lr_scheduler (:class:`torch.nn.lr_scheduler._LRScheduler`, optional): Your lr scheduler instance, optional.
        verbose (bool, optional): Whether to print logs.

    Returns:
        Tuple (engine, train_dataloader, test_dataloader, lr_scheduler):
            A tuple of ``(engine, train_dataloader, test_dataloader, lr_scheduler)``
            where only ``engine`` could not be None.
    """
    # get logger
    logger = get_dist_logger()
    gpc.verbose = verbose

    # get config from gpc
    config = gpc.config

    # print config
    if verbose:
        logger.info(
            f"\n========== Your Config ========\n"
            f"{pprint.pformat(gpc.config)}\n"
            f"================================\n",
            ranks=[0])

    # cudnn
    cudnn_benchmark = config.get('cudnn_benchmark', True)
    cudnn_deterministic = config.get('cudnn_deterministic', False)
    torch.backends.cudnn.benchmark = cudnn_benchmark
    torch.backends.cudnn.deterministic = cudnn_deterministic
    if verbose:
        logger.info(f"cuDNN benchmark = {cudnn_benchmark}, deterministic = {cudnn_deterministic}", ranks=[0])

    # zero
    use_zero = hasattr(gpc.config, 'zero')
    if use_zero:
        zero_cfg = gpc.config.get('zero', None)
        # NOTE(review): cfg_ below is computed but never used in this branch —
        # looks like dead code left over from a refactor; confirm.
        if zero_cfg is not None:
            cfg_ = zero_cfg.copy()
        else:
            cfg_ = {}
        optimizer_config = zero_cfg.get('optimizer_config', None)
        model_config = zero_cfg.get('model_config', None)
        model, optimizer = convert_to_zero_v2(model,
                                              optimizer,
                                              model_config=model_config,
                                              optimizer_config=optimizer_config)

        logger.info("Initializing ZeRO model and optimizer finished!", ranks=[0])
        # FIXME() throw a warning if using zero with MP
        if gpc.get_world_size(ParallelMode.MODEL) > 1:
            logger.warning("ZeRO currently has not been tested with model parallelism.", ranks=[0])
    else:
        if isinstance(model, nn.Module):
            # first sync model across dp ranks
            model.to(get_current_device())
        elif isinstance(model, Callable):
            # model was given as a factory function; build it now
            model = model().to(get_current_device())

        # optimizer maybe a optimizer_cls
        logger.warning("Initializing an non ZeRO model with optimizer class")
        if isinstance(optimizer, Callable):
            optimizer = optimizer(model.parameters())

    # synchronise initial parameters across the relevant data-parallel group
    if not use_zero:
        if is_using_sequence():
            sync_model_param(model, ParallelMode.SEQUENCE_DP)
        elif MOE_CONTEXT.is_initialized:
            sync_moe_model_param(model)
        elif is_using_ddp():
            sync_model_param(model, ParallelMode.DATA)
    else:
        logger.warning(
            "The parameters of models is not automatically synchronized.\n"
            "Please make sure that all parameters are the same in data parallel group.",
            ranks=[0])

    # check amp and zero
    fp16_cfg = gpc.config.get('fp16', None)

    if fp16_cfg is not None and fp16_cfg.mode is not None and use_zero:
        raise ConfigException(
            "It is not allowed to set fp16 and zero configuration in your config file at the same time")

    # clip grad norm
    clip_grad_norm = gpc.config.get('clip_grad_norm', 0.0)

    # initialize amp
    amp_mode = None
    if fp16_cfg is not None and fp16_cfg.mode is not None:
        cfg_ = fp16_cfg.copy()
        amp_mode = cfg_.pop('mode')
        if is_using_pp():
            assert amp_mode == AMP_TYPE.NAIVE, 'Pipeline only support NaiveAMP currently'
        if amp_mode == AMP_TYPE.NAIVE:
            cfg_['clip_grad_norm'] = clip_grad_norm
        model, optimizer, criterion = convert_to_amp(model=model,
                                                     optimizer=optimizer,
                                                     criterion=criterion,
                                                     mode=amp_mode,
                                                     amp_config=cfg_)

    # gradient handler
    gradient_handler_cfg = gpc.config.get('gradient_handler', None)
    if gradient_handler_cfg is None:
        # if gradient handler is not specified in the configuration file,
        # check in the following order
        # 1. if optimizer is ZERO, then use zero grad handler
        # 2. if dp size is larger than 1 and pipeline is not used, use pytorch ddp
        # 3. if using pipeline and dp size larger than 1, use data parallel grad handler
        if isinstance(optimizer, ShardedOptimizerV2):
            gradient_handler_cfg = [dict(type='ZeROGradientHandler')]
            if verbose:
                logger.info(
                    "Training with zero is detected, ZeROGradientHandler is automatically "
                    "added even though not specified in the configuration",
                    ranks=[0])
        elif is_using_ddp() and MOE_CONTEXT.is_initialized:
            gradient_handler_cfg = [dict(type='MoeGradientHandler')]
            if verbose:
                logger.info(
                    "Data parallel training is detected with moe parallel, MoeGradientHandler is automatically "
                    "added even though not specified in the configuration",
                    ranks=[0])
        elif is_using_sequence():
            model = DDP(model,
                        process_group=gpc.get_group(ParallelMode.SEQUENCE_DP),
                        device_ids=[torch.cuda.current_device()])
            if verbose:
                logger.info('Model is using torch.nn.parallel.DistributedDataParallel for Sequence Parallelism',
                            ranks=[0])
        elif is_using_ddp() and not is_using_pp() and amp_mode != AMP_TYPE.NAIVE:
            model = DDP(model, process_group=gpc.get_group(ParallelMode.DATA), device_ids=[torch.cuda.current_device()])
            if verbose:
                logger.info('Model is using torch.nn.parallel.DistributedDataParallel for Data Parallelism', ranks=[0])
        elif is_using_ddp():
            gradient_handler_cfg = [dict(type='DataParallelGradientHandler')]
            if verbose:
                logger.info(
                    "Data parallel training is detected when using pipeline parallel, "
                    "DataParallelGradientHandler is automatically "
                    "added even though not specified in the configuration",
                    ranks=[0])
        # add pipeline parallel gradient handler, if pipeline shared module is detected
        for param in model.parameters():
            if getattr(param, 'pipeline_shared_module_pg', None) is not None:
                if gradient_handler_cfg is None:
                    gradient_handler_cfg = [dict(type='PipelineSharedModuleGradientHandler')]
                else:
                    gradient_handler_cfg.append(dict(type='PipelineSharedModuleGradientHandler'))
                if verbose:
                    logger.info(
                        "pipeline_shared_module is detected, PipelineSharedModuleGradientHandler is automatically "
                        "added even though not specified in the configuration",
                        ranks=[0])
                break
    else:
        if not isinstance(gradient_handler_cfg, list):
            raise ConfigException(
                f"expected gradient_handler in the configuration file to be a list but got {type(gradient_handler_cfg)}"
            )

    # turn off sync buffer for NaiveAMPModel if using torch DDP and NaiveAMPModel at the same time
    # to avoid duplicated buffer synchronization
    if isinstance(model, DDP) and isinstance(model.module, NaiveAMPModel):
        model.module.sync_buffer = False

    # initialize schedule for engine
    if is_using_pp():
        tensor_shape = get_tensor_shape()
        use_interleaved = hasattr(gpc.config, 'model') and hasattr(gpc.config.model, 'num_chunks')
        if gpc.is_initialized(ParallelMode.PARALLEL_1D):
            scatter_gather = True
        else:
            scatter_gather = False
        if use_interleaved:
            schedule = InterleavedPipelineSchedule(gpc.config.NUM_MICRO_BATCHES,
                                                   gpc.config.model.num_chunks,
                                                   tensor_shape=tensor_shape,
                                                   scatter_gather_tensors=scatter_gather)
        else:
            schedule = PipelineSchedule(gpc.config.NUM_MICRO_BATCHES,
                                        tensor_shape=tensor_shape,
                                        scatter_gather_tensors=scatter_gather)
    else:
        schedule = NonPipelineSchedule()

    if gradient_handler_cfg is None:
        gradient_handlers = None
        if verbose and not isinstance(model, DDP):
            logger.warning(
                "No PyTorch DDP or gradient handler is set up, please make sure you do not need "
                "to all-reduce the gradients after a training step.",
                ranks=[0])
    else:
        gradient_handlers = [build_gradient_handler(cfg, model, optimizer) for cfg in gradient_handler_cfg]

    # check if optimizer is ColossalaiOptimizer
    if not isinstance(optimizer, (ColossalaiOptimizer, ShardedOptimizerV2)):
        optimizer = ColossalaiOptimizer(optim=optimizer)

    # gradient accumulation
    grad_accum_size = gpc.config.get('gradient_accumulation', None)
    if grad_accum_size is not None:
        optimizer, train_dataloader, gradient_handlers, lr_scheduler = accumulate_gradient(
            model=model,
            optimizer=optimizer,
            dataloader=train_dataloader,
            accumulate_size=grad_accum_size,
            gradient_handlers=gradient_handlers,
            lr_scheduler=lr_scheduler)

    engine = Engine(model=model,
                    optimizer=optimizer,
                    criterion=criterion,
                    gradient_handlers=gradient_handlers,
                    clip_grad_norm=clip_grad_norm,
                    ophook_list=ophooks,
                    schedule=schedule)

    return engine, train_dataloader, test_dataloader, lr_scheduler
#!/usr/bin/env python # -*- encoding: utf-8 -*- ALLOWED_MODES = [None, '1d', '2d', '2.5d', '3d', 'sequence'] TENSOR_PARALLEL_MODE = 'tensor_parallel_mode' # intializer INITIALIZER_MAPPING = { 'data': 'Initializer_Data', 'tensor': 'Initializer_Tensor', 'pipeline': 'Initializer_Pipeline', 'embedding': 'Initializer_Embedding', '1d': 'Initializer_1D', '2d': 'Initializer_2D', '2.5d': 'Initializer_2p5D', '3d': 'Initializer_3D', 'sequence': 'Initializer_Sequence', 'model': 'Initializer_Model', 'moe': 'Initializer_Moe' } # 3D parallelism groups INPUT_GROUP_3D = 'input_group_3d' WEIGHT_GROUP_3D = 'weight_group_3d' OUTPUT_GROUP_3D = 'output_group_3d' # Attributes of tensor parallel parameters IS_TENSOR_PARALLEL = 'is_tensor_parallel' NUM_PARTITIONS = 'num_partitions' TENSOR_PARALLEL_ATTRIBUTES = [IS_TENSOR_PARALLEL, NUM_PARTITIONS]
from .initialize import (initialize, launch, launch_from_openmpi, launch_from_slurm, launch_from_torch, get_default_parser) __version__ = '0.0.1'
#!/usr/bin/env python # -*- encoding: utf-8 -*- from colossalai.context.parallel_context import global_context
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import inspect
import sys
from importlib.machinery import SourceFileLoader
from pathlib import Path


class Config(dict):
    """This is a wrapper class for dict objects so that values of which can be
    accessed as attributes.

    Nested dicts are wrapped recursively, so ``cfg.model.num_layers`` works
    for a config built from ``{'model': {'num_layers': 2}}``.

    Args:
        config (dict): The dict object to be wrapped.
    """

    def __init__(self, config: dict = None):
        if config is not None:
            for k, v in config.items():
                self._add_item(k, v)

    def __missing__(self, key):
        # keep plain-dict semantics: cfg[missing] raises KeyError
        raise KeyError(key)

    def __getattr__(self, key):
        # attribute access falls back to item lookup; a missing key surfaces
        # as AttributeError so that hasattr()/getattr(default) behave normally
        try:
            value = super(Config, self).__getitem__(key)
            return value
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        super(Config, self).__setitem__(key, value)

    def _add_item(self, key, value):
        # wrap nested dicts so attribute access works at every depth
        if isinstance(value, dict):
            self.__setattr__(key, Config(value))
        else:
            self.__setattr__(key, value)

    def update(self, config):
        """Merges the items of another dict/Config into this one.

        Args:
            config (dict or Config): source of the new items.

        Returns:
            Config: self, to allow chaining.
        """
        assert isinstance(config, (Config, dict)), 'can only update dictionary or Config objects.'
        for k, v in config.items():
            self._add_item(k, v)
        return self

    @staticmethod
    def from_file(filename: str):
        """Reads a python file and constructs a corresponding :class:`Config` object.

        Args:
            filename (str or Path): Name of the file to construct the return object.

        Returns:
            :class:`Config`: A :class:`Config` object constructed with information in the file.

        Raises:
            TypeError: Raises a TypeError if `filename` is neither a str nor a Path.
            AssertionError: Raises an AssertionError if the file does not exist, or the file
                is not a .py file.
        """
        # imported lazily (same convention as moe_context) to avoid a circular
        # dependency between colossalai.logging and the context package
        from colossalai.logging import get_dist_logger

        # check config path
        if isinstance(filename, str):
            filepath = Path(filename).absolute()
        elif isinstance(filename, Path):
            filepath = filename.absolute()
        else:
            # previously an unsupported type fell through and raised a
            # confusing NameError on `filepath`; fail loudly instead
            raise TypeError(f'expected str or Path for the config filename, got {type(filename)}')

        assert filepath.exists(), f'{filepath} is not found, please check your configuration path'

        # check extension
        extension = filepath.suffix
        assert extension == '.py', 'only .py files are supported'

        # import the config as module
        # NOTE: sys.path entries must be strings for the import machinery, and
        # the directory containing the file (not the file itself) is what must
        # be importable — the original code inserted the Path of the file
        remove_path = False
        parent_dir = str(filepath.parent)
        if parent_dir not in sys.path:
            sys.path.insert(0, parent_dir)
            remove_path = True

        module_name = filepath.stem
        source_file = SourceFileLoader(fullname=str(module_name), path=str(filepath))
        # load_module() is deprecated but kept here to preserve the existing
        # sys.modules registration behavior (the module is deleted below)
        module = source_file.load_module()

        # load into config
        config = Config()

        for k, v in module.__dict__.items():
            # skip dunders, imported modules and class declarations; everything
            # else in the config file is treated as a configuration value
            if k.startswith('__') or inspect.ismodule(v) or inspect.isclass(v):
                continue
            else:
                config._add_item(k, v)

        logger = get_dist_logger()
        logger.debug('variables which starts with __, is a module or class declaration are omitted in config file')

        # remove module so repeated loads of same-named config files stay independent
        del sys.modules[module_name]
        if remove_path:
            sys.path.pop(0)

        return config


class ConfigException(Exception):
    pass
from .config import Config, ConfigException from .parallel_context import ParallelContext from .parallel_mode import ParallelMode from .moe_context import MOE_CONTEXT from .process_group_initializer import * from .random import *
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import random
import socket
from collections import Counter
from typing import Union

import numpy as np
import torch
import torch.distributed as dist

from colossalai.constants import ALLOWED_MODES, INITIALIZER_MAPPING
from colossalai.context.config import Config
from colossalai.global_variables import tensor_parallel_env as env
from colossalai.logging import get_dist_logger
from colossalai.registry import DIST_GROUP_INITIALIZER

from .parallel_mode import ParallelMode
from .random import add_seed, get_seeds, set_mode
from colossalai.context.singleton_meta import SingletonMeta


class ParallelContext(metaclass=SingletonMeta):
    """This class provides interface functions for users to get the parallel context,
    such as the global rank, the local rank, the world size, etc. of each device.

    All per-mode state (ranks, world sizes, process groups) is stored in dicts
    keyed by :class:`ParallelMode`; the class is a process-wide singleton via
    :class:`SingletonMeta`.

    Note:
        The parallel_mode used in this class should be concluded in ``ParallelMode``.
        More details about ``ParallelMode`` could be found in
        `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """

    def __init__(self):
        # distributed settings, all keyed by ParallelMode
        self._global_ranks = dict()
        self._local_ranks = dict()
        self._world_sizes = dict()
        self._groups = dict()
        self._cpu_groups = dict()
        self._ranks_in_group = dict()

        # load config from file
        self._config = None

        # default 3D parallel args, will be overwritten during process group initialization
        self.world_size = 1
        self.data_parallel_size = 1
        self.pipeline_parallel_size = 1
        self.tensor_parallel_size = 1
        # -1 means "not yet detected"; see detect_num_processes_on_current_node
        self.num_processes_on_current_node = -1
        self.virtual_pipeline_parallel_size = None
        self.virtual_pipeline_parallel_rank = None

        # logging
        self._verbose = False
        self._logger = get_dist_logger()

    @property
    def config(self):
        # the Config object loaded via load_config(); None until loaded
        return self._config

    @property
    def verbose(self):
        return self._verbose

    @verbose.setter
    def verbose(self, verbose_: bool):
        self._verbose = verbose_

    def load_config(self, config: Union[dict, str]):
        """Loads the configuration from either a dict or a file.

        Args:
            config (dict or str): Either a dict containing the configuration information or the filename
                of a file containing the configuration information.

        Raises:
            TypeError: Raises a TypeError if `config` is neither a dict nor a str.
        """
        if isinstance(config, str):
            self._config = Config.from_file(config)
        elif isinstance(config, dict):
            self._config = Config(config)
        else:
            raise TypeError("Invalid type for config, only dictionary or string is supported")

    def detect_num_processes_on_current_node(self):
        """Counts how many ranks share this node's hostname and caches the result
        in ``num_processes_on_current_node``. Collective: must be called by all ranks."""
        hostname = socket.gethostname()
        hostname_list = [None for _ in range(self.get_world_size(ParallelMode.GLOBAL))]
        dist.all_gather_object(hostname_list, hostname, group=self.get_group(ParallelMode.GLOBAL))
        counter = Counter(hostname_list)
        self.num_processes_on_current_node = counter[hostname]

    @staticmethod
    def _check_parallel_mode(parallel_mode: ParallelMode):
        # guard shared by every accessor below
        assert isinstance(parallel_mode, ParallelMode)

    def get_global_rank(self):
        """Returns the global rank of the current device.

        Returns:
            int: The global rank of the current device
        """
        return self._global_ranks[ParallelMode.GLOBAL]

    def add_global_rank(self, parallel_mode: ParallelMode, rank: int):
        """Adds the global rank of the current device for `parallel_mode` to the context.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The parallel mode for the rank.
            rank (int): The rank to be added

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.
        """
        self._check_parallel_mode(parallel_mode)
        self._global_ranks[parallel_mode] = rank

    def get_local_rank(self, parallel_mode: ParallelMode):
        """Returns the local rank of the current device.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.

        Returns:
            int: The local rank of the current device for `parallel_mode`.
        """
        self._check_parallel_mode(parallel_mode)
        return self._local_ranks[parallel_mode]

    def add_local_rank(self, parallel_mode: ParallelMode, rank: int):
        """Adds the local rank of the current device for `parallel_mode` to the context.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The parallel mode for the rank.
            rank (int): The rank to be added.

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.
        """
        self._check_parallel_mode(parallel_mode)
        self._local_ranks[parallel_mode] = rank

    def get_next_global_rank(self, parallel_mode: ParallelMode):
        """Returns the global rank of the next device.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.

        Returns:
            int: The global rank of the next device for `parallel_mode`.
        """
        self._check_parallel_mode(parallel_mode)

        # get rank and world size; wrap around at the end of the group
        local_rank = self.get_local_rank(parallel_mode)
        world_size = self.get_world_size(parallel_mode)
        ranks_in_group = self.get_ranks_in_group(parallel_mode)

        return ranks_in_group[(local_rank + 1) % world_size]

    def get_prev_global_rank(self, parallel_mode: ParallelMode):
        """Returns the global rank of the previous device.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.

        Returns:
            int: The global rank of the previous device for `parallel_mode`.
        """
        self._check_parallel_mode(parallel_mode)

        # get rank and world size; wrap around at the start of the group
        local_rank = self.get_local_rank(parallel_mode)
        world_size = self.get_world_size(parallel_mode)
        ranks_in_group = self.get_ranks_in_group(parallel_mode)

        return ranks_in_group[(local_rank - 1) % world_size]

    def is_first_rank(self, parallel_mode: ParallelMode):
        """Returns a boolean value indicating whether the current device is the first one
        among its group for `parallel_mode`.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.

        Returns:
            bool: a boolean value indicating whether the current device is the first one
            among its group for `parallel_mode`.
        """
        rank = self.get_local_rank(parallel_mode)
        return rank == 0

    def is_last_rank(self, parallel_mode: ParallelMode):
        """Returns a boolean value indicating whether the current device is the last one
        among its group for `parallel_mode`.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.

        Returns:
            bool: a boolean value indicating whether the current device is the last one
            among its group for `parallel_mode`.
        """
        rank = self.get_local_rank(parallel_mode)
        world_size = self.get_world_size(parallel_mode)
        return rank == world_size - 1

    def is_pipeline_first_stage(self, ignore_virtual=False):
        """Whether this rank runs the first pipeline stage; with interleaved
        (virtual) pipelining, also requires being on the first virtual chunk
        unless `ignore_virtual` is True."""
        if not ignore_virtual:
            if self.virtual_pipeline_parallel_size is not None and self.virtual_pipeline_parallel_rank != 0:
                return False
        return self.is_first_rank(ParallelMode.PIPELINE)

    def is_pipeline_last_stage(self, ignore_virtual=False):
        """Whether this rank runs the last pipeline stage; with interleaved
        (virtual) pipelining, also requires being on the last virtual chunk
        unless `ignore_virtual` is True."""
        if not ignore_virtual:
            if self.virtual_pipeline_parallel_size \
                    is not None and self.virtual_pipeline_parallel_rank != self.virtual_pipeline_parallel_size - 1:
                return False
        return self.is_last_rank(ParallelMode.PIPELINE)

    def get_world_size(self, parallel_mode: ParallelMode):
        """Returns the world size for `parallel_mode`.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.

        Returns:
            int: The world size for `parallel_mode`.
        """
        self._check_parallel_mode(parallel_mode)
        return self._world_sizes[parallel_mode]

    def add_world_size(self, parallel_mode: ParallelMode, world_size: int):
        """Adds world size for `parallel_mode`.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.
            world_size (int): The world size to be added

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.
        """
        self._check_parallel_mode(parallel_mode)
        self._world_sizes[parallel_mode] = world_size

    def get_group(self, parallel_mode: ParallelMode):
        """Returns the group of the current device for `parallel_mode`.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.

        Returns:
            torch.distributed.ProcessGroup: The group of the current device for `parallel_mode`.
        """
        self._check_parallel_mode(parallel_mode)
        return self._groups[parallel_mode]

    def add_group(self, parallel_mode: ParallelMode, group: dist.ProcessGroup):
        """Adds the group of the current device for `parallel_mode`.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.
            group (torch.distributed.ProcessGroup): The group to be added

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.
        """
        self._check_parallel_mode(parallel_mode)
        self._groups[parallel_mode] = group

    def get_cpu_group(self, parallel_mode: ParallelMode):
        """Returns the Gloo (CPU) group of the current device for `parallel_mode`.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.

        Returns:
            torch.distributed.ProcessGroup: The Gloo group of the current device for `parallel_mode`.
        """
        self._check_parallel_mode(parallel_mode)
        return self._cpu_groups[parallel_mode]

    def add_cpu_group(self, parallel_mode: ParallelMode, group: dist.ProcessGroup):
        """Adds the Gloo (CPU) group of the current device for `parallel_mode`.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.
            group (torch.distributed.ProcessGroup): The group to be added

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.
        """
        self._check_parallel_mode(parallel_mode)
        self._cpu_groups[parallel_mode] = group

    def get_ranks_in_group(self, parallel_mode: ParallelMode):
        """Returns the rank of the current device for `parallel_mode` in the group.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.

        Returns:
            list: The list of global ranks belonging to the current device's group for `parallel_mode`.
        """
        self._check_parallel_mode(parallel_mode)
        return self._ranks_in_group[parallel_mode]

    def add_ranks_in_group(self, parallel_mode: ParallelMode, ranks: list):
        """Adds the ranks of the current device for `parallel_mode` in the group.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.
            ranks (list): List of ranks to be added

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance
                of :class:`colossalai.context.ParallelMode`.
        """
        self._check_parallel_mode(parallel_mode)
        self._ranks_in_group[parallel_mode] = ranks

    def init_global_dist(self, rank: int, world_size: int, backend: str, host: str, port: int):
        """Initializes the global distributed environment.

        Args:
           rank (int): rank for the default process group.
           world_size (int): world size of the default process group.
           backend (str): backend for ``torch.distributed``
           host (str): the master address for distributed training.
           port (int): the master port for distributed training
        """
        # initialize the default process group
        init_method = f'tcp://{host}:{port}'
        dist.init_process_group(rank=rank, world_size=world_size, backend=backend, init_method=init_method)

        # None will give the default global process group for pytorch dist operations
        ranks = list(range(world_size))
        # a separate Gloo group is kept for CPU-side collectives unless Gloo is
        # already the default backend
        cpu_group = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else None
        self._register_dist(rank, world_size, dist.GroupMember.WORLD, cpu_group, ranks, ParallelMode.GLOBAL)
        self.add_global_rank(ParallelMode.GLOBAL, rank)

    def _register_dist(self, local_rank, world_size, process_group, cpu_group, ranks_in_group, mode):
        # record every piece of per-mode state in one place
        self.add_local_rank(mode, local_rank)
        self.add_world_size(mode, world_size)
        self.add_group(mode, process_group)
        self.add_cpu_group(mode, cpu_group)
        self.add_ranks_in_group(mode, ranks_in_group)

    def check_sanity(self):
        """Checks sanity of the parallel context.

        Raises:
            AssertionError: Raises an AssertionError if the world size does not equal to the product
                of data parallel size, pipeline parallel size and tensor parallel size.
        """
        dps = self.data_parallel_size
        pps = self.pipeline_parallel_size
        tps = self.tensor_parallel_size
        ws = self.world_size
        assert ws == dps * pps * \
            tps, f"Expected the world size {ws} to be equal to data" \
                 f" parallel size ({dps}) * pipeline parallel size " \
                 f"({pps}) * tensor parallel size ({tps})"

    def _set_parallel_size_from_config(self, config: dict, key: str, attr_name: str):
        # accepts either `key: 4` or `key: dict(size=4, ...)` in the user config
        if key in config:
            ele = config[key]
            if isinstance(ele, int):
                setattr(self, attr_name, ele)
            elif isinstance(ele, dict):
                setattr(self, attr_name, ele['size'])
            else:
                raise NotImplementedError(
                    f'{"Parallel configuration does not support this kind of argument, please use int or dict"}')

    def init_parallel_groups(self):
        """Initializes the parallel groups.

        Raises:
            AssertionError: Raises an AssertionError if the field parallel is not present in the config file.
        """

        # get rank and world size
        rank = self.get_global_rank()
        world_size = self.get_world_size(ParallelMode.GLOBAL)
        self.world_size = world_size

        # set parallel size as attributes for global context
        parallel_config = self.config.get('parallel', None)
        if parallel_config is not None:
            self._set_parallel_size_from_config(parallel_config, 'pipeline', 'pipeline_parallel_size')
            self._set_parallel_size_from_config(parallel_config, 'tensor', 'tensor_parallel_size')

        # the user should not set the data parallel size manually
        # instead, it should be calculated based on other parallel config
        self.data_parallel_size = self.world_size // (self.pipeline_parallel_size * self.tensor_parallel_size)

        # get the tensor parallel mode and check
        tensor_parallel_mode = None
        if parallel_config is not None and 'tensor' in \
                parallel_config and 'mode' in parallel_config['tensor']:
            tensor_parallel_mode = parallel_config['tensor']['mode']
        assert tensor_parallel_mode in ALLOWED_MODES, \
            f"mode in the parallel config must be set to one of {ALLOWED_MODES}"
        env.mode = tensor_parallel_mode
        self.check_sanity()

        pg_init = []
        # LSG: init data parallel process group for compatibility with other parallel module such as zero
        pg_init.append(dict(type=INITIALIZER_MAPPING['data']))

        # LSG: init model parallel process group for compatibility with amp and clip grad
        pg_init.append(dict(type=INITIALIZER_MAPPING['model']))

        if self.pipeline_parallel_size > 1:
            pg_init.append(dict(type=INITIALIZER_MAPPING['pipeline']))
        pg_init.append(dict(type=INITIALIZER_MAPPING['tensor']))

        # init specific tensor parallel group
        if tensor_parallel_mode is not None:
            tensor_parallel_cfg = parallel_config['tensor'].copy()

            # remove duplicate parameters
            tensor_parallel_cfg.pop('mode')
            tensor_parallel_cfg.pop('size')

            # add this config to initialize later
            pg_init.append(dict(type=INITIALIZER_MAPPING[tensor_parallel_mode.lower()], **tensor_parallel_cfg))

        # run initialization of different process groups
        # NOTE: group creation is a collective, so every rank must run every
        # initializer in the same order
        for initializer_cfg in pg_init:
            cfg = initializer_cfg.copy()
            initializer_type = cfg.pop('type')
            initializer = DIST_GROUP_INITIALIZER.get_module(initializer_type)(rank, world_size, self.config,
                                                                              self.data_parallel_size,
                                                                              self.pipeline_parallel_size,
                                                                              self.tensor_parallel_size, **cfg)
            parallel_setting = initializer.init_dist_group()
            if isinstance(parallel_setting, list):
                for args in parallel_setting:
                    self._register_dist(*args)
            else:
                self._register_dist(*parallel_setting)

    def is_initialized(self, parallel_mode: ParallelMode):
        """Returns a boolean value indicating whether `parallel_mode` is initialized
        in the current system.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.

        Returns:
            bool: a boolean value indicating whether `parallel_mode` is initialized in the current system.
        """
        return parallel_mode in self._groups

    def destroy(self):
        """Destroys the current distributed parallel environment.
        """
        for mode, group in self._groups.items():
            # the global (default) group must be destroyed last, via the no-arg call
            if mode is not ParallelMode.GLOBAL:
                dist.destroy_process_group(group)
        # destroy global process group
        dist.destroy_process_group()
        self._groups.clear()

    def set_device(self, device_ordinal: int = None):
        """Sets distributed processes to be bound to devices.

        Args:
           device_ordinal (int, optional): the device id to be bound to; when omitted,
               the device is derived from the global rank modulo the per-node device count.
        """
        global_rank = self.get_global_rank()
        if device_ordinal is None:
            devices_per_node = torch.cuda.device_count()
            device_ordinal = global_rank % devices_per_node

        torch.cuda.set_device(device_ordinal)
        if self._verbose:
            self._logger.info(f'process rank {global_rank} is bound to device {device_ordinal}')

    def set_seed(self, seed: int):
        """Sets seeds for all random libraries.

        Args:
            seed (int): seed for random states
        """
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        global_rank = self.get_global_rank()

        if torch.cuda.is_available():
            # create random seed for different parallel modes
            # data parallel seed are kept the same
            parallel_seed = seed
            add_seed(ParallelMode.DATA, parallel_seed)

            # model parallel seeds are different across ranks
            pipeline_offset = self._local_ranks.get(ParallelMode.PIPELINE, 0)

            # add seed for data parallel and tensor parallel only
            if self.is_initialized(ParallelMode.TENSOR):
                tp_rank = self.get_local_rank(ParallelMode.TENSOR)
                # the 1024 multiplier is only to increase the diff in seeds between pipeline stages
                tp_rank_with_offset = tp_rank + pipeline_offset * 1024
                tp_seed = seed + tp_rank_with_offset
                add_seed(ParallelMode.TENSOR, tp_seed)

            set_mode(ParallelMode.DATA)
            seeds = get_seeds()
            seed_str = ', '.join([f'{k}: {v}' for k, v in seeds.items()])

            if self._verbose:
                self._logger.info(f"initialized seed on rank {global_rank}, "
                                  f"numpy: {seed}, python random: {seed}, {seed_str},"
                                  f"the default parallel seed is {ParallelMode.DATA}.")
        else:
            if self._verbose:
                self._logger.info(
                    f"initialized seed on rank {global_rank}, "
                    f"numpy: {seed}, python random: {seed}, pytorch: {seed}", ranks=[0])
                self._logger.info(
                    'WARNING: CUDA is not available, thus CUDA RNG cannot be used to track CUDA random number states',
                    ranks=[0])

    def set_virtual_pipeline_parallel_size(self, size):
        self.virtual_pipeline_parallel_size = size

    def set_virtual_pipeline_parallel_rank(self, rank):
        self.virtual_pipeline_parallel_rank = rank


# module-level singleton; SingletonMeta guarantees re-instantiation returns this object
global_context = ParallelContext()
import torch
import torch.distributed as dist
from colossalai.context.parallel_mode import ParallelMode
from colossalai.context.singleton_meta import SingletonMeta

from typing import Tuple


def _check_sanity():
    """Raise if MoE is combined with tensor or pipeline parallelism, which is unsupported."""
    from colossalai.core import global_context as gpc
    if gpc.tensor_parallel_size > 1 or gpc.pipeline_parallel_size > 1:
        raise NotImplementedError("Moe is not compatible with tensor or "
                                  "pipeline parallel at present.")


class MoeParallelInfo:
    """Moe parallelism information, storing parallel sizes and groups.

    Args:
        ep_size (int): expert parallel size (number of ranks an expert set is sharded over).
        dp_size (int): data parallel size for the experts (ranks holding replicas).
    """

    def __init__(self, ep_size: int, dp_size: int):
        _check_sanity()
        self.ep_size = ep_size
        self.dp_size = dp_size
        self.ep_group = None
        # data parallel group for experts, since ep_group is different
        # we may have different dp_group from get_group(ParallelMode.DATA)
        self.dp_group = None

        # Here we assume tensor parallel size = 1
        # Otherwise, MoE can't be used
        # Since TENSOR parallel group and DATA parallel group
        # have been created, we can use them directly.
        if ep_size == 1:
            from colossalai.core import global_context as gpc
            self.ep_group = gpc.get_group(ParallelMode.TENSOR)
            self.dp_group = gpc.get_group(ParallelMode.DATA)
            return

        if dp_size == 1:
            from colossalai.core import global_context as gpc
            self.ep_group = gpc.get_group(ParallelMode.DATA)
            self.dp_group = gpc.get_group(ParallelMode.TENSOR)
            return

        rank = dist.get_rank()
        # Create expert parallel group.
        # NOTE: dist.new_group is a collective, so every rank must create every
        # group even though it only keeps the one it belongs to.
        for i in range(dp_size):
            ranks = [i * ep_size + j for j in range(ep_size)]
            group = dist.new_group(ranks)
            if rank in ranks:
                self.ep_group = group

        # Create data parallel group (the "transposed" grouping of the above)
        for j in range(ep_size):
            ranks = [i * ep_size + j for i in range(dp_size)]
            group = dist.new_group(ranks)
            if rank in ranks:
                self.dp_group = group


class MoeContext(metaclass=SingletonMeta):
    """MoE parallel context manager. This class manages different
    parallel groups in MoE context and MoE loss in training.
    """

    def __init__(self):
        self.world_size = 1
        # Users may want to set maximum expert parallel size smaller than the world size
        # since very low bandwidth across nodes may constrain the performance of MoE
        # When we have a maximum expert parallel size, we have a minimum data parallel size naturally
        self.max_ep_size = 1
        self.min_dp_size = 1
        # running sum of auxiliary (load-balancing) loss for the current step
        self.aux_loss = None
        self.use_kernel_optim = True

        self.has_setup = False
        # cache of MoeParallelInfo keyed by ep_size, so groups are created once
        self._parallel_info_dict = dict()

    @property
    def parallel_info_dict(self):
        return self._parallel_info_dict

    @property
    def is_initialized(self):
        return self.has_setup

    def setup(self, seed: int, use_kernel_optim: bool = True):
        """Initializes the MoE context: world size, max expert parallel size and RNG seed.

        Args:
            seed (int): base seed for the MoE RNG states.
            use_kernel_optim (bool): whether to enable kernel optimization (may be
                unstable in some cases; can be disabled via set_kernel_not_use).
        """
        assert not self.is_initialized, "MoE distributed context shouldn't be set up again"
        _check_sanity()
        assert torch.cuda.is_available(), "MoE requires to enable CUDA first"

        self.world_size = dist.get_world_size()

        from colossalai.core import global_context as gpc
        self.max_ep_size = gpc.config.get('max_ep_size', self.world_size)
        assert self.world_size % self.max_ep_size == 0, \
            "Maximum expert parallel size must be a factor of the number of GPUs"
        self.min_dp_size = self.world_size // self.max_ep_size

        # Enabling kernel optimization may raise error in some cases
        # Users can close kernel optimization manually
        self.use_kernel_optim = use_kernel_optim

        from .random import moe_set_seed
        moe_set_seed(seed)
        self.has_setup = True

    def get_info(self, num_experts: int) -> Tuple[int, MoeParallelInfo]:
        """Calculate the Data Parallel Group and Expert Parallel Group.

        Parameters
        ----------
        num_experts : int
            The number experts

        Returns
        -------
        int, MoeParallelInfo
            number of local experts, the MoeParallelInfo of the current ep_size
        """

        gt_flag = num_experts % self.max_ep_size == 0    # check whether num_experts is greater
        lt_flag = self.max_ep_size % num_experts == 0    # check whether num_experts is less
        assert gt_flag or lt_flag, "Automatic experts placement does not support expert number"\
            " is not a multiple of ep size or vice versa."

        # If the number of experts is greater than maximum expert parallel size, a.k.a ep_size,
        # there are multiple experts in each GPU and each GPU has different experts
        # So its data parallel size is 1
        # Otherwise, there is only one expert in each GPU
        # The data parallel size should be calculated
        dp_size = 1 if gt_flag else self.max_ep_size // num_experts
        ep_size = self.max_ep_size // dp_size

        # Calculate the number of experts for each GPU
        num_local_experts = 1 if lt_flag else num_experts // self.max_ep_size

        # Don't forget to multiply minimum data parallel size
        dp_size *= self.min_dp_size
        if ep_size not in self.parallel_info_dict:
            self.parallel_info_dict[ep_size] = MoeParallelInfo(ep_size, dp_size)

        return num_local_experts, self.parallel_info_dict[ep_size]

    def set_kernel_not_use(self):
        """Disables kernel optimization (see note in setup)."""
        self.use_kernel_optim = False

    def reset_loss(self):
        """Resets the accumulated auxiliary loss to zero at the start of a step."""
        self.aux_loss = 0

    def add_loss(self, loss):
        """Accumulates an auxiliary loss term; call reset_loss() first each step."""
        self.aux_loss += loss

    def get_loss(self):
        """Returns the auxiliary loss accumulated since the last reset_loss()."""
        return self.aux_loss


MOE_CONTEXT = MoeContext()
class SingletonMeta(type): """ The Singleton class can be implemented in different ways in Python. Some possible methods include: base class, decorator, metaclass. We will use the metaclass because it is best suited for this purpose. """ _instances = {} def __call__(cls, *args, **kwargs): """ Possible changes to the value of the `__init__` argument do not affect the returned instance. """ if cls not in cls._instances: instance = super().__call__(*args, **kwargs) cls._instances[cls] = instance return cls._instances[cls]
#!/usr/bin/env python # -*- encoding: utf-8 -*- from enum import Enum # parallel modes class ParallelMode(Enum): """This is an enumeration class containing all possible parallel modes. """ GLOBAL = 'global' # common parallel DATA = 'data' # model parallel - containing tensor and pipeline parallel groups # this is added to facilitate amp and grad clipping in hybrid parallel MODEL = 'model' # pipeline parallel PIPELINE = 'pipe' # containing all ranks in tensor parallel TENSOR = 'tensor' # sequence parallel SEQUENCE = 'sequence' SEQUENCE_DP = 'sequence_dp' # 1D Parallel PARALLEL_1D = '1d' # 2D parallel PARALLEL_2D_ROW = '2d_row' PARALLEL_2D_COL = '2d_col' # 3D parallel PARALLEL_3D_INPUT = '3d_input' PARALLEL_3D_WEIGHT = '3d_weight' PARALLEL_3D_OUTPUT = '3d_output' # 2.5D parallel PARALLEL_2P5D_ROW = '2p5d_row' PARALLEL_2P5D_COL = '2p5d_col' PARALLEL_2P5D_DEP = '2p5d_dep' PARALLEL_2P5D_XZ = '2p5d_xz'
#!/usr/bin/env python # -*- encoding: utf-8 -*- from torch import distributed as dist from colossalai.registry import DIST_GROUP_INITIALIZER from .process_group_initializer import ProcessGroupInitializer from ..parallel_mode import ParallelMode @DIST_GROUP_INITIALIZER.register_module class Initializer_Pipeline(ProcessGroupInitializer): """A ProcessGroupInitializer for pipeline parallelism. Args: rank (int): The rank of current process world_size (int): Size of whole communication world config (Config): Running configuration data_parallel_size (int): Size of data parallel pipeline_parallel_size (int): Size of pipeline parallel tensor_parallel_size (int): Size of tensor parallel """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.data_group_size = self.world_size // self.data_parallel_size self.pipeline_stage_size = self.data_group_size // self.pipeline_parallel_size def init_dist_group(self): """Initialize pipeline parallel groups, and assign local_ranks and groups to each gpu. Returns: List[Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode)]: A Pipeline parallelism's information in list of tuples. """ dist_settings = list() for i in range(self.data_parallel_size): for j in range(self.pipeline_stage_size): pipe_ranks = list( range(i * self.data_group_size + j, (i + 1) * self.data_group_size, self.pipeline_stage_size)) pipe_group_size = len(pipe_ranks) pipe_group = dist.new_group(pipe_ranks) group_cpu = dist.new_group(pipe_ranks, backend='gloo') if dist.get_backend() != 'gloo' else pipe_group if self.rank in pipe_ranks: local_rank = pipe_ranks.index(self.rank) group_world_size = pipe_group_size process_group = pipe_group cpu_group = group_cpu ranks_in_group = pipe_ranks dist_settings.append( tuple((local_rank, group_world_size, process_group, cpu_group, ranks_in_group, ParallelMode.PIPELINE))) return dist_settings
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import math

import torch.distributed as dist

from colossalai.global_variables import tensor_parallel_env as env
from colossalai.registry import DIST_GROUP_INITIALIZER

from ..parallel_mode import ParallelMode
from .process_group_initializer import ProcessGroupInitializer


def _check_depth_env_var(depth):
    # check global variable
    env_depth = env.depth_3d

    if env_depth:
        assert int(env_depth) == depth, \
            'DEPTH_3D has been set in the current environment and ' \
            'does not match with the value passed to this initializer'
    else:
        env.depth_3d = depth


class Initializer_3D_Input(ProcessGroupInitializer):
    """3D tensor parallel initialization among input.

    Args:
        num_group (int): The number of all tensor groups.
        depth (int): Depth of 3D parallelism.
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, num_group: int, depth: int, *args):
        super().__init__(*args)
        self.num_group = num_group
        self.depth = depth

    def init_dist_group(self):
        """Initialize 3D tensor parallel groups among input, and assign local_ranks and groups to each gpu.

        Returns:
            Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode):
                3D tensor parallelism's information among input in a tuple.
        """
        local_rank = None
        ranks_in_group = None
        process_group = None
        cpu_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_3D_INPUT
        env.input_group_3d = mode

        for h in range(self.num_group):
            for i in range(self.depth):
                for k in range(self.depth):
                    # Ranks varying along the j (input) axis of the depth**3 cube.
                    ranks = [h * self.depth**3 + i + self.depth * (j + self.depth * k) for j in range(self.depth)]
                    group = dist.new_group(ranks)
                    # new_group is collective, so every process creates every group.
                    group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group

                    if self.rank in ranks:
                        local_rank = ranks.index(self.rank)
                        group_world_size = len(ranks)
                        process_group = group
                        cpu_group = group_cpu
                        ranks_in_group = ranks

        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode


class Initializer_3D_Weight(ProcessGroupInitializer):
    """3D tensor parallel initialization among weight.

    Args:
        num_group (int): The number of all tensor groups.
        depth (int): Depth of 3D parallelism.
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, num_group: int, depth: int, *args):
        super().__init__(*args)
        self.num_group = num_group
        self.depth = depth

    def init_dist_group(self):
        """Initialize 3D tensor parallel groups among weight, and assign local_ranks and groups to each gpu.

        Returns:
            Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode):
                3D tensor parallelism's information among weight in a tuple.
        """
        local_rank = None
        ranks_in_group = None
        process_group = None
        cpu_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_3D_WEIGHT
        env.weight_group_3d = mode

        for h in range(self.num_group):
            for k in range(self.depth):
                for j in range(self.depth):
                    # Ranks varying along the i (weight) axis of the depth**3 cube.
                    ranks = [h * self.depth**3 + i + self.depth * (j + self.depth * k) for i in range(self.depth)]
                    group = dist.new_group(ranks)
                    group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group

                    if self.rank in ranks:
                        local_rank = ranks.index(self.rank)
                        group_world_size = len(ranks)
                        process_group = group
                        cpu_group = group_cpu
                        ranks_in_group = ranks

        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode


class Initializer_3D_Output(ProcessGroupInitializer):
    """3D tensor parallel initialization among output.

    Args:
        num_group (int): The number of all tensor groups.
        depth (int): Depth of 3D parallelism.
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, num_group: int, depth: int, *args):
        super().__init__(*args)
        self.num_group = num_group
        self.depth = depth

    def init_dist_group(self):
        """Initialize 3D tensor parallel groups among output, and assign local_ranks and groups to each gpu.

        Returns:
            Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode):
                3D tensor parallelism's information among output in a tuple.
        """
        local_rank = None
        ranks_in_group = None
        process_group = None
        cpu_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_3D_OUTPUT
        env.output_group_3d = mode

        for h in range(self.num_group):
            for i in range(self.depth):
                for j in range(self.depth):
                    # Ranks varying along the k (output) axis of the depth**3 cube.
                    ranks = [h * self.depth**3 + i + self.depth * (j + self.depth * k) for k in range(self.depth)]
                    group = dist.new_group(ranks)
                    group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group

                    if self.rank in ranks:
                        local_rank = ranks.index(self.rank)
                        group_world_size = len(ranks)
                        process_group = group
                        cpu_group = group_cpu
                        ranks_in_group = ranks

        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode


@DIST_GROUP_INITIALIZER.register_module
class Initializer_3D(ProcessGroupInitializer):
    """Serve as the single entry point to 3D parallel initialization.

    Args:
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, *args):
        super().__init__(*args)
        self.num_group = self.world_size // self.tensor_parallel_size
        # round() guards against floating-point error in the cube root; the
        # assert below verifies the result exactly.
        self.depth = round(math.pow(self.tensor_parallel_size, 1 / 3))
        assert self.tensor_parallel_size == self.depth ** 3, \
            f'3D depth ({self.depth}) is not the cube root of tensor parallel size ({self.tensor_parallel_size})'
        _check_depth_env_var(self.depth)

        self.input_initializer = Initializer_3D_Input(self.num_group, self.depth, *args)
        self.weight_initializer = Initializer_3D_Weight(self.num_group, self.depth, *args)
        self.output_initializer = Initializer_3D_Output(self.num_group, self.depth, *args)

    def init_dist_group(self):
        """Initialize 3D tensor parallel groups, and assign local_ranks and groups to each gpu.

        Returns:
            List[Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode)]:
                Whole 3D tensor parallelism's information in a list of tuples.
        """
        parallel_setting = [
            self.input_initializer.init_dist_group(),
            self.weight_initializer.init_dist_group(),
            self.output_initializer.init_dist_group()
        ]
        return parallel_setting
import math

import torch.distributed as dist

from colossalai.registry import DIST_GROUP_INITIALIZER
from .process_group_initializer import ProcessGroupInitializer
from ..parallel_mode import ParallelMode
from colossalai.global_variables import tensor_parallel_env as env


def _check_summa_env_var(summa_dim):
    # check environment variable for SUMMA
    env_summa_dim = env.summa_dim

    if env_summa_dim:
        assert int(env_summa_dim) == summa_dim, \
            'SUMMA_DIM has been set in the current environment and ' \
            'does not match with the value passed to this initializer'
    else:
        env.summa_dim = summa_dim


class Initializer_2D_Row(ProcessGroupInitializer):
    """2d tensor parallel initialization among rows.

    Args:
        num_group (int): The number of all tensor groups.
        summa_dim (int): The dimension of SUMMA.
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, num_group, summa_dim, *args, **kwargs):
        super(Initializer_2D_Row, self).__init__(*args, **kwargs)
        self.num_group = num_group
        self.summa_dim = summa_dim

    def init_dist_group(self):
        """Initialize 2D tensor row parallel groups, and assign local_ranks and groups to each gpu.

        Returns:
            Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode):
                2D tensor row parallelism's information in a tuple.
        """
        local_rank = None
        ranks_in_group = None
        process_group = None
        cpu_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_2D_ROW

        for i in range(self.num_group):
            for j in range(self.summa_dim):
                # One row of the summa_dim x summa_dim grid: consecutive ranks.
                ranks = [i * self.tensor_parallel_size + j * self.summa_dim + k for k in range(self.summa_dim)]
                group = dist.new_group(ranks)
                # new_group is collective, so every process creates every group.
                group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group

                if self.rank in ranks:
                    local_rank = ranks.index(self.rank)
                    group_world_size = len(ranks)
                    process_group = group
                    cpu_group = group_cpu
                    ranks_in_group = ranks

        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode


class Initializer_2D_Col(ProcessGroupInitializer):
    """2d tensor parallel initialization among cols.

    Args:
        num_group (int): The number of all tensor groups.
        summa_dim (int): The dimension of SUMMA.
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, num_group, summa_dim, *args, **kwargs):
        super(Initializer_2D_Col, self).__init__(*args, **kwargs)
        self.num_group = num_group
        self.summa_dim = summa_dim

    def init_dist_group(self):
        """Initialize 2D tensor col parallel groups, and assign local_ranks and groups to each gpu.

        Returns:
            Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode):
                2D tensor col parallelism's information in a tuple.
        """
        local_rank = None
        ranks_in_group = None
        process_group = None
        cpu_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_2D_COL

        for i in range(self.num_group):
            for j in range(self.summa_dim):
                # One column of the summa_dim x summa_dim grid: strided ranks.
                ranks = [i * self.tensor_parallel_size + j + k * self.summa_dim for k in range(self.summa_dim)]
                group = dist.new_group(ranks)
                group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group

                if self.rank in ranks:
                    local_rank = ranks.index(self.rank)
                    group_world_size = len(ranks)
                    process_group = group
                    cpu_group = group_cpu
                    ranks_in_group = ranks

        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode


@DIST_GROUP_INITIALIZER.register_module
class Initializer_2D(ProcessGroupInitializer):
    """Serve as the single entry point to 2D parallel initialization.

    Args:
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.num_group = self.world_size // self.tensor_parallel_size
        self.summa_dim = int(math.sqrt(self.tensor_parallel_size))

        assert self.tensor_parallel_size == self.summa_dim ** 2, \
            "2D summa dim should equal to tensor parallel size ^ 0.5"
        _check_summa_env_var(self.summa_dim)

        self.col_initializer = Initializer_2D_Col(self.num_group, self.summa_dim, *args, **kwargs)
        self.row_initializer = Initializer_2D_Row(self.num_group, self.summa_dim, *args, **kwargs)

    def init_dist_group(self):
        """Initialize 2D tensor row and col parallel groups, and assign local_ranks and groups to each gpu.

        Returns:
            List[Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode)]:
                2D tensor parallelism's information in a list of tuples.
        """
        parallel_setting = [self.row_initializer.init_dist_group(), self.col_initializer.init_dist_group()]
        return parallel_setting
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.distributed as dist from colossalai.global_variables import tensor_parallel_env as env from colossalai.registry import DIST_GROUP_INITIALIZER from ..parallel_mode import ParallelMode from .process_group_initializer import ProcessGroupInitializer @DIST_GROUP_INITIALIZER.register_module class Initializer_1D(ProcessGroupInitializer): """A ProcessGroupInitializer for 1d tensor parallelism. Args: rank (int): The rank of current process. world_size (int): Size of whole communication world. config (Config): Running configuration. data_parallel_size (int): Size of data parallel. pipeline_parallel_size (int): Size of pipeline parallel. tensor_parallel_size (int): Size of tensor parallel. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.num_group = self.world_size // self.tensor_parallel_size def init_dist_group(self): """Initialize 1D tensor parallel groups, and assign local_ranks and groups to each gpu. Returns: Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode): 1D tensor parallelism's information in a tuple. """ local_rank = None ranks_in_group = None process_group = None cpu_group = None group_world_size = None mode = ParallelMode.PARALLEL_1D env.parallel_input_1d = False for i in range(self.num_group): ranks = [i * self.tensor_parallel_size + j for j in range(self.tensor_parallel_size)] group = dist.new_group(ranks) group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group if self.rank in ranks: local_rank = ranks.index(self.rank) group_world_size = len(ranks) process_group = group cpu_group = group_cpu ranks_in_group = ranks return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.distributed as dist from colossalai.registry import DIST_GROUP_INITIALIZER from .process_group_initializer import ProcessGroupInitializer from ..parallel_mode import ParallelMode @DIST_GROUP_INITIALIZER.register_module class Initializer_Tensor(ProcessGroupInitializer): """A ProcessGroupInitializer for tensor parallelism. Args: rank (int): The rank of current process. world_size (int): Size of whole communication world. config (Config): Running configuration. data_parallel_size (int): Size of data parallel. pipeline_parallel_size (int): Size of pipeline parallel. tensor_parallel_size (int): Size of tensor parallel. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.num_tensor_parallel_group = self.world_size // self.tensor_parallel_size def init_dist_group(self): """Initialize tensor parallel groups, and assign local_ranks and groups to each gpu. Returns: Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode): A Tensor parallelism's information tuple. """ local_rank = None ranks_in_group = None process_group = None cpu_group = None group_world_size = None mode = ParallelMode.TENSOR for i in range(self.num_tensor_parallel_group): ranks = [i * self.tensor_parallel_size + j for j in range(self.tensor_parallel_size)] group = dist.new_group(ranks) group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group if self.rank in ranks: local_rank = ranks.index(self.rank) group_world_size = len(ranks) process_group = group cpu_group = group_cpu ranks_in_group = ranks return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch.distributed as dist from colossalai.registry import DIST_GROUP_INITIALIZER from .process_group_initializer import ProcessGroupInitializer from ..parallel_mode import ParallelMode @DIST_GROUP_INITIALIZER.register_module class Initializer_Model(ProcessGroupInitializer): """A ProcessGroupInitializer for model parallelism (model parallel group contains pipeline and tensor parallel groups). Args: rank (int): The rank of current process. world_size (int): Size of whole communication world. config (Config): Running configuration. data_parallel_size (int): Size of data parallel. pipeline_parallel_size (int): Size of pipeline parallel. tensor_parallel_size (int): Size of tensor parallel. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.model_parallel_size = self.tensor_parallel_size * self.pipeline_parallel_size self.num_group = self.world_size // self.model_parallel_size def init_dist_group(self): """Initialize model parallel groups, and assign local_ranks and groups to each gpu. Returns: Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode): A Model parallelism's information tuple. """ local_rank = None ranks_in_group = None process_group = None cpu_group = None group_world_size = None mode = ParallelMode.MODEL for i in range(self.num_group): ranks = [i * self.model_parallel_size + j for j in range(self.model_parallel_size)] group = dist.new_group(ranks) group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group if self.rank in ranks: local_rank = ranks.index(self.rank) group_world_size = len(ranks) process_group = group cpu_group = group_cpu ranks_in_group = ranks return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode
from .initializer_1d import Initializer_1D from .initializer_2d import Initializer_2D from .initializer_2p5d import Initializer_2p5D from .initializer_3d import Initializer_3D from .initializer_data import Initializer_Data from .initializer_pipeline import Initializer_Pipeline from .initializer_sequence import Initializer_Sequence from .initializer_tensor import Initializer_Tensor from .initializer_model import Initializer_Model from .process_group_initializer import ProcessGroupInitializer __all__ = [ 'Initializer_Tensor', 'Initializer_Sequence', 'Initializer_Pipeline', 'Initializer_Data', 'Initializer_2p5D', 'Initializer_2D', 'Initializer_3D', 'Initializer_1D', 'ProcessGroupInitializer', 'Initializer_Model' ]
#!/usr/bin/env python # -*- encoding: utf-8 -*- from abc import ABC, abstractmethod from colossalai.context import Config class ProcessGroupInitializer(ABC): """An object, knowing the parallelism configuration, that initializes parallel groups. Args: rank (int): The rank of current process. world_size (int): Size of whole communication world. config (Config): Running configuration. data_parallel_size (int): Size of data parallel. pipeline_parallel_size (int): Size of pipeline parallel. tensor_parallel_size (int): Size of tensor parallel. """ def __init__(self, rank: int, world_size: int, config: Config, data_parallel_size: int, pipeline_parallel_size: int, tensor_parallel_size: int): self.rank = rank self.world_size = world_size self.data_parallel_size = data_parallel_size self.config = config self.pipeline_parallel_size = pipeline_parallel_size self.tensor_parallel_size = tensor_parallel_size super().__init__() @abstractmethod def init_dist_group(self): pass
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import math

import torch.distributed as dist

from colossalai.context import Config
from colossalai.global_variables import tensor_parallel_env as env
from colossalai.registry import DIST_GROUP_INITIALIZER

from ..parallel_mode import ParallelMode
from .process_group_initializer import ProcessGroupInitializer


def _check_tesseract_env_var(tesseract_dim: int, tesseract_dep: int):
    # check global variable for TESSERACT
    env_tesseract_dim = env.tesseract_dim
    env_tesseract_dep = env.tesseract_dep

    if env_tesseract_dim and env_tesseract_dep:
        assert int(env_tesseract_dim) == tesseract_dim, \
            'TESSERACT_DIM has been set in the current environment and ' \
            'does not match with the value passed to this initializer'
        assert int(env_tesseract_dep) == tesseract_dep, \
            'TESSERACT_DEP has been set in the current environment and ' \
            'does not match with the value passed to this initializer'
    else:
        env.tesseract_dim = tesseract_dim
        env.tesseract_dep = tesseract_dep


# i row  j col  k dep
class Initializer_2p5D_ROW(ProcessGroupInitializer):
    """2.5d tensor parallel initialization among rows.

    Args:
        tesseract_dim (int): The dimension of tesseract.
        tesseract_dep (int): The dimension of depth.
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, tesseract_dim: int, tesseract_dep: int, *args):
        super(Initializer_2p5D_ROW, self).__init__(*args)
        self.num_group = self.world_size // self.tensor_parallel_size
        self.tesseract_dep = tesseract_dep
        self.tesseract_dim = tesseract_dim
        assert self.tensor_parallel_size == self.tesseract_dim ** 2 * self.tesseract_dep, \
            "Tensor parallel size should be depth * dim ** 2 in 2.5D parallel"

    def init_dist_group(self):
        """Initialize 2.5D tensor row parallel groups, and assign local_ranks and groups to each gpu.

        Returns:
            Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode):
                2.5D tensor row parallelism's information in a tuple.
        """
        local_rank = None
        ranks_in_group = None
        process_group = None
        cpu_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_2P5D_ROW

        for h in range(self.num_group):
            for j in range(self.tesseract_dim):
                for k in range(self.tesseract_dep):
                    # Ranks varying along the i (row) axis of the dim x dim x dep tesseract.
                    ranks = [
                        h * self.tensor_parallel_size + i + self.tesseract_dim * (j + self.tesseract_dim * k)
                        for i in range(self.tesseract_dim)
                    ]
                    group = dist.new_group(ranks)
                    # new_group is collective, so every process creates every group.
                    group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group

                    if self.rank in ranks:
                        local_rank = ranks.index(self.rank)
                        group_world_size = len(ranks)
                        process_group = group
                        cpu_group = group_cpu
                        ranks_in_group = ranks

        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode


class Initializer_2p5D_Col(ProcessGroupInitializer):
    """2.5d tensor parallel initialization among cols.

    Args:
        tesseract_dim (int): The dimension of tesseract.
        tesseract_dep (int): The dimension of depth.
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, tesseract_dim: int, tesseract_dep: int, *args):
        super(Initializer_2p5D_Col, self).__init__(*args)
        self.num_group = self.world_size // self.tensor_parallel_size
        self.tesseract_dep = tesseract_dep
        self.tesseract_dim = tesseract_dim
        assert self.tensor_parallel_size == self.tesseract_dim ** 2 * self.tesseract_dep, \
            "Tensor parallel size should be depth * dim ** 2 in 2.5D parallel"

    def init_dist_group(self):
        """Initialize 2.5D tensor col parallel groups, and assign local_ranks and groups to each gpu.

        Returns:
            Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode):
                2.5D tensor col parallelism's information in a tuple.
        """
        local_rank = None
        ranks_in_group = None
        process_group = None
        cpu_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_2P5D_COL

        for h in range(self.num_group):
            for i in range(self.tesseract_dim):
                for k in range(self.tesseract_dep):
                    # Ranks varying along the j (col) axis of the tesseract.
                    ranks = [
                        h * self.tensor_parallel_size + i + self.tesseract_dim * (j + self.tesseract_dim * k)
                        for j in range(self.tesseract_dim)
                    ]
                    group = dist.new_group(ranks)
                    group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group

                    if self.rank in ranks:
                        local_rank = ranks.index(self.rank)
                        group_world_size = len(ranks)
                        process_group = group
                        cpu_group = group_cpu
                        ranks_in_group = ranks

        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode


class Initializer_2p5D_Dep(ProcessGroupInitializer):
    """2.5D tensor parallel initialization among depths.

    Args:
        tesseract_dim (int): The dimension of tesseract.
        tesseract_dep (int): The dimension of depth.
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, tesseract_dim: int, tesseract_dep: int, *args):
        super(Initializer_2p5D_Dep, self).__init__(*args)
        self.num_group = self.world_size // self.tensor_parallel_size
        self.tesseract_dep = tesseract_dep
        self.tesseract_dim = tesseract_dim
        assert self.tensor_parallel_size == self.tesseract_dim ** 2 * self.tesseract_dep, \
            "Tensor parallel size should be depth * dim ** 2 in 2.5D parallel"

    def init_dist_group(self):
        """Initialize 2.5D tensor depth parallel groups, and assign local_ranks and groups to each gpu.

        Returns:
            Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode):
                2.5D tensor depth parallelism's information in a tuple.
        """
        local_rank = None
        ranks_in_group = None
        process_group = None
        cpu_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_2P5D_DEP

        for h in range(self.num_group):
            for i in range(self.tesseract_dim):
                for j in range(self.tesseract_dim):
                    # Ranks varying along the k (depth) axis of the tesseract.
                    ranks = [
                        h * self.tensor_parallel_size + i + self.tesseract_dim * (j + self.tesseract_dim * k)
                        for k in range(self.tesseract_dep)
                    ]
                    group = dist.new_group(ranks)
                    group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group

                    if self.rank in ranks:
                        local_rank = ranks.index(self.rank)
                        group_world_size = len(ranks)
                        process_group = group
                        cpu_group = group_cpu
                        ranks_in_group = ranks

        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode


# i row  j col  k dep
class Initializer_2p5D_XZ(ProcessGroupInitializer):
    """2.5d tensor parallel initialization among cols times dep.

    Args:
        tesseract_dim (int): The dimension of tesseract.
        tesseract_dep (int): The dimension of depth.
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, tesseract_dim: int, tesseract_dep: int, *args):
        super(Initializer_2p5D_XZ, self).__init__(*args)
        self.num_group = self.world_size // self.tensor_parallel_size
        self.tesseract_dep = tesseract_dep
        self.tesseract_dim = tesseract_dim
        assert self.tensor_parallel_size == self.tesseract_dim ** 2 * self.tesseract_dep, \
            "Tensor parallel size should be depth * dim ** 2 in 2.5D parallel"

    def init_dist_group(self):
        """Initialize 2.5D tensor colXdepth parallel groups, and assign local_ranks and groups to each gpu.

        Returns:
            Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode):
                2.5D tensor colXdepth parallelism's information in a tuple.
        """
        local_rank = None
        ranks_in_group = None
        process_group = None
        cpu_group = None
        group_world_size = None
        mode = ParallelMode.PARALLEL_2P5D_XZ

        for h in range(self.num_group):
            for i in range(self.tesseract_dim):
                # Ranks varying along both the j (col) and k (depth) axes.
                ranks = [
                    h * self.tensor_parallel_size + i + self.tesseract_dim * (j + self.tesseract_dim * k)
                    for k in range(self.tesseract_dep)
                    for j in range(self.tesseract_dim)
                ]
                group = dist.new_group(ranks)
                group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group

                if self.rank in ranks:
                    local_rank = ranks.index(self.rank)
                    group_world_size = len(ranks)
                    process_group = group
                    cpu_group = group_cpu
                    ranks_in_group = ranks

        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode


@DIST_GROUP_INITIALIZER.register_module
class Initializer_2p5D(ProcessGroupInitializer):
    """Serve as the single entry point to Tesseract parallel initialization.

    Args:
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
        depth (int): The depth of 2.5d parallel.
    """

    def __init__(self, rank: int, world_size: int, config: Config, data_parallel_size: int,
                 pipeline_parallel_size: int, tensor_parallel_size: int, depth: int):
        args = (rank, world_size, config, data_parallel_size, pipeline_parallel_size, tensor_parallel_size)
        super().__init__(*args)

        self.num_group = self.world_size // self.tensor_parallel_size
        self.tesseract_dim = int(math.sqrt(self.tensor_parallel_size / depth))
        self.tesseract_dep = depth

        assert self.tensor_parallel_size == self.tesseract_dim ** 2 * self.tesseract_dep, \
            "2.5D tesseract dim should equal to (tensor parallel size / tesseract dep) ^ 0.5"
        _check_tesseract_env_var(self.tesseract_dim, self.tesseract_dep)

        self.col_initializer = Initializer_2p5D_Col(self.tesseract_dim, self.tesseract_dep, *args)
        self.row_initializer = Initializer_2p5D_ROW(self.tesseract_dim, self.tesseract_dep, *args)
        self.dep_initializer = Initializer_2p5D_Dep(self.tesseract_dim, self.tesseract_dep, *args)
        self.xz_initializer = Initializer_2p5D_XZ(self.tesseract_dim, self.tesseract_dep, *args)

    def init_dist_group(self):
        """Initialize 2.5D tensor row, col, depth, and colXdepth parallel groups, and assign local_ranks
        and groups to each gpu.

        Returns:
            List[Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode)]:
                Whole 2.5D tensor parallelism's information in a list of tuples.
        """
        parallel_setting = [
            self.col_initializer.init_dist_group(),
            self.row_initializer.init_dist_group(),
            self.dep_initializer.init_dist_group(),
            self.xz_initializer.init_dist_group()
        ]
        return parallel_setting
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from torch import distributed as dist

from colossalai.registry import DIST_GROUP_INITIALIZER
from .process_group_initializer import ProcessGroupInitializer
from ..parallel_mode import ParallelMode


@DIST_GROUP_INITIALIZER.register_module
class Initializer_Data(ProcessGroupInitializer):
    """A ProcessGroupInitializer for data parallelism.

    Args:
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_data_parallel_group = self.world_size // self.data_parallel_size

    def init_dist_group(self):
        """Initialize data parallel groups, and assign local_ranks and groups to each gpu.

        Returns:
            Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode):
                A Data parallelism's information tuple.
        """
        mode = ParallelMode.DATA
        local_rank, group_world_size = None, None
        process_group, cpu_group, ranks_in_group = None, None, None
        needs_gloo_twin = dist.get_backend() != 'gloo'

        # every rank must take part in creating every group (dist.new_group is collective)
        for group_idx in range(self.num_data_parallel_group):
            ranks = [group_idx + stride * self.num_data_parallel_group for stride in range(self.data_parallel_size)]
            group = dist.new_group(ranks)
            group_cpu = dist.new_group(ranks, backend='gloo') if needs_gloo_twin else group

            if self.rank in ranks:
                local_rank = ranks.index(self.rank)
                group_world_size = len(ranks)
                process_group, cpu_group, ranks_in_group = group, group_cpu, ranks

        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch.distributed as dist

from colossalai.registry import DIST_GROUP_INITIALIZER
from .initializer_tensor import Initializer_Tensor
from .process_group_initializer import ProcessGroupInitializer
from ..parallel_mode import ParallelMode


@DIST_GROUP_INITIALIZER.register_module
class Initializer_Sequence_DP(ProcessGroupInitializer):
    """A ProcessGroupInitializer for sequence parallelism all-reduce.

    In Sequence Parallelism, each GPU holds the full copy of model weights,
    thus, gradient all-reduce occurs across all processes in the same pipeline stage.

    Args:
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # all ranks within the same pipeline stage form one all-reduce group
        self.dp_size = self.world_size // self.pipeline_parallel_size
        self.num_group = self.pipeline_parallel_size

    def init_dist_group(self):
        """Initialize Sequence Parallel process groups used for gradient all-reduce.

        Returns:
            Tuple: A tuple (local_rank, group_world_size, process_group, ranks_in_group, mode).
        """
        local_rank = None
        ranks_in_group = None
        process_group = None
        cpu_group = None
        group_world_size = None
        mode = ParallelMode.SEQUENCE_DP

        for i in range(self.num_group):
            # consecutive ranks of size dp_size belong to the same pipeline stage
            ranks = [i * self.dp_size + j for j in range(self.dp_size)]
            group = dist.new_group(ranks)
            # gloo twin group for CPU collectives; reuse `group` when backend already is gloo
            group_cpu = dist.new_group(ranks, backend='gloo') if dist.get_backend() != 'gloo' else group

            if self.rank in ranks:
                local_rank = ranks.index(self.rank)
                group_world_size = len(ranks)
                process_group = group
                cpu_group = group_cpu
                ranks_in_group = ranks

        return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode


@DIST_GROUP_INITIALIZER.register_module
class Initializer_Sequence(ProcessGroupInitializer):
    """A ProcessGroupInitializer for sequence parallelism.

    Args:
        rank (int): The rank of current process.
        world_size (int): Size of whole communication world.
        config (Config): Running configuration.
        data_parallel_size (int): Size of data parallel.
        pipeline_parallel_size (int): Size of pipeline parallel.
        tensor_parallel_size (int): Size of tensor parallel.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # reuse tensor parallel initializer code
        self._sequence_initializer = Initializer_Tensor(*args, **kwargs)
        self._sequence_dp_initializer = Initializer_Sequence_DP(*args, **kwargs)

    def init_dist_group(self):
        """Initialize Sequence parallel process groups and assign local_ranks and groups to each gpu.

        Sequence parallelism requires 2 process groups. The first is for model forward where several processes
        exchange partial query, key and value embedding to compute self attention values. The second is for
        all-reduce to synchronize the model parameters.

        Returns:
            List[Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode)]:
                A Sequence parallelism's information in list of tuples.
        """
        parallel_setting = []

        # fix: the cpu-group local variable was misspelled `cpu_grop` in the original
        local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode = \
            self._sequence_initializer.init_dist_group()
        # change mode to sequence
        mode = ParallelMode.SEQUENCE

        parallel_setting.append((local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode))
        parallel_setting.append(self._sequence_dp_initializer.init_dist_group())
        return parallel_setting
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch
from torch import Tensor

from colossalai.context.parallel_mode import ParallelMode


class SeedManager:
    """This class is a manager of all random seeds involved in the system.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """

    def __init__(self):
        self._current_mode = None       # ParallelMode currently driving the CUDA RNG
        self._seeds = dict()            # ParallelMode -> int seed
        self._seed_states = dict()      # ParallelMode -> saved CUDA RNG state tensor

    @property
    def current_mode(self):
        return self._current_mode

    @property
    def seeds(self):
        return self._seeds

    @property
    def seed_states(self):
        return self._seed_states

    def set_state(self, parallel_mode: ParallelMode, state: Tensor):
        """Sets the state of the seed manager for `parallel_mode`.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.
            state (:class:`torch.Tensor`): the state to be set.

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not found in the seed manager.
        """
        assert parallel_mode in self._seed_states, f'Parallel mode {parallel_mode} is not found in the seed manager'
        self._seed_states[parallel_mode] = state

    def set_mode(self, parallel_mode: ParallelMode):
        """Sets the current mode of the seed manager.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.
        """
        if self.current_mode:
            # save the current state for current mode
            self._seed_states[self._current_mode] = torch.cuda.get_rng_state()

        # set the new state for new mode
        self._current_mode = parallel_mode
        torch.cuda.set_rng_state(self._seed_states[parallel_mode])

    def add_seed(self, parallel_mode: ParallelMode, seed: int, overwrtie: bool = False):
        """Adds a seed to the seed manager for `parallel_mode`.

        Args:
            parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.
            seed (int): The seed to be added.
            overwrtie (bool, optional): Whether allows to overwrite the seed that has been set already.
                NOTE(review): the parameter name is misspelled ("overwrtie"); it is kept here
                because renaming it would break existing keyword callers.

        Raises:
            AssertionError: Raises an AssertionError if `parallel_mode` is not an instance of
                :class:`colossalai.context.ParallelMode` or the seed for `parallel_mode` has been added.
        """
        assert isinstance(parallel_mode, ParallelMode), 'A valid ParallelMode must be provided'
        if overwrtie is False:
            assert parallel_mode not in self._seed_states, f'The seed for {parallel_mode} has been added'
        elif parallel_mode in self._seed_states:
            # fix: the original message misspelled "Warning" as "Warnning"
            print(f"Warning: {parallel_mode} seed has been overwritten.", flush=True)

        # snapshot the ambient RNG state, derive and record the new mode's state, then restore
        current_state = torch.cuda.get_rng_state()
        torch.cuda.manual_seed(seed)
        self._seed_states[parallel_mode] = torch.cuda.get_rng_state()
        self._seeds[parallel_mode] = seed
        torch.cuda.set_rng_state(current_state)

    def reset(self):
        """Drops every registered seed and state, returning the manager to a pristine state."""
        self._current_mode = None
        self._seeds = dict()
        self._seed_states = dict()
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import functools
from contextlib import contextmanager

import torch.cuda
from torch import Tensor

from .seed_manager import SeedManager
from ..parallel_mode import ParallelMode

# Module-level singleton: all helpers below operate on this one seed manager.
_SEED_MANAGER = SeedManager()


def get_seeds():
    """Returns the seeds of the seed manager.

    Returns:
        dict: The seeds of the seed manager.
    """
    return _SEED_MANAGER.seeds


def get_states(copy=False):
    """Returns the seed states of the seed manager.

    Args:
        copy (bool, optional): If True, return cloned copies of the state tensors
            instead of the live dict. Defaults to False.

    Returns:
        dict: The seed states of the seed manager.
    """
    states = _SEED_MANAGER.seed_states
    if copy:
        new_states = dict()

        for parallel_mode, state in states.items():
            new_states[parallel_mode] = state.clone()
        return new_states
    else:
        return _SEED_MANAGER.seed_states


def get_current_mode():
    """Returns the current mode of the seed manager.

    Returns:
        :class:`colossalai.context.ParallelMode`: The current mode of the seed manager.
    """
    return _SEED_MANAGER.current_mode


def add_seed(parallel_mode: ParallelMode, seed: int, overwrite: bool = False):
    """Adds a seed to the seed manager for `parallel_mode`.

    Args:
        parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.
        seed (int): The seed to be added.
        overwrite (bool, optional): Whether to allow overwriting an already-added seed.
            Defaults to False.

    Raises:
        AssertionError: Raises an AssertionError if `parallel_mode` is not an instance of
            :class:`colossalai.context.ParallelMode` or the seed for `parallel_mode` has been added.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    _SEED_MANAGER.add_seed(parallel_mode, seed, overwrite)


def set_mode(parallel_mode: ParallelMode):
    """Sets the current mode of the seed manager.

    Args:
        parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    _SEED_MANAGER.set_mode(parallel_mode)


def set_seed_states(parallel_mode: ParallelMode, state: Tensor):
    """Sets the state of the seed manager for `parallel_mode`.

    Args:
        parallel_mode (:class:`colossalai.context.ParallelMode`): The chosen parallel mode.
        state (:class:`torch.Tensor`): the state to be set.

    Raises:
        AssertionError: Raises an AssertionError if `parallel_mode` is not found in the seed manager.
    """
    _SEED_MANAGER.set_state(parallel_mode, state)


def sync_states():
    # Persist the live CUDA RNG state back into the manager's slot for the current mode.
    current_mode = get_current_mode()
    current_states = torch.cuda.get_rng_state()
    set_seed_states(current_mode, current_states)


@contextmanager
def seed(parallel_mode: ParallelMode):
    """ A context for seed switch

    Examples::

        with seed(ParallelMode.DATA):
            output = F.dropout(input)

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    try:
        # set to new mode
        current_mode = _SEED_MANAGER.current_mode
        yield _SEED_MANAGER.set_mode(parallel_mode)
    finally:
        # recover
        _SEED_MANAGER.set_mode(current_mode)


def with_seed(func, parallel_mode: ParallelMode):
    """
    A function wrapper which executes the function with a specified seed.

    Examples::

        # use with decorator
        @with_seed(ParallelMode.DATA)
        def forward(input):
            return F.dropout(input)

        out = forward(input)
        # OR use it inline
        def forward(input):
            return F.dropout(input)

        wrapped_forward = with_seed(forward, ParallelMode.DATA)
        out = wrapped_forward(input)

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # switch mode
        current_mode = _SEED_MANAGER.current_mode
        _SEED_MANAGER.set_mode(parallel_mode)

        # exec func
        out = func(*args, **kwargs)

        # recover state
        _SEED_MANAGER.set_mode(current_mode)

        return out

    return wrapper


def moe_set_seed(seed):
    # Give every rank a distinct TENSOR-mode seed (base seed offset by global rank)
    # so MoE experts on different devices draw different random numbers.
    if torch.cuda.is_available():
        from colossalai.core import global_context as gpc
        global_rank = gpc.get_global_rank()
        diff_seed = seed + global_rank
        add_seed(ParallelMode.TENSOR, diff_seed, True)
        print(f"moe seed condition: {global_rank} with tensor seed {diff_seed}", flush=True)


def reset_seeds():
    # Clears every seed and state registered on the module-level singleton.
    _SEED_MANAGER.reset()
# Re-export the seed-management helpers as the package's public API.
from ._helper import (add_seed, get_current_mode, get_seeds, get_states, moe_set_seed, reset_seeds, seed, set_mode,
                      set_seed_states, sync_states, with_seed)

__all__ = [
    'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', 'get_states', 'get_current_mode', 'set_seed_states',
    'sync_states', 'moe_set_seed', 'reset_seeds'
]
# Public API of the gemini stateful-tensor subpackage.
from .stateful_tensor_mgr import StatefulTensorMgr
from .tensor_placement_policy import TensorPlacementPolicyFactory

__all__ = [
    'StatefulTensorMgr',
    'TensorPlacementPolicyFactory',
]
from abc import ABC, abstractmethod
from typing import List, Optional, Type

import torch

from colossalai.utils import get_current_device
from colossalai.zero.sharded_param.tensor_utils import colo_model_data_tensor_move_inline, colo_tensor_mem_usage
from colossalai.utils.memory import colo_device_memory_capacity
from colossalai.zero.sharded_param.tensorful_state import StatefulTensor
from colossalai.utils.memory_tracer import MemStatsCollector
from colossalai.utils.memory_tracer.model_data_memtracer import GLOBAL_MODEL_DATA_TRACER


class TensorPlacementPolicy(ABC):
    """Base class for policies that decide where stateful tensors should live.

    Args:
        device: the target device of this policy, or None when the policy decides per-call.
        mem_stats_collector: optional collector supplying non-model-data memory statistics.
    """

    def __init__(self, device: Optional[torch.device], mem_stats_collector: Optional[MemStatsCollector] = None) -> None:
        self.device: Optional[torch.device] = device
        self.mem_stats_collector: Optional[MemStatsCollector] = mem_stats_collector

    @abstractmethod
    def evict_tensors(self, hold_cuda_tensor_list: List[StatefulTensor], **kwargs) -> None:
        raise NotImplementedError


class CPUTensorPlacementPolicy(TensorPlacementPolicy):
    """Keep every tensor on CPU: evicting simply moves all held CUDA tensors there."""

    def __init__(self, mem_stats_collector: Optional[MemStatsCollector] = None) -> None:
        super().__init__(torch.device('cpu'), mem_stats_collector=mem_stats_collector)

    def evict_tensors(self, hold_cuda_tensor_list: List[StatefulTensor], **kwargs) -> None:
        for t in hold_cuda_tensor_list:
            colo_model_data_tensor_move_inline(t, self.device)


class CUDATensorPlacementPolicy(TensorPlacementPolicy):
    """Keep every tensor on CUDA: eviction is a no-op."""

    def __init__(self, mem_stats_collector: Optional[MemStatsCollector] = None) -> None:
        assert torch.cuda.is_available(), 'Cannot use CUDATensorPlacementPolicy when CUDA is not available'
        super().__init__(get_current_device(), mem_stats_collector=mem_stats_collector)

    def evict_tensors(self, hold_cuda_tensor_list: List[StatefulTensor], **kwargs) -> None:
        pass


class AutoTensorPlacementPolicy(TensorPlacementPolicy):
    """Move tensors between CPU and CUDA based on measured memory pressure."""

    def __init__(self, mem_stats_collector: Optional[MemStatsCollector] = None) -> None:
        super().__init__(None, mem_stats_collector=mem_stats_collector)
        # model data will use 1-self._warmup_non_model_data_ratio CUDA memory in warmup phase
        # TODO(ver217): make these args configurable
        self._warmup_non_model_data_ratio: float = 0.8
        self._steady_cuda_cap_ratio: float = 0.8

    def evict_tensors(self,
                      hold_cuda_tensor_list: List[StatefulTensor],
                      cuda_demand: int = 0,
                      warmup: bool = True,
                      compute_list: Optional[List[StatefulTensor]] = None,
                      compute_idx: int = 0,
                      **kwargs) -> None:
        """Evict held CUDA tensors to CPU until `cuda_demand` bytes of model data fit on CUDA.

        Args:
            hold_cuda_tensor_list: CUDA-resident tensors in a HOLD state, eligible for eviction.
            cuda_demand: bytes of model data that must be brought onto CUDA.
            warmup: True during warmup iterations, when non-model-data usage is only estimated.
            compute_list: ordered record of tensors entering COMPUTE state (used to evict the
                tensor needed furthest in the future first).
            compute_idx: index of the current position within `compute_list`.

        Raises:
            RuntimeError: if not enough CUDA memory could be freed.
        """
        # fix: the original used a mutable default argument (compute_list=[])
        if compute_list is None:
            compute_list = []
        cuda_capacity = colo_device_memory_capacity(get_current_device())
        used_cuda_model_data = GLOBAL_MODEL_DATA_TRACER.cuda_usage
        if warmup:
            # We designate a part of CUDA memory for model data in warmup iterations.
            max_cuda_non_model_data_per_period = cuda_capacity * self._warmup_non_model_data_ratio
        else:
            # max non-model-data cuda memory consumption of this sampling moment and the next sampling moment.
            max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage('cuda')
            cuda_capacity *= self._steady_cuda_cap_ratio
        total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period
        avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data
        if avail_cuda_model_data < cuda_demand:
            # Move cuda_demand - avail_cuda_model_data volume of tensors to CPU
            to_free_cuda_model_data = cuda_demand - avail_cuda_model_data
            freed_cuda_model_data = 0
            to_free_tensor_list = hold_cuda_tensor_list
            if not warmup:
                # Evict tensors whose next COMPUTE use is furthest in the future first
                # (tensors never used again default to len(compute_list)).
                next_compute_idx = {t: len(compute_list) for t in hold_cuda_tensor_list}
                for i in range(len(compute_list) - 1, compute_idx, -1):
                    if compute_list[i] in next_compute_idx:
                        next_compute_idx[compute_list[i]] = i
                next_compute_idx = sorted(next_compute_idx.items(), key=lambda pair: pair[1], reverse=True)
                to_free_tensor_list = [t for (t, idx) in next_compute_idx]
            for t in to_free_tensor_list:
                if freed_cuda_model_data >= to_free_cuda_model_data:
                    break
                freed_cuda_model_data += colo_tensor_mem_usage(t)[0]
                colo_model_data_tensor_move_inline(t, torch.device('cpu'))
            if freed_cuda_model_data < to_free_cuda_model_data:
                raise RuntimeError(
                    f"Adjust layout failed! No enough CUDA memory! Need {to_free_cuda_model_data}, freed {freed_cuda_model_data}"
                )


class TensorPlacementPolicyFactory:
    """Maps a policy name ('cpu' | 'cuda' | 'auto') to its policy class."""

    @staticmethod
    def create(policy_name: str) -> Type[TensorPlacementPolicy]:
        if policy_name == 'cpu':
            return CPUTensorPlacementPolicy
        elif policy_name == 'cuda':
            return CUDATensorPlacementPolicy
        elif policy_name == 'auto':
            return AutoTensorPlacementPolicy
        else:
            raise TypeError(f"Unknown tensor placement policy {policy_name}")
import functools
import torch
import types
from colossalai.utils.cuda import get_current_device
from colossalai.zero.sharded_param.sharded_param import ShardedParamV2
from colossalai.zero.sharded_param.tensorful_state import StatefulTensor, TensorState
from colossalai.zero.sharded_param.tensor_utils import colo_model_data_tensor_move_inline, colo_tensor_mem_usage
from colossalai.gemini.tensor_placement_policy import TensorPlacementPolicy
from typing import List
from colossalai.logging import get_dist_logger


class StatefulTensorMgr(object):
    """
    Stateful Tensor Manager, inspired from PatrickStar

    PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management
    https://arxiv.org/abs/2108.05818
    """

    def __init__(self, tensor_placement_policy: TensorPlacementPolicy) -> None:
        self._tensor_placement_policy: TensorPlacementPolicy = tensor_placement_policy
        self._stateful_tensor_list: List[StatefulTensor] = []
        self._logger = get_dist_logger("StatefulTensorMgr")

        # True until the first iteration completes (see reset())
        self._warmup = True

        # ordered record of tensors entering COMPUTE state during warmup,
        # and the current position within it
        self._compute_list: List[StatefulTensor] = []
        self._compute_idx: int = -1

    def register_stateful_param(self, param: ShardedParamV2) -> None:
        """Track every payload tensor of `param` and wrap its trans_state method
        so this manager observes each state transition (see _trans_state)."""
        assert isinstance(param, ShardedParamV2)
        for t in param.get_payload_tensors():
            assert isinstance(t, StatefulTensor)
            self._stateful_tensor_list.append(t)
            # monkey-patch: the bound original trans_state is captured in the partial,
            # and _trans_state is invoked with it plus the tensor itself
            t.trans_state = types.MethodType(functools.partial(self._trans_state, t.trans_state), t)

    def adjust_layout(self) -> None:
        """ Adjust the layout of stateful tensor according to the information provided
        by mem_stats_collector, which should belongs to a Sharded Model.
        """
        # find stateful tensor in state COMPUTE
        cuda_demand = 0
        move_to_cuda_tensor_list = []
        hold_cuda_tensor_list = []
        for tensor in self._stateful_tensor_list:
            if tensor.state == TensorState.FREE:
                continue

            if tensor.device.type == 'cuda':
                # CUDA-resident HOLD-state tensors are eviction candidates
                if tensor.state in [TensorState.HOLD, TensorState.HOLD_AFTER_BWD, TensorState.HOLD_AFTER_FWD]:
                    hold_cuda_tensor_list.append(tensor)
            elif tensor.device.type == 'cpu':
                # CPU-resident COMPUTE tensors must be brought to CUDA; tally the bytes needed
                if tensor.state == TensorState.COMPUTE:
                    move_to_cuda_tensor_list.append(tensor)
                    cuda_demand += colo_tensor_mem_usage(tensor.payload)[1]
            else:
                raise RuntimeError
        self._tensor_placement_policy.evict_tensors(hold_cuda_tensor_list,
                                                    cuda_demand=cuda_demand,
                                                    warmup=self._warmup,
                                                    compute_list=self._compute_list,
                                                    compute_idx=self._compute_idx)
        # move COMPUTE tensors to CUDA
        for t in move_to_cuda_tensor_list:
            colo_model_data_tensor_move_inline(t, get_current_device())

    def reset(self):
        """This function must be called when each iteration finishes
        """
        self._warmup = False
        self._compute_idx = -1

    def _trans_state(self, trans_state_func, stateful_tensor, state):
        # Forward to the tensor's original trans_state, then record COMPUTE transitions
        # so eviction can be ordered by next use (see AutoTensorPlacementPolicy).
        trans_state_func(state)
        if state == TensorState.COMPUTE:
            self._compute_idx += 1
            if self._warmup:
                self._compute_list.append(stateful_tensor)
# Aggregate the public symbols of every nn subpackage into colossalai.nn.
from .layer import *
from .loss import *
from .lr_scheduler import *
from .metric import *
from .model import *
from .optimizer import *
import math
import warnings

from torch import Tensor
import torch.nn as nn


def _resolve_fan(mode, fan_in, fan_out):
    """Pick the fan size matching `mode`, asserting the needed value was supplied."""
    if mode == 'fan_in':
        assert fan_in is not None, 'Fan_in is not provided.'
        return fan_in
    if mode == 'fan_out':
        assert fan_out is not None, 'Fan_out is not provided.'
        return fan_out
    raise ValueError(f'Invalid initialization mode \'{mode}\'')


def zeros_():
    """Return an initializer that fills the given tensor with zeros."""

    def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        # fan sizes are accepted for interface uniformity but unused here
        return nn.init.zeros_(tensor)

    return initializer


def ones_():
    """Return an initializer that fills the given tensor with ones."""

    def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        return nn.init.ones_(tensor)

    return initializer


def uniform_(a: float = 0., b: float = 1.):
    r"""Return an initializer drawing values from the uniform distribution
    :math:`\mathcal{U}(a, b)`.

    Args:
        a (float): the lower bound of the uniform distribution. Defaults 0.0.
        b (float): the upper bound of the uniform distribution. Defaults 1.0.
    """

    def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        return nn.init.uniform_(tensor, a, b)

    return initializer


def normal_(mean: float = 0., std: float = 1.):
    r"""Return an initializer drawing values from the normal distribution
    :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.

    Args:
        mean (float): the mean of the normal distribution. Defaults 0.0.
        std (float): the standard deviation of the normal distribution. Defaults 1.0.
    """

    def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        return nn.init.normal_(tensor, mean, std)

    return initializer


def trunc_normal_(mean: float = 0., std: float = 1., a: float = -2., b: float = 2.):
    r"""Return an initializer drawing values from a truncated normal distribution:
    :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with samples outside
    :math:`[a, b]` redrawn until they fall within the bounds. Works best when
    :math:`a \leq \text{mean} \leq b`.

    Args:
        mean (float): the mean of the normal distribution. Defaults 0.0.
        std (float): the standard deviation of the normal distribution. Defaults 1.0.
        a (float): the minimum cutoff value. Defaults -2.0.
        b (float): the maximum cutoff value. Defaults 2.0.
    """

    def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        return nn.init.trunc_normal_(tensor, mean, std, a, b)

    return initializer


def kaiming_uniform_(a=0, mode='fan_in', nonlinearity='leaky_relu'):
    r"""Return a Kaiming ('He') uniform initializer (He et al., 2015). Values are
    sampled from :math:`\mathcal{U}(-\text{bound}, \text{bound})` where

    .. math::
        \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}

    Args:
        a (int): the negative slope of the rectifier used after this layer
            (only used with ``'leaky_relu'``).
        mode (str, optional): either ``'fan_in'`` (default) or ``'fan_out'``.
        nonlinearity (str, optional): the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
    """

    # adapted from torch.nn.init
    def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        if tensor.numel() == 0:
            warnings.warn("Initializing zero-element tensors is a no-op")
            return tensor

        fan = _resolve_fan(mode, fan_in, fan_out)
        gain = nn.init.calculate_gain(nonlinearity, a)
        std = gain / math.sqrt(fan)
        bound = math.sqrt(3.) * std
        return nn.init.uniform_(tensor, -bound, bound)

    return initializer


def kaiming_normal_(a=0, mode='fan_in', nonlinearity='leaky_relu'):
    r"""Return a Kaiming ('He') normal initializer (He et al., 2015). Values are
    sampled from :math:`\mathcal{N}(0, \text{std}^2)` where

    .. math::
        \text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}}

    Args:
        a (int): the negative slope of the rectifier used after this layer
            (only used with ``'leaky_relu'``).
        mode (str, optional): either ``'fan_in'`` (default) or ``'fan_out'``.
        nonlinearity (str, optional): the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
    """

    # adapted from torch.nn.init
    def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        if tensor.numel() == 0:
            warnings.warn("Initializing zero-element tensors is a no-op")
            return tensor

        fan = _resolve_fan(mode, fan_in, fan_out)
        std = nn.init.calculate_gain(nonlinearity, a) / math.sqrt(fan)
        return nn.init.normal_(tensor, 0, std)

    return initializer


def xavier_uniform_(a: float = math.sqrt(3.), scale: float = 2., gain: float = 1.):
    r"""Return a Xavier ('Glorot') uniform initializer (Glorot & Bengio, 2010).
    Values are sampled from :math:`\mathcal{U}(-a \cdot \text{std}, a \cdot \text{std})`
    with :math:`\text{std} = \text{gain} \times \sqrt{\text{scale} / \text{fan}}`.

    Args:
        a (float, optional): scaling factor used to derive the uniform bounds from the
            standard deviation. Defaults ``math.sqrt(3.)``.
        scale (float, optional): scaling factor used to calculate standard deviation. Defaults 2.0.
        gain (float, optional): an optional scaling factor. Defaults 1.0.
    """

    # adapted from torch.nn.init
    def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        assert fan_in is not None, 'Fan_in is not provided.'

        # fan_out is optional: when given, both fans contribute
        fan = fan_in if fan_out is None else fan_in + fan_out
        std = gain * math.sqrt(scale / float(fan))
        bound = a * std
        return nn.init.uniform_(tensor, -bound, bound)

    return initializer


def xavier_normal_(scale: float = 2., gain: float = 1.):
    r"""Return a Xavier ('Glorot') normal initializer (Glorot & Bengio, 2010).
    Values are sampled from :math:`\mathcal{N}(0, \text{std}^2)` with
    :math:`\text{std} = \text{gain} \times \sqrt{\text{scale} / \text{fan}}`.

    Args:
        scale (float, optional): scaling factor used to calculate standard deviation. Defaults 2.0.
        gain (float, optional): an optional scaling factor. Defaults 1.0.
    """

    # adapted from torch.nn.init
    def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        assert fan_in is not None, 'Fan_in is not provided.'

        fan = fan_in if fan_out is None else fan_in + fan_out
        std = gain * math.sqrt(scale / float(fan))
        return nn.init.normal_(tensor, 0., std)

    return initializer


def lecun_uniform_():
    """Return a LeCun uniform initializer: bounds are derived from 1/fan_in variance."""

    # adapted from jax.nn.initializers
    def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        assert fan_in is not None, 'Fan_in is not provided.'

        variance = 1.0 / fan_in
        bound = math.sqrt(3 * variance)
        return nn.init.uniform_(tensor, -bound, bound)

    return initializer


def lecun_normal_():
    """Return a LeCun normal initializer based on a truncated normal distribution."""

    # adapted from jax.nn.initializers
    def initializer(tensor: Tensor, fan_in: int = None, fan_out: int = None):
        assert fan_in is not None, 'Fan_in is not provided.'

        std = math.sqrt(1.0 / fan_in)
        # constant rescales the truncated distribution back to unit std
        return nn.init.trunc_normal_(tensor, std=std / .87962566103423978)

    return initializer
# modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_adam.py
import torch

from colossalai.registry import OPTIMIZERS
from colossalai.utils import multi_tensor_applier


@OPTIMIZERS.register_module
class FusedAdam(torch.optim.Optimizer):
    """Implements Adam algorithm. Currently GPU-only. Requires ColossalAI to be installed via
    ``pip install .``.

    This version of fused Adam implements 2 fusions.

      * Fusion of the Adam update's elementwise operations
      * A multi-tensor apply launch that batches the elementwise updates applied to
        all the model's parameters into one or a few kernel launches.

    :class:`colossalai.nn.optimizer.FusedAdam` may be used as a drop-in replacement for
    ``torch.optim.AdamW``, or ``torch.optim.Adam`` with ``adamw_mode=False``

    :class:`colossalai.nn.optimizer.FusedAdam` may be used with or without Amp.

    Adam was been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False) NOT SUPPORTED in FusedAdam!
        adamw_mode (boolean, optional): Apply L2 regularization or weight decay
            True for decoupled weight decay(also known as AdamW) (default: True)
        set_grad_none (bool, optional): whether set grad to None when zero_grad() method
            is called. (default: True)

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self,
                 params,
                 lr=1e-3,
                 bias_correction=True,
                 betas=(0.9, 0.999),
                 eps=1e-8,
                 adamw_mode=True,
                 weight_decay=0.,
                 amsgrad=False,
                 set_grad_none=True):

        if amsgrad:
            raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
        defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay)
        super(FusedAdam, self).__init__(params, defaults)
        # the fused CUDA kernel takes an int flag: 1 = AdamW (decoupled decay), 0 = Adam (L2)
        self.adamw_mode = 1 if adamw_mode else 0
        self.set_grad_none = set_grad_none
        if multi_tensor_applier.available:
            import colossal_C
            # Skip buffer
            self._dummy_overflow_buf = torch.cuda.IntTensor([0])
            self.multi_tensor_adam = colossal_C.multi_tensor_adam
        else:
            raise RuntimeError('FusedAdam requires cuda extensions')

    def zero_grad(self, set_to_none=False):
        # NOTE(review): despite the attribute name, `set_grad_none` is not consulted here;
        # behavior follows the `set_to_none` argument only — confirm against callers.
        if set_to_none:
            for group in self.param_groups:
                for p in group['params']:
                    p.grad = None
        else:
            super(FusedAdam, self).zero_grad()

    def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.

        The remaining arguments are deprecated, and are only retained (for the moment)
        for error-checking purposes.
        """
        if any(p is not None for p in [grads, output_params, scale, grad_norms]):
            raise RuntimeError(
                'FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.'
            )
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            bias_correction = 1 if group['bias_correction'] else 0
            beta1, beta2 = group['betas']

            # assume same step across group now to simplify things
            # per parameter step can be easily support by making it tensor, or pass list into kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1

            # create lists for multi-tensor apply
            g_l, p_l, m_l, v_l = [], [], [], []

            for p in group['params']:
                if p.grad is None:
                    continue
                if p.grad.data.is_sparse:
                    raise RuntimeError(
                        'FusedAdam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                if p.dtype not in [torch.float16, torch.float32]:
                    raise RuntimeError('FusedAdam only support fp16 and fp32.')

                g_l.append(p.grad.data)
                p_l.append(p.data)
                m_l.append(state['exp_avg'])
                v_l.append(state['exp_avg_sq'])

            # one fused kernel launch updates params, exp_avg and exp_avg_sq for the whole group
            multi_tensor_applier(self.multi_tensor_adam, self._dummy_overflow_buf, [g_l, p_l, m_l, v_l], group['lr'],
                                 beta1, beta2, group['eps'], group['step'], self.adamw_mode, bias_correction,
                                 group['weight_decay'])

        return loss
"""
Adapted from the pytorch-lamb library at https://github.com/cybertronai/pytorch-lamb
"""

import torch
from torch.optim import Optimizer

from colossalai.registry import OPTIMIZERS


@OPTIMIZERS.register_module
class Lamb(Optimizer):
    r"""Implements Lamb algorithm.

    It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT
    in 76 minutes`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        adam (bool, optional): always use trust ratio = 1, which turns this into
            Adam. Useful for comparison purposes.

    .. _Large Batch Optimization for Deep Learning\: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0, adam=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    # BUGFIX: error message previously read "instad."
                    raise RuntimeError(
                        'Lamb does not support sparse gradients, consider SparseAdam instead.')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficient
                # m_t
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                # v_t
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                # Paper v3 does not use debiasing.
                # bias_correction1 = 1 - beta1 ** state['step']
                # bias_correction2 = 1 - beta2 ** state['step']
                # Apply bias to lr to avoid broadcast.
                # * math.sqrt(bias_correction2) / bias_correction1
                step_size = group['lr']

                weight_norm = p.data.pow(2).sum().sqrt()

                adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
                if group['weight_decay'] != 0:
                    adam_step.add_(p.data, alpha=group['weight_decay'])

                adam_norm = adam_step.pow(2).sum().sqrt()
                # Trust ratio scales the Adam step so parameter- and update-norm
                # stay comparable per layer; degenerate norms fall back to 1.
                if weight_norm == 0 or adam_norm == 0:
                    trust_ratio = 1
                else:
                    trust_ratio = weight_norm / adam_norm
                state['weight_norm'] = weight_norm
                state['adam_norm'] = adam_norm
                state['trust_ratio'] = trust_ratio
                if self.adam:
                    trust_ratio = 1

                p.data.add_(adam_step, alpha=-step_size * trust_ratio)

        return loss
"""Adapted from https://github.com/NUS-HPC-AI-Lab/LARS-ImageNet-PyTorch/blob/main/lars.py"""

from typing import Iterable

import torch
from torch.optim import Optimizer

from colossalai.registry import OPTIMIZERS


@OPTIMIZERS.register_module
class Lars(Optimizer):
    r"""Implements the LARS optimizer from `"Large batch training of convolutional networks"
    <https://arxiv.org/pdf/1708.03888.pdf>`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        momentum (float, optional): momentum factor (default: 0)
        eeta (float, optional): LARS coefficient as used in the paper (default: 1e-3)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        epsilon (float, optional): term added to the trust-ratio denominator for
            numerical stability (default: 0.0)
    """

    def __init__(
        self,
        params: Iterable[torch.nn.Parameter],
        lr=1e-3,
        momentum=0,
        eeta=1e-3,
        weight_decay=0,
        epsilon=0.0
    ) -> None:
        # NOTE(review): this rejects integer learning rates (e.g. lr=1) because of
        # the isinstance(lr, float) check; kept as-is to match upstream behavior.
        if not isinstance(lr, float) or lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError(
                "Invalid weight_decay value: {}".format(weight_decay))
        if eeta <= 0 or eeta > 1:
            raise ValueError("Invalid eeta value: {}".format(eeta))
        if epsilon < 0:
            raise ValueError("Invalid epsilon value: {}".format(epsilon))
        defaults = dict(lr=lr, momentum=momentum,
                        weight_decay=weight_decay, eeta=eeta, epsilon=epsilon, lars=True)

        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            eeta = group['eeta']
            lr = group['lr']
            lars = group['lars']
            eps = group['epsilon']

            for p in group['params']:
                if p.grad is None:
                    continue
                decayed_grad = p.grad
                scaled_lr = lr
                if lars:
                    w_norm = torch.norm(p)
                    g_norm = torch.norm(p.grad)
                    # BUGFIX: the condition previously used the Python ``and``
                    # operator on tensors, which only works for 0-dim tensors by
                    # accident (it short-circuits through __bool__ and forces a
                    # host sync on CUDA). Use elementwise ``&`` instead; for the
                    # 0-dim norms here the result is identical.
                    trust_ratio = torch.where(
                        (w_norm > 0) & (g_norm > 0),
                        eeta * w_norm / (g_norm + weight_decay * w_norm + eps),
                        torch.ones_like(w_norm)
                    )
                    trust_ratio.clamp_(0.0, 50)
                    scaled_lr *= trust_ratio.item()
                if weight_decay != 0:
                    decayed_grad = decayed_grad.add(p, alpha=weight_decay)
                # Clip the (decayed) gradient elementwise before momentum.
                decayed_grad = torch.clamp(decayed_grad, -10.0, 10.0)

                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(
                            decayed_grad).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(decayed_grad)
                    decayed_grad = buf

                p.add_(decayed_grad, alpha=-scaled_lr)

        return loss
from .utils import CPU_ADAM_CNT from .colossalai_optimizer import ColossalaiOptimizer from .fused_adam import FusedAdam from .fused_lamb import FusedLAMB from .fused_sgd import FusedSGD from .lamb import Lamb from .lars import Lars from .cpu_adam import CPUAdam from .hybrid_adam import HybridAdam __all__ = ['ColossalaiOptimizer', 'FusedLAMB', 'FusedAdam', 'FusedSGD', 'Lamb', 'Lars', 'CPUAdam', 'HybridAdam', 'CPU_ADAM_CNT']
import math

import torch

from colossalai.registry import OPTIMIZERS
from colossalai.nn.optimizer import CPU_ADAM_CNT


@OPTIMIZERS.register_module
class CPUAdam(torch.optim.Optimizer):
    """Implements Adam algorithm.

    Supports parameters updating on both GPU and CPU, depending on the device of
    parameters. But the parameters and gradients should be on the same device:

        * Parameters on CPU and gradients on CPU is allowed.
        * Parameters on GPU and gradients on GPU is allowed.
        * Parameters on GPU and gradients on CPU is **not** allowed.

    Requires ColossalAI to be installed via ``pip install .``.

    This version of CPU Adam accelerates parameters updating on CPU with SIMD.
    Support of AVX2 or AVX512 is required.

    The GPU part is implemented in a naive way.

    CPU Adam also supports the hybrid precision calculation, eg. fp32 parameters
    and fp16 gradients.

    :class:`colossalai.nn.optimizer.CPUAdam` may be used as a drop-in replacement
    for ``torch.optim.AdamW``, or ``torch.optim.Adam`` with ``adamw_mode=False``

    Adam was been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        model_params (iterable): iterable of parameters of dicts defining parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False) NOT SUPPORTED yet in CPUAdam!
        adamw_mode (boolean, optional): Apply L2 regularization or weight decay
            True for decoupled weight decay(also known as AdamW) (default: True)
        simd_log (boolean, optional): whether to show if you are using SIMD to
            accelerate. (default: False)

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    # Number of fp32 shards for per parameter
    # Param weight, grad, momentum and variance
    num_fp32_shards_per_param = 4

    def __init__(self,
                 model_params,
                 lr=1e-3,
                 bias_correction=True,
                 betas=(0.9, 0.999),
                 eps=1e-8,
                 weight_decay=0,
                 adamw_mode=True,
                 simd_log=False):

        default_args = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction)
        super(CPUAdam, self).__init__(model_params, default_args)
        # Unique id distinguishing this optimizer's state inside the C extension.
        self.opt_id = CPU_ADAM_CNT()
        self.adamw_mode = adamw_mode
        try:
            import cpu_adam
        except ImportError:
            raise ImportError('Please install colossalai from source code to use CPUAdam')
        self.cpu_adam_op = cpu_adam
        self.cpu_adam_op.create_adam(self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode, simd_log)

    def __del__(self):
        # BUGFIX: use getattr — if __init__ raised before binding cpu_adam_op
        # (e.g. the ImportError path), __del__ would itself raise AttributeError.
        if getattr(self, 'cpu_adam_op', None):
            self.cpu_adam_op.destroy_adam(self.opt_id)

    def torch_adam_update(self,
                          data,
                          grad,
                          exp_avg,
                          exp_avg_sq,
                          lr,
                          beta1,
                          beta2,
                          eps,
                          weight_decay,
                          bias_correction1,
                          bias_correction2,
                          use_adamw=False):
        """Plain-PyTorch Adam/AdamW update used for parameters living on CUDA."""
        # FIXME(ver217): remove the below line when replace torch adam with fused adam
        grad = grad.float()

        if weight_decay != 0:
            if use_adamw:
                # Decoupled weight decay (AdamW): shrink weights directly.
                data.mul_(1 - lr * weight_decay)
            else:
                # Classic L2 regularization: fold decay into the gradient.
                grad = grad.add(data, alpha=weight_decay)

        # Decay the first and second moment running average coefficient
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        # TODO(jiaruifang) dose not support amsgrad
        denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)

        step_size = lr / bias_correction1

        data.addcdiv_(exp_avg, denom, value=-step_size)

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step; CPU params go through the SIMD
        extension, CUDA params through :meth:`torch_adam_update`."""
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for _, group in enumerate(self.param_groups):
            for _, p in enumerate(group['params']):

                if p.grad is None:
                    continue

                state = self.state[p]

                target_device = p.device
                if len(state) == 0:
                    state['step'] = 0
                    # gradient momentums
                    state['exp_avg'] = torch.zeros_like(p.data, dtype=torch.float, device=target_device)
                    # gradient variances
                    state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=torch.float, device=target_device)

                state['step'] += 1
                beta1, beta2 = group['betas']

                if target_device.type == 'cpu':
                    assert p.data.numel() == p.grad.data.numel(), "parameter and gradient should have the same size"
                    assert state['exp_avg'].device.type == 'cpu', "exp_avg should stay on cpu"
                    assert state['exp_avg_sq'].device.type == 'cpu', "exp_avg should stay on cpu"
                    self.cpu_adam_op.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'],
                                                 group['weight_decay'], group['bias_correction'], p.data, p.grad.data,
                                                 state['exp_avg'], state['exp_avg_sq'], -1)
                elif target_device.type == 'cuda':
                    assert state['exp_avg'].device.type == 'cuda', "exp_avg should stay on cuda"
                    assert state['exp_avg_sq'].device.type == 'cuda', "exp_avg should stay on cuda"

                    bias_correction1 = 1 - beta1**state['step']
                    bias_correction2 = 1 - beta2**state['step']

                    # adam on cuda
                    self.torch_adam_update(p.data, p.grad.data, state['exp_avg'], state['exp_avg_sq'], group['lr'],
                                           beta1, beta2, group['eps'], group['weight_decay'], bias_correction1,
                                           bias_correction2, self.adamw_mode)
                else:
                    # IMPROVED: previously a bare ``raise RuntimeError`` with no message.
                    raise RuntimeError('CPUAdam only supports parameters on CPU or CUDA devices.')
        return loss
class CpuAdamCounter(object): """Used to record the total number of CPU Adam. We must use it to avoid hybrid cpu adam and cpu adam using the same id. """ def __init__(self): self.number = 0 def __call__(self): self.number += 1 return self.number - 1 CPU_ADAM_CNT = CpuAdamCounter()
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch import torch.nn as nn from torch import Tensor from torch.optim import Optimizer from colossalai.utils import clip_grad_norm_fp32 class ColossalaiOptimizer(Optimizer): def __init__(self, optim: Optimizer): self.optim = optim @property def param_groups(self): return self.optim.param_groups @property def defaults(self): return self.optim.defaults def add_param_group(self, *args, **kwargs): return self.optim.add_param_group(*args, **kwargs) def step(self, *args, **kwargs): return self.optim.step(*args, **kwargs) def zero_grad(self, *args, **kwargs): self.optim.zero_grad(*args, **kwargs) def load_state_dict(self, *args, **kwargs): self.optim.load_state_dict(*args, **kwargs) def state_dict(self): return self.optim.state_dict() def backward(self, loss: Tensor): loss.backward() def backward_by_grad(self, tensor: Tensor, grad: Tensor): torch.autograd.backward(tensors=tensor, grad_tensors=grad) def clip_grad_norm(self, model: nn.Module, max_norm: float): if max_norm > 0.0: clip_grad_norm_fp32(model.parameters(), max_norm)
# modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_lamb.py
import torch
from colossalai.registry import OPTIMIZERS
from colossalai.utils import multi_tensor_applier


@OPTIMIZERS.register_module
class FusedLAMB(torch.optim.Optimizer):
    """Implements LAMB algorithm.

    Currently GPU-only. Requires ColossalAI to be installed via ``pip install .``.

    This version of fused LAMB implements 2 fusions.

        * Fusion of the LAMB update's elementwise operations
        * A multi-tensor apply launch that batches the elementwise updates applied to
          all the model's parameters into one or a few kernel launches.

    :class:`colossalai.nn.optimizer.FusedLAMB`'s usage is identical to any ordinary Pytorch optimizer

    :class:`colossalai.nn.optimizer.FusedLAMB` may be used with or without Amp.

    LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its norm. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0.01)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            NOT SUPPORTED now! (default: False)
        adam_w_mode (boolean, optional): Apply L2 regularization or weight decay
            True for decoupled weight decay(also known as AdamW) (default: True)
        grad_averaging (bool, optional): whether apply (1-beta2) to grad when
            calculating running averages of gradient. (default: True)
        set_grad_none (bool, optional): whether set grad to None when zero_grad()
            method is called. (default: True)
        max_grad_norm (float, optional): value used to clip global grad norm
            (default: 1.0)
        use_nvlamb (boolean, optional): Apply adaptive learning rate to 0.0
            weight decay parameter (default: False)

    .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self,
                 params,
                 lr=1e-3,
                 bias_correction=True,
                 betas=(0.9, 0.999),
                 eps=1e-6,
                 weight_decay=0.01,
                 amsgrad=False,
                 adam_w_mode=True,
                 grad_averaging=True,
                 set_grad_none=True,
                 max_grad_norm=1.0,
                 use_nvlamb=False):
        if amsgrad:
            raise RuntimeError('FusedLAMB does not support the AMSGrad variant.')
        defaults = dict(lr=lr,
                        bias_correction=bias_correction,
                        betas=betas,
                        eps=eps,
                        weight_decay=weight_decay,
                        grad_averaging=grad_averaging,
                        max_grad_norm=max_grad_norm)
        super(FusedLAMB, self).__init__(params, defaults)
        if multi_tensor_applier.available:
            import colossal_C
            self.multi_tensor_l2norm = colossal_C.multi_tensor_l2norm
            # Skip buffer
            self._dummy_overflow_buf = torch.tensor([0],
                                                    dtype=torch.int,
                                                    device=self.param_groups[0]["params"][0].device)
            self.multi_tensor_lamb = colossal_C.multi_tensor_lamb
        else:
            raise RuntimeError('FusedLAMB requires cuda extensions')

        # The fused kernel takes int flags rather than Python bools.
        self.adam_w_mode = 1 if adam_w_mode else 0
        self.set_grad_none = set_grad_none
        self.use_nvlamb = use_nvlamb

    def zero_grad(self):
        # Honors the set_grad_none constructor flag: either drop grads entirely
        # or fall back to torch's zero-filling behavior.
        if self.set_grad_none:
            for group in self.param_groups:
                for p in group['params']:
                    p.grad = None
        else:
            super(FusedLAMB, self).zero_grad()

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        # create separate grad lists for fp32 and fp16 params
        g_all_32, g_all_16 = [], []
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                if p.dtype == torch.float32:
                    g_all_32.append(p.grad.data)
                elif p.dtype == torch.float16:
                    g_all_16.append(p.grad.data)
                else:
                    raise RuntimeError('FusedLAMB only support fp16 and fp32.')

        device = self.param_groups[0]["params"][0].device
        g_norm_32, g_norm_16 = torch.zeros(1, device=device), torch.zeros(1, device=device)
        # compute grad norm for two lists
        if len(g_all_32) > 0:
            g_norm_32 = multi_tensor_applier(self.multi_tensor_l2norm, self._dummy_overflow_buf, [g_all_32], False)[0]
        if len(g_all_16) > 0:
            g_norm_16 = multi_tensor_applier(self.multi_tensor_l2norm, self._dummy_overflow_buf, [g_all_16], False)[0]

        # blend two grad norms to get global grad norm
        # (l2 norm of the two partial norms == norm over all grads)
        global_grad_norm = multi_tensor_applier(self.multi_tensor_l2norm, self._dummy_overflow_buf,
                                                [[g_norm_32, g_norm_16]], False)[0]
        # NOTE(review): clipping threshold is read from self.defaults, so a
        # per-group 'max_grad_norm' override is ignored (matches upstream apex).
        max_grad_norm = self.defaults['max_grad_norm']

        for group in self.param_groups:
            bias_correction = 1 if group['bias_correction'] else 0
            beta1, beta2 = group['betas']
            grad_averaging = 1 if group['grad_averaging'] else 0

            # assume same step across group now to simplify things
            # per parameter step can be easily support by making it tensor, or pass list into kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1

            # create lists for multi-tensor apply
            g_16, p_16, m_16, v_16 = [], [], [], []
            g_32, p_32, m_32, v_32 = [], [], [], []

            for p in group['params']:
                if p.grad is None:
                    continue
                if p.grad.data.is_sparse:
                    raise RuntimeError(
                        'FusedLAMB does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                if p.dtype == torch.float16:
                    g_16.append(p.grad.data)
                    p_16.append(p.data)
                    m_16.append(state['exp_avg'])
                    v_16.append(state['exp_avg_sq'])
                elif p.dtype == torch.float32:
                    g_32.append(p.grad.data)
                    p_32.append(p.data)
                    m_32.append(state['exp_avg'])
                    v_32.append(state['exp_avg_sq'])
                else:
                    raise RuntimeError('FusedLAMB only support fp16 and fp32.')

            if (len(g_16) > 0):
                multi_tensor_applier(self.multi_tensor_lamb, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16],
                                     group['lr'], beta1, beta2, group['eps'], group['step'], bias_correction,
                                     group['weight_decay'], grad_averaging, self.adam_w_mode, global_grad_norm,
                                     max_grad_norm, self.use_nvlamb)
            if (len(g_32) > 0):
                multi_tensor_applier(self.multi_tensor_lamb, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32],
                                     group['lr'], beta1, beta2, group['eps'], group['step'], bias_correction,
                                     group['weight_decay'], grad_averaging, self.adam_w_mode, global_grad_norm,
                                     max_grad_norm, self.use_nvlamb)

        return loss
import torch

from colossalai.utils import multi_tensor_applier
from colossalai.registry import OPTIMIZERS
from colossalai.nn.optimizer import CPU_ADAM_CNT


@OPTIMIZERS.register_module
class HybridAdam(torch.optim.Optimizer):
    """Implements Adam algorithm.

    Supports parameters updating on both GPU and CPU, depending on the device of
    parameters. But the parameters and gradients should be on the same device:

        * Parameters on CPU and gradients on CPU is allowed.
        * Parameters on GPU and gradients on GPU is allowed.
        * Parameters on GPU and gradients on CPU is **not** allowed.

    Requires ColossalAI to be installed via ``pip install .``

    This version of Hybrid Adam is a hybrid of CPUAdam and FusedAdam.

        * For parameters updating on CPU, it uses CPUAdam.
        * For parameters updating on GPU, it uses FusedAdam.
        * Hybrid precision calculation of fp16 and fp32 is supported, eg fp32
          parameters and fp16 gradients.

    :class:`colossalai.nn.optimizer.HybridAdam` may be used as a drop-in replacement
    for ``torch.optim.AdamW``, or ``torch.optim.Adam`` with ``adamw_mode=False``

    Adam was been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        model_params (iterable): iterable of parameters of dicts defining parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False) NOT SUPPORTED yet in CPUAdam!
        adamw_mode (boolean, optional): Apply L2 regularization or weight decay
            True for decoupled weight decay(also known as AdamW) (default: True)
        simd_log (boolean, optional): whether to show if you are using SIMD to
            accelerate. (default: False)

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    # Number of fp32 shards for per parameter
    # Param weight, grad, momentum and variance
    num_fp32_shards_per_param = 4

    def __init__(self,
                 model_params,
                 lr=1e-3,
                 bias_correction=True,
                 betas=(0.9, 0.999),
                 eps=1e-8,
                 weight_decay=0,
                 adamw_mode=True,
                 simd_log=False):

        default_args = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction)
        super(HybridAdam, self).__init__(model_params, default_args)
        # Unique id distinguishing this optimizer's state inside the C extension.
        self.opt_id = CPU_ADAM_CNT()
        self.adamw_mode = adamw_mode
        try:
            import cpu_adam
            import colossal_C
        except ImportError:
            raise ImportError('Please install colossalai from source code to use HybridAdam')

        self.cpu_adam_op = cpu_adam
        self.cpu_adam_op.create_adam(self.opt_id, lr, betas[0], betas[1], eps, weight_decay, adamw_mode, simd_log)

        self.gpu_adam_op = colossal_C.multi_tensor_adam
        self._dummy_overflow_buf = torch.cuda.IntTensor([0])

    def __del__(self):
        # BUGFIX: use getattr — if __init__ raised before binding cpu_adam_op
        # (e.g. the ImportError path), __del__ would itself raise AttributeError.
        if getattr(self, 'cpu_adam_op', None):
            self.cpu_adam_op.destroy_adam(self.opt_id)

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step; CPU params are updated one by one
        through the SIMD extension, CUDA params are batched into one fused call."""
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for _, group in enumerate(self.param_groups):
            # CUDA tensors are collected here and updated in one fused launch.
            g_l, p_l, m_l, v_l = [], [], [], []
            group_step = 0
            for _, p in enumerate(group['params']):
                if p.grad is None:
                    continue

                state = self.state[p]

                target_device = p.device
                if len(state) == 0:
                    state['step'] = 0
                    # gradient momentums
                    state['exp_avg'] = torch.zeros_like(p.data, dtype=torch.float, device=target_device)
                    # gradient variances
                    state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=torch.float, device=target_device)

                state['step'] += 1
                # NOTE: assumes all params in a group share the same step count.
                group_step = state['step']
                beta1, beta2 = group['betas']

                if target_device.type == 'cpu':
                    assert state['exp_avg'].device.type == 'cpu', "exp_avg should stay on cpu"
                    assert state['exp_avg_sq'].device.type == 'cpu', "exp_avg should stay on cpu"
                    self.cpu_adam_op.adam_update(self.opt_id, state['step'], group['lr'], beta1, beta2, group['eps'],
                                                 group['weight_decay'], group['bias_correction'], p.data, p.grad.data,
                                                 state['exp_avg'], state['exp_avg_sq'], -1)

                elif target_device.type == 'cuda':
                    assert state['exp_avg'].device.type == 'cuda', "exp_avg should stay on cuda"
                    assert state['exp_avg_sq'].device.type == 'cuda', "exp_avg should stay on cuda"

                    # record the state by group and update at once
                    g_l.append(p.grad.data)
                    p_l.append(p.data)
                    m_l.append(state['exp_avg'])
                    v_l.append(state['exp_avg_sq'])

                else:
                    # IMPROVED: previously a bare ``raise RuntimeError`` with no message.
                    raise RuntimeError('HybridAdam only supports parameters on CPU or CUDA devices.')

            if len(g_l) > 0:
                adamw_mode = 1 if self.adamw_mode else 0
                bias_correction = 1 if group['bias_correction'] else 0
                multi_tensor_applier(self.gpu_adam_op, self._dummy_overflow_buf, [g_l, p_l, m_l, v_l], group['lr'],
                                     group['betas'][0], group['betas'][1], group['eps'], group_step, adamw_mode,
                                     bias_correction, group['weight_decay'])
        return loss
# modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_sgd.py
import torch
from torch.optim.optimizer import Optimizer, required

from colossalai.registry import OPTIMIZERS
from colossalai.utils import multi_tensor_applier


@OPTIMIZERS.register_module
class FusedSGD(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum).

    Currently GPU-only. Requires ColossalAI to be installed via ``pip install .``.

    This version of fused SGD implements 2 fusions.

        * Fusion of the SGD update's elementwise operations
        * A multi-tensor apply launch that batches the elementwise updates applied to
          all the model's parameters into one or a few kernel launches.

    :class:`colossalai.nn.optimizer.FusedSGD` may be used as a drop-in replacement
    for ``torch.optim.SGD``

    :class:`colossalai.nn.optimizer.FusedSGD` may be used with or without Amp.

    Nesterov momentum is based on the formula from
    `On the importance of initialization and momentum in deep learning`__.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)

    __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf

    .. note::
        The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et. al. and implementations in some other frameworks.

        Considering the specific case of Momentum, the update can be written as

        .. math::
                  v = \rho * v + g \\
                  p = p - lr * v

        where p, g, v and :math:`\rho` denote the parameters, gradient,
        velocity, and momentum respectively.

        This is in contrast to Sutskever et. al. and
        other frameworks which employ an update of the form

        .. math::
             v = \rho * v + lr * g \\
             p = p - v

        The Nesterov version is analogously modified.
    """

    def __init__(self,
                 params,
                 lr=required,
                 momentum=0,
                 dampening=0,
                 weight_decay=0,
                 nesterov=False,
                 wd_after_momentum=False,
                 materialize_master_grads=True,
                 set_grad_none=False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(FusedSGD, self).__init__(params, defaults)

        # Whether weight decay is applied after the momentum update in the kernel.
        self.wd_after_momentum = wd_after_momentum
        self.materialize_master_grads = materialize_master_grads
        # Loss-scale bookkeeping; reset to 1.0 after every step.
        self.most_recent_scale = 1.0
        self.scale_set_by_backward = False
        self.set_grad_none = set_grad_none

        if multi_tensor_applier.available:
            import colossal_C
            # Skip buffer
            self._dummy_overflow_buf = torch.tensor([0],
                                                    dtype=torch.int,
                                                    device=self.param_groups[0]["params"][0].device)
            self.multi_tensor_sgd = colossal_C.multi_tensor_sgd
        else:
            raise RuntimeError('FusedSGD requires cuda extensions')

    def __setstate__(self, state):
        super(FusedSGD, self).__setstate__(state)
        # Older checkpoints may predate the 'nesterov' key.
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def zero_grad(self):
        # Honors the set_grad_none constructor flag: either drop grads entirely
        # or fall back to torch's zero-filling behavior.
        if self.set_grad_none:
            for group in self.param_groups:
                for p in group['params']:
                    p.grad = None
        else:
            super(FusedSGD, self).zero_grad()

    def get_momentums(self, params):
        """Return (momentum buffers, first_run flag) for the given params.

        NOTE(review): ``first_run`` is reassigned on every iteration, so the
        returned flag reflects only the LAST parameter inspected — the code
        implicitly assumes all params in a launch set are initialized together
        (same as upstream apex).
        """
        momentums = []
        first_run = True
        for p in params:
            param_state = self.state[p]
            # torch.optim.SGD initializes momentum in the main loop, we have
            # to do it here, and track whether or not we've done so, so that
            # momentum application can be skipped in the main kernel.
            if 'momentum_buffer' not in param_state:
                first_run = True
                buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
                momentums.append(buf)
            else:
                first_run = False
                momentums.append(param_state['momentum_buffer'])
        return momentums, first_run

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        # When Amp manages master weights, it attaches an _amp_stash holding
        # fp32 master copies of fp16 params; detect that configuration here.
        explicit_master_params = (hasattr(self, "_amp_stash") and hasattr(self._amp_stash, "fp32_from_fp16_groups"))

        for gid, group in enumerate(self.param_groups):
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            # For each group, there are 3 possible combinations we need to consider:
            # grad_type, param_to_update_type, momentum_type, requires_fp16_model_copy
            # 1. fp16, fp16, fp16, No
            # 2. fp32, fp32, fp32, No
            # 3. fp16, fp32, fp32, Yes

            first_runs = [True, True]

            # I think a bit of code divergence in exchange for naming clarity is worthwhile
            if explicit_master_params:
                stash = self._amp_stash

                fp32_params = [p for p in stash.fp32_from_fp32_groups[gid] if p.grad is not None]
                fp32_grads = [p.grad for p in stash.fp32_from_fp32_groups[gid] if p.grad is not None]
                fp32_momentums, first_runs[1] = self.get_momentums(fp32_params)

                if self.materialize_master_grads:
                    # Case 3 with master grads materialized: update fp32 masters,
                    # and pass fp16 model params so the kernel can copy back.
                    fp16_model_params = [
                        p for i, p in enumerate(stash.fp16_groups[gid])
                        if stash.fp32_from_fp16_groups[gid][i].grad is not None
                    ]
                    fp32_from_fp16_grads = [p.grad for p in stash.fp32_from_fp16_groups[gid] if p.grad is not None]
                    fp32_from_fp16_params = [p for p in stash.fp32_from_fp16_groups[gid] if p.grad is not None]
                    fp32_from_fp16_momentums, first_runs[0] = self.get_momentums(fp32_from_fp16_params)

                    fp16_set = [
                        fp32_from_fp16_grads, fp32_from_fp16_params, fp32_from_fp16_momentums, fp16_model_params
                    ]
                else:
                    # Case 3 without materialized master grads: grads live on the
                    # fp16 model params, masters are selected by matching index.
                    fp16_model_params = [p for p in stash.fp16_groups[gid] if p.grad is not None]
                    fp16_model_grads = [p.grad for p in stash.fp16_groups[gid] if p.grad is not None]
                    fp32_from_fp16_params = [
                        p for i, p in enumerate(stash.fp32_from_fp16_groups[gid])
                        if stash.fp16_groups[gid][i].grad is not None
                    ]
                    fp32_from_fp16_momentums, first_runs[0] = self.get_momentums(fp32_from_fp16_params)

                    fp16_set = [fp16_model_grads, fp32_from_fp16_params, fp32_from_fp16_momentums, fp16_model_params]

                launch_sets = [fp16_set, [fp32_grads, fp32_params, fp32_momentums]]
            else:
                # Cases 1 and 2: params update in their own precision.
                fp16_params = [p for p in group['params'] if (p.dtype == torch.float16 and p.grad is not None)]
                fp16_grads = [p.grad for p in group['params'] if (p.dtype == torch.float16 and p.grad is not None)]
                fp16_momentums, first_runs[0] = self.get_momentums(fp16_params)

                fp32_params = [p for p in group['params'] if (p.dtype == torch.float32 and p.grad is not None)]
                fp32_grads = [p.grad for p in group['params'] if (p.dtype == torch.float32 and p.grad is not None)]
                fp32_momentums, first_runs[1] = self.get_momentums(fp32_params)

                launch_sets = [[fp16_grads, fp16_params, fp16_momentums], [fp32_grads, fp32_params, fp32_momentums]]

            for s, (launch_set, first_run) in enumerate(zip(launch_sets, first_runs)):
                assert len(launch_set[0]) == len(launch_set[1])
                assert len(launch_set[0]) == len(launch_set[2])
                if len(launch_set[0]) > 0:
                    multi_tensor_applier(self.multi_tensor_sgd, self._dummy_overflow_buf, launch_set, weight_decay,
                                         momentum, dampening, group['lr'], nesterov, first_run, self.wd_after_momentum,
                                         1.0 / self.most_recent_scale)

        self.most_recent_scale = 1.0
        self.scale_set_by_backward = False

        return loss
import torch.nn as nn
from colossalai.registry import LOSSES
from torch.nn.modules.loss import _Loss

from colossalai.context.moe_context import MOE_CONTEXT


@LOSSES.register_module
class MoeCrossEntropyLoss(_Loss):
    r"""``torch.nn.CrossEntropyLoss`` combined with the MoE auxiliary loss.

    Args:
        aux_weight (float, optional): Weight of the auxiliary loss in the total
            loss. Defaults to 0.01.

    The remaining ``args`` and ``kwargs`` are forwarded to
    ``torch.nn.CrossEntropyLoss`` and may include::

        weight (Tensor, optional)
        size_average (bool, optional)
        ignore_index (int, optional)
        reduce (bool, optional)
        reduction (str, optional)
        label_smoothing (float, optional)

    More details about ``args``, ``kwargs`` and ``torch.nn.functional.cross_entropy`` could be found in
    `Cross_entropy <https://pytorch.org/docs/stable/generated/torch.nn.functional.cross_entropy.html#torch.nn.functional.cross_entropy>`_.
    """

    def __init__(self, aux_weight: float = 0.01, *args, **kwargs):
        super().__init__()
        self.loss = nn.CrossEntropyLoss(*args, **kwargs)
        self.aux_weight = aux_weight

    def forward(self, *args):
        """Return cross entropy plus the weighted MoE auxiliary loss.

        The ``args`` should at least include parameters below:

            input (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            target (:class:`torch.tensor`): Ground truth class indices or class probabilities.
        """
        # The auxiliary (load-balancing) loss is accumulated by the MoE layers
        # during forward and fetched from the shared MoE context here.
        return self.loss(*args) + self.aux_weight * MOE_CONTEXT.get_loss()


@LOSSES.register_module
class MoeLoss(_Loss):
    """Wrap an arbitrary loss module and add the weighted MoE auxiliary loss.

    Args:
        aux_weight (float): Weight of the auxiliary loss in the total loss.
        loss_fn (``Callable``): Loss-module class to instantiate.
        args (list): Positional arguments passed to ``loss_fn``.
        kwargs (dict): Keyword arguments passed to ``loss_fn``.
    """

    def __init__(self, aux_weight: float, loss_fn, *args, **kwargs):
        super().__init__()
        self.loss_fn = loss_fn(*args, **kwargs)
        self.aux_weight = aux_weight

    def forward(self, *args, **kwargs):
        """Return the wrapped loss plus the weighted MoE auxiliary loss.

        The ``args`` and ``kwargs`` should at least include parameters below:

            input (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            target (:class:`torch.tensor`): Ground truth class indices or class probabilities.

        Note:
            The ``args`` and ``kwargs`` may include different parameters
            varying with different loss function.
        """
        return self.loss_fn(*args, **kwargs) + self.aux_weight * MOE_CONTEXT.get_loss()
from colossalai.global_variables import tensor_parallel_env as env from colossalai.nn.layer.utils import get_tensor_parallel_mode from torch import nn from torch.nn.modules.loss import * from torch.nn.modules.loss import _Loss from .loss_1d import VocabParallelCrossEntropyLoss1D from .loss_2d import CrossEntropyLoss2D, VocabParallelCrossEntropyLoss2D from .loss_2p5d import CrossEntropyLoss2p5D, VocabParallelCrossEntropyLoss2p5D from .loss_3d import CrossEntropyLoss3D, VocabParallelCrossEntropyLoss3D from .loss_moe import MoeCrossEntropyLoss, MoeLoss _parallel_cross_entropy = { '2d': CrossEntropyLoss2D, '2.5d': CrossEntropyLoss2p5D, '3d': CrossEntropyLoss3D, } _vocab_parallel_cross_entropy = { '1d': VocabParallelCrossEntropyLoss1D, '2d': VocabParallelCrossEntropyLoss2D, '2.5d': VocabParallelCrossEntropyLoss2p5D, '3d': VocabParallelCrossEntropyLoss3D, } class CrossEntropyLoss(_Loss): def __init__(self, reduction: bool = True, *args, **kwargs): super().__init__() tensor_parallel = get_tensor_parallel_mode() if tensor_parallel is not None and env.vocab_parallel: self.loss = _vocab_parallel_cross_entropy[tensor_parallel](reduction=reduction, *args, **kwargs) elif tensor_parallel is None or tensor_parallel == '1d': reduction = 'mean' if reduction else 'none' self.loss = nn.CrossEntropyLoss(reduction=reduction, *args, **kwargs) else: self.loss = _parallel_cross_entropy[tensor_parallel](reduction=reduction, *args, **kwargs) def forward(self, *args): return self.loss(*args)
import torch
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.registry import LOSSES
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.modules.loss import _Loss


class _VocabParallelCrossEntropy1D(torch.autograd.Function):
    """Cross entropy for logits whose vocabulary dimension is sharded across
    the 1D tensor-parallel group (adapted from Megatron-LM's mpu.cross_entropy).
    """

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, vocab_parallel_logits, targets):
        # Maximum value along vocab dimension across all GPUs.
        logits_max = torch.max(vocab_parallel_logits, dim=-1)[0]
        torch.distributed.all_reduce(logits_max,
                                     op=torch.distributed.ReduceOp.MAX,
                                     group=gpc.get_group(ParallelMode.PARALLEL_1D))
        # Subtract the maximum value (numerical stability).
        # NOTE: this mutates the input tensor in place.
        vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1))

        # Get the partition's vocab indices.
        partition_vocab_size = vocab_parallel_logits.size()[-1]
        rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D)
        vocab_start_index = partition_vocab_size * rank
        vocab_end_index = vocab_start_index + partition_vocab_size

        # Create a mask of valid vocab ids (1 means it needs to be masked):
        # targets outside [vocab_start_index, vocab_end_index) belong to
        # another rank's partition.
        target_mask = (targets < vocab_start_index) | (targets >= vocab_end_index)
        masked_target = targets.clone() - vocab_start_index
        masked_target[target_mask] = 0

        # Get predicted-logits = logits[target].
        # For simplicity, we convert logits to a 2-D tensor with size
        # [*, partition-vocab-size] and target to a 1-D tensor of size [*].
        logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size)
        masked_target_1d = masked_target.view(-1)
        arange_1d = torch.arange(start=0, end=logits_2d.size()[0], device=logits_2d.device)
        predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
        predicted_logits_1d = predicted_logits_1d.clone().contiguous()
        predicted_logits = predicted_logits_1d.view_as(targets)
        # Zero out positions whose target lives on another partition; the
        # all-reduce below sums in the value from the owning rank.
        predicted_logits[target_mask] = 0.0
        # All reduce is needed to get the chunks from other GPUs.
        torch.distributed.all_reduce(predicted_logits,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=gpc.get_group(ParallelMode.PARALLEL_1D))

        # Sum of exponential of logits along vocab dimension across all GPUs.
        # exp_logits aliases the (already max-shifted) input buffer.
        exp_logits = vocab_parallel_logits
        torch.exp(vocab_parallel_logits, out=exp_logits)
        sum_exp_logits = exp_logits.sum(dim=-1)
        torch.distributed.all_reduce(sum_exp_logits,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=gpc.get_group(ParallelMode.PARALLEL_1D))

        # Loss = log(sum(exp(logits))) - predicted-logit.
        loss = torch.log(sum_exp_logits) - predicted_logits

        # Store softmax, target-mask and masked-target for backward pass.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)

        return loss

    @staticmethod
    @custom_bwd
    def backward(ctx, grad_output):
        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target_1d = ctx.saved_tensors

        # All the inputs have softmax as their gradient.
        grad_input = softmax
        # For simplicity, work with the 2D gradient.
        partition_vocab_size = softmax.size()[-1]
        grad_2d = grad_input.view(-1, partition_vocab_size)

        # Subtract 1 from the gradient at target positions owned by this
        # partition (softmax - one_hot(target)).
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device)
        grad_2d[arange_1d, masked_target_1d] -= (1.0 - target_mask.view(-1).float())

        # Finally elementwise multiplication with the output gradients.
        grad_input.mul_(grad_output.unsqueeze(dim=-1))

        return grad_input, None


@LOSSES.register_module
class VocabParallelCrossEntropyLoss1D(_Loss):
    """Vocab parallel cross entropy loss for 1D parallelism.

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.
    """

    def __init__(self, reduction=True):
        super().__init__()
        self.reduction_mean = reduction

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.
        """
        loss = _VocabParallelCrossEntropy1D.apply(logits, targets)
        if self.reduction_mean:
            loss = loss.mean()
        return loss
import torch
import torch.distributed as dist
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_2d import reduce_by_batch_2d, split_batch_2d
from colossalai.nn.layer.parallel_2d._utils import assert_summa_initialization
from colossalai.registry import LOSSES
from colossalai.utils import get_current_device
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.functional import cross_entropy
from torch.nn.modules.loss import _Loss


@LOSSES.register_module
class CrossEntropyLoss2D(_Loss):
    r"""Cross entropy loss for 2D parallelism

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.

    The ``args`` and ``kwargs`` should include parameters below:
    ::

        weight (Tensor, optional)
        size_average (bool, optional)
        ignore_index (int, optional)
        reduce (bool, optional)
        label_smoothing (float, optional)

    More details about ``args``, ``kwargs`` and ``torch.nn.functional.cross_entropy`` could be found in
    `Cross_entropy <https://pytorch.org/docs/stable/generated/torch.nn.functional.cross_entropy.html#torch.nn.functional.cross_entropy>`_.
    """

    def __init__(self, reduction=True, *args, **kwargs):
        super().__init__()
        assert_summa_initialization()
        self.reduction_mean = reduction
        self.loss_args = args
        self.loss_kwargs = kwargs

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.

        Returns:
            float: the loss between logits and targets.
        """
        # Each rank holds a slice of the batch; split the targets the same way.
        targets = split_batch_2d(targets)
        loss = cross_entropy(logits, targets, reduction='none', *self.loss_args, **self.loss_kwargs)
        if self.reduction_mean:
            loss = loss.mean()
            # Average the per-rank means over the batch-parallel group.
            loss = reduce_by_batch_2d(loss, True)
        return loss


class _VocabParallelCrossEntropy2D(torch.autograd.Function):
    ### Modified based on megatron.mpu.cross_entropy ###

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, logits, targets):
        # logits: [b/q, h/q]
        # labels: [b/q]
        # loss: [b/q]
        # vocab_parallel_logits: [b/q, s, v/q]
        # target: [b/q, s]
        logits_max = torch.max(logits, dim=-1)[0]
        torch.distributed.all_reduce(logits_max,
                                     op=torch.distributed.ReduceOp.MAX,
                                     group=gpc.get_group(ParallelMode.PARALLEL_2D_ROW))
        # Subtract the maximum value for numerical stability.
        logits = logits - logits_max.unsqueeze(dim=-1)

        vocab_size = logits.size(-1)
        rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
        vocab_start = rank * (vocab_size)
        vocab_end = (rank + 1) * (vocab_size) - 1

        # Mask targets that live in another rank's vocab partition.
        target_mask = (targets < vocab_start) | (targets > vocab_end)
        masked_target = targets.clone() - vocab_start
        masked_target[target_mask] = 0
        # FIX: build the index tensor on the current device — the previous
        # device-less torch.arange produced a CPU tensor, mismatching CUDA
        # logits (the backward pass and the 3D implementation already use
        # get_current_device()).
        arange_1d = torch.arange(start=0, end=logits.size()[0], device=get_current_device())
        predicted_logits = logits[arange_1d, masked_target]
        # Zero positions owned by other ranks; the all-reduce sums in the
        # value from the owning rank.
        predicted_logits[target_mask] = 0.
        dist.all_reduce(predicted_logits, group=gpc.get_group(ParallelMode.PARALLEL_2D_ROW))

        # Loss = log(sum(exp(logits))) - predicted-logit.
        exp_logits = torch.exp(logits)
        sum_exp_logits = exp_logits.sum(dim=1)
        dist.all_reduce(sum_exp_logits, group=gpc.get_group(ParallelMode.PARALLEL_2D_ROW))
        loss = torch.log(sum_exp_logits) - predicted_logits

        # Save the softmax (and masks) for the backward pass.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        ctx.save_for_backward(exp_logits, target_mask, masked_target)

        return loss

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad):
        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target = ctx.saved_tensors

        # All the inputs have softmax as their gradient.
        grad_input = softmax
        # For simplicity, work with the 2D gradient.
        partition_vocab_size = softmax.size()[-1]
        grad_2d = grad_input.view(-1, partition_vocab_size)

        # Subtract 1 at target positions owned by this partition
        # (softmax - one_hot(target)).
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=get_current_device())
        grad_2d[arange_1d, masked_target] -= (1.0 - target_mask.view(-1).float())

        # Finally elementwise multiplication with the output gradients.
        grad_input.mul_(output_grad.unsqueeze(dim=-1))

        return grad_input, None


@LOSSES.register_module
class VocabParallelCrossEntropyLoss2D(_Loss):
    """Vocab parallel cross entropy loss for 2D parallelism.

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.
    """

    def __init__(self, reduction=True):
        super().__init__()
        self.reduction_mean = reduction

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.
        """
        targets = split_batch_2d(targets)
        loss = _VocabParallelCrossEntropy2D.apply(
            logits,
            targets,
        )
        if self.reduction_mean:
            loss = loss.mean()
            loss = reduce_by_batch_2d(loss, True)
        return loss
import torch
import torch.distributed as dist
from colossalai.constants import INPUT_GROUP_3D, WEIGHT_GROUP_3D, OUTPUT_GROUP_3D
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_3d import reduce_by_batch_3d, split_tensor_3d
from colossalai.nn.layer.parallel_3d._utils import get_parallel_mode_from_env
from colossalai.registry import LOSSES
from colossalai.utils import get_current_device
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.functional import cross_entropy
from torch.nn.modules.loss import _Loss


@LOSSES.register_module
class CrossEntropyLoss3D(_Loss):
    r"""Cross entropy loss for 3D parallelism.

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.

    The ``args`` and ``kwargs`` should include parameters below:
    ::

        weight (Tensor, optional)
        size_average (bool, optional)
        ignore_index (int, optional)
        reduce (bool, optional)
        label_smoothing (float, optional)

    More details about ``args``, ``kwargs`` and ``torch.nn.functional.cross_entropy`` could be found in
    `Cross_entropy <https://pytorch.org/docs/stable/generated/torch.nn.functional.cross_entropy.html#torch.nn.functional.cross_entropy>`_.
    """

    def __init__(self, reduction=True, *args, **kwargs):
        super().__init__()
        self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
        self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
        self.reduction_mean = reduction
        self.loss_args = args
        self.loss_kwargs = kwargs

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.
        """
        # The batch is split along both the weight- and input-parallel axes.
        targets = split_tensor_3d(targets, 0, self.weight_parallel_mode)
        targets = split_tensor_3d(targets, 0, self.input_parallel_mode)
        loss = cross_entropy(logits, targets, reduction='none', *self.loss_args, **self.loss_kwargs)
        if self.reduction_mean:
            loss = loss.mean()
            # Average the per-rank means over both batch-parallel groups.
            loss = reduce_by_batch_3d(loss, self.input_parallel_mode, self.weight_parallel_mode, True)
        return loss


class _VocabParallelCrossEntropy3D(torch.autograd.Function):
    # Adapted from megatron.mpu.cross_entropy
    # loss[i] = -logits[i][targets] + log(sum(exp(logits[i])))

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, logits, targets, output_parallel_mode):
        # logits: [b/q^2, c/q]
        # labels: [b/q^2]
        # loss: [b/q^2]
        logits_max = torch.max(logits, dim=-1)[0]
        dist.all_reduce(logits_max, op=torch.distributed.ReduceOp.MAX, group=gpc.get_group(output_parallel_mode))
        # Subtract the maximum value (numerical stability).
        logits = logits - logits_max.unsqueeze(dim=-1)

        vocab_size_per_partition = logits.size()[-1]
        rank = gpc.get_local_rank(output_parallel_mode)
        vocab_start = rank * vocab_size_per_partition
        vocab_end = (rank + 1) * vocab_size_per_partition - 1

        # loss[i] = 0 if targets[i] < vocab_start or targets[i] > vocab_end
        target_mask = (targets < vocab_start) | (targets > vocab_end)
        masked_target = targets.clone() - vocab_start
        masked_target[target_mask] = 0
        arange_1d = torch.arange(start=0, end=logits.size()[0], device=get_current_device())
        predicted_logits = logits[arange_1d, masked_target]
        predicted_logits = predicted_logits.clone().contiguous().view_as(targets)
        # Zero positions owned by other ranks; the all-reduce sums in the
        # value from the owning rank.
        predicted_logits[target_mask] = 0.
        dist.all_reduce(predicted_logits, group=gpc.get_group(output_parallel_mode))

        # Loss = log(sum(exp(logits))) - predicted-logit.
        exp_logits = torch.exp(logits)
        sum_exp_logits = exp_logits.sum(dim=-1)
        dist.all_reduce(sum_exp_logits, group=gpc.get_group(output_parallel_mode))
        loss = torch.log(sum_exp_logits) - predicted_logits

        # Save the softmax (and masks) for the backward pass.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        ctx.save_for_backward(exp_logits, target_mask, masked_target)

        return loss

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad):
        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target = ctx.saved_tensors

        # All the inputs have softmax as their gradient.
        input_grad = softmax
        # For simplicity, work with the 2D gradient.
        partition_vocab_size = softmax.size()[-1]
        grad_2d = input_grad.view(-1, partition_vocab_size)

        # Subtract 1 at target positions owned by this partition
        # (softmax - one_hot(target)).
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=get_current_device())
        grad_2d[arange_1d, masked_target] -= (1.0 - target_mask.view(-1).float())
        input_grad.mul_(output_grad.unsqueeze(dim=-1))

        return input_grad, None, None, None


@LOSSES.register_module
class VocabParallelCrossEntropyLoss3D(_Loss):
    """Vocab parallel cross entropy loss for 3D parallelism.

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.
    """

    def __init__(self, reduction=True):
        super().__init__()
        self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
        self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
        self.output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
        self.reduction_mean = reduction

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.
        """
        # The batch is split along both the weight- and input-parallel axes.
        targets = split_tensor_3d(targets, 0, self.weight_parallel_mode)
        targets = split_tensor_3d(targets, 0, self.input_parallel_mode)
        loss = _VocabParallelCrossEntropy3D.apply(logits, targets, self.output_parallel_mode)
        if self.reduction_mean:
            loss = loss.mean()
            loss = reduce_by_batch_3d(loss, self.input_parallel_mode, self.weight_parallel_mode, True)
        return loss
import torch
import torch.distributed as dist
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.nn.layer.parallel_2p5d import reduce_by_batch_2p5d, split_batch_2p5d
from colossalai.nn.layer.parallel_2p5d._utils import assert_tesseract_initialization
from colossalai.registry import LOSSES
from colossalai.utils import get_current_device
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.functional import cross_entropy
from torch.nn.modules.loss import _Loss


@LOSSES.register_module
class CrossEntropyLoss2p5D(_Loss):
    r"""Cross entropy loss for 2.5D parallelism

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.

    The ``args`` and ``kwargs`` should include parameters below:
    ::

        weight (Tensor, optional)
        size_average (bool, optional)
        ignore_index (int, optional)
        reduce (bool, optional)
        label_smoothing (float, optional)

    More details about ``args``, ``kwargs`` and ``torch.nn.functional.cross_entropy`` could be found in
    `Cross_entropy <https://pytorch.org/docs/stable/generated/torch.nn.functional.cross_entropy.html#torch.nn.functional.cross_entropy>`_.
    """

    def __init__(self, reduction=True, *args, **kwargs):
        super().__init__()
        assert_tesseract_initialization()
        self.reduction_mean = reduction
        self.loss_args = args
        self.loss_kwargs = kwargs

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.
        """
        # Each rank holds a slice of the batch; split the targets the same way.
        targets = split_batch_2p5d(targets)
        loss = cross_entropy(logits, targets, reduction='none', *self.loss_args, **self.loss_kwargs)
        if self.reduction_mean:
            loss = loss.mean()
            # Average the per-rank means over the batch-parallel group.
            loss = reduce_by_batch_2p5d(loss, True)
        return loss


class _VocabParallelCrossEntropy2p5D(torch.autograd.Function):
    ### Modified based on megatron.mpu.cross_entropy ###

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, logits, targets):
        # logits: [b/dq, h/q]
        # loss: [b/dq]
        # targets: [b/dq, h/q]
        logits_max = torch.max(logits, dim=-1)[0]
        torch.distributed.all_reduce(logits_max,
                                     op=torch.distributed.ReduceOp.MAX,
                                     group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW))
        # Subtract the maximum value for numerical stability.
        logits = logits - logits_max.unsqueeze(dim=-1)

        vocab_size = logits.size(-1)
        rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
        vocab_start = rank * (vocab_size)
        vocab_end = (rank + 1) * (vocab_size) - 1

        # Mask targets that live in another rank's vocab partition.
        target_mask = (targets < vocab_start) | (targets > vocab_end)
        masked_target = targets.clone() - vocab_start
        masked_target[target_mask] = 0
        # FIX: build the index tensor on the current device — the previous
        # device-less torch.arange produced a CPU tensor, mismatching CUDA
        # logits (the backward pass already uses get_current_device()).
        arange_1d = torch.arange(start=0, end=logits.size()[0], device=get_current_device())
        predicted_logits = logits[arange_1d, masked_target]
        # Zero positions owned by other ranks; the all-reduce sums in the
        # value from the owning rank.
        predicted_logits[target_mask] = 0.
        dist.all_reduce(predicted_logits, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW))

        # Loss = log(sum(exp(logits))) - predicted-logit.
        exp_logits = torch.exp(logits)
        sum_exp_logits = exp_logits.sum(dim=1)
        dist.all_reduce(sum_exp_logits, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW))
        loss = torch.log(sum_exp_logits) - predicted_logits

        # Save the softmax (and masks) for the backward pass.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        ctx.save_for_backward(exp_logits, target_mask, masked_target)

        return loss

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad):
        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target = ctx.saved_tensors

        # All the inputs have softmax as their gradient.
        grad_input = softmax
        # For simplicity, work with the 2D gradient.
        partition_vocab_size = softmax.size()[-1]
        grad_2d = grad_input.view(-1, partition_vocab_size)

        # Subtract 1 at target positions owned by this partition
        # (softmax - one_hot(target)).
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=get_current_device())
        grad_2d[arange_1d, masked_target] -= (1.0 - target_mask.view(-1).float())

        # Finally elementwise multiplication with the output gradients.
        grad_input.mul_(output_grad.unsqueeze(dim=-1))

        return grad_input, None


@LOSSES.register_module
class VocabParallelCrossEntropyLoss2p5D(_Loss):
    """
    Vocab parallel cross entropy loss for 2.5D parallelism

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.
    """

    def __init__(self, reduction=True):
        super().__init__()
        self.reduction_mean = reduction

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.
        """
        targets = split_batch_2p5d(targets)
        loss = _VocabParallelCrossEntropy2p5D.apply(logits, targets)
        if self.reduction_mean:
            loss = loss.mean()
            loss = reduce_by_batch_2p5d(loss, True)
        return loss
from torch.optim.lr_scheduler import CosineAnnealingLR as _CosineAnnealingLR

from colossalai.registry import LR_SCHEDULERS

from .delayed import DelayerScheduler, WarmupDelayerScheduler, WarmupScheduler


@LR_SCHEDULERS.register_module
class CosineAnnealingLR(_CosineAnnealingLR):
    r"""Set the learning rate of each parameter group using a cosine annealing
    schedule, where :math:`\eta_{max}` is set to the initial lr and
    :math:`T_{cur}` is the number of epochs since the last restart in SGDR:

    .. math::
        \begin{aligned}
        \eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1
        + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right),
        & T_{cur} \neq (2k+1)T_{max}; \\
        \eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min})
        \left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right),
        & T_{cur} = (2k+1)T_{max}.
        \end{aligned}

    When last_epoch=-1, sets initial lr as lr. Notice that because the schedule
    is defined recursively, the learning rate can be simultaneously modified
    outside this scheduler by other operators. If the learning rate is set
    solely by this scheduler, the learning rate at each step becomes:

    .. math::
        \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 +
        \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right)

    It has been proposed in
    `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only
    implements the cosine annealing part of SGDR, and not the restarts.

    .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
        https://arxiv.org/abs/1608.03983

    Args:
        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
        total_steps (int): Number of total training steps.
        eta_min (float, optional): Minimum learning rate, defaults to 0.
        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
            the schedule is started from the beginning, i.e. sets initial lr as lr.
    """

    def __init__(self, optimizer, total_steps: int, eta_min: float = 0, last_epoch: int = -1, **kwargs):
        super().__init__(optimizer, total_steps, eta_min=eta_min, last_epoch=last_epoch)


@LR_SCHEDULERS.register_module
class CosineAnnealingWarmupLR(WarmupScheduler):
    """Cosine annealing learning rate scheduler with learning rate warmup. A linear warmup schedule will be applied.

    Args:
        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
        total_steps (int): Number of total training steps.
        warmup_steps (int, optional): Number of warmup steps, defaults to 0.
        eta_min (float, optional): Minimum learning rate, defaults to 0.
        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
            the schedule is started from the beginning, i.e. sets initial lr as lr.
    """

    def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, eta_min: float = 0., last_epoch: int = -1):
        # The cosine schedule only covers the steps after warmup.
        base_scheduler = _CosineAnnealingLR(
            optimizer, total_steps - warmup_steps, eta_min=eta_min, last_epoch=last_epoch)
        super().__init__(optimizer, warmup_steps, base_scheduler)


@LR_SCHEDULERS.register_module
class FlatAnnealingLR(DelayerScheduler):
    """Flat and cosine annealing learning rate scheduler. The learning rate will be a fixed value
    before starting decay.

    Args:
        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
        total_steps (int): Number of total training steps.
        pct_start (float, optional): Percent of steps before starting learning rate decay,
            defaults to 0.72.
        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
            the schedule is started from the beginning, i.e. sets initial lr as lr.
    """

    def __init__(self, optimizer, total_steps: int, pct_start: float = 0.72, last_epoch: int = -1, **kwargs):
        if not (0.0 <= pct_start <= 1.0):
            raise ValueError(
                f'pct_start must >= 0.0 and <= 1.0, got {pct_start}')
        # Hold the lr flat for the first pct_start fraction, then anneal.
        flat_steps = int(total_steps * pct_start)
        anneal_steps = total_steps - flat_steps
        base_scheduler = _CosineAnnealingLR(
            optimizer, anneal_steps)
        super().__init__(optimizer, flat_steps, base_scheduler, last_epoch=last_epoch)


@LR_SCHEDULERS.register_module
class FlatAnnealingWarmupLR(WarmupDelayerScheduler):
    """Flat and cosine annealing learning rate scheduler with learning rate warmup. A linear warmup schedule
    will be applied, and then the learning rate will be a fixed value before starting decay.

    Args:
        optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer.
        total_steps (int): Number of total training steps.
        warmup_steps (int, optional): Number of warmup steps, defaults to 0.
        pct_start (float, optional): Percent of steps before starting learning rate decay,
            defaults to 0.72.
        eta_min (float, optional): Minimum learning rate, defaults to 0.
        last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1,
            the schedule is started from the beginning, i.e. sets initial lr as lr.
    """

    def __init__(self,
                 optimizer,
                 total_steps: int,
                 warmup_steps: int = 0,
                 pct_start: float = 0.72,
                 eta_min: float = 0,
                 last_epoch: int = -1,
                 **kwargs):
        if not (0.0 <= pct_start <= 1.0):
            raise ValueError(
                f'pct_start must >= 0.0 and <= 1.0, got {pct_start}')
        # Flat phase is a fraction of the steps remaining after warmup.
        flat_steps = int((total_steps - warmup_steps) * pct_start)
        anneal_steps = total_steps - warmup_steps - flat_steps
        base_scheduler = _CosineAnnealingLR(
            optimizer, anneal_steps, eta_min=eta_min)
        super().__init__(optimizer, warmup_steps, flat_steps, base_scheduler, last_epoch=last_epoch)
from torch.optim.lr_scheduler import _LRScheduler from colossalai.registry import LR_SCHEDULERS @LR_SCHEDULERS.register_module class LinearWarmupLR(_LRScheduler): """Linearly warmup learning rate and then linearly decay. Args: optimizer (:class:`torch.optim.Optimizer`): Wrapped optimizer. total_steps (int): Number of total training steps. warmup_steps (int, optional): Number of warmup steps, defaults to 0 last_epoch (int, optional): The index of last epoch, defaults to -1. When last_epoch=-1, the schedule is started from the beginning or When last_epoch=-1, sets initial lr as lr. """ def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, last_epoch: int = -1, **kwargs): self.warmup_steps = warmup_steps self.total_steps = total_steps super().__init__(optimizer, last_epoch=last_epoch) def get_lr(self): if self.last_epoch < self.warmup_steps: return [(self.last_epoch + 1) / (self.warmup_steps + 1) * lr for lr in self.base_lrs] else: return [(self.total_steps - self.last_epoch) / (self.total_steps - self.warmup_steps) * lr for lr in self.base_lrs]