# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team

from .base import *
from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference
from ..policy import TransformerPolicy


class DS_GPT2Container(BaseTransformerContainer):

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # All model specific things should be defined here instead of the base class.
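    # Build the DeepSpeed GPT inference module for this container; `config`
    # falls back to the container's ds_model_config when not provided.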
    def create_module(self, config=None):
        _config = config if config is not None else self.ds_model_config
        self.module = DeepSpeedGPTInference(_config, mp_group=self.mp_group)
        self.module.config.scale_attention = self.scale_attention
        return self.module


class HFGPT2LayerPolicy(TransformerPolicy):
    _orig_layer_class = None

    def __init__(self, client_module, inference=True):
        # HuggingFace GPT2 uses convolutional layer instead of linear layer
        super().__init__(inference, linear_layer=False)
        self.client_module = client_module
        try:
            import transformers
            HFGPT2LayerPolicy._orig_layer_class = transformers.models.gpt2.modeling_gpt2.GPT2Block
        except:
            HFGPT2LayerPolicy._orig_layer_class = None
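    # Expose (hidden_size, num_attention_heads, layernorm_eps, intermediate_size)
    # from the wrapped HF GPT2Block; GPT2 uses the default intermediate size.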
    def get_hidden_heads(self):
        return self.client_module.attn.embed_dim, \
               self.client_module.attn.num_heads, \
               self.client_module.ln_1.eps, \
               DEFAULT_INTERMEDIATE_SIZE
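    # Attention parameters: HF GPT2 fuses Q/K/V into a single Conv1D (c_attn);
    # c_proj is the attention output projection.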
    def attention(self, enable_training=False):
        return self.client_module.attn.c_attn.weight, \
               self.client_module.attn.c_attn.bias, \
               self.client_module.attn.c_proj.weight, \
               self.client_module.attn.c_proj.bias
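    # MLP parameters: c_fc projects up to the intermediate size,
    # c_proj projects back down to the hidden size.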
    def mlp(self, enable_training=False):
        return self.client_module.mlp.c_fc.weight, \
               self.client_module.mlp.c_fc.bias, \
               self.client_module.mlp.c_proj.weight, \
               self.client_module.mlp.c_proj.bias
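    # LayerNorm parameters: GPT2 is pre-LN, so ln_1 is applied before attention
    # and ln_2 before the MLP; returned as (ln_2.weight, ln_2.bias, ln_1.weight, ln_1.bias).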
    def layernorm(self):
        return self.client_module.ln_2.weight, \
               self.client_module.ln_2.bias, \
               self.client_module.ln_1.weight, \
               self.client_module.ln_1.bias
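
# Illustrative usage sketch (not part of this module): these classes are picked up
# by DeepSpeed's kernel-injection path, which matches HF GPT2Block layers via
# HFGPT2LayerPolicy and swaps them for modules built by DS_GPT2Container, e.g.:
#
#   import torch, deepspeed
#   from transformers import AutoModelForCausalLM
#
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   engine = deepspeed.init_inference(model, dtype=torch.half, replace_with_kernel_inject=True)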