from transformers.configuration_utils import PretrainedConfig
from loguru import logger as logging


class BuddyGPTConfig(PretrainedConfig):
    """ TinyLLM 配置文件
    """
    
    model_type = "buddygpt"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self, 
        vocab_size=151669,
        hidden_size=4096,
        intermediate_size=4096,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        num_seq_len=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=None,
        eos_token_id=None,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        attention_dropout=0.0,
        _attn_implementation="sdpa",
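        # MLA (multi-head latent attention) projection dimensions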
        q_lora_rank: int = 16,
        qk_rope_head_dim: int = 4,
        kv_lora_rank: int = 16,
        v_head_dim: int = 16,
        qk_nope_head_dim: int = 12,
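        # MoE (mixture-of-experts) routing parameters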
        n_expert=None,
        n_expert_per_token=2,
        n_group=2,
        n_topk_group=1,
        norm_topk_prob=True,
        routed_scaling_factor=0.2,
        scoring_func='sigmoid',
        topk_method='noaux_tc',
        moe_intermediate_size=10,
        n_shared_experts=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_seq_len = num_seq_len
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
            
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        self._attn_implementation = _attn_implementation

        # MLA (multi-head latent attention)
        self.q_lora_rank = q_lora_rank
        self.qk_rope_head_dim = qk_rope_head_dim
        self.kv_lora_rank = kv_lora_rank
        self.v_head_dim = v_head_dim
        self.qk_nope_head_dim = qk_nope_head_dim

        # MoE (mixture-of-experts)
        self.n_expert = n_expert
        self.n_expert_per_token = n_expert_per_token
        self.n_group = n_group
        self.n_topk_group = n_topk_group
        self.norm_topk_prob = norm_topk_prob
        self.routed_scaling_factor = routed_scaling_factor
        self.scoring_func = scoring_func
        self.topk_method = topk_method
        self.moe_intermediate_size = moe_intermediate_size
        self.n_shared_experts = n_shared_experts

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs
        )
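

# A minimal usage sketch, not part of the original file: it builds a config with the
# defaults above, overrides a few fields (the override values below are illustrative
# assumptions, not values prescribed by BuddyGPT), and dumps the result as JSON via
# PretrainedConfig.to_json_string().
if __name__ == "__main__":
    # Default configuration
    config = BuddyGPTConfig()
    logging.info(f"model_type={config.model_type}, hidden_size={config.hidden_size}")

    # Overriding a few hyper-parameters, e.g. for a small debug model
    small = BuddyGPTConfig(hidden_size=512, num_hidden_layers=4, num_attention_heads=8)
    logging.info(small.to_json_string())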