fredzzp committed on
Commit 612b51f · verified · 1 Parent(s): 78f3e43

Initial model upload with self-contained custom code

Files changed (1)
  1. modeling_qwen2.py +712 -118
modeling_qwen2.py CHANGED
@@ -1,130 +1,724 @@
1
- import argparse
2
- import json
3
- import os
4
- import shutil
5
- import sys
6
- from pathlib import Path
7
-
8
- from huggingface_hub import create_repo, upload_folder
9
-
10
- def main():
11
- """Main function to handle model preparation and upload."""
12
- parser = argparse.ArgumentParser(
13
- description="Upload a custom Hugging Face model with its self-contained code."
14
- )
15
- parser.add_argument(
16
- "--model_code_path",
17
- type=str,
18
- required=True,
19
- help="Path to the self-contained, single Python model file.",
20
- )
21
- parser.add_argument(
22
- "--ckpt_dir",
23
- type=str,
24
- required=True,
25
- help="Directory containing the model weights and tokenizer files (hf_ckpt).",
26
- )
27
- parser.add_argument(
28
- "--repo",
29
- type=str,
30
- required=True,
31
- help="Name of the repository on Hugging Face Hub (e.g., 'username/repo-name').",
32
- )
33
- parser.add_argument(
34
- "--readme_path",
35
- type=str,
36
- required=True,
37
- help="Path to the README.md file to be included in the repository.",
38
- )
39
- parser.add_argument(
40
- "--private",
41
- action="store_true",
42
- help="If set, creates a private repository.",
43
- )
44
-
45
- args = parser.parse_args()
46
-
47
- staging_dir = Path("./temp_upload_staging")
48
- if staging_dir.exists():
49
- shutil.rmtree(staging_dir)
50
- staging_dir.mkdir()
51
- print(f"Created temporary staging directory: {staging_dir}")
52
-
53
- try:
54
- # --- 2. Copy All Necessary Files ---
55
- print("\nCopying files to staging directory...")
56
 
57
- # Copy checkpoint files
58
- for f in os.listdir(args.ckpt_dir):
59
- shutil.copy(os.path.join(args.ckpt_dir, f), staging_dir)
60
 
61
- # Copy the single, self-contained model code file
62
- model_code_source = Path(args.model_code_path)
63
- if not model_code_source.exists():
64
- print(f"Error: Model code file not found at {model_code_source}")
65
- sys.exit(1)
66
 
67
- # The destination file MUST be named correctly for auto_map to work.
68
- model_code_dest = staging_dir / "modeling_qwen2.py"
69
- print(f"Copying model code from {model_code_source} to {model_code_dest}")
70
- shutil.copy(model_code_source, model_code_dest)
 
71
 
72
- print("File copying complete.")
73
-
74
- # --- 3. Configure `config.json` for Auto-Loading ---
75
- print("\nConfiguring config.json for auto-loading...")
76
- config_path = staging_dir / "config.json"
77
- if not config_path.exists():
78
- print(f"Error: config.json not found in {args.ckpt_dir}")
79
- sys.exit(1)
80
-
81
- with open(config_path, "r", encoding="utf-8") as f:
82
- config_data = json.load(f)
83
-
84
- config_data["auto_map"] = {
85
- "AutoModelForCausalLM": "modeling_qwen2.Qwen2ForCausalLM"
86
- }
87
- config_data["architectures"] = ["Qwen2ForCausalLM"]
88
- config_data["trust_remote_code"] = True
89
-
90
- with open(config_path, "w", encoding="utf-8") as f:
91
- json.dump(config_data, f, indent=2)
92
- print("config.json updated successfully.")
93
-
94
- # --- 4. Copy `README.md` ---
95
- print("\nCopying README.md...")
96
- readme_source = Path(args.readme_path)
97
- if not readme_source.exists():
98
- print(f"Error: README file not found at {readme_source}")
99
- sys.exit(1)
100
 
101
- with open(readme_source, "r", encoding="utf-8") as f:
102
- readme_content = f.read()
103
 
104
- readme_content = readme_content.replace("{repo_id}", args.repo)
 
105
 
106
- with open(staging_dir / "README.md", "w", encoding="utf-8") as f:
107
- f.write(readme_content)
108
- print("README.md copied and processed.")
109
 
110
- # --- 5. Upload to the Hub ---
111
- print(f"\nPreparing to upload to repository: {args.repo}")
112
- repo_url = create_repo(args.repo, repo_type="model", exist_ok=True, private=args.private)
113
 
114
- upload_folder(
115
- folder_path=staging_dir,
116
- repo_id=args.repo,
117
- repo_type="model",
118
- commit_message="Initial model upload with self-contained custom code",
 
119
  )
120
- print("\n🚀 Upload complete! 🚀")
121
- print(f"Check out your model at: {repo_url}")
122
 
123
- finally:
124
- # --- 6. Clean Up ---
125
- print("\nCleaning up temporary staging directory...")
126
- shutil.rmtree(staging_dir)
127
- print("Cleanup complete.")
128
 
129
- if __name__ == "__main__":
130
- main()
 
1
+ # Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # This is a fully self-contained version of the model script.
16
+ # It includes the MDMGenerationMixin and all necessary utilities for public release.
17
+
18
+ from transformers.utils import logging  # Hugging Face logging, so that `logger.warning_once` used below exists
19
+ import warnings
20
+ import copy
21
+ from dataclasses import dataclass
22
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.distributions as dists
26
+ from torch import nn
27
+ from torch.nn import functional as F
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
31
+ from transformers.generation.configuration_utils import GenerationConfig
32
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
33
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
34
+ from transformers.modeling_outputs import (
35
+ BaseModelOutputWithPast,
36
+ CausalLMOutputWithPast,
37
+ ModelOutput,
38
+ )
39
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
40
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
41
+ from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
42
+ from transformers.processing_utils import Unpack
43
+ from transformers.utils import (
44
+ add_start_docstrings,
45
+ add_start_docstrings_to_model_forward,
46
+ replace_return_docstrings,
47
+ )
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ # ==============================================================================
52
+ # Start of Generation Utilities (Integrated directly into this file)
53
+ # ==============================================================================
54
+
55
+ def top_p_logits(logits, top_p=None):
56
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True)
57
+ cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
58
+ sorted_indices_to_remove = cumulative_probs > top_p
59
+ sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
60
+ sorted_indices_to_remove[..., 0] = 0
61
+ mask = torch.zeros_like(logits, dtype=torch.bool, device=logits.device)
62
+ mask = mask.scatter_(-1, sorted_indices, sorted_indices_to_remove)
63
+ logits = logits.masked_fill(mask, torch.finfo(logits.dtype).min)
64
+ return logits
65
+
66
+ def top_k_logits(logits, top_k=None):
67
+ if top_k is None or top_k == 0:
68
+ return logits
69
+ top_k = min(top_k, logits.size(-1))
70
+ indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
71
+ logits = logits.masked_fill(indices_to_remove, torch.finfo(logits.dtype).min)
72
+ return logits
73
+
74
+ def sample_tokens(logits, temperature=0.0, top_p=None, top_k=None, margin_confidence=False, neg_entropy=False):
75
+ if temperature > 0:
76
+ logits = logits / temperature
77
+ if top_p is not None and top_p < 1:
78
+ logits = top_p_logits(logits, top_p)
79
+ if top_k is not None:
80
+ logits = top_k_logits(logits, top_k)
81
+ probs = torch.softmax(logits.float(), dim=-1)
82
+ if temperature > 0:
83
+ x0 = dists.Categorical(probs=probs).sample()
84
+ else:
85
+ _, x0 = probs.max(dim=-1)
86
+
87
+ confidence = torch.gather(probs, -1, x0.unsqueeze(-1)).squeeze(-1)
88
+
89
+ if margin_confidence:
90
+ sorted_probs, _ = torch.sort(probs, dim=-1, descending=True)
91
+ top1_probs = sorted_probs[..., 0]
92
+ top2_probs = sorted_probs[..., 1]
93
+ confidence = top1_probs - top2_probs
94
+ elif neg_entropy:
95
+ log_probs = torch.log(probs.clamp(min=1e-10))
96
+ confidence = (probs * log_probs).sum(dim=-1)
97
+
98
+ return confidence, x0
99
+
100
+
101
+ @dataclass
102
+ class MDMModelOutput(ModelOutput):
103
+ sequences: torch.LongTensor = None
104
+ history: Optional[Tuple[torch.FloatTensor]] = None
105
+
106
+ class MDMGenerationConfig(GenerationConfig):
107
+ def __init__(self, **kwargs):
108
+ super().__init__(**kwargs)
109
+ self.temperature: float = kwargs.pop("temperature", 0.0)
110
+ self.top_p: Optional[float] = kwargs.pop("top_p", None)
111
+ self.top_k: Optional[int] = kwargs.pop("top_k", None)
112
+ self.eps: float = kwargs.pop("eps", 1e-3)
113
+ self.steps: int = kwargs.pop("steps", 512)
114
+ self.alg: str = kwargs.pop("alg", 'entropy')
115
+ self.alg_temp: Optional[float] = kwargs.pop("alg_temp", 0.0)
116
+ self.output_history: bool = kwargs.pop("output_history", False)
117
+ self.mask_token_id = kwargs.pop("mask_token_id", None)
118
+
119
+
120
+ class MDMGenerationMixin:
121
+ """
122
+ Mixin class for Masked Diffusion Model generation.
123
+ """
124
+ @staticmethod
125
+ def _expand_inputs_for_generation(
126
+ expand_size: int = 1,
127
+ input_ids: Optional[torch.LongTensor] = None,
128
+ attention_mask: Optional[torch.LongTensor] = None
129
+ ) -> Tuple[torch.LongTensor, Dict[str, Any]]:
130
+ if expand_size == 1:
131
+ return input_ids, attention_mask
132
 
133
+ if input_ids is not None:
134
+ input_ids = input_ids.repeat_interleave(expand_size, dim=0)
135
+ if attention_mask is not None:
136
+ attention_mask = attention_mask.repeat_interleave(expand_size, dim=0)
137
+ return input_ids, attention_mask
138
+
139
+ def _prepare_generation_config(
140
+ self, generation_config: Optional[GenerationConfig], **kwargs
141
+ ) -> MDMGenerationConfig:
142
+ if generation_config is None:
143
+ generation_config = self.generation_config
144
 
145
+ if not isinstance(generation_config, MDMGenerationConfig):
146
+ generation_config = MDMGenerationConfig.from_dict(generation_config.to_dict())
147
+
148
+ generation_config.update(**kwargs)
149
+ return generation_config
150
+
151
+ @torch.no_grad()
152
+ def diffusion_generate(
153
+ self,
154
+ inputs: Optional[torch.Tensor] = None,
155
+ generation_config: Optional[MDMGenerationConfig] = None,
156
+ **kwargs,
157
+ ) -> Union[MDMModelOutput, torch.LongTensor]:
158
 
159
+ generation_config = self._prepare_generation_config(generation_config, **kwargs)
160
+ input_ids = inputs
161
+ attention_mask = kwargs.get("attention_mask", None)
162
+
163
+ if input_ids is None:
164
+ raise ValueError("`inputs` must be provided for diffusion generation.")
165
+
166
+ if generation_config.max_new_tokens is not None:
167
+ generation_config.max_length = input_ids.shape[-1] + generation_config.max_new_tokens
168
 
169
+ input_ids, attention_mask = self._expand_inputs_for_generation(
170
+ expand_size=generation_config.num_return_sequences,
171
+ input_ids=input_ids,
172
+ attention_mask=attention_mask
173
+ )
174
+ return self._sample(
175
+ input_ids,
176
+ attention_mask=attention_mask,
177
+ generation_config=generation_config
178
+ )
179
+
180
+ def _sample(
181
+ self,
182
+ input_ids: torch.LongTensor,
183
+ attention_mask: Optional[torch.LongTensor],
184
+ generation_config: MDMGenerationConfig
185
+ ) -> Union[MDMModelOutput, torch.LongTensor]:
186
 
187
+ max_length = generation_config.max_length
188
+ mask_token_id = generation_config.mask_token_id
189
+ if mask_token_id is None:
190
+ raise ValueError("`mask_token_id` must be set in the generation config.")
191
+
192
+ steps = generation_config.steps
193
+ eps = generation_config.eps
194
+ alg = generation_config.alg
195
+ alg_temp = generation_config.alg_temp
196
+ temperature = generation_config.temperature
197
+ top_p = generation_config.top_p
198
+ top_k = generation_config.top_k
199
+
200
+ histories = [] if generation_config.output_history else None
201
+ x = F.pad(input_ids, (0, max_length - input_ids.shape[1]), value=mask_token_id)
202
+ gen_attention_mask = (x != self.config.pad_token_id).long() if self.config.pad_token_id is not None else None
203
+ timesteps = torch.linspace(1, eps, steps + 1, device=x.device)
204
+
205
+ for i in range(steps):
206
+ mask_index = (x == mask_token_id)
207
+ if not mask_index.any():
208
+ break
209
+ outputs = self(input_ids=x, attention_mask=gen_attention_mask, is_causal=False)
210
+ logits = outputs.logits
211
+ logits = torch.cat([logits[:, :1], logits[:, :-1]], dim=1)
212
+ mask_logits = logits[mask_index]
213
+ t = timesteps[i]
214
+ s = timesteps[i + 1]
215
+
216
+ # margin confidence only applies to 'topk_margin'; 'entropy' is handled via `neg_entropy` below
+ confidence_alg_map = {'maskgit_plus': False, 'topk_margin': True, 'entropy': False}
217
+ is_margin_conf = confidence_alg_map.get(alg, False)
218
+ is_neg_entropy = alg == 'entropy'
219
+
220
+ confidence, x0 = sample_tokens(mask_logits, temperature, top_p, top_k, margin_confidence=is_margin_conf, neg_entropy=is_neg_entropy)
221
+ num_masked = mask_index.sum(dim=-1, keepdim=True)
222
+ gamma = 1 - s / t
223
+ num_to_unmask = (num_masked * gamma).long()
224
+ full_confidence = torch.full_like(x, -torch.inf, device=self.device, dtype=confidence.dtype)
225
+ full_confidence[mask_index] = confidence
226
+
227
+ if (alg_temp is not None and alg_temp > 0):
228
+ unmask_probs = F.softmax(full_confidence / alg_temp, dim=-1)
229
+ unmask_indices = torch.multinomial(unmask_probs, num_samples=num_to_unmask.max(), replacement=False)
230
+ else:
231
+ _, unmask_indices = torch.topk(full_confidence, k=num_to_unmask.max(), dim=-1)
232
+
233
+ rows = torch.arange(x.size(0), device=x.device).unsqueeze(1)
234
+ unmask_selection_mask = torch.zeros_like(x, dtype=torch.bool)
235
+ unmask_selection_mask[rows, unmask_indices] = True
236
+ unmask_selection_mask = unmask_selection_mask & (torch.cumsum(unmask_selection_mask.long(), dim=-1) <= num_to_unmask)
237
+ x_unmasked_proposals = torch.full_like(x, fill_value=mask_token_id)
238
+ x_unmasked_proposals[mask_index] = x0
239
+ x[unmask_selection_mask] = x_unmasked_proposals[unmask_selection_mask]
240
+
241
+ if histories is not None:
242
+ histories.append(x.clone())
243
+
244
+ if generation_config.return_dict_in_generate:
245
+ return MDMModelOutput(sequences=x, history=histories)
246
+ else:
247
+ return x
248
+
249
+ # ==============================================================================
250
+ # End of Generation Utilities
251
+ # ==============================================================================
252
+
253
+
254
+ _CHECKPOINT_FOR_DOC = "meta-qwen2/Qwen2-2-7b-hf"
255
+ _CONFIG_FOR_DOC = "Qwen2Config"
+
+ # Minimal docstring placeholders so the `add_start_docstrings*` decorators below resolve
+ # in this self-contained file (the full docstrings from `transformers` are not reproduced here).
+ QWEN2_START_DOCSTRING = r""
+ QWEN2_INPUTS_DOCSTRING = r""
256
+
257
+
258
+ class Qwen2MLP(nn.Module):
259
+ # ... (class unchanged)
260
+ def __init__(self, config):
261
+ super().__init__()
262
+ self.config = config
263
+ self.hidden_size = config.hidden_size
264
+ self.intermediate_size = config.intermediate_size
265
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
266
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
267
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
268
+ self.act_fn = ACT2FN[config.hidden_act]
269
+
270
+ def forward(self, x):
271
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
272
+ return down_proj
273
+
274
+ def rotate_half(x):
275
+ # ... (function unchanged)
276
+ x1 = x[..., : x.shape[-1] // 2]
277
+ x2 = x[..., x.shape[-1] // 2 :]
278
+ return torch.cat((-x2, x1), dim=-1)
279
+
280
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
281
+ # ... (function unchanged)
282
+ cos = cos.unsqueeze(unsqueeze_dim)
283
+ sin = sin.unsqueeze(unsqueeze_dim)
284
+ q_embed = (q * cos) + (rotate_half(q) * sin)
285
+ k_embed = (k * cos) + (rotate_half(k) * sin)
286
+ return q_embed, k_embed
287
+
288
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
289
+ # ... (function unchanged)
290
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
291
+ if n_rep == 1:
292
+ return hidden_states
293
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
294
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
295
+
296
+ class Qwen2Attention(nn.Module):
297
+ # ... (class unchanged)
298
+ def __init__(self, config: Qwen2Config, layer_idx: int):
299
+ super().__init__()
300
+ self.config = config
301
+ self.layer_idx = layer_idx
302
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
303
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
304
+ self.scaling = self.head_dim**-0.5
305
+ self.attention_dropout = config.attention_dropout
306
+ self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True)
307
+ self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
308
+ self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
309
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
310
+
311
+ def forward(
312
+ self,
313
+ hidden_states: torch.Tensor,
314
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
315
+ attention_mask: Optional[torch.Tensor],
316
+ past_key_value: Optional[Cache] = None,
317
+ output_attentions: Optional[bool] = False,
318
+ cache_position: Optional[torch.LongTensor] = None,
319
+ is_causal: bool = True,
320
+ **kwargs: Unpack[FlashAttentionKwargs],
321
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
322
+ bsz, q_len, _ = hidden_states.size()
323
+ hidden_shape = (bsz, q_len, -1, self.head_dim)
324
+
325
+ query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
326
+ key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
327
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
328
+
329
+ full_q_len = query_states.size(2)
330
+ cos, sin = position_embeddings
331
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
332
+
333
+ if past_key_value is not None:
334
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
335
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
336
+
337
+ attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get(self.config._attn_implementation, None)
338
+ if attention_interface is None:
339
+ raise ValueError(f"Attention implementation {self.config._attn_implementation} not found.")
340
+
341
+ if self.config._attn_implementation == "sdpa" and output_attentions:
342
+ logger.warning_once("Using SDPA with `output_attentions=True` requires eager attention.")
343
+ attention_interface = ALL_ATTENTION_FUNCTIONS["eager"]
344
+
345
+
346
+ attn_output, attn_weights = attention_interface(
347
+ query_states,
348
+ key_states,
349
+ value_states,
350
+ attention_mask=attention_mask,
351
+ dropout=self.attention_dropout if self.training else 0.0,
352
+ is_causal=is_causal,
353
+ **kwargs,
354
+ )
355
+ attn_output = attn_output.transpose(1, 2).contiguous()
356
+ attn_output = attn_output.reshape(bsz, q_len, self.config.hidden_size)
357
+ attn_output = self.o_proj(attn_output)
358
 
359
+ if not output_attentions:
360
+ attn_weights = None
361
 
362
+ return attn_output, attn_weights, past_key_value
363
+
364
+ class Qwen2RMSNorm(nn.Module):
365
+ # ... (class unchanged)
366
+ def __init__(self, hidden_size, eps=1e-6):
367
+ super().__init__()
368
+ self.weight = nn.Parameter(torch.ones(hidden_size))
369
+ self.variance_epsilon = eps
370
+
371
+ def forward(self, hidden_states):
372
+ input_dtype = hidden_states.dtype
373
+ hidden_states = hidden_states.to(torch.float32)
374
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
375
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
376
+ return self.weight * hidden_states.to(input_dtype)
377
+
378
+ class Qwen2DecoderLayer(nn.Module):
379
+ # ... (class unchanged)
380
+ def __init__(self, config: Qwen2Config, layer_idx: int):
381
+ super().__init__()
382
+ self.hidden_size = config.hidden_size
383
+ self.self_attn = Qwen2Attention(config=config, layer_idx=layer_idx)
384
+ self.mlp = Qwen2MLP(config)
385
+ self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
386
+ self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
387
+
388
+ def forward(
389
+ self,
390
+ hidden_states: torch.Tensor,
391
+ attention_mask: Optional[torch.Tensor] = None,
392
+ position_ids: Optional[torch.LongTensor] = None,
393
+ past_key_value: Optional[Cache] = None,
394
+ output_attentions: Optional[bool] = False,
395
+ use_cache: Optional[bool] = False,
396
+ cache_position: Optional[torch.LongTensor] = None,
397
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
398
+ is_causal: bool = True,
399
+ **kwargs: Unpack[FlashAttentionKwargs],
400
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
401
+ residual = hidden_states
402
+ hidden_states = self.input_layernorm(hidden_states)
403
+
404
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
405
+ hidden_states=hidden_states,
406
+ attention_mask=attention_mask,
407
+ past_key_value=past_key_value,
408
+ output_attentions=output_attentions,
409
+ cache_position=cache_position,
410
+ position_embeddings=position_embeddings,
411
+ is_causal=is_causal,
412
+ **kwargs,
413
+ )
414
+ hidden_states = residual + hidden_states
415
+
416
+ residual = hidden_states
417
+ hidden_states = self.post_attention_layernorm(hidden_states)
418
+ hidden_states = self.mlp(hidden_states)
419
+ hidden_states = residual + hidden_states
420
+
421
+ outputs = (hidden_states,)
422
+ if output_attentions:
423
+ outputs += (self_attn_weights,)
424
+ if use_cache:
425
+ outputs += (present_key_value,)
426
+
427
+ return outputs
428
+
429
+ class Qwen2RotaryEmbedding(nn.Module):
430
+ # ... (class unchanged)
431
+ def __init__(self, config: Qwen2Config, device=None):
432
+ super().__init__()
433
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
434
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
435
+ else:
436
+ self.rope_type = "default"
437
+ self.max_seq_len_cached = config.max_position_embeddings
438
+ self.original_max_seq_len = config.max_position_embeddings
439
+ self.config = config
440
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
441
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
442
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
443
+ self.original_inv_freq = self.inv_freq
444
+
445
+ def _dynamic_frequency_update(self, position_ids, device):
446
+ seq_len = torch.max(position_ids) + 1
447
+ if seq_len > self.max_seq_len_cached:
448
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
449
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
450
+ self.max_seq_len_cached = seq_len
451
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:
452
+ self.original_inv_freq = self.original_inv_freq.to(device)
453
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
454
+ self.max_seq_len_cached = self.original_max_seq_len
455
+
456
+ @torch.no_grad()
457
+ def forward(self, x, position_ids):
458
+ if "dynamic" in self.rope_type:
459
+ self._dynamic_frequency_update(position_ids, device=x.device)
460
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
461
+ position_ids_expanded = position_ids[:, None, :].float()
462
+ device_type = x.device.type
463
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
464
+ with torch.autocast(device_type=device_type, enabled=False):
465
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
466
+ emb = torch.cat((freqs, freqs), dim=-1)
467
+ cos = emb.cos()
468
+ sin = emb.sin()
469
+ cos = cos * self.attention_scaling
470
+ sin = sin * self.attention_scaling
471
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
472
+
473
+ @add_start_docstrings(
474
+ "The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
475
+ QWEN2_START_DOCSTRING,
476
+ )
477
+ class Qwen2PreTrainedModel(PreTrainedModel):
478
+ # ... (class unchanged)
479
+ config_class = Qwen2Config
480
+ base_model_prefix = "model"
481
+ supports_gradient_checkpointing = True
482
+ _no_split_modules = ["Qwen2DecoderLayer"]
483
+ _skip_keys_device_placement = ["past_key_values"]
484
+ _supports_flash_attn_2 = True
485
+ _supports_sdpa = True
486
+ _supports_cache_class = True
487
+
488
+ def _init_weights(self, module):
489
+ std = self.config.initializer_range
490
+ if isinstance(module, nn.Linear):
491
+ module.weight.data.normal_(mean=0.0, std=std)
492
+ if module.bias is not None:
493
+ module.bias.data.zero_()
494
+ elif isinstance(module, nn.Embedding):
495
+ module.weight.data.normal_(mean=0.0, std=std)
496
+ if module.padding_idx is not None:
497
+ module.weight.data[module.padding_idx].zero_()
498
+
499
+ class Qwen2Model(Qwen2PreTrainedModel):
500
+ # ... (class unchanged)
501
+ def __init__(self, config: Qwen2Config):
502
+ super().__init__(config)
503
+ self.padding_idx = config.pad_token_id
504
+ self.vocab_size = config.vocab_size
505
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
506
+ self.layers = nn.ModuleList(
507
+ [Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
508
+ )
509
+ self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
510
+ self.rotary_emb = Qwen2RotaryEmbedding(config=config)
511
+ self.gradient_checkpointing = False
512
+ self.post_init()
513
+
514
+ def get_input_embeddings(self):
515
+ return self.embed_tokens
516
+
517
+ def set_input_embeddings(self, value):
518
+ self.embed_tokens = value
519
+
520
+ def forward(
521
+ self,
522
+ input_ids: torch.LongTensor = None,
523
+ attention_mask: Optional[torch.Tensor] = None,
524
+ position_ids: Optional[torch.LongTensor] = None,
525
+ past_key_values: Optional[Cache] = None,
526
+ inputs_embeds: Optional[torch.FloatTensor] = None,
527
+ use_cache: Optional[bool] = None,
528
+ output_attentions: Optional[bool] = None,
529
+ output_hidden_states: Optional[bool] = None,
530
+ return_dict: Optional[bool] = None,
531
+ cache_position: Optional[torch.LongTensor] = None,
532
+ is_causal: bool = True,
533
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
534
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
535
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
536
+ output_hidden_states = (
537
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
538
+ )
539
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
540
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
541
+
542
+ if (input_ids is None) ^ (inputs_embeds is not None):
543
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
544
+ if self.gradient_checkpointing and self.training and use_cache:
545
+ logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.")
546
+ use_cache = False
547
+ if inputs_embeds is None:
548
+ inputs_embeds = self.embed_tokens(input_ids)
549
+
550
+ past_key_values_length = 0
551
+ if use_cache:
552
+ if past_key_values is None:
553
+ past_key_values = DynamicCache()
554
+ past_key_values_length = past_key_values.get_seq_length()
555
+
556
+ if cache_position is None:
557
+ cache_position = torch.arange(
558
+ past_key_values_length, past_key_values_length + inputs_embeds.shape[1], device=inputs_embeds.device
559
+ )
560
+ if position_ids is None:
561
+ position_ids = cache_position.unsqueeze(0)
562
+
563
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, is_causal)
564
+ hidden_states = inputs_embeds
565
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
566
+ all_hidden_states = () if output_hidden_states else None
567
+ all_self_attns = () if output_attentions else None
568
+ next_decoder_cache = () if use_cache else None
569
+
570
+ for decoder_layer in self.layers:
571
+ if output_hidden_states:
572
+ all_hidden_states += (hidden_states,)
573
+
574
+ layer_outputs = decoder_layer(
575
+ hidden_states,
576
+ attention_mask=causal_mask,
577
+ position_ids=position_ids,
578
+ past_key_value=past_key_values,
579
+ output_attentions=output_attentions,
580
+ use_cache=use_cache,
581
+ cache_position=cache_position,
582
+ position_embeddings=position_embeddings,
583
+ is_causal=is_causal,
584
+ **flash_attn_kwargs,
585
+ )
586
+ hidden_states = layer_outputs[0]
587
+ if use_cache:
588
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
589
+ if output_attentions:
590
+ all_self_attns += (layer_outputs[1],)
591
+
592
+ hidden_states = self.norm(hidden_states)
593
+ if output_hidden_states:
594
+ all_hidden_states += (hidden_states,)
595
+
596
+ next_cache = next_decoder_cache if use_cache else None
597
+
598
+ if not return_dict:
599
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
600
+ return BaseModelOutputWithPast(
601
+ last_hidden_state=hidden_states,
602
+ past_key_values=next_cache,
603
+ hidden_states=all_hidden_states,
604
+ attentions=all_self_attns,
605
+ )
606
+
607
+ def _update_causal_mask(self, attention_mask, input_tensor, cache_position, is_causal):
608
+ if not is_causal:
609
+ return attention_mask
610
+
611
+ seq_len = input_tensor.shape[1]
612
+ if self.config._attn_implementation == "flash_attention_2":
613
+ if attention_mask is not None and 0.0 in attention_mask:
614
+ return attention_mask
615
+ return None
616
+
617
+ dtype = input_tensor.dtype
618
+ device = input_tensor.device
619
+
620
+ causal_mask = torch.triu(torch.full((seq_len, seq_len), torch.finfo(dtype).min, device=device), 1)
621
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
622
+
623
+ if attention_mask is not None:
624
+ causal_mask = causal_mask.clone()
625
+ causal_mask = causal_mask + attention_mask[:, None, None, :]
626
+
627
+ return causal_mask
628
+
629
+ class Qwen2ForCausalLM(Qwen2PreTrainedModel, MDMGenerationMixin):
630
+ _tied_weights_keys = ["lm_head.weight"]
631
+
632
+ def __init__(self, config):
633
+ super().__init__(config)
634
+ self.model = Qwen2Model(config)
635
+ self.vocab_size = config.vocab_size
636
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
637
+ self.post_init()
638
+
639
+ def get_input_embeddings(self):
640
+ return self.model.embed_tokens
641
+
642
+ def set_input_embeddings(self, value):
643
+ self.model.embed_tokens = value
644
+
645
+ def get_output_embeddings(self):
646
+ return self.lm_head
647
+
648
+ def set_output_embeddings(self, new_embeddings):
649
+ self.lm_head = new_embeddings
650
+
651
+ def set_decoder(self, decoder):
652
+ self.model = decoder
653
+
654
+ def get_decoder(self):
655
+ return self.model
656
+
657
+ @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
658
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
659
+ def forward(
660
+ self,
661
+ input_ids: torch.LongTensor = None,
662
+ attention_mask: Optional[torch.Tensor] = None,
663
+ position_ids: Optional[torch.LongTensor] = None,
664
+ past_key_values: Optional[Cache] = None,
665
+ inputs_embeds: Optional[torch.FloatTensor] = None,
666
+ labels: Optional[torch.LongTensor] = None,
667
+ use_cache: Optional[bool] = None,
668
+ output_attentions: Optional[bool] = None,
669
+ output_hidden_states: Optional[bool] = None,
670
+ return_dict: Optional[bool] = None,
671
+ cache_position: Optional[torch.LongTensor] = None,
672
+ is_causal: bool = True,
673
+ **kwargs: Unpack[FlashAttentionKwargs],
674
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
+ r"""
+ Returns:
+ """
675
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
676
+ output_hidden_states = (
677
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
678
+ )
679
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
680
+
681
+ outputs = self.model(
682
+ input_ids=input_ids,
683
+ attention_mask=attention_mask,
684
+ position_ids=position_ids,
685
+ past_key_values=past_key_values,
686
+ inputs_embeds=inputs_embeds,
687
+ use_cache=use_cache,
688
+ output_attentions=output_attentions,
689
+ output_hidden_states=output_hidden_states,
690
+ return_dict=return_dict,
691
+ cache_position=cache_position,
692
+ is_causal=is_causal,
693
+ **kwargs,
694
+ )
695
+
696
+ hidden_states = outputs[0]
697
+ logits = self.lm_head(hidden_states)
698
+ logits = logits.float()
699
+ loss = None
700
+
701
+ if labels is not None:
702
+ shift_logits = logits[..., :-1, :].contiguous()
703
+ shift_labels = labels[..., 1:].contiguous()
704
+ loss_fct = torch.nn.CrossEntropyLoss()
705
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
706
+ shift_labels = shift_labels.view(-1)
707
+ shift_labels = shift_labels.to(shift_logits.device)
708
+ loss = loss_fct(shift_logits, shift_labels)
709
 
710
+ if not return_dict:
711
+ output = (logits,) + outputs[1:]
712
+ return (loss,) + output if loss is not None else output
713
 
714
+ return CausalLMOutputWithPast(
715
+ loss=loss,
716
+ logits=logits,
717
+ past_key_values=outputs.past_key_values,
718
+ hidden_states=outputs.hidden_states,
719
+ attentions=outputs.attentions,
720
  )
 
 
721
 
722
+ ModelClass = Qwen2ForCausalLM
723
 
724
+ __all__ = ["Qwen2ForCausalLM", "Qwen2Model", "Qwen2PreTrainedModel", "MDMGenerationMixin"]
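The uploaded modeling_qwen2.py wires Qwen2ForCausalLM (with MDMGenerationMixin) into the config's auto_map, so the checkpoint can be loaded through AutoModelForCausalLM with trust_remote_code=True and sampled with diffusion_generate. The sketch below is illustrative only: the repo id is a placeholder, and the mask token id and sampling settings (steps, alg, temperature) are assumptions, not values taken from this commit.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "username/repo-name"  # placeholder: whichever repo this commit was pushed to

tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    trust_remote_code=True,      # needed so auto_map resolves modeling_qwen2.Qwen2ForCausalLM
    torch_dtype=torch.bfloat16,
).eval()

prompt = "Write a short poem about masked diffusion."
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

# diffusion_generate comes from MDMGenerationMixin; mask_token_id is required and is
# assumed here to be exposed by the bundled tokenizer.
out = model.diffusion_generate(
    inputs=input_ids,
    max_new_tokens=128,
    steps=128,                   # number of iterative unmasking steps
    temperature=0.0,             # greedy token choice at each step
    alg="entropy",               # confidence heuristic for picking positions to unmask
    mask_token_id=tokenizer.mask_token_id,
    return_dict_in_generate=True,
)
print(tokenizer.decode(out.sequences[0], skip_special_tokens=True))

With return_dict_in_generate=True the call returns an MDMModelOutput whose sequences field holds the fully unmasked ids; passing output_history=True as well keeps the intermediate sequence from each unmasking step.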