AbstractPhil committed (verified)
Commit e9b0f2d · Parent(s): e60b412

Create beeper_model.py

Files changed (1): beeper_model.py (+271, -0)
beeper_model.py ADDED
@@ -0,0 +1,271 @@
+"""
+beeper_model.py - Core model module for Beeper
+Extracted from the training code for use in inference/apps
+"""
+
+import os
+import re
+import math
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from typing import Optional
+from safetensors.torch import load_file as load_safetensors
+
+# =========================================================================================
+# Model Components
+# =========================================================================================
+
+class CausalSelfAttention(nn.Module):
+    def __init__(self, dim: int, n_heads: int, attn_dropout: float = 0.0):
+        super().__init__()
+        assert dim % n_heads == 0
+        self.nh = n_heads
+        self.hd = dim // n_heads
+        self.qkv = nn.Linear(dim, 3 * dim, bias=False)
+        self.proj = nn.Linear(dim, dim, bias=False)
+        self.attn_dropout = attn_dropout
+
+    def forward(self, x):
+        B, T, C = x.shape
+        qkv = self.qkv(x)
+        q, k, v = qkv.chunk(3, dim=-1)
+        q = q.view(B, T, self.nh, self.hd).transpose(1, 2)
+        k = k.view(B, T, self.nh, self.hd).transpose(1, 2)
+        v = v.view(B, T, self.nh, self.hd).transpose(1, 2)
+
+        # PyTorch fused scaled_dot_product_attention with causal masking
+        y = F.scaled_dot_product_attention(
+            q, k, v,
+            is_causal=True,
+            dropout_p=self.attn_dropout if self.training else 0.0,
+        )
+
+        y = y.transpose(1, 2).contiguous().view(B, T, C)
+        return self.proj(y)
+
+
+class MLP(nn.Module):
+    def __init__(self, dim, mlp_ratio=4.0, dropout=0.1):
+        super().__init__()
+        hidden = int(dim * mlp_ratio)
+        self.fc1 = nn.Linear(dim, hidden)
+        self.fc2 = nn.Linear(hidden, dim)
+        self.drop = nn.Dropout(dropout)
+
+    def forward(self, x):
+        x = self.fc1(x)
+        x = F.gelu(x, approximate="tanh")
+        x = self.drop(x)
+        x = self.fc2(x)
+        x = self.drop(x)
+        return x
+
+
+class BeeperRoseGPT(nn.Module):
+    def __init__(self, cfg: dict):
+        super().__init__()
+        V = cfg.get("vocab_size", 8192)
+        D = cfg.get("dim", 512)
+        Ctx = cfg.get("context", 512)
+        H = cfg.get("n_heads", 8)
+        L = cfg.get("n_layers", 6)
+        MR = cfg.get("mlp_ratio", 4.0)
+        RD = cfg.get("resid_dropout", 0.1)
+        AD = cfg.get("dropout", 0.0)
+
+        self.vocab_size = V
+        self.context = Ctx
+
+        # Core transformer components
+        self.token_emb = nn.Embedding(V, D)
+        self.pos_emb = nn.Parameter(torch.zeros(1, Ctx, D))
+        self.drop = nn.Dropout(RD)
+
+        self.blocks = nn.ModuleList([
+            nn.ModuleDict({
+                "norm1": nn.LayerNorm(D),
+                "attn": CausalSelfAttention(D, H, attn_dropout=AD),
+                "norm2": nn.LayerNorm(D),
+                "mlp": MLP(D, mlp_ratio=MR, dropout=RD),
+            }) for _ in range(L)
+        ])
+
+        self.norm = nn.LayerNorm(D)
+        self.lm_head = nn.Linear(D, V, bias=False)
+
+        # Weight tying
+        self.lm_head.weight = self.token_emb.weight
+
+        # Rose components (for compatibility, may not be used in inference)
+        self.rose_proj = nn.Linear(D, D, bias=False)
+        self.rose_anchors = nn.Parameter(torch.randn(3, D) / (D**0.5))
+
+        # Pentachora placeholders (not needed for inference but kept for weight compatibility)
+        self.register_buffer("pent_inited", torch.tensor(0, dtype=torch.uint8), persistent=False)
+        self.penta_coarse = None
+        self.penta_medium = None
+        self.penta_fine = None
+
+        self.apply(self._init)
+        self.grad_checkpoint = False
+
+    @staticmethod
+    def _init(m):
+        if isinstance(m, nn.Linear):
+            nn.init.normal_(m.weight, mean=0.0, std=0.02)
+            if m.bias is not None:
+                nn.init.zeros_(m.bias)
+        elif isinstance(m, nn.Embedding):
+            nn.init.normal_(m.weight, mean=0.0, std=0.02)
+
+    def _block_forward(self, blk, x):
+        x = x + blk["attn"](blk["norm1"](x))
+        x = x + blk["mlp"](blk["norm2"](x))
+        return x
+
+    def backbone(self, idx):
+        B, T = idx.shape
+        x = self.token_emb(idx) + self.pos_emb[:, :T, :]
+        x = self.drop(x)
+
+        for blk in self.blocks:
+            x = self._block_forward(blk, x)
+
+        return self.norm(x)
+
+    def forward(self, idx):
+        h = self.backbone(idx)
+        return self.lm_head(h)
+
+    def hidden_states(self, idx):
+        return self.backbone(idx)
+
+    def load_state_dict(self, state_dict, strict=False):
+        """Custom load that handles pentachora bank initialization gracefully"""
+        # Clean state dict keys
+        cleaned = {}
+        for k, v in state_dict.items():
+            if k.startswith("_orig_mod."):
+                k = k[10:]
+            if k.startswith("module."):
+                k = k[7:]
+            cleaned[k] = v
+
+        # Initialize pentachora if present in checkpoint
+        if "penta_coarse" in cleaned:
+            self.penta_coarse = nn.Parameter(cleaned["penta_coarse"])
+        if "penta_medium" in cleaned:
+            self.penta_medium = nn.Parameter(cleaned["penta_medium"])
+        if "penta_fine" in cleaned:
+            self.penta_fine = nn.Parameter(cleaned["penta_fine"])
+
+        return super().load_state_dict(cleaned, strict=strict)
+
+
+# =========================================================================================
+# Generation
+# =========================================================================================
+
+def _detokenize(text: str) -> str:
+    """Clean up tokenization artifacts"""
+    text = re.sub(r"\s+([,.;:!?%])", r"\1", text)
+    text = re.sub(r"\s+([\)\]\}])", r"\1", text)
+    text = re.sub(r"([\(\[\{])\s+", r"\1", text)
+    return text
+
+
+@torch.no_grad()
+def generate(
+    model: BeeperRoseGPT,
+    tok,  # Tokenizer object exposing .encode(...).ids and .decode(...)
+    cfg: dict,
+    prompt: str,
+    max_new_tokens: int = 120,
+    temperature: Optional[float] = None,
+    top_k: Optional[int] = None,
+    top_p: Optional[float] = None,
+    repetition_penalty: Optional[float] = None,
+    presence_penalty: Optional[float] = None,
+    frequency_penalty: Optional[float] = None,
+    device: Optional[torch.device] = None,
+    detokenize: bool = True,
+) -> str:
+    """
+    Generate text from the Beeper model with various sampling strategies.
+    """
+    # Use defaults from config if not specified
+    temperature = temperature if temperature is not None else cfg.get("temperature", 0.9)
+    top_k = top_k if top_k is not None else cfg.get("top_k", 40)
+    top_p = top_p if top_p is not None else cfg.get("top_p", 0.9)
+    repetition_penalty = repetition_penalty if repetition_penalty is not None else cfg.get("repetition_penalty", 1.1)
+    presence_penalty = presence_penalty if presence_penalty is not None else cfg.get("presence_penalty", 0.6)
+    frequency_penalty = frequency_penalty if frequency_penalty is not None else cfg.get("frequency_penalty", 0.0)
+
+    device = device or next(model.parameters()).device
+    model.eval()
+
+    # Encode prompt
+    ids = tok.encode(prompt).ids
+    x = torch.tensor([ids], dtype=torch.long, device=device)
+
+    # Track token frequencies for penalties
+    vocab_size = cfg.get("vocab_size", 8192)
+    counts = torch.zeros(vocab_size, dtype=torch.int32, device=device)
+    for t in ids:
+        if 0 <= t < vocab_size:
+            counts[t] += 1
+
+    # Generate tokens
+    for _ in range(max_new_tokens):
+        # Get logits for the next token
+        context_window = cfg.get("context", 512)
+        logits = model(x[:, -context_window:])
+        logits = logits[:, -1, :]
+
+        # Apply repetition penalty to tokens seen so far (masked write-back; chained indexing only edits a copy)
+        if repetition_penalty and repetition_penalty != 1.0:
+            mask = counts > 0
+            if mask.any():
+                seen = logits[:, mask]
+                seen = torch.where(seen > 0, seen / repetition_penalty, seen * repetition_penalty)
+                logits[:, mask] = seen
+
+        # Apply presence and frequency penalties
+        if presence_penalty or frequency_penalty:
+            pen = counts.float() * (frequency_penalty or 0.0) + (counts > 0).float() * (presence_penalty or 0.0)
+            logits = logits - pen.unsqueeze(0)
+
+        # Temperature scaling
+        logits = logits / max(1e-8, temperature)
+
+        # Top-k filtering
+        if top_k and top_k > 0:
+            k = min(top_k, logits.size(-1))
+            v, ix = torch.topk(logits, k, dim=-1)
+            filt = torch.full_like(logits, float("-inf"))
+            logits = filt.scatter_(-1, ix, v)
+
+        # Top-p (nucleus) filtering
+        if top_p and top_p < 1.0:
+            sorted_logits, sorted_indices = torch.sort(logits, descending=True)
+            probs = F.softmax(sorted_logits, dim=-1)
+            cumulative_probs = torch.cumsum(probs, dim=-1)
+
+            # Find cutoff
+            cutoff_idx = (cumulative_probs > top_p).float().argmax(dim=-1)
+            mask = torch.arange(logits.size(-1), device=device).unsqueeze(0) > cutoff_idx.unsqueeze(-1)
+            sorted_logits = sorted_logits.masked_fill(mask, float("-inf"))
+            logits = torch.full_like(logits, float("-inf")).scatter(-1, sorted_indices, sorted_logits)
+
+        # Sample next token
+        probs = F.softmax(logits, dim=-1)
+        next_id = torch.multinomial(probs, num_samples=1)
+
+        # Append to sequence
+        x = torch.cat([x, next_id], dim=1)
+        counts[next_id.item()] += 1
+
+    # Decode output
+    output = tok.decode(x[0].tolist())
+    return _detokenize(output) if detokenize else output
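
Below is a minimal usage sketch, not part of the commit, showing how this module might be wired into an inference app. Only BeeperRoseGPT, generate, and the re-exported load_safetensors come from beeper_model.py; the config values, the checkpoint name beeper.safetensors, and the tokenizer file tokenizer.json are illustrative assumptions.

# Usage sketch; file names and config values below are hypothetical.
import torch
from tokenizers import Tokenizer

from beeper_model import BeeperRoseGPT, generate, load_safetensors

cfg = {"vocab_size": 8192, "dim": 512, "context": 512, "n_heads": 8, "n_layers": 6}

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = BeeperRoseGPT(cfg).to(device)

# The custom load_state_dict strips "_orig_mod."/"module." prefixes and defaults to
# strict=False, so checkpoints from compiled or DDP-wrapped training runs load as-is.
state = load_safetensors("beeper.safetensors")   # hypothetical checkpoint filename
model.load_state_dict(state)

# Any tokenizer exposing .encode(...).ids and .decode(...) works, e.g. a
# Hugging Face `tokenizers` Tokenizer.
tok = Tokenizer.from_file("tokenizer.json")      # hypothetical tokenizer path

print(generate(model, tok, cfg, prompt="Once upon a time", max_new_tokens=80))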