Spaces: Running on Zero

Update beeper_model.py

beeper_model.py · CHANGED · +220 -106
@@ Previous version @@
106 lines were removed; most of the deleted lines are truncated in this page capture.
What is still visible of the old code: re was imported at the top and typing provided
only Optional; config values were read with fallbacks (MR = cfg.get("mlp_ratio", 4.0),
RD = cfg.get("resid_dropout", 0.1), AD = cfg.get("dropout", 0.0), and, in generate(),
vocab_size = cfg.get("vocab_size", 8192)); the detokenizer was named _detokenize;
checkpoint loading lived on the model itself, stripping "_orig_mod." and "module."
key prefixes and re-wrapping penta_fine as an nn.Parameter; and generate() windowed
the input with a context_window variable. All of this is superseded by the new version
below, in which regions the commit left unchanged are collapsed to
"@@ (unchanged lines elided) @@" markers.
@@ New version @@

"""
Rose Beeper Model V4 Fixed - Inference Components
Extracted classes and utilities for model inference
"""

import os
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Dict, Any
from contextlib import nullcontext
import re
import inspect

# ============================== Environment Setup ==============================
torch.set_float32_matmul_precision("high")
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

# ============================== SDPA Helper ==============================
try:
    from torch.nn.attention import sdpa_kernel as _sdpa_kernel_modern
    from torch.nn.attention import SDPBackend as _SDPBackend
    _SDPA_SIG = inspect.signature(_sdpa_kernel_modern)
    _sdpa_kernel = _sdpa_kernel_modern
except Exception:
    try:
        from torch.backends.cuda import sdp_kernel as _sdpa_kernel_legacy
        _SDPA_SIG = inspect.signature(_sdpa_kernel_legacy)
        _SDPBackend = None
        _sdpa_kernel = _sdpa_kernel_legacy
    except Exception:
        _SDPA_SIG = None
        _SDPBackend = None
        _sdpa_kernel = None

def sdpa_ctx_prefer_flash():
    """Bias SDPA toward FlashAttention when available; no-op if unknown."""
    if _sdpa_kernel is None or _SDPA_SIG is None:
        return nullcontext()

    params = {p.name for p in _SDPA_SIG.parameters.values()}
    try:
        if "backends" in params and _SDPBackend is not None:
            return _sdpa_kernel(backends=[
                _SDPBackend.FLASH_ATTENTION,
                _SDPBackend.EFFICIENT_ATTENTION,
                _SDPBackend.MATH
            ])
        if "backend" in params and _SDPBackend is not None:
            return _sdpa_kernel(backend=_SDPBackend.FLASH_ATTENTION)
        if {"enable_flash", "enable_math", "enable_mem_efficient"} <= params:
            return _sdpa_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=True)
        if {"use_flash", "use_math", "use_mem_efficient"} <= params:
            return _sdpa_kernel(use_flash=True, use_math=False, use_mem_efficient=True)
    except Exception:
        pass
    return nullcontext()
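Usage sketch (not part of the commit): the helper always hands back a context manager, even when no SDPA kernel-selection API is available, so call sites can wrap attention unconditionally. Shapes below are illustrative.

    q = torch.randn(1, 8, 16, 64)                      # (batch, heads, seq, head_dim)
    k, v = torch.randn_like(q), torch.randn_like(q)
    with sdpa_ctx_prefer_flash():
        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)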
# ============================== Model Components ==============================
class CausalSelfAttention(nn.Module):
    """Multi-head causal self-attention with optional FlashAttention."""

    def __init__(self, dim: int, n_heads: int, attn_dropout: float = 0.0):
        super().__init__()
        assert dim % n_heads == 0

@@ (unchanged lines elided: rest of __init__ and the start of forward()) @@

        k = k.view(B, T, self.nh, self.hd).transpose(1, 2)
        v = v.view(B, T, self.nh, self.hd).transpose(1, 2)

        if x.is_cuda:
            # GPU path: SDPA, biased toward FlashAttention
            with sdpa_ctx_prefer_flash():
                y = F.scaled_dot_product_attention(
                    q, k, v,
                    is_causal=True,
                    dropout_p=self.attn_dropout if self.training else 0.0,
                )
        else:
            # Portable fallback: explicit causal mask plus softmax attention
            scale = 1.0 / math.sqrt(self.hd)
            att = (q @ k.transpose(-2, -1)) * scale
            mask = torch.full((1, 1, T, T), float("-inf"), device=x.device)
            mask = torch.triu(mask, diagonal=1)
            att = (att + mask).softmax(dim=-1)
            y = att @ v

        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.proj(y)
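A quick shape check of the module above (a sketch; the q/k/v and output projections live in the code the diff elides):

    attn = CausalSelfAttention(dim=512, n_heads=8)
    x = torch.randn(2, 16, 512)              # (batch, seq, dim)
    assert attn(x).shape == (2, 16, 512)     # attention preserves the input shape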
class MLP(nn.Module):
    """Feed-forward MLP block with GELU activation."""

    def __init__(self, dim, mlp_ratio=4.0, dropout=0.1):
        super().__init__()
        hidden = int(dim * mlp_ratio)
        self.fc1 = nn.Linear(dim, hidden)
        self.fc2 = nn.Linear(hidden, dim)
        self.drop = nn.Dropout(dropout)

    def forward(self, x):
        x = self.fc1(x)
        x = F.gelu(x, approximate="tanh")

@@ (unchanged lines elided) @@

        x = self.drop(x)
        return x
class BeeperRoseGPT(nn.Module):
    """Main Rose Beeper GPT model with pentachora banks."""

    def __init__(self, cfg: dict):
        super().__init__()
        V, D, Ctx = cfg["vocab_size"], cfg["dim"], cfg["context"]
        H, L, MR = cfg["n_heads"], cfg["n_layers"], cfg["mlp_ratio"]
        RD, AD, CKPT = cfg["resid_dropout"], cfg["dropout"], cfg["grad_checkpoint"]

        self.vocab_size, self.context = V, Ctx
        self.token_emb = nn.Embedding(V, D)
        self.pos_emb = nn.Parameter(torch.zeros(1, Ctx, D))
        self.drop = nn.Dropout(RD)

@@ (unchanged lines elided: start of the transformer block list) @@

                "mlp": MLP(D, mlp_ratio=MR, dropout=RD),
            }) for _ in range(L)
        ])
        self.norm = nn.LayerNorm(D)
        self.lm_head = nn.Linear(D, V, bias=False)
        self.lm_head.weight = self.token_emb.weight  # weight tying

        # Rose projection + anchors
        self.rose_proj = nn.Linear(D, D, bias=False)
        self.rose_anchors = nn.Parameter(torch.randn(3, D) / (D**0.5))

        # Multi-level pentachora; lazily initialized
        self.register_buffer("pent_inited", torch.tensor(0, dtype=torch.uint8), persistent=False)
        self.penta_coarse = None
        self.penta_medium = None
        self.penta_fine = None

        self.apply(self._init)
        self.grad_checkpoint = CKPT

    @staticmethod
    def _init(m):

@@ (unchanged lines elided) @@

        elif isinstance(m, nn.Embedding):
            nn.init.normal_(m.weight, mean=0.0, std=0.02)

    def ensure_pentachora(self, coarse_C: int, medium_C: int, fine_C: int, dim: int, device):
        """Initialize three pentachora banks (5 centered, normalized points per bank entry)."""
        if self.pent_inited.item() == 1:
            return

        def bank(C):
            pts = []
            for _ in range(int(C)):
                A = torch.randn(5, dim, device=device)
                A = F.normalize(A - A.mean(dim=0, keepdim=True), dim=-1)
                pts.append(A)
            return nn.Parameter(torch.stack(pts, dim=0))

        self.penta_coarse = bank(coarse_C)
        self.penta_medium = bank(medium_C)
        self.penta_fine = bank(fine_C)
        self.pent_inited.fill_(1)

    def _block_forward(self, blk, x):
        x = x + blk["attn"](blk["norm1"](x))
        x = x + blk["mlp"](blk["norm2"](x))

@@ (unchanged lines elided; the lines below belong to backbone(), which hidden_states() calls) @@

        B, T = idx.shape
        x = self.token_emb(idx) + self.pos_emb[:, :T, :]
        x = self.drop(x)
        if self.grad_checkpoint and self.training:
            from torch.utils.checkpoint import checkpoint
            for blk in self.blocks:
                x = checkpoint(lambda _x: self._block_forward(blk, _x), x)
        else:
            for blk in self.blocks:
                x = self._block_forward(blk, x)
        return self.norm(x)

    def forward(self, idx):

@@ (unchanged lines elided) @@

    def hidden_states(self, idx):
        return self.backbone(idx)

    def rose_hidden_pool(self, h: torch.Tensor, mode="mean"):
        return h.mean(dim=1) if mode == "mean" else h[:, -1, :]
+
# ============================== IO Utilities ==============================
|
213 |
+
class BeeperIO:
|
214 |
+
"""Utilities for loading and saving model checkpoints."""
|
215 |
+
|
216 |
+
@staticmethod
|
217 |
+
def clean_state(sd: dict):
|
218 |
+
out = {}
|
219 |
+
for k, v in sd.items():
|
220 |
if k.startswith("_orig_mod."):
|
221 |
k = k[10:]
|
222 |
if k.startswith("module."):
|
223 |
k = k[7:]
|
224 |
+
out[k] = v
|
225 |
+
return out
|
226 |
+
|
227 |
+
@staticmethod
|
228 |
+
def load_into_model(model: nn.Module, path: str, map_location="cpu", strict: bool = False):
|
229 |
+
"""Load weights from .pt or .safetensors file."""
|
230 |
+
ext = os.path.splitext(path)[1].lower()
|
231 |
|
232 |
+
if ext == ".safetensors":
|
233 |
+
from safetensors.torch import load_file as load_safetensors
|
234 |
+
sd = load_safetensors(path, device="cpu")
|
235 |
+
else:
|
236 |
+
raw = torch.load(path, map_location="cpu")
|
237 |
+
sd = raw["model"] if isinstance(raw, dict) and "model" in raw else raw
|
|
|
238 |
|
239 |
+
sd = BeeperIO.clean_state(sd)
|
240 |
+
result = model.load_state_dict(sd, strict=strict)
|
241 |
+
return result.missing_keys, result.unexpected_keys
|
242 |
|
243 |
+
# ============================== Generation ==============================
|
244 |
+
def _detok(text: str) -> str:
|
245 |
+
"""Clean up tokenization artifacts."""
|
|
|
|
|
|
|
|
|
246 |
text = re.sub(r"\s+([,.;:!?%])", r"\1", text)
|
247 |
text = re.sub(r"\s+([\)\]\}])", r"\1", text)
|
248 |
text = re.sub(r"([\(\[\{])\s+", r"\1", text)
|
249 |
return text
|
250 |
|
|
|
@torch.no_grad()
def generate(model: BeeperRoseGPT,
             tok: "Tokenizer",
             cfg: dict,
             prompt: str,
             max_new_tokens: int = 120,
             temperature: Optional[float] = None,
             top_k: Optional[int] = None,
             top_p: Optional[float] = None,
             repetition_penalty: Optional[float] = None,
             presence_penalty: Optional[float] = None,
             frequency_penalty: Optional[float] = None,
             device: Optional[torch.device] = None,
             detokenize: bool = True) -> str:
    """
    Generate text from the model with various sampling strategies.

    Args:
        model: The BeeperRoseGPT model
        tok: Tokenizer instance
        cfg: Configuration dictionary
        prompt: Input prompt string
        max_new_tokens: Maximum tokens to generate
        temperature: Sampling temperature
        top_k: Top-k sampling parameter
        top_p: Top-p (nucleus) sampling parameter
        repetition_penalty: Penalty for repeated tokens
        presence_penalty: Penalty for token presence
        frequency_penalty: Penalty based on token frequency
        device: Device to run on
        detokenize: Whether to clean up tokenization

    Returns:
        Generated text string
    """
    # Use defaults from config if not specified
    temperature = cfg["temperature"] if temperature is None else temperature
    top_k = cfg["top_k"] if top_k is None else top_k
    top_p = cfg["top_p"] if top_p is None else top_p
    repetition_penalty = cfg["repetition_penalty"] if repetition_penalty is None else repetition_penalty
    presence_penalty = cfg["presence_penalty"] if presence_penalty is None else presence_penalty
    frequency_penalty = cfg["frequency_penalty"] if frequency_penalty is None else frequency_penalty

    device = device or next(model.parameters()).device
    model.eval()

    # Encode prompt
    ids = tok.encode(prompt).ids
    x = torch.tensor([ids], dtype=torch.long, device=device)
    counts = torch.zeros(cfg["vocab_size"], dtype=torch.int32, device=device)

    # Track token frequencies
    for t in ids:
        if 0 <= t < cfg["vocab_size"]:
            counts[t] += 1

    # Generate tokens
    for _ in range(max_new_tokens):
        # Get logits for next token, windowed to the model's context length
        logits = model(x[:, -cfg["context"]:])
        logits = logits[:, -1, :]

        # Apply repetition penalty

@@ (unchanged lines elided) @@

        # Top-p (nucleus) filtering
        if top_p and top_p < 1.0:
            sl, si = torch.sort(logits, descending=True)
            ps = F.softmax(sl, dim=-1)
            cdf = torch.cumsum(ps, dim=-1)
            cutoff = (cdf > top_p).float().argmax(dim=-1)
            mask = torch.arange(logits.size(-1), device=device).unsqueeze(0) > cutoff.unsqueeze(-1)
            sl = sl.masked_fill(mask, float("-inf"))
            logits = torch.full_like(logits, float("-inf")).scatter(-1, si, sl)

        # Sample next token
        probs = F.softmax(logits, dim=-1)
        next_id = torch.multinomial(probs, num_samples=1)
        x = torch.cat([x, next_id], dim=1)
        counts[next_id.item()] += 1

    # Decode output
    out = tok.decode(x[0].tolist())
    return _detok(out) if detokenize else out
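Usage sketch (not part of the commit): tok is assumed to be a Hugging Face tokenizers.Tokenizer, since the code relies on tok.encode(...).ids and tok.decode(...); the tokenizer path is illustrative, and sampling knobs left as None fall back to the cfg values.

    from tokenizers import Tokenizer
    tok = Tokenizer.from_file("beeper_tokenizer.json")   # illustrative path
    text = generate(model, tok, cfg, prompt="Once upon a time",
                    max_new_tokens=60, temperature=0.8)
    print(text)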
# ============================== Default Configuration ==============================
def get_default_config():
    """Return the default configuration for the Rose Beeper model."""
    return {
        "name": "Rose-Beeper",
        "context": 512,
        "vocab_size": 8192,
        "dim": 512,
        "n_layers": 6,
        "n_heads": 8,
        "mlp_ratio": 4.0,
        "dropout": 0.0,
        "resid_dropout": 0.1,
        "grad_checkpoint": False,

        # Generation parameters
        "temperature": 0.9,
        "top_k": 40,
        "top_p": 0.9,
        "repetition_penalty": 1.10,
        "presence_penalty": 0.6,
        "frequency_penalty": 0.0,

        # Capoera settings
        "capoera": {
            "enable": True,
            "topic_bins": 512,
            "mood_bins": 7,
        }
    }
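A closing note (not part of the commit): unlike the previous version's cfg.get(...) fallbacks, __init__ and generate() now index cfg directly, so a custom config must carry every key that get_default_config() returns. A minimal override pattern:

    cfg = {**get_default_config(), "grad_checkpoint": True, "top_p": 0.95}
    model = BeeperRoseGPT(cfg)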