# beeper.py
# Beeper — Rose-based tiny GPT (inference, with runtime pentachora influence + class/topic/mood selection)

from __future__ import annotations

import math, re, inspect
from contextlib import nullcontext
from typing import Optional, Dict, Any, Iterable

import torch
import torch.nn as nn
import torch.nn.functional as F

torch.set_float32_matmul_precision("high")
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

# ---- SDPA (FlashAttention) selection ----
try:
    from torch.nn.attention import sdpa_kernel as _sdpa_kernel_modern
    from torch.nn.attention import SDPBackend as _SDPBackend
    _SDPA_SIG = inspect.signature(_sdpa_kernel_modern)
    _sdpa_kernel = _sdpa_kernel_modern
except Exception:
    try:
        from torch.backends.cuda import sdp_kernel as _sdpa_kernel_legacy
        _SDPA_SIG = inspect.signature(_sdpa_kernel_legacy)
        _SDPBackend = None
        _sdpa_kernel = _sdpa_kernel_legacy
    except Exception:
        _SDPA_SIG = None
        _SDPBackend = None
        _sdpa_kernel = None


def sdpa_ctx_prefer_flash():
    if _sdpa_kernel is None or _SDPA_SIG is None:
        return nullcontext()
    params = {p.name for p in _SDPA_SIG.parameters.values()}
    try:
        if "backends" in params and _SDPBackend is not None:
            return _sdpa_kernel(backends=[_SDPBackend.FLASH_ATTENTION,
                                          _SDPBackend.EFFICIENT_ATTENTION,
                                          _SDPBackend.MATH])
        if "backend" in params and _SDPBackend is not None:
            return _sdpa_kernel(backend=_SDPBackend.FLASH_ATTENTION)
        if {"enable_flash", "enable_math", "enable_mem_efficient"} <= params:
            return _sdpa_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=True)
        if {"use_flash", "use_math", "use_mem_efficient"} <= params:
            return _sdpa_kernel(use_flash=True, use_math=False, use_mem_efficient=True)
    except Exception:
        pass
    return nullcontext()


# ---------------- Blocks ----------------
class CausalSelfAttention(nn.Module):
    def __init__(self, dim: int, n_heads: int, attn_dropout: float = 0.0):
        super().__init__()
        assert dim % n_heads == 0
        self.nh = n_heads
        self.hd = dim // n_heads
        self.qkv = nn.Linear(dim, 3 * dim, bias=False)
        self.proj = nn.Linear(dim, dim, bias=False)
        self.attn_dropout = float(attn_dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        B, T, C = x.shape
        qkv = self.qkv(x)
        q, k, v = qkv.chunk(3, dim=-1)
        q = q.view(B, T, self.nh, self.hd).transpose(1, 2)
        k = k.view(B, T, self.nh, self.hd).transpose(1, 2)
        v = v.view(B, T, self.nh, self.hd).transpose(1, 2)
        if x.is_cuda:
            with sdpa_ctx_prefer_flash():
                y = F.scaled_dot_product_attention(
                    q, k, v, is_causal=True,
                    dropout_p=self.attn_dropout if self.training else 0.0)
        else:
            scale = 1.0 / math.sqrt(self.hd)
            att = (q @ k.transpose(-2, -1)) * scale
            mask = torch.triu(torch.full((1, 1, T, T), float("-inf"), device=x.device), diagonal=1)
            y = (att + mask).softmax(dim=-1) @ v
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.proj(y)


class MLP(nn.Module):
    def __init__(self, dim: int, mlp_ratio: float = 4.0, dropout: float = 0.1):
        super().__init__()
        h = int(dim * mlp_ratio)
        self.fc1 = nn.Linear(dim, h)
        self.fc2 = nn.Linear(h, dim)
        self.drop = nn.Dropout(dropout)

    def forward(self, x):
        x = F.gelu(self.fc1(x), approximate="tanh")
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


# --------------- Model ---------------
class BeeperRoseGPT(nn.Module):
    """
    Runtime pentachora control via self.runtime_cfg:
      {
        "enable": bool, "pool": "mean"|"last", "temp": 0.10,
        "coarse_alpha": float, "topic_alpha": float, "mood_alpha": float,
        # NEW: selection masks (ints or lists of ints)
        "coarse_select": Optional[Iterable[int]],
        "topic_select": Optional[Iterable[int]],
"mood_select": Optional[Iterable[int]], } """ def __init__(self, cfg: dict): super().__init__() V, D, Ctx = cfg["vocab_size"], cfg["dim"], cfg["context"] H, L, MR = cfg["n_heads"], cfg["n_layers"], cfg["mlp_ratio"] RD, AD = cfg.get("resid_dropout", 0.1), cfg.get("dropout", 0.0) self.grad_checkpoint = bool(cfg.get("grad_checkpoint", False)) self.runtime_cfg: Dict[str, Any] = dict(cfg.get("runtime_pentachora", {}) or {}) self.vocab_size, self.context = int(V), int(Ctx) self.token_emb = nn.Embedding(V, D) self.pos_emb = nn.Parameter(torch.zeros(1, Ctx, D)) self.drop = nn.Dropout(RD) self.blocks = nn.ModuleList([ nn.ModuleDict({ "norm1": nn.LayerNorm(D), "attn": CausalSelfAttention(D, H, attn_dropout=AD), "norm2": nn.LayerNorm(D), "mlp": MLP(D, mlp_ratio=MR, dropout=RD), }) for _ in range(L) ]) self.norm = nn.LayerNorm(D) self.lm_head = nn.Linear(D, V, bias=False) self.lm_head.weight = self.token_emb.weight # Rose anchors (kept for compatibility) self.rose_proj = nn.Linear(D, D, bias=False) self.rose_anchors = nn.Parameter(torch.randn(3, D) / (D**0.5)) # Pentachora banks self.register_buffer("pent_inited", torch.tensor(0, dtype=torch.uint8), persistent=False) self.penta_coarse: Optional[nn.Parameter] = None # [C,5,D] self.penta_medium: Optional[nn.Parameter] = None # [T,5,D] self.penta_fine: Optional[nn.Parameter] = None # [M,5,D] self.apply(self._init) @staticmethod def _init(m): if isinstance(m, nn.Linear): nn.init.normal_(m.weight, mean=0.0, std=0.02) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.Embedding): nn.init.normal_(m.weight, mean=0.0, std=0.02) def ensure_pentachora(self, coarse_C: int, medium_C: int, fine_C: int, dim: int, device: torch.device): if self.pent_inited.item() == 1: return def bank(C: int) -> nn.Parameter: if C <= 0: return nn.Parameter(torch.zeros((0,5,dim), device=device)) pts = torch.randn(C, 5, dim, device=device) pts = F.normalize(pts - pts.mean(dim=1, keepdim=True), dim=-1) return nn.Parameter(pts) self.penta_coarse = bank(int(coarse_C)) self.penta_medium = bank(int(medium_C)) self.penta_fine = bank(int(fine_C)) self.pent_inited.fill_(1) def set_runtime_pentachora(self, cfg: Dict[str, Any]) -> None: self.runtime_cfg.update(cfg or {}) def _pool_hidden(self, h: torch.Tensor, mode: str) -> torch.Tensor: return h.mean(dim=1) if mode == "mean" else h[:, -1, :] @staticmethod def _normalize_indices(sel: Optional[Iterable[int]], C: int) -> Optional[torch.Tensor]: if sel is None: return None if isinstance(sel, int): sel = [sel] sel = [int(x) for x in sel if 0 <= int(x) < C] if not sel: return None return torch.as_tensor(sel, dtype=torch.long) @staticmethod def _weighted_nearest_vertex_target( pooled: torch.Tensor, # [B,D] bank: torch.Tensor, # [C,5,D] temp: float, restrict_idx: Optional[torch.Tensor] = None # [K] or None ) -> torch.Tensor: """ If restrict_idx is given, compute target within the selected classes only. 
""" B, D = pooled.shape if bank.size(0) == 0: return pooled if restrict_idx is not None: bank = bank.index_select(0, restrict_idx.to(bank.device)) # [K,5,D] diffs = pooled[:, None, None, :] - bank[None, :, :, :] # [B,C|K,5,D] dists = torch.norm(diffs, dim=-1) # [B,C|K,5] min_dists = dists.min(dim=2).values # [B,C|K] sims = -min_dists / max(1e-8, float(temp)) # [B,C|K] weights = F.softmax(sims, dim=-1) # [B,C|K] nearest = bank.unsqueeze(0).gather(2, dists.argmin(dim=2)[...,None,None].expand(B, weights.size(1), 1, D)).squeeze(2) # [B,C|K,D] target = (weights.unsqueeze(-1) * nearest).sum(dim=1) # [B,D] return target def _apply_runtime_vertex_pull(self, h: torch.Tensor, runtime_cfg: Dict[str, Any]) -> torch.Tensor: if not runtime_cfg or not runtime_cfg.get("enable", False): return h pool_mode = str(runtime_cfg.get("pool", "mean")) temp = float(runtime_cfg.get("temp", 0.10)) a_coarse = float(runtime_cfg.get("coarse_alpha", 0.0)) a_topic = float(runtime_cfg.get("topic_alpha", 0.0)) a_mood = float(runtime_cfg.get("mood_alpha", 0.0)) if a_coarse<=0 and a_topic<=0 and a_mood<=0: return h pooled = self._pool_hidden(h, pool_mode) # [B,D] delta = None if a_coarse>0 and getattr(self, "penta_coarse", None) is not None: C = self.penta_coarse.size(0) r = self._normalize_indices(runtime_cfg.get("coarse_select"), C) tgt = self._weighted_nearest_vertex_target(pooled, self.penta_coarse, temp, r) d = tgt - pooled delta = a_coarse * d if delta is None else delta + a_coarse * d if a_topic>0 and getattr(self, "penta_medium", None) is not None: C = self.penta_medium.size(0) r = self._normalize_indices(runtime_cfg.get("topic_select"), C) tgt = self._weighted_nearest_vertex_target(pooled, self.penta_medium, temp, r) d = tgt - pooled delta = a_topic * d if delta is None else delta + a_topic * d if a_mood>0 and getattr(self, "penta_fine", None) is not None: C = self.penta_fine.size(0) r = self._normalize_indices(runtime_cfg.get("mood_select"), C) tgt = self._weighted_nearest_vertex_target(pooled, self.penta_fine, temp, r) d = tgt - pooled delta = a_mood * d if delta is None else delta + a_mood * d if delta is None: return h return h + delta.unsqueeze(1) # broadcast across time # ---- forward ---- def _block_forward(self, blk: nn.ModuleDict, x: torch.Tensor) -> torch.Tensor: x = x + blk["attn"](blk["norm1"](x)) x = x + blk["mlp"](blk["norm2"](x)) return x def backbone(self, idx: torch.Tensor) -> torch.Tensor: B, T = idx.shape x = self.token_emb(idx) + self.pos_emb[:, :T, :] x = self.drop(x) if self.grad_checkpoint and self.training: from torch.utils.checkpoint import checkpoint for blk in self.blocks: x = checkpoint(lambda _x: self._block_forward(blk, _x), x) # type: ignore else: for blk in self.blocks: x = self._block_forward(blk, x) return self.norm(x) def forward(self, idx: torch.Tensor, runtime_cfg: Optional[Dict[str, Any]] = None) -> torch.Tensor: h = self.backbone(idx) cfg = self.runtime_cfg if runtime_cfg is None else {**self.runtime_cfg, **(runtime_cfg or {})} h = self._apply_runtime_vertex_pull(h, cfg) return self.lm_head(h) # Utilities def hidden_states(self, idx: torch.Tensor) -> torch.Tensor: return self.backbone(idx) def rose_hidden_pool(self, h: torch.Tensor, mode: str = "mean") -> torch.Tensor: return h.mean(dim=1) if mode=="mean" else h[:, -1, :] # ---- Loader helper ---- def prepare_model_for_state_dict(model: BeeperRoseGPT, state_dict: Dict[str, torch.Tensor], device: Optional[torch.device] = None) -> None: device = device or next(model.parameters()).device need = all(k in state_dict for k in 
("penta_coarse","penta_medium","penta_fine")) if not need: return D = model.token_emb.embedding_dim pc, pt, pm = state_dict["penta_coarse"], state_dict["penta_medium"], state_dict["penta_fine"] ok = lambda t: (t.ndim==3 and t.size(1)==5 and t.size(2)==D) if not (ok(pc) and ok(pt) and ok(pm)): return model.ensure_pentachora(pc.size(0), pt.size(0), pm.size(0), dim=D, device=device) # ---- Generation ---- def _detok(text: str) -> str: text = re.sub(r"\s+([,.;:!?%])", r"\1", text) text = re.sub(r"\s+([\)\]\}])", r"\1", text) text = re.sub(r"([\(\[\{])\s+", r"\1", text) return text @torch.no_grad() def generate(model: BeeperRoseGPT, tok, cfg: dict, prompt: str, max_new_tokens: int = 120, temperature: float | None = None, top_k: int | None = None, top_p: float | None = None, repetition_penalty: float | None = None, presence_penalty: float | None = None, frequency_penalty: float | None = None, device: Optional[torch.device] = None, detokenize: bool = True, runtime_cfg: Optional[Dict[str, Any]] = None) -> str: temperature = cfg.get("temperature", 0.9) if temperature is None else float(temperature) top_k = cfg.get("top_k", 40) if top_k is None else int(top_k) top_p = cfg.get("top_p", 0.9) if top_p is None else float(top_p) repetition_penalty = cfg.get("repetition_penalty", 1.10) if repetition_penalty is None else float(repetition_penalty) presence_penalty = cfg.get("presence_penalty", 0.6) if presence_penalty is None else float(presence_penalty) frequency_penalty = cfg.get("frequency_penalty", 0.0) if frequency_penalty is None else float(frequency_penalty) device = device or next(model.parameters()).device model.eval() ids = tok.encode(prompt).ids x = torch.tensor([ids], dtype=torch.long, device=device) V = int(cfg["vocab_size"]) counts = torch.zeros(V, dtype=torch.int32, device=device) for t in ids: if 0 <= t < V: counts[t] += 1 for _ in range(int(max_new_tokens)): logits = model(x[:, -cfg["context"]:], runtime_cfg=runtime_cfg) logits = logits[:, -1, :] if repetition_penalty and repetition_penalty != 1.0: mask = counts > 0 if mask.any(): pos = logits[:, mask] > 0 logits[:, mask][pos] /= repetition_penalty logits[:, mask][~pos] *= repetition_penalty if presence_penalty or frequency_penalty: pen = counts.float() * (frequency_penalty or 0.0) + (counts>0).float() * (presence_penalty or 0.0) logits = logits - pen.unsqueeze(0) logits = logits / max(1e-8, temperature) if top_k and top_k > 0: k = min(top_k, logits.size(-1)) v, ix = torch.topk(logits, k, dim=-1) logits = torch.full_like(logits, float("-inf")).scatter(-1, ix, v) if top_p and top_p < 1.0: sl, si = torch.sort(logits, descending=True) ps = F.softmax(sl, dim=-1) cdf = torch.cumsum(ps, dim=-1) cutoff = (cdf > top_p).float().argmax(dim=-1) mask = torch.arange(logits.size(-1), device=device).unsqueeze(0) > cutoff.unsqueeze(-1) sl = sl.masked_fill(mask, float("-inf")) logits = torch.full_like(logits, float("-inf")).scatter(-1, si, sl) probs = F.softmax(logits, dim=-1) next_id = torch.multinomial(probs, num_samples=1) x = torch.cat([x, next_id], dim=1) nid = next_id.item() if 0 <= nid < V: counts[nid] += 1 out = tok.decode(x[0].tolist()) return _detok(out) if detokenize else out