""" | |
Rose Beeper Model - Inference Components | |
Extracted classes and utilities for model inference | |
""" | |
import os | |
import math | |
import torch | |
import torch.nn as nn | |
import torch.nn.functional as F | |
from typing import Optional, Tuple, Dict, Any | |
from contextlib import nullcontext | |
import inspect | |
import re | |
from tokenizers import Tokenizer | |
from safetensors.torch import load_file as load_safetensors | |

# ============================================================================
# SDPA (Scaled Dot Product Attention) Configuration
# ============================================================================

# Version-safe SDPA context helper
try:
    from torch.nn.attention import sdpa_kernel as _sdpa_kernel_modern
    from torch.nn.attention import SDPBackend as _SDPBackend
    _SDPA_SIG = inspect.signature(_sdpa_kernel_modern)
    _sdpa_kernel = _sdpa_kernel_modern
except Exception:
    try:
        from torch.backends.cuda import sdp_kernel as _sdpa_kernel_legacy
        _SDPA_SIG = inspect.signature(_sdpa_kernel_legacy)
        _SDPBackend = None
        _sdpa_kernel = _sdpa_kernel_legacy
    except Exception:
        _SDPA_SIG = None
        _SDPBackend = None
        _sdpa_kernel = None


def sdpa_ctx_prefer_flash():
    """Bias SDPA toward FlashAttention when available; no-op if unknown."""
    if _sdpa_kernel is None or _SDPA_SIG is None:
        return nullcontext()
    params = {p.name for p in _SDPA_SIG.parameters.values()}
    try:
        # Modern API (PyTorch 2.3+): backends=[...]
        if "backends" in params and _SDPBackend is not None:
            return _sdpa_kernel(backends=[
                _SDPBackend.FLASH_ATTENTION,
                _SDPBackend.EFFICIENT_ATTENTION,
                _SDPBackend.MATH,
            ])
        # Modern API (alt): backend=...
        if "backend" in params and _SDPBackend is not None:
            return _sdpa_kernel(backend=_SDPBackend.FLASH_ATTENTION)
        # Legacy boolean flags (old CUDA backend)
        if {"enable_flash", "enable_math", "enable_mem_efficient"} <= params:
            return _sdpa_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=True)
        if {"use_flash", "use_math", "use_mem_efficient"} <= params:
            return _sdpa_kernel(use_flash=True, use_math=False, use_mem_efficient=True)
    except Exception:
        pass
    return nullcontext()
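

# Illustrative usage sketch (not part of the original module): sdpa_ctx_prefer_flash()
# returns a context manager meant to wrap calls to F.scaled_dot_product_attention, e.g.
#
#     q = k = v = torch.randn(1, 8, 16, 64, device="cuda")
#     with sdpa_ctx_prefer_flash():
#         out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
#
# When no known SDPA-selection API is found, it degrades to a no-op context and the
# wrapped call simply runs with PyTorch's default backend choice.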


# ============================================================================
# Model Components
# ============================================================================

class CausalSelfAttention(nn.Module):
    """Multi-head causal self-attention with optional FlashAttention."""

    def __init__(self, dim: int, n_heads: int, attn_dropout: float = 0.0):
        super().__init__()
        assert dim % n_heads == 0
        self.nh = n_heads
        self.hd = dim // n_heads
        self.qkv = nn.Linear(dim, 3 * dim, bias=False)
        self.proj = nn.Linear(dim, dim, bias=False)
        self.attn_dropout = attn_dropout

    def forward(self, x):
        B, T, C = x.shape
        qkv = self.qkv(x)
        q, k, v = qkv.chunk(3, dim=-1)
        q = q.view(B, T, self.nh, self.hd).transpose(1, 2)
        k = k.view(B, T, self.nh, self.hd).transpose(1, 2)
        v = v.view(B, T, self.nh, self.hd).transpose(1, 2)
        if x.is_cuda:
            with sdpa_ctx_prefer_flash():
                y = F.scaled_dot_product_attention(
                    q, k, v,
                    is_causal=True,
                    dropout_p=self.attn_dropout if self.training else 0.0,
                )
        else:
            scale = 1.0 / math.sqrt(self.hd)
            att = (q @ k.transpose(-2, -1)) * scale
            mask = torch.full((1, 1, T, T), float("-inf"), device=x.device)
            mask = torch.triu(mask, diagonal=1)
            att = (att + mask).softmax(dim=-1)
            y = att @ v
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        return self.proj(y)
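

# Illustrative shape check (an added sketch, not part of the original module).
# CausalSelfAttention maps (B, T, C) -> (B, T, C) with C divisible by n_heads:
#
#     attn = CausalSelfAttention(dim=512, n_heads=8)
#     y = attn(torch.randn(2, 16, 512))   # -> torch.Size([2, 16, 512])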


class MLP(nn.Module):
    """Feed-forward network with GELU activation."""

    def __init__(self, dim, mlp_ratio=4.0, dropout=0.1):
        super().__init__()
        hidden = int(dim * mlp_ratio)
        self.fc1 = nn.Linear(dim, hidden)
        self.fc2 = nn.Linear(hidden, dim)
        self.drop = nn.Dropout(dropout)

    def forward(self, x):
        x = self.fc1(x)
        x = F.gelu(x, approximate="tanh")
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class BeeperRoseGPT(nn.Module):
    """Rose Beeper GPT model with pentachora banks for multi-level control."""

    def __init__(self, cfg: dict):
        super().__init__()
        V, D, Ctx = cfg["vocab_size"], cfg["dim"], cfg["context"]
        H, L, MR = cfg["n_heads"], cfg["n_layers"], cfg["mlp_ratio"]
        RD, AD, CKPT = cfg["resid_dropout"], cfg["dropout"], cfg["grad_checkpoint"]
        self.vocab_size, self.context = V, Ctx
        self.token_emb = nn.Embedding(V, D)
        self.pos_emb = nn.Parameter(torch.zeros(1, Ctx, D))
        self.drop = nn.Dropout(RD)
        self.blocks = nn.ModuleList([
            nn.ModuleDict({
                "norm1": nn.LayerNorm(D),
                "attn": CausalSelfAttention(D, H, attn_dropout=AD),
                "norm2": nn.LayerNorm(D),
                "mlp": MLP(D, mlp_ratio=MR, dropout=RD),
            }) for _ in range(L)
        ])
        self.norm = nn.LayerNorm(D)
        self.lm_head = nn.Linear(D, V, bias=False)
        self.lm_head.weight = self.token_emb.weight  # weight tying with the embedding
        # Optional Rose projection + anchors
        self.rose_proj = nn.Linear(D, D, bias=False)
        self.rose_anchors = nn.Parameter(torch.randn(3, D) / (D ** 0.5))
        # Multi-level pentachora; lazily initialized
        self.register_buffer("pent_inited", torch.tensor(0, dtype=torch.uint8), persistent=False)
        self.penta_coarse = None
        self.penta_medium = None
        self.penta_fine = None
        self.apply(self._init)
        self.grad_checkpoint = CKPT

    def _init(self, m):
        if isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, mean=0.0, std=0.02)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.Embedding):
            nn.init.normal_(m.weight, mean=0.0, std=0.02)

    def ensure_pentachora(self, coarse_C: int, medium_C: int, fine_C: int, dim: int, device):
        """Lazily initialize the three pentachora banks (no-op once initialized)."""
        if self.pent_inited.item() == 1:
            return

        def bank(C):
            pts = []
            for _ in range(int(C)):
                A = torch.randn(5, dim, device=device)
                A = F.normalize(A - A.mean(dim=0, keepdim=True), dim=-1)
                pts.append(A)
            return nn.Parameter(torch.stack(pts, dim=0))

        self.penta_coarse = bank(coarse_C)
        self.penta_medium = bank(medium_C)
        self.penta_fine = bank(fine_C)
        self.pent_inited.fill_(1)

    def _block_forward(self, blk, x):
        x = x + blk["attn"](blk["norm1"](x))
        x = x + blk["mlp"](blk["norm2"](x))
        return x

    def backbone(self, idx):
        B, T = idx.shape
        x = self.token_emb(idx) + self.pos_emb[:, :T, :]
        x = self.drop(x)
        if self.grad_checkpoint and self.training:
            from torch.utils.checkpoint import checkpoint
            for blk in self.blocks:
                # Bind blk as a default argument so checkpoint recomputation during
                # backward uses this block, not the loop variable's final value.
                x = checkpoint(lambda _x, _blk=blk: self._block_forward(_blk, _x), x)
        else:
            for blk in self.blocks:
                x = self._block_forward(blk, x)
        return self.norm(x)

    def forward(self, idx):
        h = self.backbone(idx)
        return self.lm_head(h)

    def hidden_states(self, idx):
        return self.backbone(idx)

    def rose_hidden_pool(self, h: torch.Tensor, mode="mean"):
        return h.mean(dim=1) if mode == "mean" else h[:, -1, :]
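

# Illustrative usage sketch (an added example, not part of the original module; it
# assumes get_default_config() defined below, and the pentachora bank sizes shown
# here are arbitrary placeholder values):
#
#     cfg = get_default_config()
#     model = BeeperRoseGPT(cfg)
#     idx = torch.randint(0, cfg["vocab_size"], (1, 32))
#     logits = model(idx)                                          # (1, 32, vocab_size)
#     pooled = model.rose_hidden_pool(model.hidden_states(idx))    # (1, dim)
#     model.ensure_pentachora(coarse_C=64, medium_C=256, fine_C=1024,
#                             dim=cfg["dim"], device=idx.device)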


# ============================================================================
# Model I/O Utilities
# ============================================================================

class BeeperIO:
    """Utilities for saving and loading model weights."""

    @staticmethod
    def clean_state(sd: dict):
        """Clean state-dict keys of common wrappings (torch.compile, DataParallel/DDP)."""
        out = {}
        for k, v in sd.items():
            if k.startswith("_orig_mod."):
                k = k[len("_orig_mod."):]
            if k.startswith("module."):
                k = k[len("module."):]
            out[k] = v
        return out

    @staticmethod
    def load_into_model(model: nn.Module, path: str, map_location="cpu", strict: bool = False):
        """Load weights from a .safetensors or torch checkpoint file into the model."""
        ext = os.path.splitext(path)[1].lower()
        if ext == ".safetensors":
            sd = load_safetensors(path, device="cpu")
        else:
            raw = torch.load(path, map_location=map_location)
            sd = raw["model"] if isinstance(raw, dict) and "model" in raw else raw
        sd = BeeperIO.clean_state(sd)
        result = model.load_state_dict(sd, strict=strict)
        return result.missing_keys, result.unexpected_keys
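

# Illustrative usage sketch (the checkpoint filename below is a hypothetical example):
#
#     model = BeeperRoseGPT(get_default_config())
#     missing, unexpected = BeeperIO.load_into_model(model, "beeper_final.safetensors")
#     if missing or unexpected:
#         print("missing:", missing, "unexpected:", unexpected)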


# ============================================================================
# Text Generation
# ============================================================================

def _detok(text: str) -> str:
    """Clean up tokenized text spacing."""
    text = re.sub(r"\s+([,.;:!?%])", r"\1", text)
    text = re.sub(r"\s+([\)\]\}])", r"\1", text)
    text = re.sub(r"([\(\[\{])\s+", r"\1", text)
    return text
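

# Example: _detok("Hello , world ( test ) !") -> "Hello, world (test)!"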


def generate(model: BeeperRoseGPT,
             tok: Tokenizer,
             cfg: dict,
             prompt: str,
             max_new_tokens: int = 120,
             temperature: Optional[float] = None,
             top_k: Optional[int] = None,
             top_p: Optional[float] = None,
             repetition_penalty: Optional[float] = None,
             presence_penalty: Optional[float] = None,
             frequency_penalty: Optional[float] = None,
             device: Optional[torch.device] = None,
             detokenize: bool = True) -> str:
    """
    Generate text from a prompt using the model.

    Args:
        model: The BeeperRoseGPT model
        tok: Tokenizer instance
        cfg: Configuration dictionary
        prompt: Input text prompt
        max_new_tokens: Maximum number of tokens to generate
        temperature: Sampling temperature (higher = more random)
        top_k: Top-k sampling parameter
        top_p: Top-p (nucleus) sampling parameter
        repetition_penalty: Penalty for repeated tokens
        presence_penalty: Penalty for tokens that have appeared
        frequency_penalty: Penalty based on token frequency
        device: Device to run on
        detokenize: Whether to clean up tokenization artifacts

    Returns:
        Generated text string
    """
    # Use defaults from config if not specified
    temperature = cfg["temperature"] if temperature is None else temperature
    top_k = cfg["top_k"] if top_k is None else top_k
    top_p = cfg["top_p"] if top_p is None else top_p
    repetition_penalty = cfg["repetition_penalty"] if repetition_penalty is None else repetition_penalty
    presence_penalty = cfg["presence_penalty"] if presence_penalty is None else presence_penalty
    frequency_penalty = cfg["frequency_penalty"] if frequency_penalty is None else frequency_penalty
    device = device or next(model.parameters()).device
    model.eval()

    # Tokenize prompt
    ids = tok.encode(prompt).ids
    x = torch.tensor([ids], dtype=torch.long, device=device)

    # Track token counts for penalties
    counts = torch.zeros(cfg["vocab_size"], dtype=torch.int32, device=device)
    for t in ids:
        if 0 <= t < cfg["vocab_size"]:
            counts[t] += 1

    # Generate tokens (no autograd needed at inference time)
    with torch.no_grad():
        for _ in range(max_new_tokens):
            # Get logits for next token
            logits = model(x[:, -cfg["context"]:])
            logits = logits[:, -1, :]

            # Apply repetition penalty; write the result back, since boolean
            # indexing returns a copy rather than a view of `logits`
            if repetition_penalty and repetition_penalty != 1.0:
                mask = counts > 0
                if mask.any():
                    sel = logits[:, mask]
                    sel = torch.where(sel > 0, sel / repetition_penalty, sel * repetition_penalty)
                    logits[:, mask] = sel

            # Apply presence and frequency penalties
            if presence_penalty or frequency_penalty:
                pen = counts.float() * (frequency_penalty or 0.0) + (counts > 0).float() * (presence_penalty or 0.0)
                logits = logits - pen.unsqueeze(0)

            # Apply temperature
            logits = logits / max(1e-8, temperature)

            # Apply top-k sampling
            if top_k and top_k > 0:
                k = min(top_k, logits.size(-1))
                v, ix = torch.topk(logits, k, dim=-1)
                filt = torch.full_like(logits, float("-inf"))
                logits = filt.scatter_(-1, ix, v)

            # Apply top-p (nucleus) sampling
            if top_p and top_p < 1.0:
                sl, si = torch.sort(logits, descending=True)
                ps = F.softmax(sl, dim=-1)
                cdf = torch.cumsum(ps, dim=-1)
                cutoff = (cdf > top_p).float().argmax(dim=-1)
                mask = torch.arange(logits.size(-1), device=device).unsqueeze(0) > cutoff.unsqueeze(-1)
                sl = sl.masked_fill(mask, float("-inf"))
                logits = torch.full_like(logits, float("-inf")).scatter(-1, si, sl)

            # Sample next token
            probs = F.softmax(logits, dim=-1)
            next_id = torch.multinomial(probs, num_samples=1)
            x = torch.cat([x, next_id], dim=1)
            counts[next_id.item()] += 1

    # Decode output
    out = tok.decode(x[0].tolist())
    return _detok(out) if detokenize else out
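

# Illustrative usage sketch (an added example; the tokenizer and weight filenames
# are hypothetical placeholders, not files shipped with this module):
#
#     cfg = get_default_config()
#     tok = Tokenizer.from_file("beeper_tokenizer.json")
#     model = BeeperRoseGPT(cfg).to("cuda" if torch.cuda.is_available() else "cpu")
#     BeeperIO.load_into_model(model, "beeper_final.safetensors")
#     print(generate(model, tok, cfg, "Once upon a time", max_new_tokens=60))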


# ============================================================================
# Default Configuration
# ============================================================================

def get_default_config():
    """Get the default configuration for the model."""
    return {
        "name": "Rose-Beeper",
        "context": 512,
        "vocab_size": 8192,
        "dim": 512,
        "n_layers": 6,
        "n_heads": 8,
        "mlp_ratio": 4.0,
        "dropout": 0.0,
        "resid_dropout": 0.1,
        "grad_checkpoint": False,
        # Generation defaults
        "temperature": 0.9,
        "top_k": 40,
        "top_p": 0.9,
        "repetition_penalty": 1.10,
        "presence_penalty": 0.6,
        "frequency_penalty": 0.0,
        # Capoera configuration
        "capoera": {
            "enable": True,
            "topic_bins": 512,
            "mood_bins": 7,
        },
    }
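

# ============================================================================
# Smoke Test (illustrative addition, not part of the original inference API)
# ============================================================================
# A minimal shape check with random token ids and an untrained model; it avoids
# generate() because that requires a tokenizer file this sketch does not assume.
if __name__ == "__main__":
    cfg = get_default_config()
    model = BeeperRoseGPT(cfg)
    idx = torch.randint(0, cfg["vocab_size"], (2, 16))
    logits = model(idx)
    assert logits.shape == (2, 16, cfg["vocab_size"])
    print("forward OK:", tuple(logits.shape))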