""" | |
Mirel Harmony Inference β HF Space (Gradio) | |
ZeroGPU-ready, Harmony formatting, optional Rose-guided decoding | |
Single file: app.py | |
""" | |
from __future__ import annotations
import os, gc, json, threading, torch
from dataclasses import dataclass
from typing import List, Dict, Optional, Any
import gradio as gr
import spaces  # required for ZeroGPU
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
# -----------------------
# Config & runtime modes
# -----------------------
DTYPE_MAP = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
MODEL_ID = os.getenv("MODEL_ID", "openai/gpt-oss-20b")
ADAPTER_ID = os.getenv("ADAPTER_ID") or None
ADAPTER_SUBFOLDER = os.getenv("ADAPTER_SUBFOLDER") or None
ATTN_IMPL = os.getenv("ATTN_IMPL", "eager")
DTYPE = DTYPE_MAP.get(os.getenv("DTYPE", "bf16").lower(), torch.bfloat16)
SYSTEM_DEF = os.getenv("SYSTEM_PROMPT", "You are Mirel, a memory-stable symbolic assistant.")
MAX_DEF = int(os.getenv("MAX_NEW_TOKENS", "256"))
ZEROGPU = os.getenv("ZEROGPU", os.getenv("ZERO_GPU", "0")) == "1"
LOAD_4BIT = os.getenv("LOAD_4BIT", "0") == "1"
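# Example Space configuration (values taken from the defaults above and the notes at the
# bottom of this file; adjust to your own repos):
#   MODEL_ID=openai/gpt-oss-20b
#   ADAPTER_ID=AbstractPhil/mirel-gpt-oss-20b
#   ADAPTER_SUBFOLDER=checkpoints/checkpoint-516
#   ZEROGPU=1
#   DTYPE=bf16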
# Optional: HF auth for private/gated repos (Spaces Secrets friendly)
HF_TOKEN: Optional[str] = None
#def _hf_login() -> None:
#    """Login to HF Hub using common env secret names.
#    Works on Spaces with a single secret set. No CUDA touched here.
#    """
#    global HF_TOKEN
#    HF_TOKEN = (
#        os.getenv("HF_TOKEN")
#        or os.getenv("HUGGING_FACE_HUB_TOKEN")
#        or os.getenv("HUGGINGFACEHUB_API_TOKEN")
#    )
#    if HF_TOKEN:
#        try:
#            from huggingface_hub import login, whoami
#            login(token=HF_TOKEN, add_to_git_credential=True)
#            try:
#                who = whoami(token=HF_TOKEN)
#                print(f"[hf] logged in as: {who.get('name') or who.get('email') or who.get('id')}")
#            except Exception:
#                pass
#        except Exception as e:
#            print(f"[hf] login failed: {e}")
#    else:
#        print("[hf] no token found; accessing only public repos")
#
#_hf_login()
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Is HF OAuth configured for this Space? (set automatically when README has `hf_oauth: true`)
OAUTH_READY = bool(os.getenv("OAUTH_CLIENT_ID"))
# Tokenizer is lightweight; load once (pass token for private models)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True, token=HF_TOKEN)
# -----------------------
# Lazy model loader (ZeroGPU-friendly)
# -----------------------
_model: Optional[AutoModelForCausalLM] = None
_model_lock = threading.Lock()
try:
    from peft import PeftModel
    _HAS_PEFT = True
except Exception:
    _HAS_PEFT = False
def _build_model_kwargs(device_map: Optional[str]) -> Dict[str, Any]:
    kw: Dict[str, Any] = dict(
        torch_dtype=DTYPE,
        device_map=device_map,
        attn_implementation=ATTN_IMPL if device_map != "cpu" else "eager",
        trust_remote_code=True,
        low_cpu_mem_usage=True,
    )
    # Only enable 4-bit when not explicitly CPU-bound
    if LOAD_4BIT and device_map != "cpu":
        try:
            import bitsandbytes as _bnb  # noqa: F401
            kw.update(load_in_4bit=True)
            if kw["device_map"] is None:
                kw["device_map"] = "auto"
        except Exception:
            pass
    return kw
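# Note (depends on your transformers version): newer releases prefer
#   quantization_config=BitsAndBytesConfig(load_in_4bit=True)
# over the bare `load_in_4bit=True` kwarg used above; both paths route through bitsandbytes.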
def _load_model_on(device_map: Optional[str]) -> AutoModelForCausalLM:
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID, token=HF_TOKEN, **_build_model_kwargs(device_map))
    if ADAPTER_ID:
        if not _HAS_PEFT:
            raise RuntimeError("peft is required when ADAPTER_ID is set.")
        peft_kwargs: Dict[str, Any] = {}
        if ADAPTER_SUBFOLDER:
            peft_kwargs["subfolder"] = ADAPTER_SUBFOLDER
        model = PeftModel.from_pretrained(model, ADAPTER_ID, is_trainable=False, token=HF_TOKEN, **peft_kwargs)
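        # Optional (LoRA-style adapters only, not exercised here): fold the adapter into the
        # base weights to shave a little per-token overhead at decode time.
        # model = model.merge_and_unload()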
    model.eval(); model.config.use_cache = True
    return model
# -----------------------
# Harmony formatting
# -----------------------
def to_harmony_prompt(messages: List[Dict[str, str]]) -> str:
    """
    Strict Harmony: rely on the tokenizer's official chat template.
    If the template is missing, raise clearly so the Space uses a Harmony-enabled checkpoint.
    """
    tmpl = getattr(tokenizer, "chat_template", None)
    if not tmpl:
        raise RuntimeError(
            "Missing Harmony chat_template on this tokenizer. Use a Harmony-enabled repo (e.g., openai/gpt-oss-20b)."
        )
    return tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
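# Illustrative only: the returned string is the Harmony-rendered conversation, built from the
# same special-token family parsed out of the output below (<|channel|>, <|message|>, <|end|>),
# with add_generation_prompt=True appending the assistant prefix so generation starts a fresh reply:
#   prompt = to_harmony_prompt([{"role": "user", "content": "hi"}])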
# -----------------------
# Optional Rose guidance (logits bias)
# -----------------------
def build_bias_from_tokens(tokenizer, mapping: Dict[str, float]) -> torch.Tensor:
    """Create vocab bias from {token: weight}. Unknown tokens ignored. Positive promotes, negative demotes."""
    vocab_size = len(tokenizer)
    bias = torch.zeros(vocab_size, dtype=torch.float32)
    for tok, w in mapping.items():
        if tok is None:
            continue
        tid = tokenizer.convert_tokens_to_ids(tok)
        if isinstance(tid, list):
            for t in tid:
                if isinstance(t, int) and t >= 0:
                    bias[t] += float(w) / max(1, len(tid))
        elif isinstance(tid, int) and tid >= 0:
            bias[tid] += float(w)
    return bias
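# Illustrative usage (token strings must match the tokenizer's own vocabulary entries;
# word-initial pieces often carry a leading space/"Ġ"-style marker, so exact spellings
# are tokenizer-specific):
#   bias = build_bias_from_tokens(tokenizer, {"rose": 2.0, "thorn": -2.0})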
class RoseGuidedLogits(torch.nn.Module):
    def __init__(self, bias_vec: torch.Tensor, alpha: float = 1.0):
        super().__init__()
        self.bias_vec = bias_vec
        self.alpha = float(alpha)

    def forward(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        return scores + self.alpha * self.bias_vec.to(scores.device)
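# Note: nn.Module.__call__ routes to forward(), so a RoseGuidedLogits instance is a callable
# with the (input_ids, scores) -> scores signature that `generate` expects from entries in
# the `logits_processor` list.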
@spaces.GPU  # ZeroGPU: acquire a GPU only for the duration of this call
def zerogpu_generate(full_prompt: str,
                     gen_kwargs: Dict[str, Any],
                     rose_map: Optional[Dict[str, float]],
                     rose_alpha: float,
                     rose_score: Optional[float],
                     seed: Optional[int]) -> str:
"""Run **entire** inference on GPU (ZeroGPU-safe). No CUDA touches in main process.""" | |
if seed is not None: | |
torch.manual_seed(int(seed)) | |
# Load base + adapter directly on GPU inside the GPU context | |
model = _load_model_on("auto") | |
try: | |
logits_processor = None | |
if rose_map: | |
bias = build_bias_from_tokens(tokenizer, rose_map).to(next(model.parameters()).device) | |
eff_alpha = float(rose_alpha) * (float(rose_score) if rose_score is not None else 1.0) | |
logits_processor = [RoseGuidedLogits(bias, eff_alpha)] | |
inputs = tokenizer(full_prompt, return_tensors="pt").to(next(model.parameters()).device) | |
out_ids = model.generate( | |
**inputs, | |
do_sample=bool(gen_kwargs.get("do_sample", True)), | |
temperature=float(gen_kwargs.get("temperature", 0.7)), | |
top_p=float(gen_kwargs.get("top_p", 0.9)), | |
top_k=(int(gen_kwargs.get("top_k")) if gen_kwargs.get("top_k") else None), | |
max_new_tokens=int(gen_kwargs.get("max_new_tokens", 512)), | |
pad_token_id=tokenizer.eos_token_id, | |
eos_token_id=tokenizer.eos_token_id, | |
logits_processor=logits_processor, | |
) | |
# Decode only the generated tail (exclude prompt) and extract the `final` channel | |
prompt_len = int(inputs["input_ids"].shape[1]) | |
gen_ids = out_ids[0][prompt_len:] | |
decoded = tokenizer.decode(gen_ids, skip_special_tokens=False) | |
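        # Harmony models emit intermediate channels (e.g. analysis) before the user-facing
        # answer; keep only the text after the last `final` channel marker and cut it at the
        # first return/end token.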
        fb, ret, end = "<|channel|>final<|message|>", "<|return|>", "<|end|>"
        idx = decoded.rfind(fb)
        if idx != -1:
            s = decoded[idx + len(fb):]
            stop = s.find(ret)
            if stop == -1:
                stop = s.find(end)
            if stop != -1:
                s = s[:stop]
            text = s.strip()
        else:
            text = decoded.strip()
        return text
    finally:
        # Ensure no GPU state leaks back to the main process
        try:
            del model
        except Exception:
            pass
        gc.collect()
        try:
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        except Exception:
            pass
# -----------------------
# Gradio handlers and UI
# -----------------------
@dataclass
class GenCfg:
    temperature: float
    top_p: float
    top_k: Optional[int]
    max_new_tokens: int
    do_sample: bool
    seed: Optional[int]
def chat_to_messages(history: List[Any], system_prompt: str) -> List[Dict[str, str]]:
    msgs: List[Dict[str, str]] = [{"role": "system", "content": system_prompt or SYSTEM_DEF}]
    for item in history:
        if not item:
            continue
        if isinstance(item, dict) and "role" in item:
            msgs.append(item)
            continue
        if isinstance(item, (list, tuple)) and len(item) == 2:
            u, a = item
            if u is not None:
                msgs.append({"role": "user", "content": u})
            if a:
                msgs.append({"role": "assistant", "content": a})
    return msgs
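# History can arrive either as openai-style dicts (gr.ChatInterface(type="messages")) or as
# legacy (user, assistant) pairs; both shapes are normalized above, e.g.
#   chat_to_messages([{"role": "user", "content": "hi"}], SYSTEM_DEF)
#   chat_to_messages([("hi", "hello!")], SYSTEM_DEF)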
def generate_stream(message: Any, history: List[Any], system_prompt: str,
                    temperature: float, top_p: float, top_k: int, max_new_tokens: int,
                    do_sample: bool, seed: Optional[int],
                    rose_enable: bool, rose_alpha: float, rose_score: Optional[float], rose_tokens: str, rose_json: str):
    """ZeroGPU handler (non-streaming despite the name): do all CUDA work inside `zerogpu_generate` and
    return a single string. This avoids h11 Content-Length issues on exceptions mid-stream.
    """
    try:
        # Normalize message and build Harmony prompt
        if isinstance(message, dict):
            message = message.get("content", "")
        msgs = chat_to_messages(history, system_prompt)
        msgs.append({"role": "user", "content": str(message)})
        prompt = to_harmony_prompt(msgs)
        # Rose map
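        # Accepted formats (illustrative): rose_tokens="rose:2.0, thorn:-1.5" and/or
        # rose_json='{"rose": 2.0, "thorn": -1.5}'; JSON entries override duplicates from the list.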
        rose_map: Optional[Dict[str, float]] = None
        if rose_enable:
            rose_map = {}
            tok_str = (rose_tokens or "").strip()
            if tok_str:
                for p in [p.strip() for p in tok_str.split(",") if p.strip()]:
                    if ":" in p:
                        k, v = p.split(":", 1)
                        try:
                            rose_map[k.strip()] = float(v)
                        except Exception:
                            pass
            if rose_json:
                try:
                    j = json.loads(rose_json)
                    if isinstance(j, dict):
                        for k, v in j.items():
                            try:
                                rose_map[str(k)] = float(v)
                            except Exception:
                                pass
                except Exception:
                    pass
            if not rose_map:
                rose_map = None
        # Always use the GPU entrypoint; return once
        text = zerogpu_generate(
            prompt,
            {
                "do_sample": bool(do_sample),
                "temperature": float(temperature),
                "top_p": float(top_p),
                "top_k": (int(top_k) if int(top_k) > 0 else None),
                "max_new_tokens": int(max_new_tokens),
            },
            rose_map,
            float(rose_alpha),
            float(rose_score) if rose_score is not None else None,
            int(seed) if seed is not None else None,
        )
        return text
    except Exception as e:
        # Return error as plain text (no streaming) to avoid Content-Length mismatches
        return f"[error] {type(e).__name__}: {e}"
# -----------------------
# Helper: login status banner (HF OAuth)
# -----------------------
#def _login_status(profile: gr.OAuthProfile | None) -> str:
#    """Show whether the visitor is logged in to Hugging Face.
#    This affects ZeroGPU quotas (logged-in users get their own token/quota).
#    Requires the Space to have `hf_oauth: true` in README metadata.
#    """
#    # If OAuth isn't configured on the Space, inform clearly
#    if not os.getenv("OAUTH_CLIENT_ID"):
#        return (
#            "OAuth is not configured on this Space. Add `hf_oauth: true` to README metadata "
#            "so users can sign in and ZeroGPU can use their account quota."
#        )
#    if profile is None:
#        return (
#            "Not signed in to Hugging Face - ZeroGPU will count as anonymous (lower quota). "
#            "Click **Sign in with HF** above."
#        )
#    name = getattr(profile, "name", None) or getattr(profile, "preferred_username", None) or getattr(profile, "id", "user")
#    return f"Signed in as **{name}** - ZeroGPU will use your account quota."
# -----------------------
# UI
# -----------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # Mirel - Harmony Inference (ZeroGPU-ready)
        OSS-20B + optional Rose-SFT adapter. Harmony chat template is applied automatically.
        """
    )
    # Sign-in note
    login_status = gr.Markdown(
        "If you're logged into huggingface.co in this browser, ZeroGPU will use *your* quota automatically."
    )
    with gr.Row():
        system_prompt = gr.Textbox(label="System", value=SYSTEM_DEF)
    with gr.Accordion("Generation settings", open=False):
        with gr.Row():
            temperature = gr.Slider(0.0, 2.0, value=0.7, step=0.05, label="temperature")
            top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.01, label="top_p")
            top_k = gr.Slider(0, 200, value=0, step=1, label="top_k (0=off)")
            max_new = gr.Slider(16, 2048, value=MAX_DEF, step=8, label="max_new_tokens")
            do_sample = gr.Checkbox(value=True, label="do_sample")
            seed = gr.Number(value=None, label="seed (optional)")
    with gr.Accordion("Rose guidance (optional)", open=False):
        with gr.Row():
            rose_enable = gr.Checkbox(value=False, label="Enable Rose bias at decode")
            rose_alpha = gr.Slider(0.0, 5.0, value=1.0, step=0.05, label="rose alpha (strength)")
            rose_score = gr.Slider(0.0, 1.0, value=1.0, step=0.01, label="rose score (0-1)")
        rose_tokens = gr.Textbox(label="token:weight list (comma-separated)", value="")
        rose_json = gr.Textbox(label="JSON {token: weight}", value="")
    chat = gr.ChatInterface(
        fn=generate_stream,
        type="messages",
        additional_inputs=[system_prompt, temperature, top_p, top_k, max_new, do_sample, seed, rose_enable, rose_alpha, rose_score, rose_tokens, rose_json],
        title="Mirel",
        cache_examples=False,
    )
    gr.Markdown(
        """
        **Notes**
        - Set env `ZEROGPU=1` for just-in-time GPU allocation via `@spaces.GPU`.
        - Set `ADAPTER_ID=AbstractPhil/mirel-gpt-oss-20b` and `ADAPTER_SUBFOLDER=checkpoints/checkpoint-516` to use the provided adapter.
        - Use `torch==2.4.0` for ZeroGPU.
        - Rose guidance biases logits at decode time; it does not change weights.
        """
    )

if __name__ == "__main__":
    demo.queue(max_size=8 if ZEROGPU else 32).launch(server_name="0.0.0.0", server_port=7860)