# NOTE(review): removed Hugging Face web-page scrape residue that preceded the
# module (Space status lines, file size, git revision hashes, line-number
# gutter) — it was not Python and made the file unparseable.
"""
HF Space · WFGY 1-click Variance Gate
"""
import gradio as gr
import numpy as np
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from wfgy_sdk import get_engine
from wfgy_sdk.evaluator import compare_logits
# ---------- tiny demo backbone ----------
MODEL = "sshleifer/tiny-gpt2"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(MODEL)
ENGINE = get_engine()
# ---------- core inference ----------
def wfgy_run(prompt: str, enable: bool, boost: float):
if not prompt.strip():
return "β", "β", "Please enter a prompt."
# 1) raw logits from tiny-GPT-2
toks = tokenizer(prompt, return_tensors="pt")
rawL = model(**toks).logits[0, -1].detach().cpu().numpy()
# 2) dummy semantic vectors (demo only)
G = np.random.randn(256).astype(np.float32)
I = (G + np.random.normal(scale=0.05, size=256).astype(np.float32)) * boost
# 3) WFGY gate
modL = ENGINE.run(I, G, rawL) if enable else rawL
# 4) text + metrics
raw_txt = prompt + tokenizer.decode(int(rawL.argmax()))
mod_txt = prompt + tokenizer.decode(int(modL.argmax()))
m = compare_logits(rawL, modL)
headline = f"variance βΌ {int(m['var_drop']*100)} % | KL {m['kl']:.2f} | top-1 {'β' if m['top1'] else 'β'}"
return raw_txt, mod_txt, headline
# ---------- UI ----------
with gr.Blocks(title="WFGY 1-click Variance Gate", theme="soft") as demo:
gr.Markdown("## π§ **WFGY 1-click Variance Gate**
Turn GPT-2 into a calmer thinker. Move the slider β watch variance dive.\n")
prompt = gr.Textbox(label="Prompt")
enable = gr.Checkbox(value=True, label="Enable WFGY")
boost = gr.Slider(0.5, 3.0, value=1.0, step=0.1,
label="Demo Boost (higher β bigger effect)")
run_btn = gr.Button("Run", variant="primary")
with gr.Row():
out_raw = gr.Textbox(label="Raw GPT-2")
out_mod = gr.Textbox(label="After WFGY")
headline = gr.Markdown("")
run_btn.click(
wfgy_run,
inputs=[prompt, enable, boost],
outputs=[out_raw, out_mod, headline],
)
# ---------- launch ----------
if __name__ == "__main__":
demo.queue() # default concurrency = 2
demo.launch()