"""
HF Space · WFGY 1-click Variance Gate
"""
import gradio as gr
import numpy as np
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from wfgy_sdk import get_engine
from wfgy_sdk.evaluator import compare_logits
# ---------- tiny demo backbone ----------
MODEL = "sshleifer/tiny-gpt2"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(MODEL)
ENGINE = get_engine()
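# The engine modulates the raw next-token logits via the WFGY variance gate;
# it is applied in wfgy_run() below whenever "Enable WFGY" is checked.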
# ---------- core inference ----------
def wfgy_run(prompt: str, enable: bool, boost: float):
    if not prompt.strip():
        return "—", "—", "Please enter a prompt."

    # 1) raw logits from tiny-GPT-2
    toks = tokenizer(prompt, return_tensors="pt")
    rawL = model(**toks).logits[0, -1].detach().cpu().numpy()

    # 2) dummy semantic vectors (demo only)
    G = np.random.randn(256).astype(np.float32)
    I = (G + np.random.normal(scale=0.05, size=256).astype(np.float32)) * boost

    # 3) WFGY gate
    modL = ENGINE.run(I, G, rawL) if enable else rawL

    # 4) text + metrics
    raw_txt = prompt + tokenizer.decode(int(rawL.argmax()))
    mod_txt = prompt + tokenizer.decode(int(modL.argmax()))
    m = compare_logits(rawL, modL)
    headline = f"variance ▼ {int(m['var_drop'] * 100)} % | KL {m['kl']:.2f} | top-1 {'✔' if m['top1'] else '✘'}"
    return raw_txt, mod_txt, headline
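# Quick smoke test (sketch; no Gradio UI needed):
#   raw_txt, mod_txt, headline = wfgy_run("The sky is", enable=True, boost=1.0)
#   print(headline)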
# ---------- UI ----------
with gr.Blocks(title="WFGY 1-click Variance Gate", theme="soft") as demo:
    gr.Markdown(
        "## 🧠 **WFGY 1-click Variance Gate**\n"
        "Turn GPT-2 into a calmer thinker. Move the slider → watch variance dive.\n"
    )
    prompt = gr.Textbox(label="Prompt")
    enable = gr.Checkbox(value=True, label="Enable WFGY")
    boost = gr.Slider(0.5, 3.0, value=1.0, step=0.1,
                      label="Demo Boost (higher → bigger effect)")
    run_btn = gr.Button("Run", variant="primary")

    with gr.Row():
        out_raw = gr.Textbox(label="Raw GPT-2")
        out_mod = gr.Textbox(label="After WFGY")

    headline = gr.Markdown("")

    run_btn.click(
        wfgy_run,
        inputs=[prompt, enable, boost],
        outputs=[out_raw, out_mod, headline],
    )
# ---------- launch ----------
if __name__ == "__main__":
    demo.queue()  # default concurrency = 2
    demo.launch()
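# Local run (sketch): `python app.py`, then open the URL Gradio prints
# (http://localhost:7860 by default). On HF Spaces the app launches automatically.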