File size: 2,098 Bytes
6aba93c
18538cc
ef37700
 
 
1348198
914ff43
a239ad1
57a96b8
ef37700
 
 
 
c92d178
6aba93c
ef37700
 
 
6aba93c
243324b
ef37700
 
243324b
6aba93c
ef37700
 
1348198
ef37700
 
 
6aba93c
ef37700
 
 
 
 
 
 
 
 
 
 
6aba93c
ef37700
6aba93c
 
ef37700
1348198
a239ad1
ef37700
1348198
4ea805f
ef37700
 
 
 
 
1348198
ef37700
914ff43
ef37700
 
6aba93c
 
ef37700
822e03e
e95bc22
6aba93c
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
# HF Space · WFGY variance gate demo (Gradio 4.31+)

import io
import numpy as np
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from wfgy_sdk import get_engine
from wfgy_sdk.evaluator import compare_logits, plot_histogram

# Tiny GPT-2 checkpoint: loads in seconds and keeps the Space's cold start
# short; quality of the generated token is irrelevant to the variance demo.
MODEL_ID = "sshleifer/tiny-gpt2"
tok = AutoTokenizer.from_pretrained(MODEL_ID)   # downloads/caches on first run
mdl = AutoModelForCausalLM.from_pretrained(MODEL_ID)
eng = get_engine()  # shared WFGY engine instance reused across requests


def run(prompt: str):
    """Run one WFGY variance-gate step on *prompt*.

    Returns a 4-tuple consumed by the Gradio callback:
    (raw one-token continuation, modulated continuation,
     metric headline string, histogram image as an RGBA numpy array).
    Returns empty strings / None image when the prompt is blank.
    """
    prompt = prompt.strip()
    if not prompt:
        return "", "", "no prompt – nothing to show", None

    ids = tok(prompt, return_tensors="pt").input_ids
    # Next-token logits for the last position, as a plain numpy vector.
    logits_raw = mdl(ids).logits[0, -1].detach().cpu().numpy()

    # Toy fingerprints: G is a random "ground truth", I a slightly noisy copy,
    # so the engine always sees a small, non-zero semantic residual.
    G = np.random.randn(256).astype(np.float32)
    I = G + np.random.normal(scale=0.05, size=256).astype(np.float32)

    logits_mod = eng.run(I, G, logits_raw)
    m = compare_logits(logits_raw, logits_mod)

    headline = f"▼ var {m['var_drop']*100:4.1f} % | KL {m['kl']:.3f}"

    # Render the histogram to an RGBA numpy array.  gr.Image accepts numpy
    # arrays / PIL images / file paths but NOT a raw BytesIO buffer, so the
    # previous `fig.savefig(buf); return buf` silently broke the image output.
    fig = plot_histogram(logits_raw, logits_mod)
    fig.canvas.draw()
    img = np.asarray(fig.canvas.buffer_rgba()).copy()
    # NOTE(review): if plot_histogram creates figures via pyplot they will
    # accumulate across requests unless closed — consider plt.close(fig)
    # upstream in the SDK; cannot close here without importing matplotlib.

    # One-token greedy continuations, before vs. after the variance gate.
    raw_txt = prompt + tok.decode(int(logits_raw.argmax()))
    mod_txt = prompt + tok.decode(int(logits_mod.argmax()))
    return raw_txt, mod_txt, headline, img


# UI definition: one prompt box, a run button, side-by-side raw/modulated
# outputs, a metric headline, and the logit histogram image.
with gr.Blocks(title="WFGY variance gate") as demo:
    gr.Markdown(
        "# 🧠 WFGY simulation demo  \n"
        "Type any prompt and watch the logit variance collapse in real time."
    )

    prompt = gr.Textbox(label="Prompt", value="Explain Schrödinger's cat")
    btn = gr.Button("🚀 Run")

    with gr.Row():
        raw_box = gr.Textbox(label="Raw GPT-2")
        mod_box = gr.Textbox(label="After WFGY")

    headline = gr.Markdown()
    img = gr.Image(label="Logit histogram")

    # Wire the button to run(): outputs map positionally to run()'s 4-tuple.
    btn.click(run, prompt, [raw_box, mod_box, headline, img])

    gr.Markdown(
        "---\n"
        "### ⭐ Help unlock **WFGY 2.0**  \n"
        "10 000 GitHub stars by **2025-08-01** → next-gen release."
    )

if __name__ == "__main__":
    # Enable the request queue before launching so concurrent clicks are
    # handled gracefully.  NOTE(review): Gradio 4.x queue() does accept
    # arguments (max_size, default_concurrency_limit, ...); the defaults
    # are fine for this single-function demo.
    demo.queue().launch()