File size: 4,191 Bytes
ac6bd9d
8c8ffbc
ac6bd9d
 
 
 
 
 
 
 
57a96b8
ac6bd9d
c6b8ac6
57a96b8
6d21131
57a96b8
 
6d21131
57a96b8
ac6bd9d
 
 
c6b8ac6
 
243324b
ac6bd9d
c6b8ac6
 
243324b
ac6bd9d
24f3008
ac6bd9d
 
 
 
 
c6b8ac6
24f3008
ac6bd9d
 
 
 
 
 
 
 
 
 
24f3008
ac6bd9d
 
 
 
 
57a96b8
ac6bd9d
 
 
 
 
 
243324b
ac6bd9d
243324b
ac6bd9d
 
 
 
 
24f3008
 
243324b
ac6bd9d
 
57a96b8
24f3008
ac6bd9d
 
 
6d21131
24f3008
 
57a96b8
ac6bd9d
 
 
 
 
24f3008
ac6bd9d
 
8c8ffbc
ac6bd9d
57a96b8
ac6bd9d
 
 
57a96b8
 
24f3008
ac6bd9d
 
243324b
c6b8ac6
ac6bd9d
 
243324b
 
ac6bd9d
57a96b8
 
903544e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
import io

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image
from transformers import (
    AutoModelForCausalLM, AutoTokenizer, set_seed
)

import wfgy_sdk as w
from wfgy_sdk.evaluator import compare_logits
from wfgy_sdk.visual    import plot_histogram

# ─────────────── tiny GPT-2 (fits free HF Space) ────────────────
# Distilled ~1-layer GPT-2 checkpoint: small enough to load and run on a
# free (CPU-only) Hugging Face Space.
MODEL = "sshleifer/tiny-gpt2"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model     = AutoModelForCausalLM.from_pretrained(MODEL)
set_seed(42)  # fixed seed so the random demo vectors are reproducible

# Single shared WFGY engine instance, reused by every request.
ENGINE = w.get_engine()

# ───────────────────────── core helper ──────────────────────────
def one_pass(prompt: str, boost: float):
    """Run one raw and one WFGY-modulated forward pass.

    Args:
        prompt: user text fed to the tiny GPT-2 model.
        boost:  scales both the noise injected into the demo "input"
                vector ``I`` and the engine's ``bbmc_scale`` knob;
                ``0.0`` effectively disables the modulation.

    Returns:
        Tuple ``(raw_txt, mod_txt, metrics, raw_logits, mod_logits)``:
        the prompt extended with the greedy next token before/after
        modulation, the dict produced by ``compare_logits``, and the
        two 1-D numpy logit vectors over the vocabulary.
    """
    ids = tokenizer(prompt, return_tensors="pt").input_ids
    # Inference only: no_grad avoids building an autograd graph, which
    # saves memory on the CPU-only Space (makes .detach() unnecessary).
    with torch.no_grad():
        raw_logits = model(ids).logits[0, -1].cpu().numpy()

    # Demo vectors – boost widens the semantic distance between the
    # unit "ground" vector G and the noisy "input" vector I.
    G = np.random.randn(256)
    G /= np.linalg.norm(G)
    I = G + np.random.normal(scale=boost, size=256)

    mod_logits = ENGINE.run(I, G, raw_logits, bbmc_scale=boost)

    metrics = compare_logits(raw_logits, mod_logits)
    return (
        prompt + tokenizer.decode(int(raw_logits.argmax())),
        prompt + tokenizer.decode(int(mod_logits.argmax())),
        metrics, raw_logits, mod_logits
    )

def wfgy_pipeline(prompt: str, enable: bool, boost: float):
    """Gradio callback: run the demo and package outputs for the UI.

    Args:
        prompt: user prompt (blank input short-circuits with a hint).
        enable: when False the engine runs with boost 0 (baseline).
        boost:  demo boost slider value, forwarded to ``one_pass``.

    Returns:
        4-tuple matching the output widgets:
        (raw text, modulated text, HTML metrics summary, histogram image).
    """
    if not prompt.strip():
        return "", "", "<i>Please enter a prompt.</i>", None

    try:
        raw_txt, mod_txt, met, rl, ml = one_pass(prompt, boost if enable else 0.0)

        # safety: if enable & variance drop < 5 %, force BBCR collapse once
        # by re-running with a stronger boost (raw_txt from the first pass
        # is deliberately kept for comparison).
        if enable and (1 - met["std_ratio"]) < .05:
            _, mod_txt, met, rl, ml = one_pass(prompt, boost * 1.8)

        stats = (
            f"<b>variance β–Ό {(1-met['std_ratio'])*100:.0f}%</b> &nbsp;|&nbsp; "
            f"<b>KL {met['kl_divergence']:.2f}</b> &nbsp;|&nbsp; "
            f"top-1 {'βœ”' if met['top1_shift'] else '✘'}"
        )

        # histogram β†’ PIL
        fig = plot_histogram(rl, ml) or plt.gcf()
        buf = io.BytesIO()
        fig.savefig(buf, format="png", bbox_inches="tight")
        plt.close(fig)
        # BUGFIX: savefig leaves the stream cursor at EOF; without the
        # rewind, Image.open sees an empty stream and raises
        # UnidentifiedImageError.
        buf.seek(0)
        img = Image.open(buf)

        return raw_txt, mod_txt, stats, img

    except Exception as exc:
        # Top-level UI boundary: surface the failure in the metrics pane
        # instead of crashing the Space.
        return "", "", f"<b style='color:red'>Error:</b> {exc}", None

# ────────────────────────── UI layout ───────────────────────────
# Build the Blocks UI. Widget creation order below determines the page
# layout, so statements must stay in this sequence.
with gr.Blocks(title="WFGY Variance Gate", theme=gr.themes.Soft()) as demo:
    # Intro banner + legend for the three metrics shown after a run.
    gr.Markdown(
        """
### 🧠 WFGY 1-click Variance Gate  
**Turn any modelβ€”even GPT-2β€”into a calmer thinker.**  
Move the slider and watch variance dive.

| Metric | Meaning |
| --- | --- |
| **variance β–Ό** | logits get less noisy |
| **KL** | distribution really changed |
| **top-1** | most-likely token swapped βœ” / ✘ |
        """
    )

    # Inputs: prompt text, on/off toggle, and the boost slider that is
    # forwarded to one_pass via wfgy_pipeline.
    prompt = gr.Textbox(label="Prompt", lines=2, placeholder="Ask anything…")
    enable = gr.Checkbox(label="Enable WFGY", value=True)
    boost  = gr.Slider(0, 3, value=1.2, step=.1,
                       label="Demo Boost (higher β†’ bigger effect)")
    runbtn = gr.Button("Run")

    # Outputs: must match wfgy_pipeline's 4-tuple return order.
    raw_box = gr.Textbox(label="Raw GPT-2")
    mod_box = gr.Textbox(label="After WFGY")
    metrics = gr.HTML()
    hist    = gr.Image(label="Logit distribution", width=460)

    runbtn.click(wfgy_pipeline,
                 inputs=[prompt, enable, boost],
                 outputs=[raw_box, mod_box, metrics, hist])

    # Footer: alternative "PDF mode" usage plus promo links.
    gr.Markdown(
        """
**PDF mode ** – feed <code>I_am_not_lizardman/WFGY_1.0.pdf</code> to any chat-LLM,  
prepend <code>Use WFGY:</code> and enjoy sharper answers.

⭐ <a href="https://github.com/onestardao/WFGY" target="_blank">
GitHub repo – star to unlock WFGY 2.0 (10 k ⭐ before 2025-08-01)
</a>

πŸ“‚ Hidden folder <b>I_am_not_lizardman/</b> holds 8 + 1 β€œChallenge-Einstein” papers β€” tweet a screenshot if you find them!
        """
    )

# Launch the Space's web server (blocking call).
demo.launch()