File size: 2,066 Bytes
914ff43
 
 
 
 
 
 
ac6bd9d
57a96b8
914ff43
 
 
 
 
 
 
 
 
c92d178
914ff43
 
 
243324b
914ff43
 
 
243324b
914ff43
 
 
 
 
 
 
4ea805f
914ff43
4ea805f
914ff43
 
 
4ea805f
914ff43
 
 
 
 
 
 
4ea805f
 
 
914ff43
 
 
 
 
 
 
4ea805f
 
914ff43
 
 
 
 
 
 
 
 
4ea805f
 
be61d22
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
"""
HF Space Β· WFGY 1-click Variance Gate
"""

import gradio as gr, numpy as np, torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from wfgy_sdk import get_engine
from wfgy_sdk.evaluator import compare_logits

MODEL  = "sshleifer/tiny-gpt2"
tok    = AutoTokenizer.from_pretrained(MODEL)
mdl    = AutoModelForCausalLM.from_pretrained(MODEL)
ENGINE = get_engine()


def run(prompt, enable, boost):
    """Generate one next-token continuation with raw GPT-2 and with
    WFGY-modulated logits, plus a one-line variance/KL headline.

    Parameters
    ----------
    prompt : str
        User text. Blank/whitespace-only input short-circuits with
        placeholder outputs.
    enable : bool
        Whether to route the logits through the WFGY engine.
    boost : float
        Demo multiplier forwarded to the engine.

    Returns
    -------
    tuple[str, str, str]
        (raw continuation, modulated continuation, headline markdown).
    """
    # BUG FIX: this branch previously returned a single gr.update(), but the
    # click handler is wired to THREE outputs (out_raw, out_mod, headline),
    # which makes Gradio raise on a blank prompt. Always return a 3-tuple.
    if not prompt.strip():
        return "-", "-", ""

    # Logits for the last position of the prompt, as a float32 numpy vector.
    inputs = tok(prompt, return_tensors="pt")
    rawL   = mdl(**inputs).logits[0, -1].detach().cpu().float().numpy()

    # Demo-only fake semantic vectors (fresh random draw per call).
    I = np.random.randn(256).astype(np.float32)
    G = np.random.randn(256).astype(np.float32)

    if enable:
        modL = ENGINE.run(
            logits     = rawL,
            input_vec  = I,
            ground_vec = G,
            boost      = boost,
        )
    else:
        # Gate disabled: modulated output is identical to the raw output.
        modL = rawL

    # Greedy single-token continuation for each logits vector.
    raw_txt = prompt + tok.decode(int(rawL.argmax()))
    mod_txt = prompt + tok.decode(int(modL.argmax()))
    m       = compare_logits(rawL, modL)

    # FIX: repaired mojibake in the user-facing headline (▼ / ✔ were garbled).
    headline = f"variance ▼ {int(m['var_drop']*100)} %  |  KL {m['kl']:.2f}  |  top-1 {'✔' if m['top1'] else '✘'}"

    return (
        raw_txt,
        mod_txt,
        headline,
    )


with gr.Blocks(title="WFGY 1-click Variance Gate") as demo:
    gr.Markdown("## 🧠 WFGY 1-click Variance Gate\nTurn GPT-2 into a calmer thinker. Move the slider β†’ watch variance dive.")

    prompt   = gr.Textbox(label="Prompt")
    enable   = gr.Checkbox(value=True, label="Enable WFGY")
    boost    = gr.Slider(0.5, 3.0, value=1.0, label="Demo Boost (higher β†’ bigger effect)")

    run_btn  = gr.Button("Run", variant="primary")

    with gr.Row():
        out_raw = gr.Textbox(label="Raw GPT-2")
        out_mod = gr.Textbox(label="After WFGY")

    headline = gr.Markdown("")

    run_btn.click(
        run,
        inputs=[prompt, enable, boost],
        outputs=[out_raw, out_mod, headline],
    )


demo.queue()      
demo.launch()