OneStarDao committed
Commit 57a96b8 · verified · 1 Parent(s): 2d092eb

Update app.py

Files changed (1)
  app.py  +84 -14
app.py CHANGED
@@ -1,19 +1,89 @@
- import gradio as gr, numpy as np, wfgy_sdk as w
  from wfgy_sdk.evaluator import compare_logits

- def run(prompt):
-     logits = np.random.randn(32000)  # demo: fake logits
      G = np.random.randn(256); G /= np.linalg.norm(G)
      I = G + np.random.normal(scale=0.05, size=256)
-     out = w.get_engine().run(input_vec=I, ground_vec=G, logits=logits)
-     m = compare_logits(logits, out)
-     return f"variance ↓ {(1-m['std_ratio'])*100:.0f}% | KL {m['kl_divergence']:.2f}"
-
- demo = gr.Interface(
-     fn=run,
-     inputs=gr.Textbox(label="Prompt"),
-     outputs=gr.Textbox(label="WFGY stats"),
-     title="WFGY Quick Tester",
-     description="Type anything, see variance & KL instantly."
- )
  demo.launch()
 
+ """
+ WFGY HuggingFace Space – deluxe demo
+ * Generates text before/after WFGY
+ * Shows variance, KL, top-1 shift
+ * Renders overlay histogram
+ """
+
+ import base64, io, numpy as np, gradio as gr, wfgy_sdk as w
  from wfgy_sdk.evaluator import compare_logits
+ from wfgy_sdk.visual import plot_histogram
+
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed
+
+ MODEL = "sshleifer/tiny-gpt2"   # 124-MB, runs on CPU in ~2 s
+ tokenizer = AutoTokenizer.from_pretrained(MODEL)
+ model = AutoModelForCausalLM.from_pretrained(MODEL)
+ set_seed(42)
+
+ ENGINE = w.get_engine()         # singleton
+
+
+ def gen_text(prompt, max_new_tokens=40):
+     ids = tokenizer(prompt, return_tensors="pt").input_ids
+     with torch.no_grad():
+         out = model.generate(ids, max_new_tokens=max_new_tokens, do_sample=False)
+     return tokenizer.decode(out[0, ids.shape[1]:], skip_special_tokens=True)

+
+ def wfgy_demo(prompt, enable_wfgy):
+     # ---- generate raw text & logits ----
+     ids = tokenizer(prompt, return_tensors="pt").input_ids
+     with torch.no_grad():
+         output = model(ids)
+     raw_logits = output.logits[0, -1].cpu().numpy()
+
+     # dummy semantic vectors for demo
      G = np.random.randn(256); G /= np.linalg.norm(G)
      I = G + np.random.normal(scale=0.05, size=256)
+
+     # run WFGY
+     if enable_wfgy:
+         mod_logits = ENGINE.run(input_vec=I, ground_vec=G, logits=raw_logits)
+     else:
+         mod_logits = raw_logits.copy()
+
+     # decode next-token text for both versions
+     next_raw = tokenizer.decode(int(raw_logits.argmax()))
+     next_mod = tokenizer.decode(int(mod_logits.argmax()))
+     raw_txt = prompt + next_raw
+     mod_txt = prompt + next_mod
+
+     # metrics
+     m = compare_logits(raw_logits, mod_logits)
+     badge = f"variance ↓ {(1-m['std_ratio'])*100:.0f}% | KL {m['kl_divergence']:.2f}"
+     top1 = "✔" if m["top1_shift"] else "✘"
+     badge += f" | top-1 changed {top1}"
+
+     # histogram
+     fig = plot_histogram(raw_logits, mod_logits, show=False)
+     buf = io.BytesIO(); fig.savefig(buf, format="png"); fig.clf()
+     img_b64 = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()
+
+     return raw_txt, mod_txt, badge, img_b64
+
+
+ with gr.Blocks(title="WFGY variance gate") as demo:
+     gr.Markdown("## WFGY Live Demo — variance drop in real-time")
+
+     prompt = gr.Textbox(label="Prompt", placeholder="Ask anything…", lines=2)
+     enable = gr.Checkbox(label="Enable WFGY", value=True)
+     run_btn = gr.Button("Run")
+
+     with gr.Row():
+         raw_out = gr.Textbox(label="Raw GPT-2")
+         mod_out = gr.Textbox(label="After WFGY")
+
+     metrics = gr.HTML(label="Metrics")
+     hist = gr.Image(label="Logit distribution", elem_id="hist", width=450)
+
+     run_btn.click(wfgy_demo, [prompt, enable],
+                   [raw_out, mod_out, metrics, hist])
+
+     gr.Markdown(
+         "⭐ If the variance drop looks magic, [**star the repo**]"
+         "(https://github.com/onestardao/WFGY) and help unlock WFGY 2.0!"
+     )
+
  demo.launch()
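
For reference, the badge logic in the new wfgy_demo() can be exercised outside Gradio. The snippet below is a minimal sketch, assuming wfgy_sdk is installed; it substitutes random logits for the real tiny-gpt2 forward pass (the 32000-entry vocabulary size is only illustrative), so the printed numbers will differ from the Space.

import numpy as np
import wfgy_sdk as w
from wfgy_sdk.evaluator import compare_logits

# stand-in logits; in app.py these come from the last-token output of tiny-gpt2
raw_logits = np.random.randn(32000)

# dummy semantic vectors, mirroring the demo's G / I construction
G = np.random.randn(256); G /= np.linalg.norm(G)
I = G + np.random.normal(scale=0.05, size=256)

mod_logits = w.get_engine().run(input_vec=I, ground_vec=G, logits=raw_logits)
m = compare_logits(raw_logits, mod_logits)
print(f"variance ↓ {(1 - m['std_ratio']) * 100:.0f}% | "
      f"KL {m['kl_divergence']:.2f} | "
      f"top-1 changed {'✔' if m['top1_shift'] else '✘'}")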