OneStarDao committed on
Commit
a239ad1
·
verified ·
1 Parent(s): af6931e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -38
app.py CHANGED
@@ -1,61 +1,50 @@
1
  """
2
- HF Space · WFGY 1-click Variance Gate
3
  """
4
 
5
- import gradio as gr, numpy as np
6
  from transformers import AutoTokenizer, AutoModelForCausalLM
7
  from wfgy_sdk import get_engine
8
- from wfgy_sdk.evaluator import compare_logits
9
 
10
# Tiny GPT-2 checkpoint — small enough to run on a free CPU Space.
MODEL = "sshleifer/tiny-gpt2"
tok = AutoTokenizer.from_pretrained(MODEL)  # tokenizer used to encode prompts / decode token ids
mdl = AutoModelForCausalLM.from_pretrained(MODEL)  # causal LM that produces the next-token logits
ENG = get_engine()  # shared WFGY engine instance (project SDK; semantics not visible here)
14
 
15
def run(prompt, enable, boost):
    """Produce one next-token continuation with and without the WFGY gate.

    Returns a (raw_text, gated_text, headline) triple for the three Gradio
    outputs. ``boost`` is wired in from the UI slider but is not used in the
    computation (it is labelled "visual only" in the interface).
    """
    if not prompt.strip():
        return "-", "-", "Please enter a prompt."

    encoded = tok(prompt, return_tensors="pt")
    raw_logits = mdl(**encoded).logits[0, -1].detach().cpu().numpy()

    # Demo-only fake semantic vectors fed to the engine.
    fake_input = np.random.randn(256).astype(np.float32)
    fake_ground = np.random.randn(256).astype(np.float32)

    if enable:
        gated_logits = ENG.run(fake_input, fake_ground, raw_logits)
    else:
        gated_logits = raw_logits
    metrics = compare_logits(raw_logits, gated_logits)

    # One-line summary of the gate's effect on the logit distribution.
    headline = (
        f"variance {int(metrics['var_drop']*100)} % | "
        f"KL {metrics['kl']:.2f} | "
        f"top-1 {'✔' if metrics['top1'] else '✘'}"
    )

    raw_text = prompt + tok.decode(int(raw_logits.argmax()))
    gated_text = prompt + tok.decode(int(gated_logits.argmax()))
    return raw_text, gated_text, headline
38
-
39
-
40
with gr.Blocks(title="WFGY 1-click Variance Gate") as demo:
    # Header copy shown above the controls.
    gr.Markdown("## 🧠 WFGY 1-click Variance Gate\n"
                "Turn GPT-2 into a calmer thinker. "
                "Move the slider → watch variance dive.")

    # Input widgets.
    prompt = gr.Textbox(label="Prompt")
    enable = gr.Checkbox(True, label="Enable WFGY")
    boost = gr.Slider(0.5, 3.0, 1.0, label="Demo Boost (visual only)")
    run_btn = gr.Button("Run", variant="primary")

    # Raw vs. gated continuations shown side by side.
    with gr.Row():
        out_raw = gr.Textbox(label="Raw GPT-2")
        out_mod = gr.Textbox(label="After WFGY")
    headline = gr.Markdown()

    run_btn.click(run, [prompt, enable, boost], [out_raw, out_mod, headline])


if __name__ == "__main__":
    demo.queue().launch()
 
1
  """
2
+ HF Space · WFGY 1-click Variance Gate (貼上就能部署)
3
  """
4
 
5
+ import io, numpy as np, gradio as gr, matplotlib.pyplot as plt
6
  from transformers import AutoTokenizer, AutoModelForCausalLM
7
  from wfgy_sdk import get_engine
8
+ from wfgy_sdk.evaluator import compare_logits, plot_histogram
9
 
10
# Tiny GPT-2 checkpoint — small enough to run on a free CPU Space.
MODEL = "sshleifer/tiny-gpt2"
tok = AutoTokenizer.from_pretrained(MODEL)  # tokenizer used to encode prompts / decode token ids
mdl = AutoModelForCausalLM.from_pretrained(MODEL)  # causal LM that produces the next-token logits
ENG = get_engine()  # shared WFGY engine instance (project SDK; semantics not visible here)
14
 
15
def run(prompt: str):
    """Run one WFGY demo step for *prompt*.

    Returns a 4-tuple for the Gradio outputs:
    (raw continuation, gated continuation, metrics headline, histogram PNG
    buffer). When the prompt is blank, placeholder strings and ``None`` are
    returned instead.
    """
    if not prompt.strip():
        return "-", "-", "-", None

    inp = tok(prompt, return_tensors="pt")
    rawL = mdl(**inp).logits[0, -1].detach().cpu().numpy()
    # Demo-only fake semantic vectors fed to the engine.
    I, G = np.random.randn(2, 256).astype(np.float32)
    modL = ENG.run(I, G, rawL)

    mets = compare_logits(rawL, modL)
    head = f"▼ Var {mets['var_drop']*100:.1f}% | KL {mets['kl']:.2f}"

    # Render the logit histogram into an in-memory PNG buffer.
    fig = plot_histogram(rawL, modL)
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    buf.seek(0)
    # FIX: close the figure — pyplot otherwise keeps every figure alive,
    # leaking memory on each request in a long-running Space.
    plt.close(fig)

    raw_txt = prompt + tok.decode(int(rawL.argmax()))
    mod_txt = prompt + tok.decode(int(modL.argmax()))
    # NOTE(review): not all Gradio versions accept a raw BytesIO in
    # gr.Image — confirm, or convert to a PIL image / ndarray if needed.
    return raw_txt, mod_txt, head, buf
34
 
35
with gr.Blocks(title="WFGY 1-Click Variance Gate") as demo:
    # Page header (user-facing copy kept verbatim).
    gr.Markdown("# 🧠 WFGY 模擬實驗\n*輸入任意 Prompt,立刻觀看 Logit 直方圖*")

    # Input widgets.
    prompt = gr.Textbox(label="Prompt", value="Explain Schrödinger's cat")
    run_b = gr.Button("🚀 Run")

    # Raw vs. gated continuations shown side by side.
    with gr.Row():
        raw = gr.Textbox(label="Raw GPT-2")
        mod = gr.Textbox(label="After WFGY")

    # Metrics headline and the histogram image produced by run().
    head = gr.Markdown()
    img = gr.Image(label="Logit Histogram")

    run_b.click(run, prompt, [raw, mod, head, img])


if __name__ == "__main__":
    demo.queue(default_concurrency_limit=2).launch()