File size: 4,880 Bytes
dc13b6d
 
f358820
dc13b6d
 
2f31c84
 
 
 
dc13b6d
da40aec
dc13b6d
e894885
812c80c
2a9fd77
cc1322f
dc13b6d
 
 
 
 
42dc39c
 
 
dc13b6d
 
 
 
 
2f31c84
 
 
dc13b6d
 
 
 
 
da40aec
041841e
da40aec
 
 
 
 
 
 
 
 
8722708
72f7051
 
2f31c84
041841e
 
72f7051
dc13b6d
 
91852e0
f4fd3fb
041841e
 
6277588
6bcfde9
44ac7e8
041841e
109cc2f
6bcfde9
041841e
44ac7e8
 
041841e
 
 
44ac7e8
041841e
 
 
44ac7e8
 
 
 
 
041841e
44ac7e8
041841e
 
44ac7e8
 
041841e
44ac7e8
 
041841e
 
44ac7e8
109cc2f
 
 
44ac7e8
 
 
109cc2f
da40aec
041841e
 
 
da40aec
dc13b6d
109cc2f
 
 
 
 
 
 
 
 
 
dc13b6d
109cc2f
 
 
 
 
 
 
 
041841e
 
44ac7e8
da40aec
44ac7e8
da40aec
 
44ac7e8
da40aec
 
c58eafa
da40aec
dc13b6d
0fd90d3
109cc2f
 
 
 
 
041841e
dc13b6d
 
 
 
6277588
041841e
6277588
 
 
dc13b6d
 
 
 
 
 
041841e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
import spaces
import gradio as gr
from gradio import update
from functools import lru_cache
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from opencc import OpenCC  # 用於簡體轉繁體

# Module-level Simplified-to-Traditional Chinese converter ('s2t' profile),
# used to post-process model output before display.
cc = OpenCC('s2t')

# Selectable causal-LM checkpoints for the dropdown (mostly small
# Chinese / Taiwanese models suitable for fast next-text suggestion).
# The first entry is the default selection in the UI.
MODEL_LIST = [
    "liswei/Taiwan-ELM-270M",
    "Mxode/SmolLM-Chinese-180M",
    "flyingfishinwater/chinese-baby-llama2",
    "unsloth/gemma-3-1b-pt",
    "ckiplab/gpt2-tiny-chinese",
    "ckiplab/gpt2-base-chinese",
    "liswei/Taiwan-ELM-1_1B",
    "benchang1110/Qwen2.5-Taiwan-1.5B-Instruct",
    "benchang1110/Taiwan-tinyllama-v1.0-base",
    "lianghsun/Llama-3.2-Taiwan-3B",
    "twinkle-ai/Llama-3.2-3B-F1-Instruct",
    "Epiculous/Violet_Twilight-v0.2",
]

@lru_cache(maxsize=None)
def get_pipeline(model_name):
    """Build and memoize a text-generation pipeline for *model_name*.

    The tokenizer/model pair is loaded once per model name; subsequent
    calls return the cached pipeline for the lifetime of the process.
    The model is placed on the GPU and the pipeline bound to device 0.

    NOTE(review): `trust_remote_code=True` with `weights_only=False`
    executes repository-provided code on load — acceptable only because
    the model list is curated, not user-supplied.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name, weights_only=False, trust_remote_code=True
    )
    model.to("cuda")
    return pipeline(
        "text-generation", model=model, tokenizer=tokenizer, device=0
    )

@spaces.GPU
def suggest_next(text, model_name, k, m):
    """Generate up to *m* candidate continuations of *text* via beam search.

    Each candidate keeps only the newly generated tail, is converted to
    Traditional Chinese, and is numbered ("1. …") for the candidate bar.

    Args:
        text: Current input-box content, used as the generation prompt.
        model_name: Hugging Face model id selected in the dropdown.
        k: Maximum number of new tokens per candidate.
        m: Number of beams and returned sequences.

    Returns:
        A gradio update setting the Radio choices and clearing the selection.
    """
    # Nothing to continue from — clear the candidate bar instead of
    # prompting the model with an empty string (change events also fire
    # when the input box is cleared).
    if not text:
        return update(choices=[], value=None)
    gen_pipe = get_pipeline(model_name)
    outs = gen_pipe(
        text,
        max_new_tokens=k,
        num_beams=m,
        num_return_sequences=m,
        do_sample=False,
        early_stopping=True
    )
    # Strip the prompt prefix, drop empty continuations, convert
    # Simplified -> Traditional, then number the survivors for display.
    tails = [out["generated_text"][len(text):].strip() for out in outs]
    converted = [cc.convert(s) for s in tails if s]
    numbered = [f"{i+1}. {s}" for i, s in enumerate(converted)]
    return update(choices=numbered, value=None)


def append_suggestion(current, choice):
    """Append the chosen candidate (minus its "N. " prefix) to *current*.

    Returns *current* unchanged when no candidate is selected.
    """
    if choice is None:
        return current
    _, sep, tail = choice.partition(". ")
    suffix = tail if sep else choice
    return current + suffix

# Custom CSS: mimic the horizontal candidate bar of classic Chinese IMEs
# (hidden radio inputs styled as hoverable/selectable candidate labels).
custom_css = """
#suggestions-bar {
    width: 100%;
    margin-bottom: 8px;
}
#suggestions-bar .candidate-list {
    display: flex;
    gap: 8px;
    background: #fff;
    border: 1px solid #999;
    border-radius: 4px;
    padding: 4px 6px;
    overflow-x: auto;
    white-space: nowrap;
}
#suggestions-bar .candidate-list input[type=radio] {
    display: none;
}
#suggestions-bar .candidate-list label {
    position: relative;
    cursor: pointer;
    padding: 4px 8px;
    font-size: 14px;
}
#suggestions-bar .candidate-list label:hover {
    background: #f5f5f5;
}
#suggestions-bar .candidate-list input[type=radio]:checked + label {
    background: #e6f7ff;
    border: 1px solid #1890ff;
}
#input-box {
    width: 100%;
}
"""

with gr.Blocks(css=custom_css) as demo:
    # Title and description. The original used a backslash line-continuation
    # inside the string literal, which deleted the newline and merged the
    # tagline into the "##" heading; an explicit "\n" restores the break.
    gr.Markdown(
        "## 🇹🇼 繁體中文 IME 加速器\n"
        "結合小型語言模型與 ZeroGPU,提供即時輸入法風格候選欄。"
    )

    # Vertical layout: candidate bar on top, input box below.
    with gr.Column():
        suggestions = gr.Radio(
            [], label="", interactive=True, type="value",
            elem_id="suggestions-bar", elem_classes="candidate-list"
        )
        input_text = gr.Textbox(
            label="", placeholder="請輸入拼音或文字…",
            lines=1, max_lines=1, elem_id="input-box"
        )

    # Auto-predict toggle and a manual predict button
    # (the button starts hidden while auto-predict is on).
    with gr.Row():
        auto_predict = gr.Checkbox(
            value=True, label="自動預測(內容變更時觸發)", elem_id="auto-predict"
        )
        predict_button = gr.Button(
            "預測", elem_id="predict-button", visible=False
        )

    # Advanced settings (collapsible).
    with gr.Accordion("進階設定", open=False):
        model_selector = gr.Dropdown(
            MODEL_LIST, value=MODEL_LIST[0], label="模型"
        )
        k_slider = gr.Slider(
            minimum=1, maximum=50, step=1, value=1, label="K(最大新詞元數)"
        )
        m_slider = gr.Slider(
            minimum=1, maximum=30, step=1, value=6, label="M(建議數/Beam 數)"
        )

    # Event wiring.
    # Show the manual predict button only when auto-predict is off.
    auto_predict.change(
        fn=lambda auto: update(visible=not auto),
        inputs=[auto_predict],
        outputs=[predict_button],
    )
    predict_button.click(
        fn=suggest_next,
        inputs=[input_text, model_selector, k_slider, m_slider],
        outputs=suggestions,
    )
    # On every edit: predict when auto mode is on, otherwise clear the bar.
    input_text.change(
        fn=lambda txt, mdl, k, m, auto: suggest_next(txt, mdl, k, m) if auto else update(choices=[], value=None),
        inputs=[input_text, model_selector, k_slider, m_slider, auto_predict],
        outputs=suggestions,
    )
    # Selecting a candidate appends it to the input box.
    suggestions.change(
        fn=append_suggestion,
        inputs=[input_text, suggestions],
        outputs=input_text,
    )

# Launch outside the Blocks context so the app configuration is finalized
# (the original called launch() inside the `with` block, before __exit__).
demo.launch()