import spaces
import gradio as gr
from gradio import update
from functools import lru_cache
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from opencc import OpenCC  # Simplified-to-Traditional Chinese conversion
# Initialize the Simplified-to-Traditional Chinese converter
cc = OpenCC('s2t')
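# Example: cc.convert("汉语") returns "漢語"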
# Available model choices
MODEL_LIST = [
    "liswei/Taiwan-ELM-270M",
    "Mxode/SmolLM-Chinese-180M",
    "flyingfishinwater/chinese-baby-llama2",
    "unsloth/gemma-3-1b-pt",
    "ckiplab/gpt2-tiny-chinese",
    "ckiplab/gpt2-base-chinese",
    "liswei/Taiwan-ELM-1_1B",
    "benchang1110/Qwen2.5-Taiwan-1.5B-Instruct",
    "benchang1110/Taiwan-tinyllama-v1.0-base",
    "lianghsun/Llama-3.2-Taiwan-3B",
    "twinkle-ai/Llama-3.2-3B-F1-Instruct",
    "Epiculous/Violet_Twilight-v0.2",
]
@lru_cache(maxsize=None)  # cache one pipeline per model name so repeated requests skip reloading
def get_pipeline(model_name):
    tok = AutoTokenizer.from_pretrained(model_name)
    mdl = AutoModelForCausalLM.from_pretrained(
        model_name, weights_only=False, trust_remote_code=True
    )
    mdl.to("cuda")
    return pipeline("text-generation", model=mdl, tokenizer=tok, device=0)
@spaces.GPU  # request a ZeroGPU slot for the duration of this call
def suggest_next(text, model_name, k, m):
    """
    Use beam search to generate the M most likely continuations, refresh the
    candidate list in a single update, and convert the results from Simplified
    to Traditional Chinese.
    """
    gen_pipe = get_pipeline(model_name)
    outs = gen_pipe(
        text,
        max_new_tokens=k,
        num_beams=m,
        num_return_sequences=m,
        do_sample=False,
        early_stopping=True,
    )
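    # Each entry in outs is a dict whose "generated_text" holds the prompt followed by the continuation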
    # Extract the continuations and drop empty ones
    suggestions = [out["generated_text"][len(text):].strip() for out in outs]
    suggestions = [s for s in suggestions if s]
    # Convert Simplified Chinese to Traditional Chinese
    suggestions = [cc.convert(s) for s in suggestions]
    return update(choices=suggestions, value=None)
def append_suggestion(current, choice):
    if choice is None:
        return current
    # Mimic an IME candidate selection: append the chosen text to the input
    return current + choice
# Custom CSS: emulate the candidate bar of a classic Chinese IME
custom_css = """
#suggestions-bar .candidate-list {
    display: flex;
    gap: 12px;
    background: #ffffff;
    border: 1px solid #ccc;
    border-radius: 4px;
    padding: 6px;
}
#suggestions-bar .candidate-list input[type=radio] {
    display: none;
}
#suggestions-bar .candidate-list label {
    cursor: pointer;
    padding: 2px 6px;
    border-radius: 4px;
}
#suggestions-bar .candidate-list label:hover {
    background: #f0f0f0;
}
#suggestions-bar .candidate-list input[type=radio]:checked + label {
    background: #e0e0e0;
    border: 1px solid #888;
}
"""
with gr.Blocks(css=custom_css) as demo:
    # Title and description
    gr.Markdown(
        "## 🇹🇼 繁體中文輸入法加速器 \n"
        "結合小型語言模型與 ZeroGPU,即時 IME 風格候選條。"
    )
    # Classic candidate bar laid out horizontally
    suggestions = gr.Radio(
        [], label="", interactive=True, type="value",
        elem_id="suggestions-bar", elem_classes="candidate-list"
    )
    # Input area and trigger: single-line textbox plus a small button
    with gr.Row():
        input_text = gr.Textbox(
            label="", placeholder="請輸入拼音或文字…", lines=1, max_lines=1
        )
        gpu_button = gr.Button("建議")
    # Advanced settings (collapsible)
    with gr.Accordion("進階設定", open=False):
        model_selector = gr.Dropdown(
            MODEL_LIST, value=MODEL_LIST[0], label="模型"
        )
        k_slider = gr.Slider(
            minimum=1, maximum=50, step=1, value=1, label="K(最大新詞元數)"
        )
        m_slider = gr.Slider(
            minimum=1, maximum=30, step=1, value=6, label="M(建議數/Beam 數)"
        )
    # Event bindings
    gpu_button.click(
        fn=suggest_next,
        inputs=[input_text, model_selector, k_slider, m_slider],
        outputs=suggestions,
    )
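    # When the user picks a candidate, append it to the textbox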
    suggestions.change(
        fn=append_suggestion,
        inputs=[input_text, suggestions],
        outputs=input_text,
    )

demo.launch()