# app.py
import spaces
import gradio as gr
from functools import lru_cache
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Candidate models for Traditional-Chinese next-segment prediction.
MODEL_LIST = [
    "ckiplab/gpt2-tiny-chinese",
    "ckiplab/gpt2-base-chinese",
    "liswei/Taiwan-ELM-270M-Instruct",
    "liswei/Taiwan-ELM-1_1B",
    "google/gemma-3-1b-pt",
    "benchang1110/Qwen2.5-Taiwan-1.5B-Instruct",
    "benchang1110/Taiwan-tinyllama-v1.0-base",
]

@lru_cache(maxsize=None)
def get_pipeline(model_name):
    """Load tokenizer + model once per model name and cache the pipeline."""
    tok = AutoTokenizer.from_pretrained(model_name)
    # Passing weights_only=False bypasses the torch.load(weights_only=True)
    # path, which transformers disallows on torch<2.6 because of CVE-2025-32434.
    mdl = AutoModelForCausalLM.from_pretrained(model_name, weights_only=False)
    mdl.to("cuda")
    return pipeline("text-generation", model=mdl, tokenizer=tok, device=0)
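
# Note: an alternative to weights_only=False, assuming the checkpoint repo
# ships .safetensors weights, is to avoid torch.load entirely:
#     AutoModelForCausalLM.from_pretrained(model_name, use_safetensors=True)
# safetensors deserialization does not execute pickled code, so it is not
# affected by CVE-2025-32434.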

@spaces.GPU
def suggest_next(text, model_name, k, m):
    # Greedy decoding returns only one sequence, so use beam search
    # to get m distinct candidates deterministically.
    outs = get_pipeline(model_name)(
        text, max_new_tokens=int(k), num_beams=int(m),
        num_return_sequences=int(m), do_sample=False,
    )
    # Return the continuations (prompt stripped) as the dropdown's choices.
    choices = [out["generated_text"][len(text):] for out in outs]
    return gr.update(choices=choices, value=None)
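
# Quick local smoke test (assumption: run on a CUDA machine outside ZeroGPU);
# the prompt "今天天氣" and its outputs are hypothetical, for illustration only:
#
#     update = suggest_next("今天天氣", MODEL_LIST[0], 5, 3)
#     print(update)  # gr.update payload carrying three continuation strings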

def append_suggestion(current, choice):
    # choice is None when the dropdown is reset programmatically.
    return current + (choice or "")

with gr.Blocks() as demo:
    # UI labels are Traditional Chinese; English glosses in comments.
    gr.Markdown("## 🇹🇼 台灣中文下段預測(ZeroGPU + Gradio v5)")  # "Taiwan Chinese next-segment prediction"
    input_text = gr.TextArea(label="輸入文字", lines=4, placeholder="請在此輸入起始片段…")  # "Input text" / "Type a starting fragment here…"

    with gr.Row():
        model_selector = gr.Dropdown(MODEL_LIST, value=MODEL_LIST[0], label="選擇模型")  # "Select model"
        # step=1 keeps the slider values integral for the generation kwargs.
        k_slider = gr.Slider(1, 50, value=5, step=1, label="K(最大新生成詞元)")  # "K (max new tokens)"
        m_slider = gr.Slider(1, 10, value=5, step=1, label="M(建議數量)")  # "M (number of suggestions)"

    suggestions = gr.Dropdown([], label="建議清單", interactive=True)  # "Suggestion list"
    gpu_button = gr.Button("使用 GPU 生成建議")  # "Generate suggestions on GPU"

    gpu_button.click(
        fn=suggest_next,
        inputs=[input_text, model_selector, k_slider, m_slider],
        outputs=suggestions,
    )
    # .input fires only on user selection, so the programmatic choice updates
    # returned by suggest_next do not re-trigger append_suggestion.
    suggestions.input(
        fn=append_suggestion,
        inputs=[input_text, suggestions],
        outputs=input_text,
    )

demo.launch()
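
# Deployment note: @spaces.GPU targets Hugging Face ZeroGPU Spaces, where a GPU
# is attached only while the decorated function runs. Running this file locally
# (python app.py) is assumed to require a CUDA device, since get_pipeline moves
# each model to "cuda" unconditionally.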