import os

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Display-name -> Hugging Face repo path for the selectable models.
MODEL_NAMES = {
    "DeepSeek-V3": "deepseek-ai/DeepSeek-V3",
    "DeepSeek-R1": "deepseek-ai/DeepSeek-R1",
}

# Optional HF access token (needed for gated/private repos).
HF_TOKEN = os.getenv("HF_TOKEN")

DEFAULT_MODEL_KEY = "DeepSeek-V3"


def load_model(model_path):
    """Load a causal-LM model and its tokenizer from the given HF repo path.

    Args:
        model_path: Hugging Face repo id, e.g. "deepseek-ai/DeepSeek-V3".

    Returns:
        (model, tokenizer) tuple ready for generation.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        model_path, trust_remote_code=True, token=HF_TOKEN
    )
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        trust_remote_code=True,
        token=HF_TOKEN,
        torch_dtype=torch.float16,  # force FP16 to avoid FP8 issues
        device_map="auto",  # let transformers place weights on CPU/GPU
        revision="main",
    )
    return model, tokenizer


# Load the default model at startup and remember WHICH one is loaded.
# Tracking the repo path (a string) lets us compare against the dropdown
# value; comparing the model object itself would always mismatch.
current_model_path = MODEL_NAMES[DEFAULT_MODEL_KEY]
current_model, current_tokenizer = load_model(current_model_path)


def chat(message, history, model_name):
    """Generate a reply to *message* with the currently selected model.

    Args:
        message: The user's latest message (str).
        history: Chat history supplied by gr.ChatInterface (unused here).
        model_name: Display name chosen in the dropdown (key of MODEL_NAMES).

    Returns:
        The decoded model response (str).
    """
    global current_model, current_tokenizer, current_model_path

    # Resolve the display name to its HF repo path; fall back to treating
    # the value as a raw path so direct repo ids also work.
    model_path = MODEL_NAMES.get(model_name, model_name)

    # Switch models only when the selection actually changed.
    if model_path != current_model_path:
        current_model, current_tokenizer = load_model(model_path)
        current_model_path = model_path

    # With device_map="auto" the model may be sharded/offloaded; sending
    # inputs to model.device is safer than a blanket "cuda" move.
    inputs = current_tokenizer(message, return_tensors="pt").to(current_model.device)
    # max_new_tokens bounds the RESPONSE length; max_length would count the
    # prompt too and could silently truncate long conversations.
    outputs = current_model.generate(**inputs, max_new_tokens=1024)
    response = current_tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response


with gr.Blocks() as app:
    gr.Markdown("## Chatbot with DeepSeek Models")
    # The dropdown is passed to ChatInterface as an additional input so its
    # value arrives as chat()'s third argument; ChatInterface objects are
    # not lists, so the original .append() call would have raised.
    model_selector = gr.Dropdown(
        choices=list(MODEL_NAMES.keys()),
        value=DEFAULT_MODEL_KEY,
        label="Select Model",
    )
    chat_interface = gr.ChatInterface(
        chat,
        additional_inputs=[model_selector],
        save_history=True,
    )

app.launch()