import huggingface_hub as hf_hub
import openvino_genai as ov_genai
import gradio as gr
import re
# Download the model (local_dir_use_symlinks is deprecated in recent
# huggingface_hub releases and no longer needed)
model_id = "OpenVINO/Qwen3-0.6B-int4-ov"
model_path = "Qwen3-0.6B-int4-ov"
hf_hub.snapshot_download(model_id, local_dir=model_path)
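# Optional sanity check (a sketch added here, not part of the original flow):
# an OpenVINO IR model directory typically contains openvino_model.xml and
# openvino_model.bin alongside the tokenizer files.
from pathlib import Path
if not (Path(model_path) / "openvino_model.xml").exists():
    raise FileNotFoundError(f"Expected OpenVINO IR files under {model_path}")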
# Build the inference pipeline
device = "CPU"  # ov_genai also accepts e.g. "GPU" or "AUTO" if such a device is available
pipe = ov_genai.LLMPipeline(model_path, device)
tokenizer = pipe.get_tokenizer()
# Re-apply the model's chat template so generate() formats chat prompts correctly
tokenizer.set_chat_template(tokenizer.chat_template)
# generate_response with streaming output support
def generate_response(prompt):
    try:
        response = ""
        tokens_per_sec = "N/A"  # default value

        # Streaming callback: called once per decoded subword
        def streamer(subword):
            nonlocal response
            response += subword  # accumulate the output
            print(subword, end='', flush=True)  # echo to the console for live inspection
            return ov_genai.StreamingStatus.RUNNING

        # Run streaming generation inside a chat session
        pipe.start_chat()
        result = pipe.generate(prompt, streamer=streamer, max_new_tokens=100)
        pipe.finish_chat()

        # Compute tokens/sec from the result's performance metrics
        # (LLMPipeline itself has no get_throughput(); the metrics live on the result)
        tokens_per_sec = f"{result.perf_metrics.get_throughput().mean:.2f}"
        return tokens_per_sec, response
    except Exception as e:
        return "N/A", f"Error while generating the response: {e}"
# Build the Gradio interface (unchanged)
demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=1, label="Prompt"),
    outputs=[
        gr.Textbox(label="tokens/sec"),
        gr.Textbox(label="Response")
    ],
    title="Qwen3-0.6B-int4-ov",
    description="An inference demo built on Qwen3-0.6B-int4-ov, supporting thinking-process separation and a GUI."
)
if __name__ == "__main__":
    demo.launch()
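# The interface above only shows the finished reply; the token-by-token stream
# goes to the console. A sketch (an assumption, not the original app's code) of
# how the same pipeline could stream into the Gradio UI instead: run generate()
# in a background thread and yield partial text, which gr.Interface renders
# incrementally when fn is a generator.
import threading
import queue

def generate_response_live(prompt):
    q = queue.Queue()

    def streamer(subword):
        q.put(subword)
        return ov_genai.StreamingStatus.RUNNING

    def worker():
        pipe.generate(prompt, streamer=streamer, max_new_tokens=100)
        q.put(None)  # sentinel: generation finished

    threading.Thread(target=worker, daemon=True).start()
    text = ""
    while (piece := q.get()) is not None:
        text += piece
        yield text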