import huggingface_hub as hf_hub
import openvino_genai as ov_genai
import gradio as gr
import re

# Download the model
model_id = "OpenVINO/Qwen3-0.6B-int4-ov"
model_path = "Qwen3-0.6B-int4-ov"

hf_hub.snapshot_download(model_id, local_dir=model_path, local_dir_use_symlinks=False)

# Create the inference pipeline
device = "CPU"
pipe = ov_genai.LLMPipeline(model_path, device)
tokenizer = pipe.get_tokenizer()
tokenizer.set_chat_template(tokenizer.chat_template)


def generate_response(prompt):
    try:
        generated = pipe.generate([prompt], max_length=1024)
        text = generated.texts[0]  # DecodedResults: first (and only) completion
        tokenpersec = f"{generated.perf_metrics.get_throughput().mean:.2f}"

        # Qwen3 wraps its reasoning in <think>...</think>; split it from the answer
        match = re.search(r"<think>(.*?)</think>(.*)", text, re.DOTALL)
        if match:
            thinking = match.group(1).strip()
            content = match.group(2).strip()
        else:
            thinking = "The model did not provide a thinking process"
            content = text
        return tokenpersec, thinking, content
    except Exception as e:
        return "Error", "Error", f"An error occurred while generating the response: {e}"


# Build the Gradio interface
demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=5, label="Input prompt"),
    outputs=[
        gr.Textbox(label="tokens/sec"),
        gr.Textbox(label="Thinking process"),
        gr.Textbox(label="Final response"),
    ],
    title="Qwen3-0.6B-int4-ov",
    description="An inference app based on Qwen3-0.6B-int4-ov, with thinking-process separation and a GUI.",
)

if __name__ == "__main__":
    demo.launch()
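
# --- Optional: console smoke test with streaming (a minimal sketch, kept
# commented out so it does not interfere with the app above). openvino_genai's
# generate() also accepts a `streamer` callable that is invoked with each
# decoded subword as it is produced; returning False (or None) lets generation
# continue. Handy for checking the pipeline without starting the Gradio
# server. The prompt string below is only an example.
#
# def print_streamer(subword):
#     print(subword, end="", flush=True)
#     return False  # False/None => keep generating
#
# pipe.generate("What is OpenVINO?", max_new_tokens=256, streamer=print_streamer)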