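"""Gradio demo for OpenVINO/Qwen3-0.6B-int4-ov.

Downloads the pre-quantized INT4 model from the Hugging Face Hub, runs
streaming text generation with openvino_genai on the CPU, and reports the
measured throughput (tokens/sec) alongside the model's answer.
"""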
import huggingface_hub as hf_hub
import openvino_genai as ov_genai
import gradio as gr
import re

# Download the model from the Hugging Face Hub
model_id = "OpenVINO/Qwen3-0.6B-int4-ov"
model_path = "Qwen3-0.6B-int4-ov"

# local_dir_use_symlinks is deprecated (and ignored) in recent huggingface_hub releases
hf_hub.snapshot_download(model_id, local_dir=model_path)

# Build the inference pipeline
device = "CPU"  # "GPU" or "NPU" also work when the corresponding OpenVINO plugin is available
pipe = ov_genai.LLMPipeline(model_path, device)
tokenizer = pipe.get_tokenizer()
tokenizer.set_chat_template(tokenizer.chat_template)  # re-apply the model's built-in chat template
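
# Sampling behaviour can also be tuned through a GenerationConfig instead of
# per-call keyword arguments; a minimal sketch (the attribute names are part of
# the openvino_genai API, the values here are illustrative):
#   config = ov_genai.GenerationConfig()
#   config.max_new_tokens = 100
#   config.do_sample = True
#   config.temperature = 0.7
#   pipe.generate(prompt, config)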

# Generate a response, streaming subwords to the console as they arrive
def generate_response(prompt):
    try:
        response = ""
        tokens_per_sec = "N/A"  # default shown if metrics are unavailable

        # Streaming callback: invoked once for every decoded subword
        def streamer(subword):
            nonlocal response
            response += subword  # accumulate the full output
            print(subword, end='', flush=True)  # echo to the console for live inspection
            return ov_genai.StreamingStatus.RUNNING

        # Run streaming generation inside a chat session
        pipe.start_chat()
        result = pipe.generate(prompt, streamer=streamer, max_new_tokens=100)
        pipe.finish_chat()

        # Separate Qwen3's <think>...</think> reasoning block from the final answer
        match = re.search(r"<think>.*?</think>", response, flags=re.DOTALL)
        if match:
            response = response[match.end():].lstrip()

        # tokens/sec comes from the performance metrics attached to the result
        tokens_per_sec = f"{result.perf_metrics.get_throughput().mean:.2f}"
        return tokens_per_sec, response

    except Exception as e:
        return "N/A", f"Error while generating the response: {e}"

# Build the Gradio interface
demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=1, label="Prompt"),
    outputs=[
        gr.Textbox(label="tokens/sec"),
        gr.Textbox(label="Response")
    ],
    title="Qwen3-0.6B-int4-ov",
    description="An inference app built on Qwen3-0.6B-int4-ov, with thinking-process separation and a GUI."
)

if __name__ == "__main__":
    demo.launch()
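
# A minimal sketch of querying the running demo from another process with
# gradio_client (an assumed companion package; the endpoint name may differ):
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   tokens_per_sec, answer = client.predict("What is OpenVINO?", api_name="/predict")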