# Requires: pip install openvino-genai huggingface-hub gradio
import queue
import threading
import time

import gradio as gr
import huggingface_hub as hf_hub
import openvino_genai as ov_genai

# Download the INT4-quantized model from the Hugging Face Hub
# (snapshot_download reuses the local copy on later runs)
model_id = "OpenVINO/Qwen3-0.6B-int4-ov"
model_path = "Qwen3-0.6B-int4-ov"

hf_hub.snapshot_download(model_id, local_dir=model_path)

# Build the inference pipeline (set device to "GPU" or "NPU" to target
# other OpenVINO devices)
device = "CPU"
pipe = ov_genai.LLMPipeline(model_path, device)
tokenizer = pipe.get_tokenizer()
# Keep the tokenizer's bundled chat template; pass a custom template
# string here to change how prompts are formatted
tokenizer.set_chat_template(tokenizer.chat_template)


def generate_response(prompt):
    """Stream the model's reply to the UI, then report tokens/sec."""
    try:
        full_response = ""
        token_count = 0
        start_time = time.time()

        # pipe.generate() blocks until generation finishes and calls the
        # streamer callback once per decoded subword, so run it in a worker
        # thread that feeds a queue; draining the queue here lets this
        # generator yield partial results to Gradio as they arrive.
        subword_queue = queue.Queue()
        errors = []

        def streamer(subword):
            subword_queue.put(subword)
            return ov_genai.StreamingStatus.RUNNING  # keep generating

        def worker():
            try:
                pipe.generate(prompt, streamer=streamer, max_new_tokens=1024)
            except Exception as exc:
                errors.append(exc)
            finally:
                subword_queue.put(None)  # sentinel: generation finished

        threading.Thread(target=worker, daemon=True).start()

        while True:
            subword = subword_queue.get()
            if subword is None:
                break
            full_response += subword
            token_count += 1
            yield (None, full_response)  # every yield refreshes the UI

        if errors:
            raise errors[0]

        elapsed_time = time.time() - start_time
        tokens_per_sec = token_count / elapsed_time if elapsed_time > 0 else 0
        tokenpersec = f"{tokens_per_sec:.2f}"

        yield (tokenpersec, full_response)  # final yield with the complete text

    except Exception as e:
        yield ("Error", f"An error occurred while generating the response: {e}")

# Build the Gradio interface
demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=5, label="Prompt"),
    outputs=[
        gr.Textbox(label="tokens/sec"),
        gr.Textbox(label="Response"),
    ],
    title="Qwen3-0.6B-int4-ov",
    description="Streaming inference demo for Qwen3-0.6B-int4-ov on OpenVINO, with a tokens/sec readout.",
)
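
# Gradio streams each value yielded by generate_response straight to the
# output textboxes, so the response updates as tokens are decoded.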

if __name__ == "__main__":
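    # launch() serves the app at http://127.0.0.1:7860 by default;
    # pass share=True for a temporary public link.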
    demo.launch()