Update app.py
app.py
CHANGED
```diff
@@ -19,13 +19,15 @@ tokenizer.set_chat_template(tokenizer.chat_template)
 
 
 def generate_response(prompt):
-    full_response = ""
+    full_response = ""
+    tokenpersec = "Calculating..."
+    global response_box  # declare response_box as a global variable
 
     def streamer(subword):
         nonlocal full_response
         full_response += subword
-
-        return ov_genai.StreamingStatus.RUNNING
+        response_box.update(value=full_response)  # update the Textbox contents
+        return ov_genai.StreamingStatus.RUNNING
 
     try:
         # use streaming generation
@@ -38,16 +40,12 @@ def generate_response(prompt):
 
 
 # build the Gradio interface
-
-
-
-
-
-
-    ],
-    title="Qwen3-0.6B-int4-ov ",
-    description="Inference app based on Qwen3-0.6B-int4-ov, supporting separated thinking output and a GUI."
-)
+with gr.Blocks() as demo:
+    input_box = gr.Textbox(lines=5, label="Prompt")
+    response_box = gr.Textbox(label="Response", streaming=True)  # enable streaming
+    speed_box = gr.Textbox(label="tokens/sec")
+
+    input_box.change(fn=generate_response, inputs=input_box, outputs=[speed_box, response_box])  # connect input box changes to generate_response
 
 if __name__ == "__main__":
     demo.launch()
```
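Two of the added lines may not behave as intended: `gr.Textbox` does not appear to accept a `streaming` argument, and calling `response_box.update(...)` from inside the streamer callback generally does not push text to the browser. The usual Gradio pattern is to make the event handler a generator that yields partial output. Below is a minimal sketch of that pattern, assuming an `openvino_genai.LLMPipeline`; the model path, the `max_new_tokens` value, the queue-based hand-off, and the use of `submit` instead of `change` are illustrative choices, not taken from this Space.

```python
import queue
import threading
import time

import gradio as gr
import openvino_genai as ov_genai

# The model directory is an assumption for illustration; the Space loads its own
# converted OpenVINO model.
pipe = ov_genai.LLMPipeline("Qwen3-0.6B-int4-ov", "CPU")


def generate_response(prompt):
    token_queue = queue.Queue()  # hands sub-words from the worker thread to this generator
    start = time.time()
    subword_count = 0

    def streamer(subword):
        token_queue.put(subword)                 # forward each new sub-word to the UI side
        return ov_genai.StreamingStatus.RUNNING  # keep generation running

    def worker():
        pipe.generate(prompt, max_new_tokens=512, streamer=streamer)
        token_queue.put(None)                    # sentinel: generation finished

    threading.Thread(target=worker, daemon=True).start()

    full_response = ""
    while True:
        subword = token_queue.get()
        if subword is None:
            break
        full_response += subword
        subword_count += 1
        tokenpersec = f"{subword_count / max(time.time() - start, 1e-6):.1f}"
        # Yielding a tuple streams partial values into both output components.
        yield tokenpersec, full_response


with gr.Blocks() as demo:
    input_box = gr.Textbox(lines=5, label="Prompt")
    response_box = gr.Textbox(label="Response")
    speed_box = gr.Textbox(label="tokens/sec")
    # submit fires when the user presses Enter; change would re-run on every keystroke.
    input_box.submit(fn=generate_response, inputs=input_box, outputs=[speed_box, response_box])

if __name__ == "__main__":
    demo.launch()
```

The queue decouples the blocking `pipe.generate` call from Gradio's event loop, which removes the need for the `global response_box` workaround; the yielded tuple matches the `outputs=[speed_box, response_box]` ordering used in the commit.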