hsuwill000 committed on
Commit
a211782
·
verified ·
1 Parent(s): 71ae563

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -13
app.py CHANGED
@@ -19,13 +19,15 @@ tokenizer.set_chat_template(tokenizer.chat_template)
19
 
20
 
21
  def generate_response(prompt):
22
- full_response = "" # 用於儲存完整的回應
 
 
23
 
24
  def streamer(subword):
25
  nonlocal full_response
26
  full_response += subword
27
- yield full_response # 使用 yield 使 streamer 成為生成器
28
- return ov_genai.StreamingStatus.RUNNING # 返回 StreamingStatus.RUNNING
29
 
30
  try:
31
  # 使用流式生成
@@ -38,16 +40,12 @@ def generate_response(prompt):
38
 
39
 
40
  # 建立 Gradio 介面
41
- demo = gr.Interface(
42
- fn=generate_response,
43
- inputs=gr.Textbox(lines=5, label="輸入提示 (Prompt)"),
44
- outputs=[
45
- gr.Textbox(label="tokens/sec"),
46
- gr.Textbox(label="回應"),
47
- ],
48
- title="Qwen3-0.6B-int4-ov ",
49
- description="基於 Qwen3-0.6B-int4-ov 推理應用,支援思考過程分離與 GUI。"
50
- )
51
 
52
  if __name__ == "__main__":
53
  demo.launch()
 
19
 
20
 
21
  def generate_response(prompt):
22
+ full_response = ""
23
+ tokenpersec = "計算中..."
24
+ global response_box # 宣告 response_box 為全域變數
25
 
26
  def streamer(subword):
27
  nonlocal full_response
28
  full_response += subword
29
+ response_box.update(value=full_response) # 更新 Textbox 的內容
30
+ return ov_genai.StreamingStatus.RUNNING
31
 
32
  try:
33
  # 使用流式生成
 
40
 
41
 
42
  # 建立 Gradio 介面
43
+ with gr.Blocks() as demo:
44
+ input_box = gr.Textbox(lines=5, label="輸入提示 (Prompt)")
45
+ response_box = gr.Textbox(label="回應", streaming=True) # 啟用 streaming
46
+ speed_box = gr.Textbox(label="tokens/sec")
47
+
48
+ input_box.change(fn=generate_response, inputs=input_box, outputs=[speed_box, response_box]) # 將輸入框的改變連接到 generate_response 函數
 
 
 
 
49
 
50
  if __name__ == "__main__":
51
  demo.launch()