hsuwill000 committed (verified)
Commit 72bde98 · 1 Parent(s): 981f96b

Update app.py

Files changed (1)
  1. app.py +59 -52
app.py CHANGED
@@ -1,62 +1,69 @@
- import gradio as gr
- import openvino_genai as ov_genai
  import huggingface_hub as hf_hub
-
- # OpenVINO Setup
- model_id = "OpenVINO/Qwen3-0.6B-int4-ov"  # Or your chosen model
- model_path = "Qwen3-0.6B-int4-ov"  # Local directory for the model
-
- # Download the model if it doesn't exist locally
- hf_hub.snapshot_download(model_id, local_dir=model_path, local_dir_use_symlinks=False)
-
-
- pipe = ov_genai.LLMPipeline(model_path, "CPU")
- tokenizer = pipe.get_tokenizer()
- tokenizer.set_chat_template(tokenizer.chat_template)
- pipe.start_chat()  # moved pipe.start_chat() here to run after pipeline initialization
-
-
- # Gradio Chatbot UI
- def user(user_message, history: list):
-     return "", history + [{"role": "user", "content": user_message}]
-
-
- def bot(history: list, user_message):
-     full_response = ""
-
-     def streamer(subword):
-         nonlocal full_response
-         full_response += subword
-         history[-1]['content'] = full_response
-         yield history
-         return ov_genai.StreamingStatus.RUNNING
-
-     history.append({"role": "assistant", "content": ""})
-
      try:
-         for updated_history in pipe.generate(user_message, streamer=streamer, max_new_tokens=100):
-             yield updated_history
-     except Exception as e:
-         print(f"Error during OpenVINO generation: {e}")  # Log the error!
-         history[-1]['content'] = "An error occurred while generating the response."
-         yield history
-
- with gr.Blocks() as demo:
-     chatbot = gr.Chatbot(type="messages")
-     msg = gr.Textbox()
-     submit_button = gr.Button("Submit")  # Added submit button
-     clear = gr.Button("Clear")
-
-     def respond(message, chat_history):  # Combined user and bot functions
-         user_message, chat_history = user(message, chat_history)
-         for bot_response in bot(chat_history, message):
-             chat_history = bot_response
-             yield "", chat_history
-
-
-     submit_button.click(respond, [msg, chatbot], [msg, chatbot])
-     msg.submit(respond, [msg, chatbot], [msg, chatbot])  # Optional: allow Enter key submission
-     clear.click(lambda: None, None, chatbot, queue=False)
-
  if __name__ == "__main__":
-     demo.queue().launch(share=True)
  import huggingface_hub as hf_hub
+ import time
+ import openvino_genai as ov_genai
+ import numpy as np
+ import gradio as gr
+ import re
+
+ # Download the models
+ model_ids = [
+     "OpenVINO/Qwen3-0.6B-int4-ov",
+     "OpenVINO/Qwen3-1.7B-int4-ov",
+     #"OpenVINO/Qwen3-4B-int4-ov",  # not available
+     "OpenVINO/Qwen3-8B-int4-ov",
+     "OpenVINO/Qwen3-14B-int4-ov",
+ ]
+
+ model_name_to_full_id = {model_id.split("/")[-1]: model_id for model_id in model_ids}  # Map short names to full repo ids
+
+ for model_id in model_ids:
+     model_path = model_id.split("/")[-1]  # Extract the model name
+     try:
+         hf_hub.snapshot_download(model_id, local_dir=model_path, local_dir_use_symlinks=False)
+         print(f"Successfully downloaded {model_id} to {model_path}")
+     except Exception as e:
+         print(f"Error downloading {model_id}: {e}")  # Handle download errors gracefully
+
+ # Build the inference pipeline (initialize with a default model first)
+ device = "CPU"
+ default_model_name = "Qwen3-0.6B-int4-ov"  # Choose a default model
+
+ def generate_response(prompt, model_name):
+     global pipe, tokenizer  # Access the global variables
+
+     model_path = model_name
+
+     print(f"Switching to model: {model_name}")
+     pipe = ov_genai.LLMPipeline(model_path, device)
+     tokenizer = pipe.get_tokenizer()
+     tokenizer.set_chat_template(tokenizer.chat_template)
+
      try:
+         generated = pipe.generate([prompt], max_length=1024)
+         tokenpersec = f'{generated.perf_metrics.get_throughput().mean:.2f}'
+
+         return tokenpersec, generated
+     except Exception as e:
+         return "Error", f"An error occurred while generating the response: {e}"
+
+
+ # Build the Gradio interface
+ model_choices = list(model_name_to_full_id.keys())
+
+ demo = gr.Interface(
+     fn=generate_response,
+     inputs=[
+         gr.Textbox(lines=5, label="Input prompt"),
+         gr.Dropdown(choices=model_choices, value=default_model_name, label="Select model")  # Added dropdown
+     ],
+     outputs=[
+         gr.Textbox(label="tokens/sec"),
+         gr.Textbox(label="Response"),
+     ],
+     title="Qwen3 Model Inference",
+     description="A Qwen3-based inference app that supports thinking-process separation and a GUI."
+ )
+
  if __name__ == "__main__":
+     demo.launch()
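
A side note on the removed streaming path: in openvino_genai, the streamer argument to generate is a plain callback, not a generator, so the removed streamer above (which both yields and returns) never satisfied that contract. A minimal sketch of the documented callback pattern, reusing the Qwen3-0.6B-int4-ov directory downloaded above; note that older openvino_genai releases return a bool from the callback instead of StreamingStatus:

import openvino_genai as ov_genai

pipe = ov_genai.LLMPipeline("Qwen3-0.6B-int4-ov", "CPU")
chunks = []

def streamer(subword: str) -> ov_genai.StreamingStatus:
    # Called once per decoded piece; RUNNING tells the pipeline to continue.
    chunks.append(subword)
    return ov_genai.StreamingStatus.RUNNING

pipe.generate("What is OpenVINO?", streamer=streamer, max_new_tokens=100)
print("".join(chunks))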
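
One design note on the new generate_response: it constructs a fresh LLMPipeline on every request, so each call pays the full model-load cost even when the dropdown selection has not changed. A hedged sketch of one way to reuse pipelines across requests; the get_pipe helper and _pipes cache are hypothetical names, not part of this commit:

import openvino_genai as ov_genai

_pipes = {}  # hypothetical cache: model directory name -> LLMPipeline

def get_pipe(model_name: str, device: str = "CPU") -> ov_genai.LLMPipeline:
    # Build each pipeline once per process and reuse it on later requests.
    if model_name not in _pipes:
        _pipes[model_name] = ov_genai.LLMPipeline(model_name, device)
    return _pipes[model_name]

# generate_response would then call get_pipe(model_name, device)
# instead of ov_genai.LLMPipeline(model_path, device).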