hsuwill000 committed
Commit a15895b · verified · 1 Parent(s): 8e9ef4f

Update app.py

Files changed (1)
  1. app.py +66 -67
app.py CHANGED
@@ -1,70 +1,69 @@
- import gradio as gr
- import openvino_genai as ov_genai
  import huggingface_hub as hf_hub
-
- # OpenVINO Setup
- model_id = "OpenVINO/Qwen3-0.6B-int4-ov"  # Or your chosen model
- model_path = "Qwen3-0.6B-int4-ov"  # Local directory for the model
-
- # Download the model if it doesn't exist locally
- try:
-     # Check if the model directory exists. A quick and dirty check. Adjust as needed.
-     import os
-     if not os.path.exists(model_path):
-         hf_hub.snapshot_download(model_id, local_dir=model_path, local_dir_use_symlinks=False)
- except Exception as e:
-     print(f"Error downloading model: {e}")
-     print("Please ensure you have huggingface_hub installed and are authenticated if required.")
-     exit()  # Or handle the error more gracefully
-
- pipe = ov_genai.LLMPipeline(model_path, "CPU")
- tokenizer = pipe.get_tokenizer()
- tokenizer.set_chat_template(tokenizer.chat_template)
- pipe.start_chat()  # moved pipe.start_chat() here to run after pipeline initialization
-
-
- # Gradio Chatbot UI
- def user(user_message, history: list):
-     return "", history + [{"role": "user", "content": user_message}]
-
-
- def bot(history: list):
-     # Get the user's last message from the history
-     user_message = history[-1]["content"]
-
-     # Use OpenVINO to generate a response
-     full_response = ""  # Store the complete response
-
-     def streamer(subword):  # Local streamer function
-         nonlocal full_response  # Allow modification of outer scope variable
-         full_response += subword  # Accumulate the subword
-         history[-1]['content'] = full_response  # Update chatbot content
-         yield history
-         return ov_genai.StreamingStatus.RUNNING
-
-
-     # Initialize the bot message in history
-     history.append({"role": "assistant", "content": ""})
-
-     # Generate the response using the streaming function
-     for updated_history in pipe.generate(user_message, streamer=streamer, max_new_tokens=100):
-         yield updated_history
-
-     # Alternatively, without the step-by-step updates, you can just do this:
-     # full_response = pipe.generate(user_message, max_new_tokens=100)  # but this will skip the streaming
-     # history[-1]['content'] = full_response
-     # yield history
-
-
- with gr.Blocks() as demo:
-     chatbot = gr.Chatbot(type="messages")
-     msg = gr.Textbox()
-     clear = gr.Button("Clear")
-
-     msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
-         bot, chatbot, chatbot
-     )
-     clear.click(lambda: None, None, chatbot, queue=False)
 
  if __name__ == "__main__":
-     demo.queue().launch()
 
 
 
  import huggingface_hub as hf_hub
+ import time
+ import openvino_genai as ov_genai
+ import numpy as np
+ import gradio as gr
+ import re
+
+ # Download the models
+ model_ids = [
+     "OpenVINO/Qwen3-0.6B-int4-ov",
+     "OpenVINO/Qwen3-1.7B-int4-ov",
+     #"OpenVINO/Qwen3-4B-int4-ov",  # not usable
+     "OpenVINO/Qwen3-8B-int4-ov",
+     "OpenVINO/Qwen3-14B-int4-ov",
+
+ ]
+
+ model_name_to_full_id = {model_id.split("/")[-1]: model_id for model_id in model_ids}  # Create dictionary
+
+ for model_id in model_ids:
+     model_path = model_id.split("/")[-1]  # Extract model name
+     try:
+         hf_hub.snapshot_download(model_id, local_dir=model_path, local_dir_use_symlinks=False)
+         print(f"Successfully downloaded {model_id} to {model_path}")  # Optional: print confirmation
+     except Exception as e:
+         print(f"Error downloading {model_id}: {e}")  # Handle download errors gracefully
+
+ # Create the inference pipeline (initialize with a default model first)
+ device = "CPU"
+ default_model_name = "Qwen3-0.6B-int4-ov"  # Choose a default model
+
+ def generate_response(prompt, model_name):
+     global pipe, tokenizer  # Access the global variables
+
+     model_path = model_name
+
+     print(f"Switching to model: {model_name}")
+     pipe = ov_genai.LLMPipeline(model_path, device)
+     tokenizer = pipe.get_tokenizer()
+     tokenizer.set_chat_template(tokenizer.chat_template)
+
+     try:
+         generated = pipe.generate([prompt], max_length=1024)
+         tokenpersec = f'{generated.perf_metrics.get_throughput().mean:.2f}'
+
+         return tokenpersec, generated
+     except Exception as e:
+         return "Error", f"An error occurred while generating the response: {e}"
+
+
+ # Build the Gradio interface
+ model_choices = list(model_name_to_full_id.keys())
+
+ demo = gr.Interface(
+     fn=generate_response,
+     inputs=[
+         gr.Textbox(lines=5, label="Prompt"),
+         gr.Dropdown(choices=model_choices, value=default_model_name, label="Select model")  # Added dropdown
+     ],
+     outputs=[
+         gr.Textbox(label="tokens/sec"),
+         gr.Textbox(label="Response"),
+     ],
+     title="Qwen3 Model Inference",
+     description="A Qwen3-based inference app supporting thinking-process separation, with a GUI."
+ )
 
  if __name__ == "__main__":
+     demo.launch()
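
The new app.py imports re and its description mentions separating the model's thinking process, but the committed code does not use re yet. Below is a minimal, illustrative sketch (not part of the commit) of how a Qwen3-style <think>...</think> block could be split from the visible answer before display; split_thinking and the sample string are hypothetical names used only for this example.

import re

def split_thinking(text: str):
    """Split a Qwen3-style <think>...</think> block from the visible answer.

    Returns (thinking, answer); thinking is "" when no think block is present.
    """
    match = re.search(r"<think>(.*?)</think>", text, flags=re.DOTALL)
    if not match:
        return "", text.strip()
    thinking = match.group(1).strip()
    answer = (text[:match.start()] + text[match.end():]).strip()
    return thinking, answer

# Hypothetical model output used only to illustrate the helper
raw = "<think>The user greets me, so I answer politely.</think>Hello! How can I help you today?"
thinking, answer = split_thinking(raw)
print(thinking)  # The user greets me, so I answer politely.
print(answer)    # Hello! How can I help you today?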