hsuwill000 committed
Commit
6453441
·
verified ·
1 Parent(s): 8911ce0

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -4,7 +4,7 @@ from optimum.intel import OVModelForCausalLM
 from transformers import AutoTokenizer, pipeline
 
 # Load the model and tokenizer
-model_id = "hsuwill000/DeepSeek-R1-Distill-Qwen-1.5B-openvino-8bit"
+model_id = "hsuwill000/DeepSeek-R1-Distill-Qwen-1.5B-openvino"
 model = OVModelForCausalLM.from_pretrained(model_id, device="CPU")  # explicitly specify the device
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 
@@ -43,8 +43,8 @@ def respond(message):
 
 # Set up Gradio chat interface
 with gr.Blocks() as demo:
-    gr.Markdown("# DeepSeek-R1-Distill-Qwen-1.5B-openvino-8bit Chat")
-    gr.Markdown("Chat with DeepSeek-R1-Distill-Qwen-1.5B-openvino-8bit model.")
+    gr.Markdown("# DeepSeek-R1-Distill-Qwen-1.5B-openvino Chat")
+    gr.Markdown("Chat with DeepSeek-R1-Distill-Qwen-1.5B-openvino model.")
 
     chatbot = gr.Chatbot()
     msg = gr.Textbox(label="Your Message")
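For context, a minimal sketch of how the changed lines might fit into the full app.py. Only the lines shown in the diff above come from the actual file; the text-generation pipeline setup, the respond() body, and the Textbox wiring are assumptions for illustration.

import gradio as gr
from optimum.intel import OVModelForCausalLM
from transformers import AutoTokenizer, pipeline

# Load the OpenVINO model and its tokenizer (lines taken from the diff).
model_id = "hsuwill000/DeepSeek-R1-Distill-Qwen-1.5B-openvino"
model = OVModelForCausalLM.from_pretrained(model_id, device="CPU")  # explicitly specify the device
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Assumed: wrap the model in a transformers text-generation pipeline.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

def respond(message):
    # Hypothetical body; only the signature appears in the diff's hunk header.
    outputs = generator(message, max_new_tokens=256, return_full_text=False)
    return outputs[0]["generated_text"]

# Set up Gradio chat interface (Markdown headers from the diff; event wiring is assumed).
with gr.Blocks() as demo:
    gr.Markdown("# DeepSeek-R1-Distill-Qwen-1.5B-openvino Chat")
    gr.Markdown("Chat with DeepSeek-R1-Distill-Qwen-1.5B-openvino model.")

    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Your Message")

    def submit(message, history):
        # Append the (user, bot) turn to the Chatbot history and clear the textbox.
        return "", history + [(message, respond(message))]

    msg.submit(submit, [msg, chatbot], [msg, chatbot])

demo.launch()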