hsuwill000 committed (verified)
Commit 60685d1 · 1 Parent(s): 759aea4

Update app.py

Files changed (1): app.py +5 -10
app.py CHANGED
@@ -33,11 +33,6 @@ device = "CPU"
 default_model_name = "Qwen3-0.6B-int4-ov"  # Choose a default model
 model_path = model_name_to_id[default_model_name].split("/")[-1]
 
-pipe = ov_genai.LLMPipeline(model_path, device)
-tokenizer = pipe.get_tokenizer()
-tokenizer.set_chat_template(tokenizer.chat_template)
-
-
 def generate_response(prompt, model_name):
     global pipe, tokenizer  # Access the global variables
 
@@ -45,11 +40,11 @@ def generate_response(prompt, model_name):
     model_id = model_name_to_id[model_name]
     new_model_path = model_id.split("/")[-1]
 
-    if pipe.model_name != new_model_path:  # Assuming the LLMPipeline has a model_name property
-        print(f"Switching to model: {model_name}")
-        pipe = ov_genai.LLMPipeline(new_model_path, device)
-        tokenizer = pipe.get_tokenizer()
-        tokenizer.set_chat_template(tokenizer.chat_template)
+
+    print(f"Switching to model: {model_name}")
+    pipe = ov_genai.LLMPipeline(new_model_path, device)
+    tokenizer = pipe.get_tokenizer()
+    tokenizer.set_chat_template(tokenizer.chat_template)
 
 
     try:
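The removed guard depended on a model_name attribute whose existence the code's own comment flagged as an assumption; after this commit the pipeline is simply rebuilt on every call. A minimal sketch of an alternative, using only the calls already present in app.py: track the loaded path in a module-level variable (current_model_path is a hypothetical name, not part of this repo) and reload only on an actual model switch.

# Sketch only - not part of this commit. Caches the loaded pipeline by path
# so switching models triggers a reload, but repeated calls with the same
# model do not.
import openvino_genai as ov_genai

device = "CPU"
pipe = None
tokenizer = None
current_model_path = None  # hypothetical module-level tracker


def ensure_pipeline(new_model_path):
    """Reload the OpenVINO GenAI pipeline only when the model path changes."""
    global pipe, tokenizer, current_model_path
    if current_model_path != new_model_path:
        print(f"Switching to model: {new_model_path}")
        pipe = ov_genai.LLMPipeline(new_model_path, device)
        tokenizer = pipe.get_tokenizer()
        tokenizer.set_chat_template(tokenizer.chat_template)
        current_model_path = new_model_path
    return pipe, tokenizer

generate_response could then call ensure_pipeline(new_model_path) in place of the unconditional reload, trading a small amount of module state for faster repeated requests against the same model.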