hsuwill000 committed on
Commit
c695fb5
·
verified ·
1 Parent(s): b37db0d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -8,7 +8,7 @@ import re
8
  import gc
9
 
10
  # 下載模型
11
- '''
12
  model_ids = [
13
  "OpenVINO/Qwen3-0.6B-int4-ov",
14
  "OpenVINO/Qwen3-1.7B-int4-ov",
@@ -27,12 +27,12 @@ for model_id in model_ids:
27
  except Exception as e:
28
  print(f"Error downloading {model_id}: {e}") # Handle download errors gracefully
29
 
30
- '''
31
- hf_hub.snapshot_download("hsuwill000/Llama-3.1-TAIDE-LX-8B-Chat_int4_ov", local_dir="ov", local_dir_use_symlinks=False)
32
 
33
  # 建立推理管線 (Initialize with a default model first)
34
  device = "CPU"
35
- InUsed_model_name = "ov" # Choose a default model
36
  pipe = ov_genai.LLMPipeline(InUsed_model_name, device)
37
  tokenizer = pipe.get_tokenizer()
38
  tokenizer.set_chat_template(tokenizer.chat_template)
 
8
  import gc
9
 
10
  # 下載模型
11
+
12
  model_ids = [
13
  "OpenVINO/Qwen3-0.6B-int4-ov",
14
  "OpenVINO/Qwen3-1.7B-int4-ov",
 
27
  except Exception as e:
28
  print(f"Error downloading {model_id}: {e}") # Handle download errors gracefully
29
 
30
+
31
+ #hf_hub.snapshot_download("hsuwill000/Llama-3.1-TAIDE-LX-8B-Chat_int4_ov", local_dir="ov", local_dir_use_symlinks=False)
32
 
33
  # 建立推理管線 (Initialize with a default model first)
34
  device = "CPU"
35
+ InUsed_model_name = "Qwen3-0.6B-int4-ov" # Choose a default model
36
  pipe = ov_genai.LLMPipeline(InUsed_model_name, device)
37
  tokenizer = pipe.get_tokenizer()
38
  tokenizer.set_chat_template(tokenizer.chat_template)