oliver-aizip committed on
Commit
7f28f16
·
1 Parent(s): f35135e

hopefully fixed qwen

Browse files
Files changed (1) hide show
  1. utils/models.py +2 -6
utils/models.py CHANGED
@@ -23,7 +23,7 @@ models = {
23
  "Qwen3-0.6b": "qwen/qwen3-0.6b",
24
  "Qwen3-1.7b": "qwen/qwen3-1.7b",
25
  "Qwen3-4b": "qwen/qwen3-4b",
26
- "SmolLM2-1.7b-Instruct": "huggingfacetb/smolllm2-1.7b-instruct",
27
  "EXAONE-3.5-2.4B-instruct": "LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct",
28
  "OLMo-2-1B-Instruct": "allenai/OLMo-2-0425-1B-Instruct",
29
 
@@ -107,7 +107,7 @@ def run_inference(model_name, context, question):
107
  model_kwargs["enable_thinking"] = False
108
 
109
  try:
110
- tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left", token=True)
111
  accepts_sys = (
112
  "System role not supported" not in tokenizer.chat_template
113
  if tokenizer.chat_template else False # Handle missing chat_template
@@ -125,10 +125,6 @@ def run_inference(model_name, context, question):
125
  model=model_name,
126
  tokenizer=tokenizer,
127
  device_map='auto',
128
- do_sample=True,
129
- temperature=0.6,
130
- top_p=0.9,
131
- model_kwargs=model_kwargs,
132
  trust_remote_code=True,
133
  )
134
 
 
23
  "Qwen3-0.6b": "qwen/qwen3-0.6b",
24
  "Qwen3-1.7b": "qwen/qwen3-1.7b",
25
  "Qwen3-4b": "qwen/qwen3-4b",
26
+ "SmolLM2-1.7b-Instruct": "HuggingFaceTB/SmolLM2-1.7B-Instruct",
27
  "EXAONE-3.5-2.4B-instruct": "LGAI-EXAONE/EXAONE-3.5-2.4B-Instruct",
28
  "OLMo-2-1B-Instruct": "allenai/OLMo-2-0425-1B-Instruct",
29
 
 
107
  model_kwargs["enable_thinking"] = False
108
 
109
  try:
110
+ tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side="left", token=True, kwargs=model_kwargs)
111
  accepts_sys = (
112
  "System role not supported" not in tokenizer.chat_template
113
  if tokenizer.chat_template else False # Handle missing chat_template
 
125
  model=model_name,
126
  tokenizer=tokenizer,
127
  device_map='auto',
 
 
 
 
128
  trust_remote_code=True,
129
  )
130