fix for quant config from model (#540)
src/axolotl/utils/models.py
CHANGED
@@ -160,7 +160,7 @@ def load_model(
         model_kwargs["revision"] = cfg.model_revision
     if cfg.gptq:
         model_config = load_model_config(cfg)
-        if hasattr(model_config, "quantization_config"):
+        if not hasattr(model_config, "quantization_config"):
             LOG.warning("model config does not contain quantization_config information")
         else:
             model_kwargs["quantization_config"] = GPTQConfig(
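For context, the one-word change flips an inverted condition: before the fix, the "does not contain quantization_config" warning fired when the attribute was present, and the GPTQConfig path ran when it was absent. Below is a minimal standalone sketch of the corrected branch, not axolotl's exact code; the helper name is hypothetical and the GPTQConfig arguments are truncated in the hunk above, so bits=4 is only an illustrative placeholder.

import logging

from transformers import GPTQConfig

LOG = logging.getLogger(__name__)


def apply_gptq_quant_config(model_config, model_kwargs):
    # Warn only when the loaded model config carries no quantization_config;
    # otherwise pass a GPTQConfig through to the model loader kwargs.
    if not hasattr(model_config, "quantization_config"):
        LOG.warning("model config does not contain quantization_config information")
    else:
        # bits=4 is a placeholder; the real arguments are cut off in the hunk above.
        model_kwargs["quantization_config"] = GPTQConfig(bits=4)
    return model_kwargs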