Merge pull request #76 from OpenAccess-AI-Collective/truthy-validation
src/axolotl/utils/models.py
CHANGED
@@ -364,7 +364,7 @@ def load_lora(model, cfg):
         PeftModel,
     )
 
-    lora_target_modules = list(cfg.lora_target_modules)
+    lora_target_modules = list(cfg.lora_target_modules or [])
 
     if cfg.lora_target_linear:
         bits = None
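The `or []` guard matters because `lora_target_modules` may be absent from the YAML config, in which case the attribute lookup yields None and `list(None)` raises a TypeError. A minimal sketch of the before/after behavior, using SimpleNamespace as a stand-in for axolotl's actual config object:

```python
# Stand-in for axolotl's attribute-style config; the real object is a
# dict wrapper, but SimpleNamespace reproduces the attribute access.
from types import SimpleNamespace

cfg = SimpleNamespace(lora_target_modules=None)  # key unset in the YAML

# Before the fix: list(None) raises TypeError.
try:
    lora_target_modules = list(cfg.lora_target_modules)
except TypeError as err:
    print(f"unpatched: {err}")

# After the fix: any falsy value falls back to an empty list, which
# downstream code (e.g. the lora_target_linear branch) can still extend.
lora_target_modules = list(cfg.lora_target_modules or [])
print(f"patched: {lora_target_modules}")  # patched: []
```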
src/axolotl/utils/validation.py
CHANGED
@@ -5,12 +5,12 @@ def validate_config(cfg):
     if cfg.adapter == "qlora":
         if cfg.merge_lora:
             # can't merge qlora if loaded in 8bit or 4bit
-            assert cfg.load_in_8bit is False
-            assert cfg.load_4bit is False
+            assert cfg.load_in_8bit is not True
+            assert cfg.load_4bit is not True
             assert cfg.load_in_4bit is False
         else:
-            assert cfg.load_in_8bit is False
-            assert cfg.load_4bit is False
+            assert cfg.load_in_8bit is not True
+            assert cfg.load_4bit is not True
             assert cfg.load_in_4bit is True
     if not cfg.load_in_8bit and cfg.adapter == "lora":
         logging.warning("We recommend setting `load_in_8bit: true` for LORA finetuning")