Mdrnfox committed on
Commit
e540b2c
Β·
verified Β·
1 Parent(s): d1a9a26

Update run_eval.py

Browse files
Files changed (1) hide show
  1. run_eval.py +13 -3
run_eval.py CHANGED
@@ -2,6 +2,7 @@
2
  import datetime, os, subprocess, tempfile
3
  from pathlib import Path
4
 
 
5
  import pandas as pd, yaml, torch
6
  from huggingface_hub import HfApi, login, hf_hub_download, model_info
7
  from lm_eval import evaluator
@@ -11,8 +12,10 @@ from transformers import (
11
  AutoModelForCausalLM,
12
  AutoModelForSequenceClassification,
13
  AutoTokenizer,
 
14
  )
15
 
 
16
  CONFIGS = []
17
 
18
  # ───── Load all configs ─────
@@ -67,6 +70,8 @@ for cfg in CONFIGS:
67
  try:
68
  base_model = AutoModelForCausalLM.from_pretrained(
69
  base_model_id,
 
 
70
  trust_remote_code=True,
71
  use_safetensors=True
72
  )
@@ -76,6 +81,8 @@ for cfg in CONFIGS:
76
  print(f"⚠️ Failed to load causal LM: {e}")
77
  base_model = AutoModelForSequenceClassification.from_pretrained(
78
  base_model_id,
 
 
79
  trust_remote_code=True,
80
  use_safetensors=True
81
  )
@@ -93,14 +100,17 @@ for cfg in CONFIGS:
93
  continue
94
 
95
  try:
96
- peft_model = PeftModel.from_pretrained(base_model, adapter_repo)
 
 
 
 
 
97
  merged_model = peft_model.merge_and_unload()
98
  except Exception as e:
99
  print(f"Failed to apply adapter {adapter_repo}: {e}")
100
  continue
101
 
102
- device = "cuda" if torch.cuda.is_available() else "cpu"
103
- merged_model.to(device)
104
  merged_model.eval()
105
 
106
  with tempfile.TemporaryDirectory() as td:
 
2
  import datetime, os, subprocess, tempfile
3
  from pathlib import Path
4
 
5
+ import gc
6
  import pandas as pd, yaml, torch
7
  from huggingface_hub import HfApi, login, hf_hub_download, model_info
8
  from lm_eval import evaluator
 
12
  AutoModelForCausalLM,
13
  AutoModelForSequenceClassification,
14
  AutoTokenizer,
15
+ BitsAndBytesConfig
16
  )
17
 
18
+
19
  CONFIGS = []
20
 
21
  # ───── Load all configs ─────
 
70
  try:
71
  base_model = AutoModelForCausalLM.from_pretrained(
72
  base_model_id,
73
+ device_map="auto",
74
+ torch_dtype=torch.float16,
75
  trust_remote_code=True,
76
  use_safetensors=True
77
  )
 
81
  print(f"⚠️ Failed to load causal LM: {e}")
82
  base_model = AutoModelForSequenceClassification.from_pretrained(
83
  base_model_id,
84
+ device_map="auto",
85
+ torch_dtype=torch.float16,
86
  trust_remote_code=True,
87
  use_safetensors=True
88
  )
 
100
  continue
101
 
102
  try:
103
+ peft_model = PeftModel.from_pretrained(
104
+ base_model,
105
+ adapter_repo,
106
+ device_map="auto",
107
+ torch_dtype=torch.float16,
108
+ )
109
  merged_model = peft_model.merge_and_unload()
110
  except Exception as e:
111
  print(f"Failed to apply adapter {adapter_repo}: {e}")
112
  continue
113
 
 
 
114
  merged_model.eval()
115
 
116
  with tempfile.TemporaryDirectory() as td: