Mdrnfox committed on
Commit
8359026
·
verified ·
1 Parent(s): 1e513f3

Update run_eval.py

Browse files
Files changed (1) hide show
  1. run_eval.py +4 -4
run_eval.py CHANGED
@@ -133,6 +133,10 @@ for cfg in CONFIGS:
133
 
134
  try:
135
  res = evaluator.simple_evaluate(model=hf_lm, tasks=tasks)
 
 
 
 
136
  del merged_model
137
  del peft_model
138
  del base_model
@@ -145,10 +149,6 @@ for cfg in CONFIGS:
145
  print(f"Evaluation failed for {adapter_repo}: {e}")
146
  continue
147
 
148
- if not res.get("results"):
149
- print(f"No results returned for {adapter_repo}. Skipping...")
150
- continue
151
-
152
  meta = {
153
  "model_id": adapter_repo,
154
  "adapter_type": adapter_type,
 
133
 
134
  try:
135
  res = evaluator.simple_evaluate(model=hf_lm, tasks=tasks)
136
+ print(f"Raw results for {adapter_repo}: {res}")
137
+ if not res.get("results"):
138
+ print(f"⚠️ Empty results — likely a task or model compatibility issue for: {adapter_repo}")
139
+ continue
140
  del merged_model
141
  del peft_model
142
  del base_model
 
149
  print(f"Evaluation failed for {adapter_repo}: {e}")
150
  continue
151
 
 
 
 
 
152
  meta = {
153
  "model_id": adapter_repo,
154
  "adapter_type": adapter_type,