AustingDong committed on
Commit
2bbc5f6
·
1 Parent(s): 8b5e432

Update evaluate.py

Browse files
Files changed (1) hide show
  1. evaluate/evaluate.py +14 -1
evaluate/evaluate.py CHANGED
@@ -10,8 +10,20 @@ def set_seed(model_seed = 42):
10
  np.random.seed(model_seed)
11
  torch.cuda.manual_seed(model_seed) if torch.cuda.is_available() else None
12
 
 
 
 
 
 
 
 
 
 
 
 
13
  def evaluate(model_type, num_eval = 10):
14
  for eval_idx in range(num_eval):
 
15
  set_seed(np.random.randint(0, 1000))
16
  model_utils, vl_gpt, tokenizer = None, None, None
17
 
@@ -61,6 +73,7 @@ if __name__ == '__main__':
61
 
62
  # models = ["ChartGemma", "Janus-Pro-1B", "Janus-Pro-7B", "LLaVA-1.5-7B"]
63
  # models = ["ChartGemma", "Janus-Pro-1B"]
64
- models = ["Janus-Pro-7B", "LLaVA-1.5-7B"]
 
65
  for model_type in models:
66
  evaluate(model_type=model_type, num_eval=10)
 
10
  np.random.seed(model_seed)
11
  torch.cuda.manual_seed(model_seed) if torch.cuda.is_available() else None
12
 
13
+ def clean():
14
+ # Empty CUDA cache
15
+ if torch.cuda.is_available():
16
+ torch.cuda.empty_cache()
17
+ torch.cuda.ipc_collect() # Frees inter-process CUDA memory
18
+
19
+ # Empty MacOS Metal backend (if using Apple Silicon)
20
+ if torch.backends.mps.is_available():
21
+ torch.mps.empty_cache()
22
+
23
+
24
  def evaluate(model_type, num_eval = 10):
25
  for eval_idx in range(num_eval):
26
+ clean()
27
  set_seed(np.random.randint(0, 1000))
28
  model_utils, vl_gpt, tokenizer = None, None, None
29
 
 
73
 
74
  # models = ["ChartGemma", "Janus-Pro-1B", "Janus-Pro-7B", "LLaVA-1.5-7B"]
75
  # models = ["ChartGemma", "Janus-Pro-1B"]
76
+ # models = ["Janus-Pro-7B", "LLaVA-1.5-7B"]
77
+ models = ["LLaVA-1.5-7B"]
78
  for model_type in models:
79
  evaluate(model_type=model_type, num_eval=10)