Libra-1995 committed on
Commit
d767e53
·
1 Parent(s): 5316350

feat: update score

Browse files
Files changed (1) hide show
  1. web_server.py +7 -10
web_server.py CHANGED
@@ -7,6 +7,7 @@ import io
7
  import enum
8
  import hugsim_env
9
  import subprocess as sp
 
10
  from collections import deque, OrderedDict
11
  from datetime import datetime
12
  from typing import Any, Dict, Optional, List
@@ -208,6 +209,7 @@ class EnvHandler:
208
  if self.env is not None:
209
  del self.env
210
  self.env = None
 
211
  self._log("Environment closed.")
212
 
213
  def reset_env(self):
@@ -216,6 +218,7 @@ class EnvHandler:
216
  """
217
  self._log_list = deque(maxlen=100)
218
  self._done = False
 
219
  self._switch_scene(0)
220
  self._log("Environment reset complete.")
221
 
@@ -299,6 +302,7 @@ class EnvHandler:
299
  results = hugsim_evaluate([self._save_data], ground_xyz, scene_xyz)
300
  with open(os.path.join(self.cur_otuput, 'eval.json'), 'w') as f:
301
  json.dump(results, f)
 
302
 
303
  self._log(f"Scene {self.cur_scene_index} completed. Evaluation results saved.")
304
 
@@ -322,16 +326,9 @@ class EnvHandler:
322
  """
323
  if not self._done:
324
  raise ValueError("Environment is not done yet. Cannot calculate score.")
325
-
326
- all_score_json = []
327
- for scene_config in self.scene_list:
328
- scene_output = os.path.join(self.base_output, scene_config.name)
329
- with open(os.path.join(scene_output, 'eval.json'), 'r') as f:
330
- score_json = json.load(f)
331
- all_score_json.append(score_json)
332
-
333
- rc = np.mean([score['rc'] for score in all_score_json]).round(4)
334
- hdscore = np.mean([score['hdscore'] for score in all_score_json]).round(4)
335
  return {"rc": rc, "hdscore": hdscore}
336
 
337
 
 
7
  import enum
8
  import hugsim_env
9
  import subprocess as sp
10
+ import shutil
11
  from collections import deque, OrderedDict
12
  from datetime import datetime
13
  from typing import Any, Dict, Optional, List
 
209
  if self.env is not None:
210
  del self.env
211
  self.env = None
212
+ shutil.rmtree(self.base_output, ignore_errors=True)
213
  self._log("Environment closed.")
214
 
215
  def reset_env(self):
 
218
  """
219
  self._log_list = deque(maxlen=100)
220
  self._done = False
221
+ self._score_list = []
222
  self._switch_scene(0)
223
  self._log("Environment reset complete.")
224
 
 
302
  results = hugsim_evaluate([self._save_data], ground_xyz, scene_xyz)
303
  with open(os.path.join(self.cur_otuput, 'eval.json'), 'w') as f:
304
  json.dump(results, f)
305
+ self._score_list.append(results.copy())
306
 
307
  self._log(f"Scene {self.cur_scene_index} completed. Evaluation results saved.")
308
 
 
326
  """
327
  if not self._done:
328
  raise ValueError("Environment is not done yet. Cannot calculate score.")
329
+
330
+ rc = np.mean([float(score['rc']) for score in self._score_list]).round(4)
331
+ hdscore = np.mean([float(score['hdscore']) for score in self._score_list]).round(4)
 
 
 
 
 
 
 
332
  return {"rc": rc, "hdscore": hdscore}
333
 
334