JiantaoLin committed on
Commit
2235e11
·
1 Parent(s): 98297e9
Files changed (1) hide show
  1. pipeline/kiss3d_wrapper.py +4 -4
pipeline/kiss3d_wrapper.py CHANGED
@@ -97,7 +97,7 @@ def init_wrapper_from_config(config_path):
97
 
98
  flux_redux_pipe.to(device=flux_device)
99
 
100
- logger.warning(f"GPU memory allocated after load flux model on {flux_device}: {torch.cuda.memory_allocated(device=flux_device) / 1024**3} GB")
101
 
102
  # TODO: load pulid model
103
 
@@ -147,7 +147,7 @@ def init_wrapper_from_config(config_path):
147
  recon_model.load_state_dict(state_dict, strict=True)
148
  recon_model.to(recon_device)
149
  recon_model.eval()
150
- logger.warning(f"GPU memory allocated after load reconstruction model on {recon_device}: {torch.cuda.memory_allocated(device=recon_device) / 1024**3} GB")
151
 
152
  # load llm
153
  llm_configs = config_.get('llm', None)
@@ -156,7 +156,7 @@ def init_wrapper_from_config(config_path):
156
  llm_device = llm_configs.get('device', 'cpu')
157
  llm, llm_tokenizer = load_llm_model(llm_configs['base_model'])
158
  llm.to(llm_device)
159
- logger.warning(f"GPU memory allocated after load llm model on {llm_device}: {torch.cuda.memory_allocated(device=llm_device) / 1024**3} GB")
160
  else:
161
  llm, llm_tokenizer = None, None
162
 
@@ -462,7 +462,7 @@ class kiss3d_wrapper(object):
462
  logger.info(f'Save image to {save_path}')
463
 
464
  return preprocessed
465
-
466
  @spaces.GPU
467
  def generate_3d_bundle_image_text(self,
468
  prompt,
 
97
 
98
  flux_redux_pipe.to(device=flux_device)
99
 
100
+ # logger.warning(f"GPU memory allocated after load flux model on {flux_device}: {torch.cuda.memory_allocated(device=flux_device) / 1024**3} GB")
101
 
102
  # TODO: load pulid model
103
 
 
147
  recon_model.load_state_dict(state_dict, strict=True)
148
  recon_model.to(recon_device)
149
  recon_model.eval()
150
+ # logger.warning(f"GPU memory allocated after load reconstruction model on {recon_device}: {torch.cuda.memory_allocated(device=recon_device) / 1024**3} GB")
151
 
152
  # load llm
153
  llm_configs = config_.get('llm', None)
 
156
  llm_device = llm_configs.get('device', 'cpu')
157
  llm, llm_tokenizer = load_llm_model(llm_configs['base_model'])
158
  llm.to(llm_device)
159
+ # logger.warning(f"GPU memory allocated after load llm model on {llm_device}: {torch.cuda.memory_allocated(device=llm_device) / 1024**3} GB")
160
  else:
161
  llm, llm_tokenizer = None, None
162
 
 
462
  logger.info(f'Save image to {save_path}')
463
 
464
  return preprocessed
465
+
466
  @spaces.GPU
467
  def generate_3d_bundle_image_text(self,
468
  prompt,