alexnasa committed on
Commit
464d1ca
·
verified ·
1 Parent(s): 454560a

Update pipelines/pipeline_seesr.py

Browse files
Files changed (1) hide show
  1. pipelines/pipeline_seesr.py +1 -14
pipelines/pipeline_seesr.py CHANGED
@@ -1025,7 +1025,6 @@ class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoade
1025
 
1026
  if use_KDS:
1027
  # 1) update batch_size to account for the new particles
1028
- print(f'beforehand {latents.shape}:{prompt_embeds.shape}')
1029
  batch_size = batch_size * num_particles
1030
 
1031
  # 2) now repeat latents/images/prompts
@@ -1061,19 +1060,14 @@ class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoade
1061
  if guess_mode and do_classifier_free_guidance:
1062
  # Infer ControlNet only for the conditional batch.
1063
  controlnet_latent_model_input = latents
1064
- controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1065
- print("well unexpected")
1066
-
1067
  else:
1068
  controlnet_latent_model_input = latent_model_input
1069
  controlnet_prompt_embeds = prompt_embeds
1070
- print("a possiblity")
1071
 
1072
  if h*w<=tile_size*tile_size: # tiled latent input
1073
  down_block_res_samples, mid_block_res_sample = [None]*10, None
1074
 
1075
- print(f"controlnet 1 started with {controlnet_latent_model_input.shape}:{ram_encoder_hidden_states.shape}")
1076
-
1077
  down_block_res_samples, mid_block_res_sample = self.controlnet(
1078
  controlnet_latent_model_input,
1079
  t,
@@ -1094,7 +1088,6 @@ class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoade
1094
  mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1095
 
1096
  # predict the noise residual
1097
- print(f"unet started with {latent_model_input.shape}:{prompt_embeds.shape}")
1098
  noise_pred = self.unet(
1099
  latent_model_input,
1100
  t,
@@ -1157,7 +1150,6 @@ class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoade
1157
  cond_list_t = torch.cat(cond_list, dim=0)
1158
  img_list_t = torch.cat(img_list, dim=0)
1159
  #print(input_list_t.shape, cond_list_t.shape, img_list_t.shape, fg_mask_list_t.shape)
1160
- print(f"controlnet 2 started with {cond_list_t.shape}:{controlnet_prompt_embeds.shape}")
1161
 
1162
  down_block_res_samples, mid_block_res_sample = self.controlnet(
1163
  cond_list_t,
@@ -1178,7 +1170,6 @@ class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoade
1178
  mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1179
 
1180
  # predict the noise residual
1181
- print(f"unet started with {input_list_t.shape}:{prompt_embeds.shape}")
1182
  model_out = self.unet(
1183
  input_list_t,
1184
  t,
@@ -1233,8 +1224,6 @@ class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoade
1233
 
1234
  if use_KDS:
1235
 
1236
- print("using KDS")
1237
-
1238
  # 2) Compute x₀ prediction
1239
  beta_t = 1 - self.scheduler.alphas_cumprod[t]
1240
  alpha_t = self.scheduler.alphas_cumprod[t].sqrt()
@@ -1275,8 +1264,6 @@ class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoade
1275
  ).detach().requires_grad_(True)
1276
  else:
1277
 
1278
- print("KDS Free")
1279
-
1280
  # compute the previous noisy sample x_t -> x_t-1
1281
  latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1282
 
 
1025
 
1026
  if use_KDS:
1027
  # 1) update batch_size to account for the new particles
 
1028
  batch_size = batch_size * num_particles
1029
 
1030
  # 2) now repeat latents/images/prompts
 
1060
  if guess_mode and do_classifier_free_guidance:
1061
  # Infer ControlNet only for the conditional batch.
1062
  controlnet_latent_model_input = latents
1063
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
 
 
1064
  else:
1065
  controlnet_latent_model_input = latent_model_input
1066
  controlnet_prompt_embeds = prompt_embeds
 
1067
 
1068
  if h*w<=tile_size*tile_size: # tiled latent input
1069
  down_block_res_samples, mid_block_res_sample = [None]*10, None
1070
 
 
 
1071
  down_block_res_samples, mid_block_res_sample = self.controlnet(
1072
  controlnet_latent_model_input,
1073
  t,
 
1088
  mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1089
 
1090
  # predict the noise residual
 
1091
  noise_pred = self.unet(
1092
  latent_model_input,
1093
  t,
 
1150
  cond_list_t = torch.cat(cond_list, dim=0)
1151
  img_list_t = torch.cat(img_list, dim=0)
1152
  #print(input_list_t.shape, cond_list_t.shape, img_list_t.shape, fg_mask_list_t.shape)
 
1153
 
1154
  down_block_res_samples, mid_block_res_sample = self.controlnet(
1155
  cond_list_t,
 
1170
  mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
1171
 
1172
  # predict the noise residual
 
1173
  model_out = self.unet(
1174
  input_list_t,
1175
  t,
 
1224
 
1225
  if use_KDS:
1226
 
 
 
1227
  # 2) Compute x₀ prediction
1228
  beta_t = 1 - self.scheduler.alphas_cumprod[t]
1229
  alpha_t = self.scheduler.alphas_cumprod[t].sqrt()
 
1264
  ).detach().requires_grad_(True)
1265
  else:
1266
 
 
 
1267
  # compute the previous noisy sample x_t -> x_t-1
1268
  latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1269