da03 committed on
Commit f942748
1 Parent(s): b87ef92
Files changed (2):
  1. main.py +1 -1
  2. utils.py +1 -0
main.py CHANGED
@@ -223,7 +223,7 @@ def predict_next_frame(previous_frames, previous_actions: List[Tuple[str, List[i
     # Convert the image sequence to a tensor and concatenate in the channel dimension
     #image_sequence_tensor = torch.from_numpy(normalize_images(image_sequence_list, target_range=(-1, 1)))
     #image_sequence_tensor = image_sequence_tensor.to(device)
-    image_sequence_tensor = torch.cat(image_sequence, dim=1)
+    image_sequence_tensor = torch.cat(image_sequence, dim=-1)
 
     #image_sequence_tensor = (image_sequence_tensor - data_mean) / data_std
 
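The change moves the concatenation axis from dim=1 to dim=-1. As a rough illustration of the difference (the tensor layout here is an assumption, not taken from the repo; the comment above the changed line only says the frames are joined in the channel dimension), concatenating NCHW-shaped frames along dim=1 stacks channels, while dim=-1 stacks along the last (width) axis:

import torch

# Two hypothetical frames shaped (batch, channels, height, width); the real
# layout of image_sequence in this repo is an assumption.
frame_a = torch.zeros(1, 3, 64, 64)
frame_b = torch.zeros(1, 3, 64, 64)

# Old line: concatenate along the channel axis.
print(torch.cat([frame_a, frame_b], dim=1).shape)   # torch.Size([1, 6, 64, 64])

# New line: concatenate along the last (width) axis.
print(torch.cat([frame_a, frame_b], dim=-1).shape)  # torch.Size([1, 3, 64, 128])
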
utils.py CHANGED
@@ -45,6 +45,7 @@ def sample_frame(model: LatentDiffusion, prompt: str, image_sequence: torch.Tens
     #print (c['c_crossattn'][0])
     print (prompt)
     c = {'c_concat': image_sequence.unsqueeze(0)}
+    print (image_sequence.shape, c['c_concat'].shape)
     #c = model.enc_concat_seq(c, c_dict, 'c_concat')
     # Zero out the corresponding subtensors in c_concat for padding images
     #padding_mask = torch.isclose(image_sequence, torch.tensor(-1.0), rtol=1e-5, atol=1e-5).all(dim=(1, 2, 3)).unsqueeze(0)
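The added print simply traces the shapes around the unsqueeze(0) call that builds the c_concat conditioning entry. A minimal sketch with a placeholder shape (the real image_sequence shape depends on the caller) shows the batch dimension it prepends:

import torch

# Placeholder shape for image_sequence; the actual shape is not known from this diff.
image_sequence = torch.zeros(12, 64, 64)

c = {'c_concat': image_sequence.unsqueeze(0)}
print(image_sequence.shape, c['c_concat'].shape)
# torch.Size([12, 64, 64]) torch.Size([1, 12, 64, 64])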