Commit b6e6a40
Parent(s): edf9c56
Update utils.py

utils.py CHANGED
@@ -41,12 +41,12 @@ def sample_frame(model: LatentDiffusion, prompt: str, image_sequence: torch.Tens
     c = model.enc_concat_seq(c, c_dict, 'c_concat')
     if pos_map is not None:
         print (pos_map.shape, c['c_concat'].shape)
-        c['c_concat'] = torch.cat([c['c_concat'], pos_map.to(c['c_concat'].device).unsqueeze(0)], dim=1)
+        c['c_concat'] = torch.cat([c['c_concat'][:, :, :64, :64], pos_map.to(c['c_concat'].device).unsqueeze(0)], dim=1)
 
     print ('sleeping')
     #time.sleep(120)
     print ('finished sleeping')
-
+    samples_ddim = model.p_sample_loop(cond=c, shape=[1, 3, 64, 64], return_intermediates=False, verbose=True)
     #samples_ddim, _ = sampler.sample(S=999,
     #                                 conditioning=c,
     #                                 batch_size=1,
@@ -56,9 +56,9 @@ def sample_frame(model: LatentDiffusion, prompt: str, image_sequence: torch.Tens
     #                                 unconditional_conditioning=uc,
     #                                 eta=0)
 
-
-    x_samples_ddim = pos_map.to(c['c_concat'].device).unsqueeze(0).expand(-1, 3, -1, -1)
-    x_samples_ddim = model.decode_first_stage(x_samples_ddim)
+    x_samples_ddim = model.decode_first_stage(samples_ddim)
+    #x_samples_ddim = pos_map.to(c['c_concat'].device).unsqueeze(0).expand(-1, 3, -1, -1)
+    #x_samples_ddim = model.decode_first_stage(x_samples_ddim)
     #x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
     x_samples_ddim = torch.clamp(x_samples_ddim, min=-1.0, max=1.0)
 
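Taken together, the commit does two things: it crops the encoded conditioning stack to the 64x64 latent grid before appending pos_map as an extra channel, and it replaces the previous debugging path (which skipped sampling and decoded the pos_map itself, expanded to 3 channels) with the model's full p_sample_loop, decoding the sampled latents instead. Below is a minimal sketch of both steps with dummy tensors standing in for the real conditioning; the channel counts and the larger-than-64 spatial size are assumptions (the real c_concat comes from model.enc_concat_seq), and sample_and_decode is a hypothetical wrapper around the committed calls, not a function in the repo.

    import torch

    # Stand-ins for the real tensors; shapes are assumptions. The actual
    # c_concat is evidently larger than 64x64 spatially, or the
    # [:, :, :64, :64] crop in the commit would be a no-op.
    c_concat = torch.randn(1, 9, 96, 96)   # batch x channels x H x W
    pos_map = torch.randn(1, 64, 64)       # single-channel positional map

    # Step 1 (the changed torch.cat line): crop the conditioning stack to
    # the latent grid, then append pos_map as one more channel.
    c_concat = torch.cat([c_concat[:, :, :64, :64], pos_map.unsqueeze(0)], dim=1)
    print(c_concat.shape)  # torch.Size([1, 10, 64, 64])

    # Step 2 (the new p_sample_loop and decode_first_stage lines): run the
    # diffusion chain in latent space, then decode to image space.
    # p_sample_loop and decode_first_stage are methods of the CompVis
    # LatentDiffusion class; the keyword arguments mirror the committed call.
    def sample_and_decode(model, c):
        samples_ddim = model.p_sample_loop(cond=c, shape=[1, 3, 64, 64],
                                           return_intermediates=False,
                                           verbose=True)
        x = model.decode_first_stage(samples_ddim)
        return torch.clamp(x, min=-1.0, max=1.0)

One trade-off worth noting: p_sample_loop is the plain ancestral DDPM loop, so it steps through every training timestep, whereas the still-commented-out sampler.sample(S=999, ...) path uses a DDIM-style sampler with a configurable step count.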