da03 committed
Commit c127bb8 · 1 Parent(s): 9c3326f

utils.py CHANGED
@@ -84,6 +84,8 @@ def sample_frame(model: LatentDiffusion, prompt: str, image_sequence: torch.Tensor
     # unconditional_guidance_scale=5.0,
     # unconditional_conditioning=uc,
     # eta=0)
+
+    print ('dfsf1')
     if False and DEBUG:
         print ('samples_ddim.shape', samples_ddim.shape)
         x_samples_ddim = samples_ddim[:, :3]
@@ -93,12 +95,14 @@ def sample_frame(model: LatentDiffusion, prompt: str, image_sequence: torch.Tensor
         #x_samples_ddim = torch.zeros((1, 3, 384, 512))
         #x_samples_ddim[:, :, 128:128+48, 160:160+64] = samples_ddim[:, :3]
     else:
+        print ('dfsf2')
         data_mean = -0.54
         data_std = 6.78
         data_min = -27.681446075439453
         data_max = 30.854148864746094
         x_samples_ddim = samples_ddim * data_std + data_mean
         x_samples_ddim = model.decode_first_stage(x_samples_ddim)
+        print ('dfsf3')
         #x_samples_ddim = pos_map.to(c['c_concat'].device).unsqueeze(0).expand(-1, 3, -1, -1)
         #x_samples_ddim = model.decode_first_stage(x_samples_ddim)
         #x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
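For context on the code being instrumented: in the else branch, the sampled latents samples_ddim are denormalized with hard-coded dataset statistics (data_mean, data_std) and then decoded back to pixel space through the model's first-stage autoencoder. Below is a minimal sketch of that step in isolation, using the constants from the diff; the helper name denormalize_and_decode is hypothetical, and model is assumed to be the LatentDiffusion instance from the function signature.

import torch

def denormalize_and_decode(model, samples_ddim: torch.Tensor) -> torch.Tensor:
    # Hypothetical helper; constants copied from the else branch in the diff.
    data_mean = -0.54
    data_std = 6.78
    # Undo the normalization applied to the latents, then decode them with
    # the first-stage autoencoder (LatentDiffusion.decode_first_stage).
    x = samples_ddim * data_std + data_mean
    return model.decode_first_stage(x)

The commented-out torch.clamp line suggests the decoded output is expected in [-1, 1] and mapped to [0, 1] for display; data_min and data_max are defined but unused in the hunk shown.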