Pusheen committed verified commit 98e4435 · Parent: ed25fb1

Update gligen/ldm/models/diffusion/plms.py

gligen/ldm/models/diffusion/plms.py CHANGED
@@ -197,11 +197,11 @@ class PLMSSampler(object):
     e_t, att_first, att_second, att_third, self_first, self_second, self_third = self.model(input)
     bboxes = input['boxes_att']
     object_positions = input['object_position']
-    loss1 = caculate_loss_self_att(self_first, self_second, self_third, bboxes=bboxes,
-                                   object_positions=object_positions, t = index1)*loss_scale
+    # loss1 = caculate_loss_self_att(self_first, self_second, self_third, bboxes=bboxes,
+    #                                object_positions=object_positions, t = index1)*loss_scale
     loss2 = caculate_loss_LoCo(att_second, att_first, att_third, bboxes=bboxes,
                                object_positions=object_positions, t = index1)*loss_scale
-    loss = loss1 + loss2
+    loss = loss2  # + loss1
     print('loss', loss, loss1, loss2)
     # hh = torch.autograd.backward(loss, retain_graph=True)
     grad_cond = torch.autograd.grad(loss.requires_grad_(True), [x])[0]
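
For context, this hunk computes a layout loss from the model's attention maps and differentiates it with respect to the current latent `x` to steer sampling. Below is a minimal, self-contained sketch of that loss-guided update pattern, assuming a toy attention map and a hypothetical `toy_box_loss`; these stand-ins, the tensor shapes, and the step size are illustrative only and are not the repository's `caculate_loss_LoCo` or its actual update schedule.

```python
# Minimal sketch of a loss-guided latent update, assuming hypothetical stand-ins
# (toy_box_loss, shapes, step_size) rather than the repo's caculate_loss_LoCo.
import torch
import torch.nn.functional as F

def toy_box_loss(att_map, bboxes):
    # Hypothetical loss: penalize attention mass that falls outside each box.
    loss = att_map.new_zeros(())
    for (x0, y0, x1, y1) in bboxes:
        inside = att_map[:, y0:y1, x0:x1].sum()
        total = att_map.sum() + 1e-8
        loss = loss + (1.0 - inside / total)
    return loss

loss_scale = 30.0   # plays the role of `loss_scale` in the hunk (value assumed)
step_size = 0.1     # hypothetical guidance step size

# Latent being denoised; gradients must be able to flow back into it.
x = torch.randn(1, 4, 64, 64, requires_grad=True)

# Stand-in for a cross-attention map the model would produce from x.
att = torch.sigmoid(F.avg_pool2d(x, 4).mean(dim=1))   # shape (1, 16, 16)

loss = toy_box_loss(att, bboxes=[(2, 2, 10, 10)]) * loss_scale

# Same pattern as the hunk: differentiate the loss w.r.t. the latent ...
grad_cond = torch.autograd.grad(loss.requires_grad_(True), [x])[0]

# ... and nudge the latent against that gradient before the next PLMS step.
x = (x - step_size * grad_cond).detach().requires_grad_(True)
```

After this change only the LoCo cross-attention loss contributes to `grad_cond`; the self-attention term is commented out rather than deleted, so it can be re-enabled by restoring `loss1`.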