alexnasa committed
Commit 9487233 · verified · 1 Parent(s): 4c500e4

Update app.py

Files changed (1)
  1. app.py +7 -8
app.py CHANGED
@@ -211,9 +211,6 @@ def generate_image(
     image_1, caption_1,
     image_2 = None, caption_2 = None,
     image_3 = None, caption_3 = None,
-    use_id_1 = True,
-    use_id_2 = True,
-    use_id_3 = True,
     num_inference_steps = 8,
     cond_size = 256,
     target_height = 768,
@@ -237,7 +234,7 @@ def generate_image(
 
     images = [image_1, image_2, image_3]
     captions = [caption_1, caption_2, caption_3]
-    idips_checkboxes = [use_id_1, use_id_2, use_id_3]
+    idips_checkboxes = [True, True, True]
 
     # ——— Fallback to VLM caption if any caption is empty ———
     for idx, (img, cap) in enumerate(zip(images, captions)):
@@ -643,24 +640,28 @@ if __name__ == "__main__":
             "sample/woman2.jpg", "a woman",
             "sample/dress.jpg", "a dress",
             None, None,
+            8,
         ],
         [
             "ENT1 wearing a tiny hat",
             "sample/hamster.jpg", "a hamster",
             None, None,
             None, None,
+            8,
         ],
         [
             "a drawing of ENT1 and ENT2 that the ENT1 is running alongside of a giant ENT2, in style of a comic book",
             "sample/woman.jpg", "a woman",
             "sample/hamster.jpg", "a hamster",
             None, None,
+            8,
         ],
         [
             "ENT1 with ENT2 holding ENT3",
             "sample/sam.jpg", "a man",
             "sample/hair.jpg", "curly hair",
             "sample/can.jpg", "a can",
+            24,
         ],
     ],
     inputs=[
@@ -668,6 +669,7 @@ if __name__ == "__main__":
         images[0], captions[0],
         images[1], captions[1],
         images[2], captions[2],
+        steps_slider,
     ],
     outputs=[output, final_text],
     fn=generate_image,
@@ -682,10 +684,7 @@ if __name__ == "__main__":
         prompt,
         images[0], captions[0],
         images[1], captions[1],
-        images[2], captions[2],
-        idip_checkboxes[0],
-        idip_checkboxes[1],
-        idip_checkboxes[2],
+        images[2], captions[2],
         steps_slider,
         cond_size,
         target_height,
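For readers skimming the diff, here is a minimal Python sketch of how the touched pieces of app.py fit together after this commit. It is an illustration under assumptions, not the file itself: the generation pipeline is elided, the parameters above line 211 are inferred from the click wiring (hence `prompt`), and the examples block is assumed to be a `gr.Examples` component, which the `inputs=` / `outputs=` / `fn=` keywords suggest but the diff does not show.

# Sketch only: the real pipeline call and component definitions in app.py
# are outside this diff, so they are elided or stubbed here.
import gradio as gr

def generate_image(
    prompt,                          # params above line 211 are not in the diff;
    image_1, caption_1,              # `prompt` is inferred from the click wiring
    image_2=None, caption_2=None,
    image_3=None, caption_3=None,
    num_inference_steps=8,
    cond_size=256,
    target_height=768,
):
    images = [image_1, image_2, image_3]
    captions = [caption_1, caption_2, caption_3]
    # Post-commit: the per-image ID toggles are gone from the signature and
    # are pinned on internally instead of being read from three checkboxes.
    idips_checkboxes = [True, True, True]
    # ——— Fallback to VLM caption if any caption is empty ——— (unchanged, elided)
    ...

# Each example row now ends with a step count (8, or 24 for the heavier
# three-reference example), matched by appending steps_slider to inputs=[...].
gr.Examples(                         # assumed wrapper; not shown in the diff
    examples=[
        [
            "ENT1 wearing a tiny hat",
            "sample/hamster.jpg", "a hamster",
            None, None,
            None, None,
            8,
        ],
        # ...the other three rows follow the same shape
    ],
    inputs=[
        prompt,
        images[0], captions[0],
        images[1], captions[1],
        images[2], captions[2],
        steps_slider,
    ],
    outputs=[output, final_text],
    fn=generate_image,
)

Note that the eight items in each example row line up one-to-one with the eight components in inputs=[...]; before this commit both the rows and the input list were one element shorter, and the three ID checkboxes were passed to generate_image instead of being hardcoded.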