juliuse committed on
Commit
b941813
·
1 Parent(s): fe482e1

rm debugging code

Browse files
Files changed (1) hide show
  1. app.py +50 -54
app.py CHANGED
@@ -435,10 +435,6 @@ def super_resolution_image(lr_image, prompt_text, fixed_seed_value, use_random_s
435
  # add some noise to the input image
436
  noise_std = current_sr_config.get("degradation", {}).get("kwargs", {}).get("noise_std", 0.0)
437
  y_tensor += torch.randn_like(y_tensor) * noise_std
438
- # save for debugging purposes
439
- # first convert to PIL
440
- pil_y = postprocess_image(y_tensor)# Remove batch dimension and convert to PIL
441
- pil_y.save("debug_input_image.png") # Save the input image for debugging
442
 
443
 
444
  print("Running SR inference...")
@@ -455,8 +451,6 @@ def super_resolution_image(lr_image, prompt_text, fixed_seed_value, use_random_s
455
  upscaled_input = POSTERIOR_MODEL.forward_operator.nn(upscaled_input) # Use nearest neighbor upscaling
456
  upscaled_input = postprocess_image(upscaled_input)
457
  # save for debugging purposes
458
- upscaled_input.save("debug_upscaled_input.png") # Save the upscaled input image for debugging
459
- # upscaled_input = upscaled_input.resize((hr_resolution, hr_resolution), resample=Image.NEAREST)
460
  return (upscaled_input, output_pil), current_sr_config["seed"]
461
 
462
  except gr.Error as e:
@@ -734,55 +728,57 @@ Use the slider to compare the low resolution input image with the super-resolved
734
 
735
  gr.Markdown("---") # Separator
736
  gr.Markdown("### Click an example to load:")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
737
 
738
- # --- GALLERY FOR SUPER RESOLUTION EXAMPLES ---
739
- sr_gallery_items = [[ex[0], f"Prompt: {ex[1]} Steps: {ex[3]}"] for ex in example_list_sr]
740
- sr_gallery = gr.Gallery(
741
- value=sr_gallery_items,
742
- label="Super Resolution Examples",
743
- columns=4,
744
- height="auto",
745
- visible=True
746
- )
747
-
748
- # --- GALLERY FOR INPAINTING EXAMPLES ---
749
- inp_gallery_items = [[ex[0]["background"], f"Prompt: {ex[1]} Steps: {ex[3]}"] for ex in example_list_inp]
750
- inp_gallery = gr.Gallery(
751
- value=inp_gallery_items,
752
- label="Inpainting Examples",
753
- columns=4,
754
- height="auto",
755
- visible=True
756
- )
757
-
758
- def on_sr_gallery_select(evt: gr.SelectData):
759
- idx = evt.index
760
- ex = example_list_sr[idx]
761
- image_input.value = ex[0]
762
- prompt_text.value = ex[1]
763
- task_selector.value = ex[2]
764
- num_steps_slider.value = ex[3]
765
- update_visibility(ex[2])
766
- return [image_input, prompt_text, task_selector, num_steps_slider]
767
-
768
- def on_inp_gallery_select(evt: gr.SelectData):
769
- idx = evt.index
770
- ex = example_list_inp[idx]
771
- image_editor.value = ex[0]
772
- prompt_text.value = ex[1]
773
- task_selector.value = ex[2]
774
- num_steps_slider.value = ex[3]
775
- update_visibility(ex[2])
776
- return [image_editor, prompt_text, task_selector, num_steps_slider]
777
-
778
- sr_gallery.select(
779
- fn=on_sr_gallery_select,
780
- outputs=[image_input, prompt_text, task_selector, num_steps_slider]
781
- )
782
- inp_gallery.select(
783
- fn=on_inp_gallery_select,
784
- outputs=[image_editor, prompt_text, task_selector, num_steps_slider]
785
- )
786
 
787
  # --- End of Gradio UI definition ---
788
 
 
435
  # add some noise to the input image
436
  noise_std = current_sr_config.get("degradation", {}).get("kwargs", {}).get("noise_std", 0.0)
437
  y_tensor += torch.randn_like(y_tensor) * noise_std
 
 
 
 
438
 
439
 
440
  print("Running SR inference...")
 
451
  upscaled_input = POSTERIOR_MODEL.forward_operator.nn(upscaled_input) # Use nearest neighbor upscaling
452
  upscaled_input = postprocess_image(upscaled_input)
453
  # save for debugging purposes
 
 
454
  return (upscaled_input, output_pil), current_sr_config["seed"]
455
 
456
  except gr.Error as e:
 
728
 
729
  gr.Markdown("---") # Separator
730
  gr.Markdown("### Click an example to load:")
731
+ def load_inp_example(input_data):
732
+ for ex in example_list_inp:
733
+ if ex[0]["background"] == input_data:
734
+ prompt_value, task, num_steps = ex[1], ex[2], ex[3]
735
+ # Load inpainting example into ImageEditor
736
+ image_editor.clear()
737
+ if input_data and input_data.get("background"):
738
+ image_editor.upload_image(input_data["background"])
739
+ if input_data and input_data.get("layers"):
740
+ for layer in input_data["layers"]:
741
+ image_editor.upload_mask(layer)
742
+ # Update other UI controls
743
+ prompt_text.value = prompt_value
744
+ task_selector.value = task
745
+ num_steps_slider.value = num_steps
746
+ seed_slider.value = random.randint(0, 2**32 - 1)
747
+ guidance_scale_slider.value = default_guidance_scale
748
+ update_visibility(task)
749
+
750
+ def load_sr_example(input_data):
751
+ for ex in example_list_sr:
752
+ if ex[0] == input_data:
753
+ prompt_value, task, num_steps = ex[1], ex[2], ex[3]
754
+ # Load super-resolution example into Image component
755
+ image_input.clear()
756
+ image_input.upload_image(input_data)
757
+ # Update other UI controls
758
+ prompt_text.value = prompt_value
759
+ task_selector.value = task
760
+ num_steps_slider.value = num_steps
761
+ seed_slider.value = random.randint(0, 2**32 - 1)
762
+ guidance_scale_slider.value = default_guidance_scale
763
+ update_visibility(task)
764
+ # Examples for both tasks, loading via load_example (mutates components directly)
765
+ with gr.Row():
766
+ gr.Examples(
767
+ examples=example_list_sr,
768
+ inputs=[image_input, prompt_text, task_selector, num_steps_slider],
769
+ fn=load_sr_example,
770
+ label="Super Resolution Examples",
771
+ cache_examples=True
772
+ )
773
+ with gr.Row():
774
+ gr.Examples(
775
+ examples=example_list_inp,
776
+ inputs=[image_editor, prompt_text, task_selector, num_steps_slider],
777
+ fn=load_inp_example,
778
+ label="Inpainting Examples",
779
+ cache_examples=True
780
+ )
781
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
782
 
783
  # --- End of Gradio UI definition ---
784