fffiloni committed · verified
Commit b0ad4c2 · Parent(s): 772a903

Update gradio_demo/app.py

Files changed (1):
  gradio_demo/app.py  (+42 -7)
gradio_demo/app.py CHANGED
@@ -117,8 +117,43 @@ def show_final_preview(preview_row):
@spaces.GPU(duration=70) #[uncomment to use ZeroGPU]
@torch.no_grad()
def instantir_restore(
-    lq, prompt="", steps=30, cfg_scale=7.0, guidance_end=1.0,
-    creative_restoration=False, seed=3407, height=1024, width=1024, preview_start=0.0, progress=gr.Progress(track_tqdm=True)):
+    lq,                                     # A low-quality PIL image to be restored
+    prompt="",                              # Optional: a text prompt guiding creative restoration
+    steps=30,                               # Number of denoising steps (controls generation detail and time)
+    cfg_scale=7.0,                          # Classifier-Free Guidance scale; higher = more prompt adherence
+    guidance_end=1.0,                       # When to stop guidance and allow free generation (0.0 - 1.0 or 0 - steps)
+    creative_restoration=False,             # Toggle creative mode (uses the LCM adapter)
+    seed=3407,                              # Seed for reproducibility
+    height=1024,                            # Target height for the output image
+    width=1024,                             # Target width for the output image
+    preview_start=0.0,                      # When to start showing previews (fraction or step index)
+    progress=gr.Progress(track_tqdm=True)   # Progress tracker for Gradio
+):
+    """
+    Restore or creatively re-generate a low-quality image using the InstantIR pipeline.
+
+    This function takes a degraded image and applies a guided diffusion model to restore it.
+    Optionally, a text prompt can be provided to guide a creative re-interpretation of the image.
+
+    Args:
+        lq (PIL.Image): The input low-quality image to restore.
+        prompt (str, optional): Text description to guide restoration or creative re-generation.
+        steps (int): Number of inference steps; more steps generally yield better results.
+        cfg_scale (float): Guidance scale for prompt adherence; higher means stronger influence.
+        guidance_end (float or int): When to stop using prompt guidance during diffusion.
+        creative_restoration (bool): Whether to enable imaginative regeneration via the LCM adapter.
+        seed (int): Random seed for reproducible results.
+        height (int): Output image height; used if the input is square.
+        width (int): Output image width; used if the input is square.
+        preview_start (float or int): Step or ratio at which previewing starts.
+        progress (gr.Progress): Progress tracker for UI feedback.
+
+    Returns:
+        Tuple[PIL.Image, List[List[Union[PIL.Image, str]]]]:
+            - The final restored image.
+            - A list of preview images from intermediate steps, with labels.
+    """
+
    if creative_restoration:
        if "lcm" not in pipe.unet.active_adapters():
            pipe.unet.set_adapter('lcm')
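The parameters documented above are what a programmatic caller sees on the endpoint registered further down in this diff via api_name="InstantIR". A minimal sketch of calling it with gradio_client follows; the Space id ("fffiloni/InstantIR") and the exact order of the exposed inputs are assumptions, so check client.view_api() against the running app first.

```python
# Hedged sketch: calling the endpoint registered as api_name="InstantIR".
# The Space id and the argument order are assumptions; verify with client.view_api().
from gradio_client import Client, handle_file

client = Client("fffiloni/InstantIR")          # assumed Space id
result = client.predict(
    handle_file("low_quality_photo.png"),      # lq: degraded input image
    "",                                        # prompt: optional creative guidance
    30,                                        # steps
    7.0,                                       # cfg_scale
    1.0,                                       # guidance_end
    False,                                     # creative_restoration
    3407,                                      # seed
    1024,                                      # height
    1024,                                      # width
    0.0,                                       # preview_start
    api_name="/InstantIR",
)
print(result)  # local path(s) to the restored image and the preview gallery
```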
@@ -232,10 +267,10 @@ with gr.Blocks() as demo:
        ],
        outputs=[output, pipe_out], api_name="InstantIR"
    )
-    steps.change(dynamic_guidance_slider, inputs=steps, outputs=guidance_end)
-    output.change(dynamic_preview_slider, inputs=steps, outputs=index)
-    index.release(unpack_pipe_out, inputs=[pipe_out, index], outputs=preview)
-    output.change(show_final_preview, inputs=pipe_out, outputs=preview)
+    steps.change(dynamic_guidance_slider, inputs=steps, outputs=guidance_end, show_api=False)
+    output.change(dynamic_preview_slider, inputs=steps, outputs=index, show_api=False)
+    index.release(unpack_pipe_out, inputs=[pipe_out, index], outputs=preview, show_api=False)
+    output.change(show_final_preview, inputs=pipe_out, outputs=preview, show_api=False)
    gr.Markdown(
        """
        ## Advance usage:
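The four listeners above appear to drive UI behavior only (syncing slider ranges and the preview gallery), so adding show_api=False removes them from the auto-generated API docs and keeps InstantIR as the documented endpoint. A minimal sketch of the pattern, with illustrative component and function names that are not from this repository:

```python
# Minimal sketch of hiding UI-only listeners from the API (illustrative names).
import gradio as gr

def restore(image):
    """Exposed endpoint: appears in the API docs / client schema."""
    return image

def sync_preview_range(steps):
    """UI helper: adjusts a slider's range, not useful as an API call."""
    return gr.Slider(maximum=steps)

with gr.Blocks() as demo:
    inp = gr.Image(label="Input")
    out = gr.Image(label="Restored")
    steps = gr.Slider(1, 100, value=30, label="Steps")
    preview_at = gr.Slider(0, 100, value=0, label="Preview from step")

    gr.Button("Run").click(restore, inputs=inp, outputs=out, api_name="restore")
    # show_api=False keeps this listener out of the generated API (and MCP tool list).
    steps.change(sync_preview_range, inputs=steps, outputs=preview_at, show_api=False)

demo.queue().launch()
```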
@@ -262,4 +297,4 @@ with gr.Blocks() as demo:
    ```
    """)

- demo.queue().launch()
+ demo.queue().launch(mcp_server=True)
 
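launch(mcp_server=True) asks Gradio to also serve the API-visible endpoints as MCP tools, which fits the other changes in this commit: the new docstring can feed the tool description, and show_api=False keeps the UI-only listeners out of the tool list. The sketch below is hedged: the version requirement (Gradio 5.28+ with the "mcp" extra) and the default /gradio_api/mcp/sse path come from the Gradio documentation, not from this repository.

```python
# Hedged sketch: how mcp_server=True relates to api_name, docstrings, and show_api.
# Assumes gradio >= 5.28 installed as `pip install "gradio[mcp]"` (per Gradio docs).
import gradio as gr

def restore(image_path: str, prompt: str = "") -> str:
    """Restore a degraded image; the docstring becomes the MCP tool description."""
    return image_path

with gr.Blocks() as demo:
    inp = gr.Image(type="filepath", label="Input")
    txt = gr.Textbox(label="Prompt")
    out = gr.Image(label="Restored")
    # api_name names the tool; show_api=False on a listener would hide it instead.
    gr.Button("Run").click(restore, inputs=[inp, txt], outputs=out, api_name="restore")

# Serves the UI as usual, plus an MCP server
# (by default at <app-url>/gradio_api/mcp/sse, per the Gradio MCP guide).
demo.queue().launch(mcp_server=True)
```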