alexnasa committed on
Commit
703b4d2
·
verified ·
1 Parent(s): 60bfc7d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -58
app.py CHANGED
@@ -172,6 +172,13 @@ tag_model = ram(pretrained='preset/models/ram_swin_large_14m.pth',
172
  tag_model.eval()
173
  tag_model.to(device, dtype=weight_dtype)
174
 
 
 
 
 
 
 
 
175
  @spaces.GPU()
176
  def process(
177
  input_image: Image.Image,
@@ -187,7 +194,7 @@ def process(
187
  sample_times = 1,
188
  ) -> List[np.ndarray]:
189
 
190
- input_image = input_image.resize((256, 256), Image.Resampling.BILINEAR)
191
 
192
  process_size = 512
193
  resize_preproc = transforms.Compose([
@@ -251,62 +258,74 @@ def process(
251
  return input_image, images[0]
252
 
253
 
254
- #
255
- MARKDOWN = \
256
- """
257
- ## SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution
258
-
259
- [GitHub](https://github.com/cswry/SeeSR) | [Paper](https://arxiv.org/abs/2311.16518)
260
-
261
- If SeeSR is helpful for you, please help star the GitHub Repo. Thanks!
262
- """
263
-
264
- block = gr.Blocks().queue()
265
- with block:
266
- with gr.Row():
267
- gr.Markdown(MARKDOWN)
268
- with gr.Row():
269
- with gr.Column():
270
- input_image = gr.Image(type="pil")
271
- run_button = gr.Button("Magnify 4x")
272
- with gr.Accordion("Options", visible=False):
273
- user_prompt = gr.Textbox(label="User Prompt", value="")
274
- positive_prompt = gr.Textbox(label="Positive Prompt", value="clean, high-resolution, 8k, best quality, masterpiece")
275
- negative_prompt = gr.Textbox(
276
- label="Negative Prompt",
277
- value="dotted, noise, blur, lowres, oversmooth, longbody, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality"
278
- )
279
- cfg_scale = gr.Slider(label="Classifier Free Guidance Scale (Set to 1.0 in sd-turbo)", minimum=1, maximum=10, value=7.5, step=0)
280
- num_inference_steps = gr.Slider(label="Inference Steps", minimum=2, maximum=100, value=50, step=1)
281
- seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=231)
282
- sample_times = gr.Slider(label="Sample Times", minimum=1, maximum=10, step=1, value=1)
283
- latent_tiled_size = gr.Slider(label="Diffusion Tile Size", minimum=128, maximum=480, value=320, step=1)
284
- latent_tiled_overlap = gr.Slider(label="Diffusion Tile Overlap", minimum=4, maximum=16, value=4, step=1)
285
- scale_factor = gr.Number(label="SR Scale", value=4)
286
- with gr.Column():
287
- result_gallery = ImageSlider(
288
- interactive=False,
289
- label="Generated Image",
290
- )
291
- examples = gr.Examples(
292
- examples=[
293
- [
294
- "preset/datasets/test_datasets/179.png",
295
- ],
296
- [
297
- "preset/datasets/test_datasets/apologise.png",
298
- ],
299
- ],
300
- inputs=[
301
- input_image,
302
- ],
303
- outputs=[result_gallery],
304
- fn=process,
305
- cache_examples=True,
306
  )
307
- inputs = [
308
- input_image,
309
- ]
310
- run_button.click(fn=process, inputs=inputs, outputs=[result_gallery])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
311
 
312
- block.launch(share=True)
 
172
  tag_model.eval()
173
  tag_model.to(device, dtype=weight_dtype)
174
 
175
def preprocess_image(
    input_image: Image.Image,
) -> Image.Image:
    """Downscale the uploaded image to a fixed 256x256 low-resolution input.

    Fix: the committed version was missing the ``:`` after the signature,
    which is a SyntaxError and would crash the Space on import.

    Args:
        input_image: The image uploaded by the user (PIL image).

    Returns:
        A new 256x256 PIL image produced with bilinear resampling; the
        original upload is left untouched (``resize`` returns a copy).
    """
    return input_image.resize((256, 256), Image.Resampling.BILINEAR)
181
+
182
  @spaces.GPU()
183
  def process(
184
  input_image: Image.Image,
 
194
  sample_times = 1,
195
  ) -> List[np.ndarray]:
196
 
197
+
198
 
199
  process_size = 512
200
  resize_preproc = transforms.Compose([
 
258
  return input_image, images[0]
259
 
260
 
261
# Page layout: constrain the main column width so the slider gallery and
# controls stay centered on wide screens.
css = """
#col-container {
    margin: 0 auto;
    max-width: 1024px;
}
"""

with gr.Blocks(css=css) as demo:

    with gr.Column(elem_id="col-container"):

        with gr.Row():
            gr.HTML(
                """
                <div style="text-align: center;">
                    <p style="font-size:16px; display: inline; margin: 0;">
                        <strong>SeeSR</strong> Towards Semantics-Aware Real-World Image Super-Resolution
                    </p>
                    <a href="https://github.com/cswry/SeeSR" style="display: inline-block; vertical-align: middle; margin-left: 0.5em;">
                        <img src="https://img.shields.io/badge/GitHub-Repo-blue" alt="GitHub Repo">
                    </a>
                </div>
                """
            )
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(type="pil")
                run_button = gr.Button("Magnify 4x")
                # Shows the 256x256 LR image produced by preprocess_image;
                # this (not the raw upload) is what the SR pipeline consumes.
                preprocessed_image = gr.Image(label="preprocess image", type="pil")
                with gr.Accordion("Options", visible=False):
                    user_prompt = gr.Textbox(label="User Prompt", value="")
                    positive_prompt = gr.Textbox(label="Positive Prompt", value="clean, high-resolution, 8k, best quality, masterpiece")
                    negative_prompt = gr.Textbox(
                        label="Negative Prompt",
                        value="dotted, noise, blur, lowres, oversmooth, longbody, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality"
                    )
                    # Fix: the committed slider used step=0, which is not a
                    # valid increment; 0.1 keeps the 1-10 range usable.
                    cfg_scale = gr.Slider(label="Classifier Free Guidance Scale (Set to 1.0 in sd-turbo)", minimum=1, maximum=10, value=7.5, step=0.1)
                    num_inference_steps = gr.Slider(label="Inference Steps", minimum=2, maximum=100, value=50, step=1)
                    seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=231)
                    sample_times = gr.Slider(label="Sample Times", minimum=1, maximum=10, step=1, value=1)
                    latent_tiled_size = gr.Slider(label="Diffusion Tile Size", minimum=128, maximum=480, value=320, step=1)
                    latent_tiled_overlap = gr.Slider(label="Diffusion Tile Overlap", minimum=4, maximum=16, value=4, step=1)
                    scale_factor = gr.Number(label="SR Scale", value=4)
            with gr.Column():
                result_gallery = ImageSlider(
                    interactive=False,
                    label="Generated Image",
                )
        examples = gr.Examples(
            examples=[
                [
                    "preset/datasets/test_datasets/179.png",
                ],
                [
                    "preset/datasets/test_datasets/apologise.png",
                ],
            ],
            inputs=[
                input_image,
            ],
            outputs=[result_gallery],
            fn=process,
            cache_examples=True,
        )
        # Fix: inputs is a list for consistency with every other wiring in
        # this file (a bare component also works in Gradio, but mixing the
        # two styles is error-prone). The dead `inputs = [input_image]`
        # leftover from the pre-refactor click wiring is removed.
        run_button.click(fn=process, inputs=[preprocessed_image], outputs=[result_gallery])
        # Regenerate the LR preview whenever a new image is uploaded.
        input_image.upload(fn=preprocess_image, inputs=[input_image], outputs=[preprocessed_image])

demo.launch(share=True)