min_size parameter
gradio_demo.py CHANGED (+9 -6)
@@ -79,7 +79,7 @@ def check(input_image):
 def reset_feedback():
     return 3, ''
 
-@spaces.GPU(duration=…)
+@spaces.GPU(duration=360)
 def stage1_process(input_image, gamma_correction):
     print('Start stage1_process')
     if torch.cuda.device_count() == 0:
@@ -101,7 +101,7 @@ def stage1_process(input_image, gamma_correction):
     print('End stage1_process')
     return LQ, gr.update(visible = True)
 
-@spaces.GPU(duration=…)
+@spaces.GPU(duration=360)
 def llave_process(input_image, temperature, top_p, qs=None):
     print('Start llave_process')
     if torch.cuda.device_count() == 0:
@@ -117,7 +117,7 @@ def llave_process(input_image, temperature, top_p, qs=None):
     print('End llave_process')
     return captions[0]
 
-@spaces.GPU(duration=…)
+@spaces.GPU(duration=360)
 def stage2_process(
     noisy_image,
     denoise_image,
@@ -125,6 +125,7 @@ def stage2_process(
     a_prompt,
     n_prompt,
     num_samples,
+    min_size,
     downscale,
     upscale,
     edm_steps,
@@ -148,7 +149,6 @@ def stage2_process(
 ):
     start = time.time()
     print('Start stage2_process')
-    print(a_prompt)
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
         return None, None, None
@@ -175,7 +175,7 @@ def stage2_process(
     model.current_model = model_select
     input_image = HWC3(input_image)
     input_image = upscale_image(input_image, upscale, unit_resolution=32,
-                                min_size=…)
+                                min_size=min_size)
 
     LQ = np.array(input_image) / 255.0
     LQ = np.power(LQ, gamma_correction)
@@ -228,6 +228,7 @@ def stage2_process(
         ((str(hours) + " h, ") if hours != 0 else "") + \
         ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
         str(secondes) + " sec."
+    print(information)
 
     # Only one image can be shown in the slider
     return [noisy_image] + [results[0]], gr.update(format = output_format, value = [noisy_image] + results), gr.update(value = information, visible = True), event_id
@@ -291,7 +292,7 @@ title_html = """
 LlaVa is not integrated in this demo. The content added by SUPIR is imagination, not real-world information.
 The aim of SUPIR is the beauty and the illustration.
 Most of the processes only last few minutes.
-This demo can handle huge images but the process will be aborted if it lasts more than …
+This demo can handle huge images but the process will be aborted if it lasts more than 5 min.
 
 <p><center><a href="https://arxiv.org/abs/2401.13627">Paper</a>   <a href="http://supir.xpixel.group/">Project Page</a>   <a href="https://github.com/Fanghua-Yu/SUPIR/blob/master/assets/DemoGuide.png">How to play</a>   <a href="https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai">Local Install Guide</a></center></p>
 """
@@ -359,6 +360,7 @@ with gr.Blocks(title="SUPIR") as interface:
                 edm_steps = gr.Slider(label="Steps", info="lower=faster, higher=more details", minimum=1, maximum=200, value=default_setting.edm_steps if torch.cuda.device_count() > 0 else 1, step=1)
                 num_samples = gr.Slider(label="Num Samples", info="Number of generated results", minimum=1, maximum=4 if not args.use_image_slider else 1
                                         , value=1, step=1)
+                min_size = gr.Slider(label="Minimum size", info="Minimum height, minimum width", minimum=32, value=1024, step=32)
                 downscale = gr.Radio([1, 2, 3, 4, 5, 6, 7, 8], label="Pre-downscale factor", info="Reducing blurred image reduce the process time", value=1, interactive=True)
                 with gr.Row():
                     with gr.Column():
@@ -449,6 +451,7 @@ with gr.Blocks(title="SUPIR") as interface:
             a_prompt,
             n_prompt,
            num_samples,
+            min_size,
             downscale,
             upscale,
             edm_steps,
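Two things change above: each GPU-bound handler now requests a 360-second ZeroGPU allocation via `@spaces.GPU(duration=360)`, and the minimum output size of the upscaler becomes a user-facing `min_size` slider instead of a value fixed at the call site. The call site pins down what the helper must do with the new argument: apply the `upscale` factor, keep the shorter side at or above `min_size`, and snap dimensions to `unit_resolution=32`. The real `upscale_image` ships with SUPIR; the sketch below only illustrates that contract, and its name and rounding details are assumptions.

import numpy as np
from PIL import Image

def upscale_image_sketch(img, upscale, unit_resolution=32, min_size=1024):
    # Hypothetical stand-in for SUPIR's upscale_image, for illustration only.
    # Apply `upscale`, enlarge further if the shorter side would still fall
    # under `min_size`, then round both sides up to multiples of
    # `unit_resolution` so the diffusion model receives valid dimensions.
    w, h = img.size
    scale = max(upscale, min_size / min(w, h))
    new_w = int(np.ceil(w * scale / unit_resolution)) * unit_resolution
    new_h = int(np.ceil(h * scale / unit_resolution)) * unit_resolution
    return img.resize((new_w, new_h), Image.LANCZOS)

The slider's default of 1024 with a step of 32 lines up with the `unit_resolution=32` already passed at the call site, so every value the user can reach produces model-friendly dimensions.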
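The two `min_size,` insertions are coupled: Gradio binds `inputs=` components to handler parameters positionally, so the new slider has to occupy the same slot in the event's inputs list as `min_size` does in the `stage2_process` signature, between `num_samples` and `downscale`. A self-contained toy of the same pattern, with illustrative component names rather than the demo's real ones:

import gradio as gr

def process(upscale, min_size):
    # Arguments arrive in the same order as the `inputs` list below.
    return f"upscale x{upscale}, shorter side kept >= {min_size}px"

with gr.Blocks() as demo:
    upscale = gr.Slider(label="Upscale factor", minimum=1, maximum=8, value=2, step=1)
    min_size = gr.Slider(label="Minimum size", minimum=32, maximum=2048, value=1024, step=32)
    result = gr.Textbox(label="Result")
    run = gr.Button("Run")
    # Positional mapping: inputs[0] -> upscale, inputs[1] -> min_size.
    run.click(fn=process, inputs=[upscale, min_size], outputs=[result])

demo.launch()

If the inputs list and the signature drift out of order, the handler silently receives the wrong values, which is why the two hunks move together in this commit.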