Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -65,6 +65,8 @@ pipe = FluxImg2ImgPipeline.from_pretrained(
|
|
65 |
torch_dtype=torch.bfloat16
|
66 |
)
|
67 |
pipe.to(device)
|
|
|
|
|
68 |
|
69 |
print("✅
All models loaded successfully!")
|
70 |
|
@@ -117,7 +119,8 @@ def process_input(input_image, upscale_factor):
|
|
117 |
(
|
118 |
int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
|
119 |
int(MAX_PIXEL_BUDGET**0.5 // aspect_ratio // upscale_factor),
|
120 |
-
)
|
|
|
121 |
)
|
122 |
was_resized = True
|
123 |
|
@@ -126,7 +129,7 @@ def process_input(input_image, upscale_factor):
|
|
126 |
w = w - w % 8
|
127 |
h = h - h % 8
|
128 |
|
129 |
-
return input_image.resize((w, h)), w_original, h_original, was_resized
|
130 |
|
131 |
|
132 |
def load_image_from_url(url):
|
@@ -180,9 +183,9 @@ def enhance_image(
|
|
180 |
else:
|
181 |
prompt = custom_prompt if custom_prompt.strip() else ""
|
182 |
|
183 |
-
# Rescale with upscale factor
|
184 |
w, h = input_image.size
|
185 |
-
control_image = input_image.resize((w * upscale_factor, h * upscale_factor))
|
186 |
|
187 |
generator = torch.Generator().manual_seed(seed)
|
188 |
|
@@ -204,7 +207,7 @@ def enhance_image(
|
|
204 |
gr.Info(f"🔄 Resizing output to target size: {w_original * upscale_factor}x{h_original * upscale_factor}")
|
205 |
|
206 |
# Resize to target desired size
|
207 |
-
final_image = image.resize((w_original * upscale_factor, h_original * upscale_factor))
|
208 |
|
209 |
return [true_input_image, final_image], seed, generated_caption if use_generated_caption else ""
|
210 |
|
@@ -268,7 +271,7 @@ with gr.Blocks(css=css, title="🎨 AI Image Enhancer - Florence-2 + FLUX") as d
|
|
268 |
minimum=8,
|
269 |
maximum=50,
|
270 |
step=1,
|
271 |
-
value=
|
272 |
info="More steps = better quality but slower"
|
273 |
)
|
274 |
|
@@ -287,7 +290,7 @@ with gr.Blocks(css=css, title="🎨 AI Image Enhancer - Florence-2 + FLUX") as d
|
|
287 |
maximum=1.0,
|
288 |
step=0.05,
|
289 |
value=0.3,
|
290 |
-
info="Controls how much the image is transformed
|
291 |
)
|
292 |
|
293 |
with gr.Row():
|
@@ -337,8 +340,8 @@ with gr.Blocks(css=css, title="🎨 AI Image Enhancer - Florence-2 + FLUX") as d
|
|
337 |
# Examples
|
338 |
gr.Examples(
|
339 |
examples=[
|
340 |
-
[None, "https://upload.wikimedia.org/wikipedia/commons/thumb/a/a7/Example.jpg/800px-Example.jpg", 42, False,
|
341 |
-
[None, "https://picsum.photos/512/512", 123, False, 25, 3, 4.0, 0.
|
342 |
],
|
343 |
inputs=[
|
344 |
input_image,
|
@@ -377,10 +380,10 @@ with gr.Blocks(css=css, title="🎨 AI Image Enhancer - Florence-2 + FLUX") as d
|
|
377 |
<h4>💡 How it works:</h4>
|
378 |
<ol>
|
379 |
<li><strong>Florence-2</strong> analyzes your image and generates a detailed caption</li>
|
380 |
-
<li
|
381 |
-
<li>
|
382 |
</ol>
|
383 |
-
<p><strong>Note:</strong>
|
384 |
</div>
|
385 |
""")
|
386 |
|
|
|
65 |
torch_dtype=torch.bfloat16
|
66 |
)
|
67 |
pipe.to(device)
|
68 |
+
pipe.enable_vae_tiling()
|
69 |
+
pipe.enable_vae_slicing()
|
70 |
|
71 |
print("✅
All models loaded successfully!")
|
72 |
|
|
|
119 |
(
|
120 |
int(aspect_ratio * MAX_PIXEL_BUDGET**0.5 // upscale_factor),
|
121 |
int(MAX_PIXEL_BUDGET**0.5 // aspect_ratio // upscale_factor),
|
122 |
+
),
|
123 |
+
resample=Image.LANCZOS
|
124 |
)
|
125 |
was_resized = True
|
126 |
|
|
|
129 |
w = w - w % 8
|
130 |
h = h - h % 8
|
131 |
|
132 |
+
return input_image.resize((w, h), resample=Image.LANCZOS), w_original, h_original, was_resized
|
133 |
|
134 |
|
135 |
def load_image_from_url(url):
|
|
|
183 |
else:
|
184 |
prompt = custom_prompt if custom_prompt.strip() else ""
|
185 |
|
186 |
+
# Rescale with upscale factor using LANCZOS
|
187 |
w, h = input_image.size
|
188 |
+
control_image = input_image.resize((w * upscale_factor, h * upscale_factor), resample=Image.LANCZOS)
|
189 |
|
190 |
generator = torch.Generator().manual_seed(seed)
|
191 |
|
|
|
207 |
gr.Info(f"🔄 Resizing output to target size: {w_original * upscale_factor}x{h_original * upscale_factor}")
|
208 |
|
209 |
# Resize to target desired size
|
210 |
+
final_image = image.resize((w_original * upscale_factor, h_original * upscale_factor), resample=Image.LANCZOS)
|
211 |
|
212 |
return [true_input_image, final_image], seed, generated_caption if use_generated_caption else ""
|
213 |
|
|
|
271 |
minimum=8,
|
272 |
maximum=50,
|
273 |
step=1,
|
274 |
+
value=25,
|
275 |
info="More steps = better quality but slower"
|
276 |
)
|
277 |
|
|
|
290 |
maximum=1.0,
|
291 |
step=0.05,
|
292 |
value=0.3,
|
293 |
+
info="Controls how much the image is transformed"
|
294 |
)
|
295 |
|
296 |
with gr.Row():
|
|
|
340 |
# Examples
|
341 |
gr.Examples(
|
342 |
examples=[
|
343 |
+
[None, "https://upload.wikimedia.org/wikipedia/commons/thumb/a/a7/Example.jpg/800px-Example.jpg", 42, False, 25, 2, 3.5, 0.3, True, ""],
|
344 |
+
[None, "https://picsum.photos/512/512", 123, False, 25, 3, 4.0, 0.3, True, ""],
|
345 |
],
|
346 |
inputs=[
|
347 |
input_image,
|
|
|
380 |
<h4>💡 How it works:</h4>
|
381 |
<ol>
|
382 |
<li><strong>Florence-2</strong> analyzes your image and generates a detailed caption</li>
|
383 |
+
<li>Initial upscale with LANCZOS interpolation</li>
|
384 |
+
<li><strong>FLUX Img2Img</strong> enhances the upscaled image with AI diffusion guided by the caption</li>
|
385 |
</ol>
|
386 |
+
<p><strong>Note:</strong> Output limited to 4096x4096 pixels total budget to prevent memory issues.</p>
|
387 |
</div>
|
388 |
""")
|
389 |
|