Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -11,6 +11,7 @@ from gradio_imageslider import ImageSlider
|
|
11 |
from PIL import Image
|
12 |
from huggingface_hub import snapshot_download
|
13 |
import requests
|
|
|
14 |
|
15 |
# For ESRGAN (requires pip install basicsr gfpgan)
|
16 |
try:
|
@@ -151,6 +152,7 @@ def enhance_image(
|
|
151 |
upscale_factor,
|
152 |
denoising_strength,
|
153 |
custom_prompt,
|
|
|
154 |
progress=gr.Progress(track_tqdm=True),
|
155 |
):
|
156 |
"""Main enhancement function"""
|
@@ -162,11 +164,13 @@ def enhance_image(
|
|
162 |
dtype = torch.bfloat16 if device == "cuda" else torch.float32
|
163 |
|
164 |
print(f"🔥 Loading FLUX Img2Img on {device}...")
|
|
|
165 |
pipe = FluxImg2ImgPipeline.from_pretrained(
|
166 |
"black-forest-labs/FLUX.1-dev",
|
167 |
torch_dtype=dtype,
|
168 |
low_cpu_mem_usage=True,
|
169 |
-
device_map="balanced"
|
|
|
170 |
)
|
171 |
pipe.enable_vae_tiling()
|
172 |
pipe.enable_vae_slicing()
|
@@ -192,11 +196,13 @@ def enhance_image(
|
|
192 |
device = "cpu"
|
193 |
dtype = torch.float32
|
194 |
# Reload on CPU if needed
|
|
|
195 |
pipe = FluxImg2ImgPipeline.from_pretrained(
|
196 |
"black-forest-labs/FLUX.1-dev",
|
197 |
torch_dtype=dtype,
|
198 |
low_cpu_mem_usage=True,
|
199 |
-
device_map=None
|
|
|
200 |
)
|
201 |
pipe.enable_vae_tiling()
|
202 |
pipe.enable_vae_slicing()
|
@@ -241,7 +247,7 @@ def enhance_image(
|
|
241 |
num_inference_steps,
|
242 |
3.5, # Updated guidance_scale to match workflow (3.5)
|
243 |
generator,
|
244 |
-
tile_size=
|
245 |
overlap=32
|
246 |
)
|
247 |
|
@@ -326,6 +332,15 @@ with gr.Blocks(css=css, title="🎨 AI Image Upscaler - FLUX") as demo:
|
|
326 |
value=0.3,
|
327 |
info="Controls how much the image is transformed"
|
328 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
329 |
|
330 |
with gr.Row():
|
331 |
randomize_seed = gr.Checkbox(
|
@@ -370,6 +385,7 @@ with gr.Blocks(css=css, title="🎨 AI Image Upscaler - FLUX") as demo:
|
|
370 |
upscale_factor,
|
371 |
denoising_strength,
|
372 |
custom_prompt,
|
|
|
373 |
],
|
374 |
outputs=[result_slider]
|
375 |
)
|
|
|
11 |
from PIL import Image
|
12 |
from huggingface_hub import snapshot_download
|
13 |
import requests
|
14 |
+
from transformers import T5TokenizerFast
|
15 |
|
16 |
# For ESRGAN (requires pip install basicsr gfpgan)
|
17 |
try:
|
|
|
152 |
upscale_factor,
|
153 |
denoising_strength,
|
154 |
custom_prompt,
|
155 |
+
tile_size,
|
156 |
progress=gr.Progress(track_tqdm=True),
|
157 |
):
|
158 |
"""Main enhancement function"""
|
|
|
164 |
dtype = torch.bfloat16 if device == "cuda" else torch.float32
|
165 |
|
166 |
print(f"🔥 Loading FLUX Img2Img on {device}...")
|
167 |
+
tokenizer_2 = T5TokenizerFast.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="text_encoder_2")
|
168 |
pipe = FluxImg2ImgPipeline.from_pretrained(
|
169 |
"black-forest-labs/FLUX.1-dev",
|
170 |
torch_dtype=dtype,
|
171 |
low_cpu_mem_usage=True,
|
172 |
+
device_map="balanced",
|
173 |
+
tokenizer_2=tokenizer_2
|
174 |
)
|
175 |
pipe.enable_vae_tiling()
|
176 |
pipe.enable_vae_slicing()
|
|
|
196 |
device = "cpu"
|
197 |
dtype = torch.float32
|
198 |
# Reload on CPU if needed
|
199 |
+
tokenizer_2 = T5TokenizerFast.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="text_encoder_2")
|
200 |
pipe = FluxImg2ImgPipeline.from_pretrained(
|
201 |
"black-forest-labs/FLUX.1-dev",
|
202 |
torch_dtype=dtype,
|
203 |
low_cpu_mem_usage=True,
|
204 |
+
device_map=None,
|
205 |
+
tokenizer_2=tokenizer_2
|
206 |
)
|
207 |
pipe.enable_vae_tiling()
|
208 |
pipe.enable_vae_slicing()
|
|
|
247 |
num_inference_steps,
|
248 |
3.5, # Updated guidance_scale to match workflow (3.5)
|
249 |
generator,
|
250 |
+
tile_size=tile_size,
|
251 |
overlap=32
|
252 |
)
|
253 |
|
|
|
332 |
value=0.3,
|
333 |
info="Controls how much the image is transformed"
|
334 |
)
|
335 |
+
|
336 |
+
tile_size = gr.Slider(
|
337 |
+
label="Tile Size",
|
338 |
+
minimum=256,
|
339 |
+
maximum=2048,
|
340 |
+
step=64,
|
341 |
+
value=1024,
|
342 |
+
info="Size of tiles for processing (larger = faster but more memory)"
|
343 |
+
)
|
344 |
|
345 |
with gr.Row():
|
346 |
randomize_seed = gr.Checkbox(
|
|
|
385 |
upscale_factor,
|
386 |
denoising_strength,
|
387 |
custom_prompt,
|
388 |
+
tile_size
|
389 |
],
|
390 |
outputs=[result_slider]
|
391 |
)
|