Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -8,6 +8,10 @@ import random
 import numpy as np
 import os
 import spaces
+import huggingface_hub
+import time
+
+huggingface_hub.constants.HF_HUB_DOWNLOAD_TIMEOUT = 60
 
 try:
     import basicsr
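Note on the timeout change: the diff raises the download timeout by assigning to huggingface_hub.constants.HF_HUB_DOWNLOAD_TIMEOUT after import. As an aside (not part of this change), huggingface_hub also honours the HF_HUB_DOWNLOAD_TIMEOUT environment variable, which it reads when the library is imported, so the same 60-second timeout can be set without touching the constants module. A minimal sketch, assuming the variable is set before the first huggingface_hub import:

import os

os.environ["HF_HUB_DOWNLOAD_TIMEOUT"] = "60"  # seconds; the library default is 10

import huggingface_hub  # picks the value up at import time
print(huggingface_hub.constants.HF_HUB_DOWNLOAD_TIMEOUT)  # expected: 60

Setting the environment variable before import avoids relying on the constant being re-read after it has been patched at runtime.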
@@ -30,16 +34,24 @@ pipe = FluxImg2ImgPipeline.from_pretrained(
 ).to(device)
 pipe.enable_vae_tiling() # To help with memory for large images
 
-# Initialize Florence model with float32 to avoid dtype mismatch
-
-
-
-
-
-
-
-
-
+# Initialize Florence model with float32 to avoid dtype mismatch, with retry
+for attempt in range(5):
+    try:
+        florence_model = AutoModelForCausalLM.from_pretrained(
+            'microsoft/Florence-2-large',
+            trust_remote_code=True,
+            torch_dtype=torch.float32
+        ).to(device).eval()
+        florence_processor = AutoProcessor.from_pretrained(
+            'microsoft/Florence-2-large',
+            trust_remote_code=True
+        )
+        break
+    except Exception as e:
+        print(f"Attempt {attempt+1} to load Florence-2 failed: {e}")
+        time.sleep(10)
+else:
+    raise RuntimeError("Failed to load Florence-2 after multiple attempts")
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
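Note on the retry block: it leans on Python's for/else, where the else clause runs only if the loop finishes without hitting break, so the RuntimeError fires only after all five attempts have failed. A minimal, self-contained sketch of the same pattern; the load callable and the 5-attempt / 10-second values are illustrative stand-ins for the Florence-2 from_pretrained calls:

import time

def load_with_retry(load, attempts=5, delay=10):
    result = None
    for attempt in range(attempts):
        try:
            result = load()      # e.g. the pair of from_pretrained() calls
            break                # success: skip the loop's else clause
        except Exception as e:   # e.g. a Hub download timeout
            print(f"Attempt {attempt + 1} failed: {e}")
            time.sleep(delay)
    else:
        # reached only if no iteration hit `break`, i.e. every attempt failed
        raise RuntimeError(f"Failed after {attempts} attempts")
    return result

Wrapping the two from_pretrained calls in a helper like this would be a purely optional refactoring; the diff keeps the loop inline.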
@@ -113,7 +125,8 @@ def tiled_flux_img2img(image, prompt, strength, num_inference_steps, guidance_sc
 mask = Image.new('L', (tile_right - tile_left, effective_overlap))
 for i in range(mask.width):
     for j in range(mask.height):
-
+        divisor = effective_overlap - 1 if effective_overlap > 1 else 1
+        mask.putpixel((i, j), int(255 * (j / divisor)))
 blend_region = Image.composite(
     generated_tile.crop((0, 0, mask.width, mask.height)),
     result.crop((tile_left, tile_top, tile_right, tile_top + mask.height)),
@@ -131,7 +144,8 @@ def tiled_flux_img2img(image, prompt, strength, num_inference_steps, guidance_sc
 mask_h = Image.new('L', (effective_overlap_h, tile_bottom - tile_top))
 for i in range(mask_h.width):
     for j in range(mask_h.height):
-
+        divisor_h = effective_overlap_h - 1 if effective_overlap_h > 1 else 1
+        mask_h.putpixel((i, j), int(255 * (i / divisor_h)))
 blend_region_h = Image.composite(
     generated_tile.crop((0, 0, mask_h.width, mask_h.height)),
     result.crop((tile_left, tile_top, tile_left + mask_h.width, tile_bottom)),
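Note on the two blend hunks: each builds a linear alpha ramp, 0 to 255 top-to-bottom across the vertical overlap in the first hunk and 0 to 255 left-to-right across the horizontal overlap in the second, and the divisor guard avoids a ZeroDivisionError when the overlap is a single pixel. A self-contained sketch of the vertical case with made-up tile contents and a 32-pixel overlap (Image.composite takes image1 where the mask is 255 and image2 where it is 0):

from PIL import Image

overlap = 32
existing = Image.new('RGB', (256, overlap), 'red')    # stands in for result.crop(...)
generated = Image.new('RGB', (256, overlap), 'blue')  # stands in for generated_tile.crop(...)

mask = Image.new('L', (256, overlap))
divisor = overlap - 1 if overlap > 1 else 1           # same guard as in the diff
for i in range(mask.width):
    for j in range(mask.height):
        mask.putpixel((i, j), int(255 * (j / divisor)))  # 0 on the top row, 255 on the bottom row

blended = Image.composite(generated, existing, mask)  # top row stays `existing`, bottom row is fully `generated`

The per-pixel putpixel loop mirrors the diff; a vectorised ramp (for example np.linspace plus Image.fromarray) would do the same job faster, but that would be a separate optimisation, not something this change makes.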