Spaces: Running on Zero
Update app.py
app.py CHANGED

@@ -173,6 +173,15 @@ def tiled_flux_img2img(pipe, prompt, image, strength, steps, guidance, generator
     w, h = image.size
     output = image.copy()  # Start with the control image
 
+    # For handling long prompts: truncate for CLIP, full for T5
+    max_clip_tokens = pipe.tokenizer.model_max_length  # Typically 77
+    input_ids = pipe.tokenizer.encode(prompt, return_tensors="pt")
+    if input_ids.shape[1] > max_clip_tokens:
+        input_ids = input_ids[:, :max_clip_tokens]
+        prompt_clip = pipe.tokenizer.decode(input_ids[0], skip_special_tokens=True)
+    else:
+        prompt_clip = prompt
+
     for x in range(0, w, tile_size - overlap):
         for y in range(0, h, tile_size - overlap):
             tile_w = min(tile_size, w - x)
@@ -181,7 +190,8 @@ def tiled_flux_img2img(pipe, prompt, image, strength, steps, guidance, generator
 
             # Run Flux on tile
            gen_tile = pipe(
-                prompt=prompt,
+                prompt=prompt_clip,
+                prompt_2=prompt,
                 image=tile,
                 strength=strength,
                 num_inference_steps=steps,
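
The change splits the user prompt between Flux's two text encoders: in diffusers' Flux pipelines, `prompt` is consumed by the CLIP text encoder, which cannot see past its token limit (typically 77), while `prompt_2` is consumed by the T5 encoder, which accepts much longer text. Below is a minimal, hedged sketch of the same truncation logic factored into a standalone helper; the helper name and usage comment are illustrative assumptions, and in the Space the pipeline's own `pipe.tokenizer` (a CLIP tokenizer) plays the role of `clip_tokenizer`.

    from transformers import CLIPTokenizer

    def split_prompt_for_flux(prompt: str, clip_tokenizer: CLIPTokenizer) -> tuple[str, str]:
        """Return (clip_prompt, t5_prompt): the first fits CLIP's token limit,
        the second is the untouched full prompt intended for `prompt_2` (T5)."""
        max_clip_tokens = clip_tokenizer.model_max_length  # typically 77 for CLIP
        input_ids = clip_tokenizer.encode(prompt, return_tensors="pt")
        if input_ids.shape[1] > max_clip_tokens:
            # Keep only the first max_clip_tokens ids, then decode back to text so
            # the pipeline can re-tokenize it without overflowing the CLIP encoder.
            clip_prompt = clip_tokenizer.decode(
                input_ids[0, :max_clip_tokens], skip_special_tokens=True
            )
            return clip_prompt, prompt
        return prompt, prompt

    # Hypothetical usage inside the tile loop:
    #   prompt_clip, prompt_full = split_prompt_for_flux(prompt, pipe.tokenizer)
    #   gen_tile = pipe(prompt=prompt_clip, prompt_2=prompt_full, image=tile, ...)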