Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -11,6 +11,9 @@ from pipeline_fill_sd_xl import StableDiffusionXLFillPipeline
 
 from PIL import Image, ImageDraw
 import numpy as np
+import cv2
+import tempfile
+import os
 
 config_file = hf_hub_download(
     "xinsir/controlnet-union-sdxl-1.0",
@@ -43,7 +46,6 @@ pipe = StableDiffusionXLFillPipeline.from_pretrained(
 
 pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config)
 
-
 def can_expand(source_width, source_height, target_width, target_height, alignment):
     """Checks if the image can be expanded based on the alignment."""
     if alignment in ("Left", "Right") and source_width >= target_width:
@@ -89,6 +91,7 @@ def infer(image, width, height, overlap_width, num_inference_steps, resize_optio
 
     if not can_expand(source.width, source.height, target_size[0], target_size[1], alignment):
         alignment = "Middle"
+
     # Calculate margins based on alignment
     if alignment == "Middle":
         margin_x = (target_size[0] - source.width) // 2
@@ -200,6 +203,41 @@ def select_the_right_preset(user_width, user_height):
 def toggle_custom_resize_slider(resize_option):
     return gr.update(visible=(resize_option == "Custom"))
 
+def create_video_from_images(image_list, fps=8):
+    if not image_list:
+        return None
+
+    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_video_file:
+        video_path = temp_video_file.name
+
+    frame = np.array(image_list[0])
+    height, width, layers = frame.shape
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    video = cv2.VideoWriter(video_path, fourcc, fps, (width, height))
+
+    for image in image_list:
+        video.write(cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR))
+
+    video.release()
+    return video_path
+
+def loop_outpainting(image, width, height, overlap_width, num_inference_steps, resize_option, custom_resize_size, prompt_input, alignment, num_iterations):
+    image_list = []
+    current_image = image
+
+    for _ in range(num_iterations):
+        # Generate new image
+        result = next(infer(current_image, width, height, overlap_width, num_inference_steps, resize_option, custom_resize_size, prompt_input, alignment))
+        new_image = result[1]
+        image_list.append(new_image)
+
+        # Use new image as input for next iteration
+        current_image = new_image
+
+    # Create video from image list
+    video_path = create_video_from_images(image_list)
+    return video_path
+
 css = """
 .gradio-container {
     width: 1200px !important;
@@ -288,6 +326,8 @@ with gr.Blocks(css=css) as demo:
                     value=512,
                     visible=False
                 )
+                with gr.Row():
+                    num_iterations = gr.Slider(label="Number of iterations", minimum=2, maximum=1024, step=1, value=10)
 
                 gr.Examples(
                     examples=[
@@ -305,8 +345,11 @@ with gr.Blocks(css=css) as demo:
                     label="Generated Image",
                 )
                 use_as_input_button = gr.Button("Use as Input Image", visible=False)
+                loop_button = gr.Button("Loop Outpainting")
+                video_output = gr.Video(label="Outpainting Video")
 
     def use_output_as_input(output_image):
+        """Sets the generated output as the new input image."""
         return gr.update(value=output_image[1])
 
     use_as_input_button.click(
@@ -373,4 +416,11 @@ with gr.Blocks(css=css) as demo:
         outputs=use_as_input_button,
     )
 
+    loop_button.click(
+        fn=loop_outpainting,
+        inputs=[input_image, width_slider, height_slider, overlap_width, num_inference_steps,
+                resize_option, custom_resize_size, prompt_input, alignment_dropdown, num_iterations],
+        outputs=video_output,
+    )
+
 demo.queue(max_size=12).launch(share=False)
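Note: the following is a minimal, self-contained sketch (not part of the commit) for sanity-checking the new create_video_from_images helper with synthetic frames. It assumes opencv-python, numpy, and Pillow are installed and that all frames share the same size; the helper body simply mirrors the function added above.

# Sketch: exercise the new create_video_from_images() helper with synthetic
# frames. Assumes opencv-python, numpy, and Pillow are installed and that
# every frame in the list has the same width and height.
import cv2
import numpy as np
import tempfile
from PIL import Image


def create_video_from_images(image_list, fps=8):
    # Same logic as the helper added in this commit.
    if not image_list:
        return None

    # Reserve a temporary .mp4 path for the output video.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_video_file:
        video_path = temp_video_file.name

    frame = np.array(image_list[0])
    height, width, layers = frame.shape
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # mp4v is commonly available in opencv-python wheels
    video = cv2.VideoWriter(video_path, fourcc, fps, (width, height))

    for image in image_list:
        # PIL frames are RGB; OpenCV expects BGR.
        video.write(cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR))

    video.release()
    return video_path


if __name__ == "__main__":
    # Ten solid-colour 512x512 frames fading from black to red.
    frames = [Image.new("RGB", (512, 512), (25 * i, 0, 0)) for i in range(10)]
    print(create_video_from_images(frames))  # prints the path to a temporary .mp4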
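A second sketch (also not part of the commit) isolates the feedback pattern that loop_outpainting relies on: infer is consumed as a generator, next(...) takes its first yielded pair, and element [1] of that pair is appended to the frame list and fed back in as the next input. fake_infer below is a hypothetical stand-in that pads the image instead of running the SDXL pipeline, so only the control flow is illustrated.

# Sketch of the feedback loop used by loop_outpainting(), with the diffusion
# call replaced by a stand-in generator. fake_infer() is hypothetical; per the
# diff, the real infer() yields pairs where index 1 is the finished image.
from PIL import Image, ImageOps


def fake_infer(image, expand_px=64):
    # Stand-in generator: "outpaints" by padding the image with grey.
    padded = ImageOps.expand(image, border=expand_px, fill=(128, 128, 128))
    yield (image, padded)  # (preview, final) -- mirrors the result[1] usage


def loop_outpaint(image, num_iterations=3):
    image_list = []
    current_image = image
    for _ in range(num_iterations):
        result = next(fake_infer(current_image))  # take the first yielded pair
        new_image = result[1]                     # the finished frame
        image_list.append(new_image)
        current_image = new_image                 # feed back as the next input
    return image_list


if __name__ == "__main__":
    frames = loop_outpaint(Image.new("RGB", (256, 256), (200, 180, 120)))
    print([f.size for f in frames])  # each frame grows by 128 px per iteration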