Update app.py
app.py
CHANGED
```diff
@@ -35,7 +35,10 @@ pipe = StableDiffusionControlLoraV3Pipeline.from_pretrained(
     unet=unet
 )
 
+# Performance optimizations
 pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+pipe.enable_attention_slicing()
+pipe.enable_vae_slicing()
 
 pipe.load_lora_weights(
     "models",
@@ -66,7 +69,7 @@ def get_canny_image(image, low_threshold=100, high_threshold=200):
     canny_image = np.stack([canny_image] * 3, axis=-1)
     return Image.fromarray(canny_image)
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=180)  # Reduced to 3 minutes
 def generate_image(input_image, prompt, negative_prompt, guidance_scale, steps, low_threshold, high_threshold, seed, progress=gr.Progress()):
     if input_image is None:
         raise gr.Error("Please provide an input image!")
@@ -88,8 +91,8 @@ def generate_image(input_image, prompt, negative_prompt, guidance_scale, steps,
     image = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
-        num_inference_steps=int(steps),
-        guidance_scale=float(guidance_scale),
+        num_inference_steps=int(steps),
+        guidance_scale=float(guidance_scale),
         image=canny_image,
         extra_condition_scale=1.0,
         generator=generator
@@ -107,14 +110,14 @@ def random_image_click():
         return Image.open(image_path)
     return None
 
-# Example data
+# Example data with reduced steps
 examples = [
     [
         "conditions/example1.jpg",
         "a futuristic cyberpunk city",
         "blurry, bad quality",
         7.5,
-
+        25,  # Reduced steps
         100,
         200,
         42
@@ -124,7 +127,7 @@ examples = [
         "a serene mountain landscape",
         "dark, gloomy",
         7.0,
-
+        25,  # Reduced steps
         120,
         180,
         123
@@ -136,7 +139,8 @@ with gr.Blocks() as demo:
     gr.Markdown(
         """
         # Control LoRA v3 Demo
-        ⚠️ Warning: This is a demo of Control LoRA v3.
+        ⚠️ Warning: This is a demo of Control LoRA v3. Generation might take a few minutes.
+        For better results with ZeroGPU, it's recommended to use 20-30 steps.
         The model uses edge detection to guide the image generation process.
         """
     )
@@ -159,7 +163,7 @@ with gr.Blocks() as demo:
             low_threshold = gr.Slider(minimum=1, maximum=255, value=100, label="Canny Low Threshold")
            high_threshold = gr.Slider(minimum=1, maximum=255, value=200, label="Canny High Threshold")
            guidance_scale = gr.Slider(minimum=1, maximum=20, value=7.5, label="Guidance Scale")
-            steps = gr.Slider(minimum=1, maximum=
+            steps = gr.Slider(minimum=1, maximum=50, value=25, label="Steps")  # Reduced max steps
            seed = gr.Textbox(label="Seed (empty for random)", placeholder="Enter a number for reproducible results")
            generate = gr.Button("Generate")
 
```
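For context on what the new performance flags buy: the UniPC scheduler keeps quality reasonable at around 20-30 steps, attention slicing lowers peak VRAM by computing attention in chunks, and VAE slicing decodes latents one image at a time. Below is a minimal, self-contained sketch of the same setup using the stock `StableDiffusionPipeline` for illustration, since the Space's custom `StableDiffusionControlLoraV3Pipeline` lives in its own module; the checkpoint name, `float16` dtype, and the sample prompt are placeholders, not taken from this commit.

```python
import torch
from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler

# Placeholder base checkpoint; the Space configures its own model earlier in app.py.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
)

# The same three optimizations as in this commit:
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)  # good results at ~20-30 steps
pipe.enable_attention_slicing()  # compute attention in slices: lower peak VRAM, small speed cost
pipe.enable_vae_slicing()        # decode latents one image at a time when batching

pipe = pipe.to("cuda")
image = pipe("a futuristic cyberpunk city", num_inference_steps=25, guidance_scale=7.5).images[0]
image.save("out.png")
```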
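The other half of the change is about fitting inside a ZeroGPU time slice: `@spaces.GPU(duration=180)` requests at most three minutes of GPU per call, and the steps slider is capped at 50 with a default of 25 so a single generation stays well inside that window. The following stripped-down sketch shows that pattern; the `run_pipeline` helper and the simplified UI are hypothetical stand-ins for the real pipeline call and layout in app.py.

```python
import gradio as gr
import spaces  # ZeroGPU helper available on Hugging Face Spaces

def run_pipeline(prompt: str, steps: int) -> str:
    # Hypothetical stand-in for the actual diffusion call.
    return f"generated '{prompt}' in {steps} steps"

@spaces.GPU(duration=180)  # request a GPU slice of at most 3 minutes per call
def generate(prompt, steps, progress=gr.Progress()):
    if not prompt:
        raise gr.Error("Please provide a prompt!")
    progress(0.0, desc="Starting")
    result = run_pipeline(prompt, int(steps))
    progress(1.0, desc="Done")
    return result

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    # Capping the slider at 50 with a default of 25 keeps one generation
    # comfortably inside the 180-second ZeroGPU window.
    steps = gr.Slider(minimum=1, maximum=50, value=25, label="Steps")
    out = gr.Textbox(label="Result")
    gr.Button("Generate").click(generate, [prompt, steps], out)

demo.launch()
```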