added example
- app.py +18 -33
- elephent.jpg → examples/images/elephent.jpg +0 -0
- examples/images/kitten.jpeg +0 -0
- examples/log.csv +2 -0
app.py CHANGED

@@ -18,16 +18,6 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 # pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
 # pipe = pipe.to(device)
 
-#     return editor
-#     image = editor.edit(target_prompt)
-#     return image
-
-# examples = [
-#     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-#     "An astronaut riding a green horse",
-#     "A delicious ceviche cheesecake slice",
-# ]
-
 # css = """
 # #col-container-1 {
 #     margin: 0 auto;
@@ -56,40 +46,39 @@ with gr.Blocks(css="style.css") as demo:
     @spaces.GPU
     def set_pipe(input_image, description_prompt, edit_guidance_scale, num_inference_steps=4,
                  num_inversion_steps=4, inversion_max_step=0.6, rnri_iterations=2, rnri_alpha=0.1, rnri_lr=0.2):
-
         scheduler_class = MyEulerAncestralDiscreteScheduler
-
+
         print('\n################## 1')
-        pipe_inversion = SDXLDDIMPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
+        pipe_inversion = SDXLDDIMPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)  # .to('cpu')
         print('\n################## 2')
-        pipe_inference = AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo",
+        pipe_inference = AutoPipelineForImage2Image.from_pretrained("stabilityai/sdxl-turbo",
+                                                                    use_safetensors=True)  # .to('cpu')
         print('\n################## 3')
         pipe_inference.scheduler = scheduler_class.from_config(pipe_inference.scheduler.config)
         pipe_inversion.scheduler = scheduler_class.from_config(pipe_inversion.scheduler.config)
         pipe_inversion.scheduler_inference = scheduler_class.from_config(pipe_inference.scheduler.config)
         print('\n################## 4')
 
-
         config = RunConfig(num_inference_steps=num_inference_steps,
                            num_inversion_steps=num_inversion_steps,
                            edit_guidance_scale=edit_guidance_scale,
                            inversion_max_step=inversion_max_step)
         image_editor = ImageEditorDemo(pipe_inversion, pipe_inference, input_image,
-
-
+                                       description_prompt, config, device,
+                                       [rnri_iterations, rnri_alpha, rnri_lr])
         print('\n################## 5')
         return image_editor, "Input has set!"
-
     @spaces.GPU
     def edit(editor, target_prompt):
         if editor is None:
             raise gr.Error("Set inputs before editing.")
-
-
-
+        # if device == "cuda":
+        #     image = editor.to(device).edit(target_prompt)
+        # else:
         image = editor.edit(target_prompt)
         return image
 
+
     gr.Markdown(f"""running on {power_device}""")
     with gr.Row():
         with gr.Column(elem_id="col-container-1"):
@@ -99,14 +88,13 @@ with gr.Blocks(css="style.css") as demo:
             with gr.Row():
                 description_prompt = gr.Text(
                     label="Image description",
-                    info
+                    info="Enter your image description ",
                     show_label=False,
                     max_lines=1,
                     placeholder="a cake on a table",
                     container=False,
                 )
 
-
             with gr.Accordion("Advanced Settings", open=False):
                 with gr.Row():
                     edit_guidance_scale = gr.Slider(
@@ -159,17 +147,13 @@ with gr.Blocks(css="style.css") as demo:
                 set_button = gr.Button("Set input image & description & settings", scale=1)
 
             is_set_text = gr.Text("", show_label=False)
-
-            # Create a loading indicator
-            loading_indicator = gr.Markdown(value="⏳ Processing...", visible=False)
-
         with gr.Column(elem_id="col-container-2"):
             result = gr.Image(label="Result")
 
             with gr.Row():
                 target_prompt = gr.Text(
                     label="Edit prompt",
-                    info
+                    info="Enter your edit prompt",
                     show_label=False,
                     max_lines=1,
                     placeholder="an oreo cake on a table",
@@ -179,10 +163,12 @@ with gr.Blocks(css="style.css") as demo:
             with gr.Row():
                 run_button = gr.Button("Edit", scale=1)
 
-
-
-
-
+            with gr.Row():
+                gr.Examples(
+                    examples='examples',
+                    inputs=[input_image, description_prompt, target_prompt, edit_guidance_scale, num_inference_steps,
+                            inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
+                )
 
     set_button.click(
         fn=set_pipe,
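
A note on the gr.Examples block added above: when examples is given as a string, Gradio treats it as the path to a directory of examples and reads the rows from a log.csv file inside it (added below in this commit). The equivalent inline form, sketched here with the single row that examples/log.csv contains, would pass the values directly:

gr.Examples(
    examples=[["examples/images/kitten.jpeg",
               "A kitten is sitting in a basket on a branch",
               "A lego kitten is sitting in a basket on a branch",
               1.2, 4, 0.6, 2, 0.1, 0.2]],
    inputs=[input_image, description_prompt, target_prompt, edit_guidance_scale,
            num_inference_steps, inversion_max_step, rnri_iterations, rnri_alpha, rnri_lr],
)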
@@ -197,7 +183,6 @@ with gr.Blocks(css="style.css") as demo:
         outputs=[result]
     )
 
-
 demo.queue().launch()
 
 # im = infer(input_image, description_prompt, target_prompt, edit_guidance_scale, num_inference_steps=4, num_inversion_steps=4,
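
For readers tracing the control flow: set_pipe returns the constructed ImageEditorDemo plus a status string, and edit expects that object back, so the two @spaces.GPU callbacks are presumably bridged by a gr.State component that holds the editor between clicks. A minimal self-contained sketch of that set-then-edit pattern (toy stand-ins for the real pipelines; every name below is illustrative, not the app's actual components):

import gradio as gr

def set_pipe(description):
    editor = {"description": description}  # stands in for the ImageEditorDemo object
    return editor, "Input has set!"

def edit(editor, target_prompt):
    if editor is None:
        raise gr.Error("Set inputs before editing.")
    return f"edited '{editor['description']}' toward '{target_prompt}'"

with gr.Blocks() as demo:
    editor_state = gr.State(None)  # carries the editor object between the two clicks
    description = gr.Text(label="Image description")
    status = gr.Text(show_label=False)
    target = gr.Text(label="Edit prompt")
    result = gr.Text(label="Result")
    gr.Button("Set").click(set_pipe, inputs=[description], outputs=[editor_state, status])
    gr.Button("Edit").click(edit, inputs=[editor_state, target], outputs=[result])

demo.queue().launch()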
elephent.jpg → examples/images/elephent.jpg RENAMED

File without changes

examples/images/kitten.jpeg ADDED

examples/log.csv ADDED

@@ -0,0 +1,2 @@
+input_image,description_prompt,target_prompt,edit_guidance_scale,num_inference_steps,inversion_max_step, rnri_iterations,rnri_alpha,rnri_lr
+./images/kitten.jpeg,A kitten is sitting in a basket on a branch,A lego kitten is sitting in a basket on a branch,1.2,4,0.6,2,0.1,0.2
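
About the new examples/log.csv: the header row mirrors the inputs list wired to gr.Examples, values are matched positionally, and file-valued entries (the input_image column) are paths relative to the examples directory, which is why the row uses ./images/kitten.jpeg. A quick sanity check that every row lines up with the header (a sketch, run from the repo root):

import csv

with open("examples/log.csv", newline="") as f:
    header, *rows = list(csv.reader(f))

print(f"{len(rows)} example row(s), {len(header)} column(s)")
for row in rows:
    assert len(row) == len(header), "row/header column count mismatch"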