Update app.py
app.py CHANGED
@@ -154,7 +154,7 @@ depthanything_v2 = NODE_CLASS_MAPPINGS["DepthAnything_V2"]()
 imageresize = NODE_CLASS_MAPPINGS["ImageResize+"]()
 
 @spaces.GPU
-def generate_image(prompt: str, structure_image: str, depth_strength: float, sty
+def generate_image(prompt, structure_image, style_image, depth_strength=15, style_strength=0.5, progress=gr.Progress(track_tqdm=True)) -> str:
     """Main generation function that processes inputs and returns the path to the generated image."""
     with torch.inference_mode():
         # Set up CLIP
@@ -288,9 +288,16 @@ def generate_image(prompt: str, structure_image: str, depth_strength: float, sty
     return saved_path
 
 # Create Gradio interface
+
+examples = [
+    ["", "mona.png", "receita-tacos.webp"],
+    ["a woman looking at a house catching fire on the background", "disaster_girl.png", "abaporu.jpg"],
+    ["istanbul aerial, dramatic photography", "natasha.png", "istambul.png"],
+]
+
 with gr.Blocks() as app:
-    gr.Markdown("#
-
+    gr.Markdown("# FLUX Style Shaping")
+    gr.Markdown("## Flux[dev] Redux + Flux[dev] Depth ComfyUI workflow by [CitizenPlain](https://x.com/CitizenPlain) running directly on Gradio. [workflow](https://gist.github.com/nathanshipley/7a9ac1901adde76feebe58d558026f68) - [how to convert your comfy workflow to gradio (soon)](#)")
     with gr.Row():
         with gr.Column():
             prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...")
@@ -305,10 +312,16 @@ with gr.Blocks() as app:
 
         with gr.Column():
             output_image = gr.Image(label="Generated Image")
-
+    gr.Examples(
+        examples=examples,
+        inputs=[prompt_input, structure_image, style_image],
+        outputs=[output_image],
+        fn=generate_image,
+        cache_examples="lazy"
+    )
     generate_btn.click(
         fn=generate_image,
-        inputs=[prompt_input, structure_image,
+        inputs=[prompt_input, structure_image, style_image, depth_strength, style_strength],
         outputs=[output_image]
     )
 
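
For reference, here is a minimal, self-contained sketch of how the pieces touched by this commit fit together: the module-level examples list, the gr.Examples block that pre-fills the prompt and the two images, and the generate_btn.click handler that passes all five inputs to generate_image. Only the names and input/output lists come from the diff; the component constructors for structure_image, style_image, depth_strength and style_strength, and the stubbed generate_image body, are assumptions (the real function is decorated with @spaces.GPU and runs the ComfyUI workflow under torch.inference_mode()).

# Minimal sketch of the Gradio wiring in this commit; not the full Space.
import gradio as gr

def generate_image(prompt, structure_image, style_image,
                   depth_strength=15, style_strength=0.5,
                   progress=gr.Progress(track_tqdm=True)) -> str:
    # Stub: the real app runs the Flux Depth + Redux ComfyUI workflow on GPU
    # and returns the path of the saved image.
    return structure_image

# Example rows map onto the first three inputs of generate_image.
# The image files are assumed to sit next to app.py.
examples = [
    ["", "mona.png", "receita-tacos.webp"],
    ["a woman looking at a house catching fire on the background", "disaster_girl.png", "abaporu.jpg"],
    ["istanbul aerial, dramatic photography", "natasha.png", "istambul.png"],
]

with gr.Blocks() as app:
    gr.Markdown("# FLUX Style Shaping")
    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...")
            structure_image = gr.Image(label="Structure Image", type="filepath")
            depth_strength = gr.Slider(0, 50, value=15, label="Depth Strength")
            style_image = gr.Image(label="Style Image", type="filepath")
            style_strength = gr.Slider(0, 1, value=0.5, label="Style Strength")
            generate_btn = gr.Button("Generate")
        with gr.Column():
            output_image = gr.Image(label="Generated Image")

    # gr.Examples fills the prompt and the two images; because fn and outputs
    # are given, Gradio can run (or lazily cache) the generation for each row.
    gr.Examples(
        examples=examples,
        inputs=[prompt_input, structure_image, style_image],
        outputs=[output_image],
        fn=generate_image,
        cache_examples="lazy",
    )

    # The button passes all five user inputs to generate_image.
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt_input, structure_image, style_image, depth_strength, style_strength],
        outputs=[output_image],
    )

if __name__ == "__main__":
    app.launch()

Note that the click handler also forwards depth_strength and style_strength, while the example rows only pre-fill the prompt and the two images.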