Update app.py
app.py CHANGED
@@ -8,12 +8,14 @@ from PIL import Image
 from diffusers import FluxKontextPipeline
 from diffusers.utils import load_image
 
+# down to 22 steps to try and keep this ~<30 seconds so it will generally work in claude.ai - which doesn't reset timeout with notifications.
+
 MAX_SEED = np.iinfo(np.int32).max
 
 pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
 
 @spaces.GPU
-def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=
+def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=22, progress=gr.Progress(track_tqdm=True)):
     """
     Perform image editing using the FLUX.1 Kontext pipeline.
 
@@ -60,7 +62,7 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
     ).images[0]
     return image, seed, gr.Button(visible=True)
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=25)
 def infer_example(input_image, prompt):
     image, seed, _ = infer(input_image, prompt)
     return image, seed
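For context, here is a minimal sketch of how the updated infer() presumably drives the pipeline with the new steps=22 default. The body of infer() is not part of this diff, so everything beyond the visible signature (the randomize_seed handling, the exact pipe(...) keyword arguments, and the generator setup) is an assumption, not the Space's actual code.

import numpy as np
import torch
import gradio as gr
import spaces
from diffusers import FluxKontextPipeline

MAX_SEED = np.iinfo(np.int32).max

pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16
).to("cuda")

@spaces.GPU
def infer(input_image, prompt, seed=42, randomize_seed=False,
          guidance_scale=2.5, steps=22, progress=gr.Progress(track_tqdm=True)):
    # Assumed body: pick a seed, then run the Kontext edit with the reduced
    # step count so the whole GPU call stays roughly under 30 seconds.
    if randomize_seed:
        seed = int(np.random.randint(0, MAX_SEED))
    image = pipe(
        image=input_image,
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=steps,  # now defaults to 22
        generator=torch.Generator(device="cuda").manual_seed(seed),
    ).images[0]
    return image, seed, gr.Button(visible=True)

The companion change, @spaces.GPU(duration=25), requests only a 25-second ZeroGPU slot for the example path, which is consistent with a 22-step edit reliably finishing inside that budget.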