Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -13,7 +13,7 @@ MAX_SEED = np.iinfo(np.int32).max
 pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
 
 @spaces.GPU
-def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(
+def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress()):
     """
     Perform image editing using the FLUX.1 Kontext pipeline.
 
@@ -53,6 +53,15 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
         ... guidance_scale=2.5
         ... )
     """
+
+    progress(0,desc="Starting")
+
+    def callback_fn(pipe, step, timestep, callback_kwargs):
+        print(f"[Step {step}] Timestep: {timestep}")
+        progress_value = (step+1.0)/steps
+        progress(progress_value, desc=f"Image generating, {step + 1}/{steps} steps")
+        return callback_kwargs
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
@@ -65,6 +74,7 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
             width = input_image.size[0],
             height = input_image.size[1],
             num_inference_steps=steps,
+            callback_on_step_end=callback_fn,
             generator=torch.Generator().manual_seed(seed),
         ).images[0]
     else:
@@ -72,8 +82,11 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
             prompt=prompt,
             guidance_scale=guidance_scale,
             num_inference_steps=steps,
+            callback_on_step_end=callback_fn,
             generator=torch.Generator().manual_seed(seed),
         ).images[0]
+
+    progress(1, desc="Complete")
     return image, seed, gr.Button(visible=True)
 
 @spaces.GPU
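For reference, the pattern this commit introduces, reporting per-step progress from the diffusers pipeline into the Gradio UI via callback_on_step_end and gr.Progress, can be sketched in isolation as below. The model ID, the @spaces.GPU decorator, and the callback signature come from app.py; the edit function name, the fixed seed, and the gr.Interface wiring are illustrative assumptions rather than the Space's actual interface.

import torch
import gradio as gr
import spaces  # ZeroGPU decorator used by the Space; a no-op outside Hugging Face Spaces
from diffusers import FluxKontextPipeline

# Same checkpoint as the Space (assumes a CUDA GPU with enough memory for bfloat16 weights).
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16
).to("cuda")

@spaces.GPU
def edit(input_image, prompt, steps=28, progress=gr.Progress()):
    # Gradio injects a Progress tracker for the gr.Progress() default parameter.
    progress(0, desc="Starting")

    # diffusers calls this after every denoising step; it must return callback_kwargs.
    def on_step_end(pipe, step, timestep, callback_kwargs):
        progress((step + 1) / steps, desc=f"Step {step + 1}/{steps}")
        return callback_kwargs

    image = pipe(
        image=input_image,
        prompt=prompt,
        num_inference_steps=steps,
        callback_on_step_end=on_step_end,
        generator=torch.Generator().manual_seed(42),  # fixed seed for the sketch
    ).images[0]

    progress(1, desc="Complete")
    return image

# Hypothetical minimal UI; the real app uses its own layout and extra controls.
demo = gr.Interface(
    fn=edit,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Prompt"), gr.Slider(1, 50, value=28, step=1, label="Steps")],
    outputs=gr.Image(),
)
demo.launch()

The callback fires after each denoising step, so mapping (step + 1) / steps to the progress tracker yields a 0-1 fraction that Gradio renders as a per-step progress bar while the image is generated, which is exactly what the added callback_fn and progress calls in the diff accomplish.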