Update app.py
app.py CHANGED
@@ -12,10 +12,8 @@ MAX_SEED = np.iinfo(np.int32).max
 
 pipe = FluxKontextPipeline.from_pretrained("black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
 
-progress=gr.Progress()
-
 @spaces.GPU
-def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28):
+def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
     """
     Perform image editing using the FLUX.1 Kontext pipeline.
 
@@ -55,15 +53,6 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
     ... guidance_scale=2.5
     ... )
     """
-
-    progress(0,desc="Starting")
-
-    def callback_fn(pipe, step, timestep, callback_kwargs):
-        print(f"[Step {step}] Timestep: {timestep}")
-        progress_value = (step+1.0)/steps
-        progress(progress_value, desc=f"Image generating, {step + 1}/{steps} steps")
-        return callback_kwargs
-
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
@@ -76,7 +65,6 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
             width = input_image.size[0],
             height = input_image.size[1],
             num_inference_steps=steps,
-            callback_on_step_end=callback_fn,
             generator=torch.Generator().manual_seed(seed),
         ).images[0]
     else:
@@ -84,11 +72,8 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
             prompt=prompt,
             guidance_scale=guidance_scale,
             num_inference_steps=steps,
-            callback_on_step_end=callback_fn,
             generator=torch.Generator().manual_seed(seed),
         ).images[0]
-
-    progress(1, desc="Complete")
     return image, seed, gr.Button(visible=True)
 
 @spaces.GPU
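
In short, the commit replaces the hand-rolled progress plumbing (a module-level progress=gr.Progress(), a callback_fn passed as callback_on_step_end, and explicit progress(...) calls) with Gradio's built-in tqdm tracking: declaring progress=gr.Progress(track_tqdm=True) as a parameter of infer lets Gradio mirror the tqdm bar that the diffusers pipeline already drives during its denoising loop. Below is a minimal, self-contained sketch of that pattern, not the Space's actual code; fake_denoise is a hypothetical stand-in for the pipe(...) call.

# Minimal sketch of the track_tqdm pattern (assumed names, not app.py itself).
import time

import gradio as gr
from tqdm import tqdm


def fake_denoise(steps, progress=gr.Progress(track_tqdm=True)):
    # This tqdm loop plays the role of the pipeline's internal step loop;
    # with track_tqdm=True, Gradio picks it up and shows per-step progress
    # in the UI without any manual callback.
    for _ in tqdm(range(int(steps)), desc="Denoising"):
        time.sleep(0.05)
    return f"Finished {int(steps)} steps"


demo = gr.Interface(
    fn=fake_denoise,
    inputs=gr.Slider(1, 50, value=28, step=1, label="Steps"),
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()

Because the progress display now rides on the tqdm bar the function already produces, the custom callback, the explicit progress(0)/progress(1) calls, and the module-level gr.Progress() instance all become redundant, which is exactly what the diff removes.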