6Morpheus6 committed
Commit 2cfde88 · verified · 1 Parent(s): c6edca3

add download element

Files changed (1): app.py (+44 -13)

app.py CHANGED
@@ -1,16 +1,27 @@
+import os
+import gc
+import random
+import tempfile
+import torch
+import devicetorch
 import gradio as gr
 import numpy as np
 # import spaces
-import torch
-import random
 from PIL import Image
 
 from diffusers import FluxKontextPipeline
 from diffusers.utils import load_image
+from dfloat11 import DFloat11Model
 
 MAX_SEED = np.iinfo(np.int32).max
 
-pipe = FluxKontextPipeline.from_pretrained("fuliucansheng/FLUX.1-Kontext-dev-diffusers", torch_dtype=torch.bfloat16).to("cuda")
+pipe = FluxKontextPipeline.from_pretrained("fuliucansheng/FLUX.1-Kontext-dev-diffusers", torch_dtype=torch.bfloat16)
+DFloat11Model.from_pretrained(
+    "DFloat11/FLUX.1-Kontext-dev-DF11",
+    device="cpu",
+    bfloat16_model=pipe.transformer,
+)
+pipe.enable_model_cpu_offload()
 
 # @spaces.GPU
 def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
@@ -74,11 +85,22 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
         num_inference_steps=steps,
         generator=torch.Generator().manual_seed(seed),
     ).images[0]
-    return image, seed, gr.Button(visible=True)
+
+    gradio_temp_dir = os.environ.get('GRADIO_TEMP_DIR', tempfile.gettempdir())
+    temp_file_path = os.path.join(gradio_temp_dir, "image.png")
+    image.save(temp_file_path, format="PNG")
+    print(f"Image saved in: {temp_file_path}")
+
+    gc.collect()
+    devicetorch.empty_cache(torch)
+
+    return image, temp_file_path, seed, gr.Button(visible=True)
 
 # @spaces.GPU
 def infer_example(input_image, prompt):
     image, seed, _ = infer(input_image, prompt)
+    gc.collect()
+    devicetorch.empty_cache(torch)
     return image, seed
 
 css="""
@@ -90,7 +112,11 @@ css="""
     height: 70vh; !Important
 }
 #row {
-    min-height: 35vh; !Important
+    min-height: 40vh; !Important
+}
+
+#row-height {
+    height: 65px !important
 }
 """
 
@@ -108,17 +134,22 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
             result = gr.Image(label="Result", show_label=False, interactive=False, elem_classes="input-image", elem_id="row")
             reuse_button = gr.Button("Reuse this image", visible=False)
 
-        with gr.Row():
+        with gr.Row(equal_height=True):
+            with gr.Column():
                 prompt = gr.Text(
                     label="Prompt",
-                    show_label=False,
-                    max_lines=1,
+                    show_label=True,
+                    lines=3,
+                    max_lines=3,
                     placeholder="Enter your prompt for editing (e.g., 'Remove glasses', 'Add a hat')",
-                    container=False,
-                    scale=2
+                    container=True,
+                    scale=1
                 )
+
+            with gr.Column():
+                download_image = gr.File(label="Download Image", elem_id="row-height", scale=0)
                 run_button = gr.Button("Run", scale=1)
-
+
         with gr.Row():
             with gr.Accordion("Advanced Settings", open=False):
 
@@ -155,7 +186,7 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
             ["cat.png", "make this cat happy"]
         ],
         inputs=[input_image, prompt],
-        outputs=[result, seed],
+        outputs=[result, download_image, seed],
         fn=infer_example,
         cache_examples=False
     )
@@ -164,7 +195,7 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
         triggers=[run_button.click, prompt.submit],
         fn = infer,
         inputs = [input_image, prompt, seed, randomize_seed, guidance_scale, steps],
-        outputs = [result, seed, reuse_button]
+        outputs = [result, download_image, seed, reuse_button]
     )
     reuse_button.click(
         fn = lambda image: image,
 
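The first hunk replaces the unconditional `.to("cuda")` with DFloat11-compressed weights plus CPU offload, which is what lets the Space avoid holding the full bf16 transformer in VRAM. The load order matters: build the bf16 pipeline first, overlay the DF11 checkpoint onto `pipe.transformer`, then enable offload. A minimal standalone sketch of that pattern, assuming the `dfloat11` package and the two checkpoints named in the diff are available:

```python
import torch
from diffusers import FluxKontextPipeline
from dfloat11 import DFloat11Model

# 1) Build the bf16 pipeline on CPU; do NOT call .to("cuda") here,
#    or the offload hook installed below has nothing left to manage.
pipe = FluxKontextPipeline.from_pretrained(
    "fuliucansheng/FLUX.1-Kontext-dev-diffusers",
    torch_dtype=torch.bfloat16,
)

# 2) Overlay the losslessly compressed DFloat11 weights onto the
#    transformer, keeping them on CPU until they are needed.
DFloat11Model.from_pretrained(
    "DFloat11/FLUX.1-Kontext-dev-DF11",
    device="cpu",
    bfloat16_model=pipe.transformer,
)

# 3) Move submodules to the GPU only while they run, so the whole
#    model never has to fit in VRAM at once.
pipe.enable_model_cpu_offload()
```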
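Inside `infer`, the result is now also written to Gradio's temp directory so the path can feed the new `gr.File` download element, and `gc.collect()` plus `devicetorch.empty_cache(torch)` release memory after each run. One caveat: the fixed filename `image.png` means two concurrent sessions overwrite each other's downloads. A hedged alternative, not what the commit does, that keeps the `GRADIO_TEMP_DIR` convention but mints a unique file per request (`save_for_download` is a hypothetical helper):

```python
import os
import tempfile

def save_for_download(image):
    # Honor GRADIO_TEMP_DIR as the commit does, but create a unique
    # filename per request so parallel sessions don't collide.
    temp_dir = os.environ.get("GRADIO_TEMP_DIR", tempfile.gettempdir())
    fd, path = tempfile.mkstemp(suffix=".png", dir=temp_dir)
    os.close(fd)  # PIL reopens the path itself
    image.save(path, format="PNG")
    return path
```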
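Note that `infer` now returns four values (`image, temp_file_path, seed, gr.Button(...)`) while the untouched context line in `infer_example` still unpacks three (`image, seed, _ = infer(...)`), and the examples wiring below now expects three outputs (`[result, download_image, seed]`). As committed, running an example should raise a `ValueError` at the unpack. A likely intended fix, not part of this commit:

```python
# @spaces.GPU
def infer_example(input_image, prompt):
    # infer() now returns (image, temp_file_path, seed, button_update),
    # so unpack four values and pass the file path through as well.
    image, temp_file_path, seed, _ = infer(input_image, prompt)
    gc.collect()
    devicetorch.empty_cache(torch)
    # Three return values, matching outputs=[result, download_image, seed].
    return image, temp_file_path, seed
```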
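Taken together, the UI hunks add `download_image` to every output list that `infer` or `infer_example` feeds, and each output list has to match the handler's return arity exactly. A reduced, self-contained sketch of the wiring, with a placeholder handler standing in for the real pipeline and the component set trimmed to what the event touches:

```python
import os
import tempfile
import gradio as gr

def demo_infer(image, prompt):
    # Placeholder for infer(): saves the image and returns the same
    # four values the event wiring below expects.
    path = os.path.join(tempfile.gettempdir(), "image.png")
    image.save(path, format="PNG")
    return image, path, 42, gr.Button(visible=True)

with gr.Blocks() as demo:
    input_image = gr.Image(type="pil")
    prompt = gr.Text(label="Prompt")
    run_button = gr.Button("Run")
    result = gr.Image(interactive=False)
    download_image = gr.File(label="Download Image")  # the new element
    seed = gr.Number(label="Seed", value=42)
    reuse_button = gr.Button("Reuse this image", visible=False)

    # One event for both triggers; outputs must match the 4-tuple above.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=demo_infer,
        inputs=[input_image, prompt],
        outputs=[result, download_image, seed, reuse_button],
    )

demo.launch()
```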