stzhao committed · Commit 9e8b0d0 · verified · 1 Parent(s): 9195195

Update app.py

Files changed (1)
  1. app.py +10 -9
app.py CHANGED
@@ -12,12 +12,12 @@ else:
     torch_dtype = torch.float32
 
 
-def set_client_for_session(request: gr.Request):
-    x_ip_token = request.headers['x-ip-token']
+# def set_client_for_session(request: gr.Request):
+#     x_ip_token = request.headers['x-ip-token']
 
-    # The "gradio/text-to-image" space is a ZeroGPU space
-    # return Client("stzhao/LeX-Enhancer", headers={"X-IP-Token": x_ip_token})
-    return Client("stzhao/LeX-Enhancer")
+# # The "gradio/text-to-image" space is a ZeroGPU space
+# # return Client("stzhao/LeX-Enhancer", headers={"X-IP-Token": x_ip_token})
+# return Client("stzhao/LeX-Enhancer")
 
 # Load models
 def load_models():
@@ -76,12 +76,13 @@ def generate_image(enhanced_caption, seed, num_inference_steps, guidance_scale):
     return image
 
 # @spaces.GPU(duration=130)
-def run_pipeline(image_caption, text_caption, seed, num_inference_steps, guidance_scale, enable_enhancer, client):
+def run_pipeline(image_caption, text_caption, seed, num_inference_steps, guidance_scale, enable_enhancer):
     """Run the complete pipeline from captions to final image"""
     combined_caption = f"{image_caption}, with the text on it: {text_caption}."
 
     if enable_enhancer:
         # combined_caption, enhanced_caption = generate_enhanced_caption(image_caption, text_caption)
+        client = Client("stzhao/LeX-Enhancer")
         combined_caption, enhanced_caption = prompt_enhance(client, image_caption, text_caption)
         print(f"enhanced caption:\n{enhanced_caption}")
     else:
@@ -93,7 +94,7 @@ def run_pipeline(image_caption, text_caption, seed, num_inference_steps, guidanc
 
 # Gradio interface
 with gr.Blocks() as demo:
-    client = gr.State()
+    # client = gr.State()
     gr.Markdown("# LeX-Enhancer & LeX-FLUX Demo")
     gr.Markdown("## Project Page: https://zhaoshitian.github.io/lexart/")
     gr.Markdown("Generate enhanced captions from simple image and text descriptions, then create images with LeX-FLUX")
@@ -179,11 +180,11 @@ with gr.Blocks() as demo:
 
     submit_btn.click(
         fn=run_pipeline,
-        inputs=[image_caption, text_caption, seed, num_inference_steps, guidance_scale, enable_enhancer, client],
+        inputs=[image_caption, text_caption, seed, num_inference_steps, guidance_scale, enable_enhancer],
         outputs=[output_image, combined_caption_box, enhanced_caption_box]
     )
 
-    demo.load(set_client_for_session, None, client)
+    # demo.load(set_client_for_session, None, client)
 
 if __name__ == "__main__":
     demo.launch(debug=True)
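
Summary of the change: the per-session enhancer client (the set_client_for_session handler, the gr.State() holder, and the demo.load(...) hook) is commented out, and run_pipeline now builds a Client lazily, only when the enhancer checkbox is enabled. A minimal sketch of the resulting call path follows; it assumes Client comes from gradio_client, and the endpoint name and return shape are illustrative rather than taken from the Space's actual API.

# Sketch only, not the app's code: gradio_client.Client is the assumed import;
# api_name="/predict" is a hypothetical endpoint of the stzhao/LeX-Enhancer Space.
from gradio_client import Client

def enhance_caption(image_caption: str, text_caption: str):
    # New behaviour after this commit: a fresh Client per call, created only when
    # enhancement is requested, instead of one client per browser session stored
    # in gr.State and populated by demo.load().
    client = Client("stzhao/LeX-Enhancer")
    return client.predict(image_caption, text_caption, api_name="/predict")

The commented-out X-IP-Token header forwarding is the pattern Gradio describes for calling ZeroGPU Spaces on behalf of a visitor; it stays disabled here, so enhancer calls are made as a plain anonymous client.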