patrickvonplaten committed
Commit 73ff6df · 1 Parent(s): 5e0a11c
Files changed (2):
  1. __pycache__/app.cpython-310.pyc +0 -0
  2. app.py +3 -10
__pycache__/app.cpython-310.pyc CHANGED
Binary files a/__pycache__/app.cpython-310.pyc and b/__pycache__/app.cpython-310.pyc differ
 
app.py CHANGED
@@ -15,8 +15,8 @@ pipeline.to(device)
 if USE_TORCH_COMPILE:
     pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)
 
-def generate(prompt_len: int, num_images_per_prompt: int = 1):
-    prompt = prompt_len * "a"
+def generate(num_images_per_prompt: int = 1):
+    prompt = 77 * "a"
     num_inference_steps = 40
     start_time = time.time()
     pipeline(prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=num_inference_steps).images
@@ -35,19 +35,12 @@ with gr.Blocks(css="style.css") as demo:
         step=1,
         value=1,
     )
-    prompt_len = gr.Slider(
-        label="Prompt len",
-        minimum=1,
-        maximum=77,
-        step=20,
-        value=1,
-    )
     btn = gr.Button("Benchmark!").style(
         margin=False,
         rounded=(False, True, True, False),
         full_width=False,
     )
 
-    btn.click(fn=generate, inputs=[batch_size, prompt_len])
+    btn.click(fn=generate, inputs=[batch_size])
 
 demo.launch(share=True)
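A side note on the timing in the first hunk: torch.compile with mode="reduce-overhead" compiles lazily, so the first call through the compiled UNet also pays the compilation cost, and time.time() around that first call measures compile time plus inference. A minimal warmup sketch, reusing the pipeline object from the diff context; the benchmark helper itself is hypothetical, not part of app.py:

    import time

    def benchmark(num_images_per_prompt: int = 1, num_inference_steps: int = 40):
        prompt = 77 * "a"  # same fixed prompt as the new generate()
        # Untimed warmup call: absorbs the one-time compilation cost.
        pipeline(prompt, num_images_per_prompt=num_images_per_prompt,
                 num_inference_steps=num_inference_steps)
        start_time = time.time()
        pipeline(prompt, num_images_per_prompt=num_images_per_prompt,
                 num_inference_steps=num_inference_steps).images
        return time.time() - start_time  # steady-state seconds per batch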
 
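For orientation, a minimal sketch of how app.py might read after this commit, reconstructed from the diff context above. Only the lines visible in the diff come from the actual file; the imports, model id, device selection, the USE_TORCH_COMPILE flag, the batch_size slider's label and range, and the elided lines 23-34 are assumptions:

    import time

    import gradio as gr
    import torch
    from diffusers import DiffusionPipeline

    USE_TORCH_COMPILE = True                                 # assumption
    device = "cuda" if torch.cuda.is_available() else "cpu"  # assumption

    # Assumed model id; the diff only shows pipeline.to(device).
    pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipeline.to(device)

    if USE_TORCH_COMPILE:
        pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead", fullgraph=True)

    def generate(num_images_per_prompt: int = 1):
        prompt = 77 * "a"  # fixed prompt replaces the removed prompt_len slider
        num_inference_steps = 40
        start_time = time.time()
        pipeline(prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=num_inference_steps).images
        # Lines 23-34 of app.py are not shown in the diff; presumably they
        # report time.time() - start_time back to the UI.

    with gr.Blocks(css="style.css") as demo:
        batch_size = gr.Slider(  # assumption: label and range not visible in the diff
            label="Batch size",
            minimum=1,
            maximum=8,
            step=1,
            value=1,
        )
        btn = gr.Button("Benchmark!").style(  # .style() is the Gradio 3.x API this file uses
            margin=False,
            rounded=(False, True, True, False),
            full_width=False,
        )

        btn.click(fn=generate, inputs=[batch_size])

    demo.launch(share=True)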