erikbeltran committed on
Commit
7a8479d
·
verified ·
1 Parent(s): d06075d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -7
app.py CHANGED
@@ -7,6 +7,9 @@ import spaces
7
  from PIL import Image
8
  from fastapi import FastAPI
9
  from fastapi.responses import FileResponse
 
 
 
10
 
11
  # Initialize the base model
12
  dtype = torch.bfloat16
@@ -20,7 +23,7 @@ MAX_SEED = 2**32-1
20
  app = FastAPI()
21
 
22
  @spaces.GPU()
23
- def generate_image(prompt, width, height, lora_path, trigger_word, hash_value):
24
  # Load LoRA weights
25
  pipe.load_lora_weights(lora_path)
26
 
@@ -34,7 +37,7 @@ def generate_image(prompt, width, height, lora_path, trigger_word, hash_value):
34
  # Generate image
35
  image = pipe(
36
  prompt=full_prompt,
37
- num_inference_steps=28,
38
  guidance_scale=3.5,
39
  width=width,
40
  height=height,
@@ -50,8 +53,8 @@ def generate_image(prompt, width, height, lora_path, trigger_word, hash_value):
50
 
51
  return image, image_path
52
 
53
- def run_lora(prompt, width, height, lora_path, trigger_word, hash_value):
54
- image, image_path = generate_image(prompt, width, height, lora_path, trigger_word, hash_value)
55
  return image, image_path
56
 
57
  # Set up the Gradio interface
@@ -62,8 +65,9 @@ with gr.Blocks() as gradio_app:
62
  prompt = gr.Textbox(label="Prompt", lines=3, placeholder="Enter your prompt here")
63
 
64
  with gr.Row():
65
- width = gr.Slider(label="Width", minimum=256, maximum=1024, step=64, value=512)
66
- height = gr.Slider(label="Height", minimum=256, maximum=1024, step=64, value=512)
 
67
 
68
  with gr.Row():
69
  lora_path = gr.Textbox(label="LoRA Path", value="SebastianBodza/Flux_Aquarell_Watercolor_v2")
@@ -77,7 +81,7 @@ with gr.Blocks() as gradio_app:
77
 
78
  generate_button.click(
79
  fn=run_lora,
80
- inputs=[prompt, width, height, lora_path, trigger_word, hash_value],
81
  outputs=[output_image, output_path]
82
  )
83
 
 
7
  from PIL import Image
8
  from fastapi import FastAPI
9
  from fastapi.responses import FileResponse
10
+ import transformers
11
+
12
+ transformers.utils.move_cache()
13
 
14
  # Initialize the base model
15
  dtype = torch.bfloat16
 
23
  app = FastAPI()
24
 
25
  @spaces.GPU()
26
+ def generate_image(prompt, width, height, lora_path, trigger_word, hash_value, steps):
27
  # Load LoRA weights
28
  pipe.load_lora_weights(lora_path)
29
 
 
37
  # Generate image
38
  image = pipe(
39
  prompt=full_prompt,
40
+ num_inference_steps=steps,
41
  guidance_scale=3.5,
42
  width=width,
43
  height=height,
 
53
 
54
  return image, image_path
55
 
56
+ def run_lora(prompt, width, height, lora_path, trigger_word, hash_value, steps):
57
+ image, image_path = generate_image(prompt, width, height, lora_path, trigger_word, hash_value, steps)
58
  return image, image_path
59
 
60
  # Set up the Gradio interface
 
65
  prompt = gr.Textbox(label="Prompt", lines=3, placeholder="Enter your prompt here")
66
 
67
  with gr.Row():
68
+ width = gr.Slider(label="Width", minimum=128, maximum=1024, step=64, value=512)
69
+ height = gr.Slider(label="Height", minimum=128, maximum=1024, step=64, value=512)
70
+ steps = gr.Slider(label="Inference Steps", minimum=1, maximum=100, step=1, value=28)
71
 
72
  with gr.Row():
73
  lora_path = gr.Textbox(label="LoRA Path", value="SebastianBodza/Flux_Aquarell_Watercolor_v2")
 
81
 
82
  generate_button.click(
83
  fn=run_lora,
84
+ inputs=[prompt, width, height, lora_path, trigger_word, hash_value, steps],
85
  outputs=[output_image, output_path]
86
  )
87