Gemini899 committed on
Commit 327dc47 · verified · 1 parent: 01500d8

Update app.py

Files changed (1):
  1. app.py +159 -139
app.py CHANGED
@@ -1,154 +1,174 @@
  import gradio as gr
- import numpy as np
- import random
-
- # import spaces #[uncomment to use ZeroGPU]
- from diffusers import DiffusionPipeline
  import torch

  device = "cuda" if torch.cuda.is_available() else "cpu"
- model_repo_id = "stabilityai/sdxl-turbo"  # Replace with the model you would like to use
-
- if torch.cuda.is_available():
-     torch_dtype = torch.float16
- else:
-     torch_dtype = torch.float32
-
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
- pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
-
- # @spaces.GPU #[uncomment to use ZeroGPU]
- def infer(
-     prompt,
-     negative_prompt,
-     seed,
-     randomize_seed,
-     width,
-     height,
-     guidance_scale,
-     num_inference_steps,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     image = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         width=width,
-         height=height,
-         generator=generator,
-     ).images[0]
-
-     return image, seed
-
-
- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
- ]
-
- css = """
- #col-container {
      margin: 0 auto;
      max-width: 640px;
  }
  """

- with gr.Blocks(css=css) as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(" # Text-to-Image Gradio Template")
-
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-
-             run_button = gr.Button("Run", scale=0, variant="primary")
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
-             )
-
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
-             )
-
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-             with gr.Row():
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,  # Replace with defaults that work for your model
-                 )
-
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,  # Replace with defaults that work for your model
-                 )
-
-             with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0,  # Replace with defaults that work for your model
-                 )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=2,  # Replace with defaults that work for your model
-                 )
-
-         gr.Examples(examples=examples, inputs=[prompt])
      gr.on(
-         triggers=[run_button.click, prompt.submit],
-         fn=infer,
-         inputs=[
-             prompt,
-             negative_prompt,
-             seed,
-             randomize_seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
-         ],
-         outputs=[result, seed],
      )

  if __name__ == "__main__":
      demo.launch()
+ import spaces
  import gradio as gr
+ import re
+ from PIL import Image

+ import os
+ import numpy as np
  import torch
+ from diffusers import FluxImg2ImgPipeline

+ dtype = torch.bfloat16
  device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ pipe = FluxImg2ImgPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype).to(device)
+
+
+ def sanitize_prompt(prompt):
+     # Allow only alphanumeric characters, spaces, and basic punctuation
+     allowed_chars = re.compile(r"[^a-zA-Z0-9\s.,!?-]")
+     sanitized_prompt = allowed_chars.sub("", prompt)
+     return sanitized_prompt
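+
+ # e.g. sanitize_prompt("a girl <script>alert(1)</script>") -> "a girl scriptalert1script"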
+
+ def convert_to_fit_size(original_width_and_height, maximum_size=2048):
+     width, height = original_width_and_height
+     if width <= maximum_size and height <= maximum_size:
+         return width, height
+
+     if width > height:
+         scaling_factor = maximum_size / width
+     else:
+         scaling_factor = maximum_size / height
+
+     new_width = int(width * scaling_factor)
+     new_height = int(height * scaling_factor)
+     return new_width, new_height
+
+ def adjust_to_multiple_of_32(width: int, height: int):
+     width = width - (width % 32)
+     height = height - (height % 32)
+     return width, height
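+
+ # e.g. convert_to_fit_size((4096, 3072)) -> (2048, 1536); adjust_to_multiple_of_32(2000, 1500) -> (1984, 1472)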
+
+ @spaces.GPU(duration=120)
+ def process_images(image, prompt="a girl", strength=0.75, seed=0, inference_step=4, progress=gr.Progress(track_tqdm=True)):
+     progress(0, desc="Starting")
+
+     def process_img2img(image, prompt="a person", strength=0.75, seed=0, num_inference_steps=4):
+         if image is None:
+             print("empty input image returned")
+             return None
+
+         generator = torch.Generator(device).manual_seed(seed)
+         fit_width, fit_height = convert_to_fit_size(image.size)
+         width, height = adjust_to_multiple_of_32(fit_width, fit_height)
+         image = image.resize((width, height), Image.LANCZOS)
+
+         # for more parameters see https://huggingface.co/docs/diffusers/api/pipelines/flux#diffusers.FluxInpaintPipeline
+         output = pipe(prompt=prompt, image=image, generator=generator, strength=strength, width=width, height=height,
+                       guidance_scale=0, num_inference_steps=num_inference_steps, max_sequence_length=256)
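+         # note: schnell is distilled to run without classifier-free guidance (hence guidance_scale=0);
+         # img2img executes roughly int(num_inference_steps * strength) denoising steps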
+
+         pil_image = output.images[0]
+         new_width, new_height = pil_image.size
+
+         # resize back from the multiple-of-32 size to the fitted size
+         if (new_width != fit_width) or (new_height != fit_height):
+             return pil_image.resize((fit_width, fit_height), Image.LANCZOS)
+
+         return pil_image
+
+     output = process_img2img(image, prompt, strength, seed, inference_step)
+     return output
+
+
+ def read_file(path: str) -> str:
+     with open(path, 'r', encoding='utf-8') as f:
+         content = f.read()
+     return content
+
+
+ css = """
+ #col-left {
+     margin: 0 auto;
+     max-width: 640px;
+ }
+ #col-right {
      margin: 0 auto;
      max-width: 640px;
  }
+ .grid-container {
+     display: flex;
+     align-items: center;
+     justify-content: center;
+     gap: 10px;
+ }
+
+ .image {
+     width: 128px;
+     height: 128px;
+     object-fit: cover;
+ }
+
+ .text {
+     font-size: 16px;
+ }
  """

+ with gr.Blocks(css=css, elem_id="demo-container") as demo:
+     with gr.Column():
+         gr.HTML(read_file("demo_header.html"))
+         gr.HTML(read_file("demo_tools.html"))
+     with gr.Row():
+         with gr.Column():
+             image = gr.Image(height=800, sources=['upload', 'clipboard'], image_mode='RGB', elem_id="image_upload", type="pil", label="Upload")
+             with gr.Row(elem_id="prompt-container", equal_height=False):
+                 with gr.Row():
+                     prompt = gr.Textbox(label="Prompt", value="a woman", placeholder="Your prompt (what you want the image to become)", elem_id="prompt")
+
+             btn = gr.Button("Img2Img", elem_id="run_button", variant="primary")
+
+             with gr.Accordion(label="Advanced Settings", open=False):
+                 with gr.Row(equal_height=True):
+                     strength = gr.Number(value=0.75, minimum=0, maximum=0.75, step=0.01, label="strength")
+                     seed = gr.Number(value=100, minimum=0, step=1, label="seed")
+                     inference_step = gr.Number(value=4, minimum=1, step=4, label="inference_step")
+                 id_input = gr.Text(label="Name", visible=False)
+
+         with gr.Column():
+             image_out = gr.Image(height=800, sources=[], label="Output", elem_id="output-img", format="jpg")
+
+     gr.Examples(
+         examples=[
+             ["examples/draw_input.jpg", "examples/draw_output.jpg", "a woman, eyes closed, mouth opened"],
+             ["examples/draw-gimp_input.jpg", "examples/draw-gimp_output.jpg", "a woman, eyes closed, mouth opened"],
+             ["examples/gimp_input.jpg", "examples/gimp_output.jpg", "a woman, hand on neck"],
+             ["examples/inpaint_input.jpg", "examples/inpaint_output.jpg", "a woman, hand on neck"],
+         ],
+         inputs=[image, image_out, prompt],
+     )
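+     # selecting an example fills the input image, the expected-output preview, and the prompt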
+     gr.HTML(read_file("demo_footer.html"))
      gr.on(
+         triggers=[btn.click, prompt.submit],
+         fn=process_images,
+         inputs=[image, prompt, strength, seed, inference_step],
+         outputs=[image_out],
      )

  if __name__ == "__main__":
      demo.launch()
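
For reference, the core of the new app.py (seeded FLUX.1-schnell img2img at multiple-of-32 dimensions) can be exercised outside Gradio. A minimal sketch; the file paths and prompt below are placeholders, not part of the commit:

import torch
from PIL import Image
from diffusers import FluxImg2ImgPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = FluxImg2ImgPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
).to(device)

init = Image.open("input.jpg").convert("RGB")  # placeholder input path
w, h = (d - d % 32 for d in init.size)         # snap to multiples of 32, as app.py does
init = init.resize((w, h), Image.LANCZOS)

result = pipe(
    prompt="a woman, eyes closed",             # placeholder prompt
    image=init,
    strength=0.75,
    width=w,
    height=h,
    guidance_scale=0,                          # schnell needs no classifier-free guidance
    num_inference_steps=4,
    max_sequence_length=256,
    generator=torch.Generator(device).manual_seed(100),
).images[0]
result.save("output.jpg")                      # placeholder output path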