gaur3009 committed (verified)
Commit 3eabff7 · Parent(s): 6dc1f73

Create app.py

Files changed (1):
  app.py  +278 -0
app.py ADDED
@@ -0,0 +1,278 @@
import spaces
import torch


# see https://huggingface.co/spaces/zero-gpu-explorers/README/discussions/85
def my_arange(*args, **kwargs):
    return torch.arange(*args, **kwargs)


torch.arange = my_arange

from pathlib import Path

import gradio as gr
from gradio_imageslider import ImageSlider
from huggingface_hub import hf_hub_download
from PIL import Image
from refiners.fluxion.utils import manual_seed
from refiners.foundationals.latent_diffusion import Solver, solvers

from enhancer import ESRGANUpscaler, ESRGANUpscalerCheckpoints

TITLE = """
<h1 align="center">Image Enhancer, implemented using refiners</h1>
<p>
<center>
<a style="font-size: 1.25rem;" href="https://blog.finegrain.ai/posts/reproducing-clarity-upscaler/" target="_blank">[blog post]</a>
<a style="font-size: 1.25rem;" href="https://github.com/finegrain-ai/refiners" target="_blank">[refiners]</a>
<a style="font-size: 1.25rem;" href="https://github.com/philz1337x/clarity-upscaler" target="_blank">[clarity-upscaler]</a>
<a style="font-size: 1.25rem;" href="https://finegrain.ai/" target="_blank">[finegrain]</a>
</center>
</p>
"""

# Model weights are fetched from the Hugging Face Hub at pinned revisions;
# hf_hub_download returns the path of the locally cached file.
CHECKPOINTS = ESRGANUpscalerCheckpoints(
    unet=Path(
        hf_hub_download(
            repo_id="refiners/juggernaut.reborn",
            filename="unet.safetensors",
            revision="948510aaf4c8e8e9b32b5a7c25736422253f7b93",
        )
    ),
    clip_text_encoder=Path(
        hf_hub_download(
            repo_id="refiners/juggernaut.reborn",
            filename="text_encoder.safetensors",
            revision="948510aaf4c8e8e9b32b5a7c25736422253f7b93",
        )
    ),
    lda=Path(
        hf_hub_download(
            repo_id="refiners/juggernaut.reborn",
            filename="autoencoder.safetensors",
            revision="948510aaf4c8e8e9b32b5a7c25736422253f7b93",
        )
    ),
    controlnet_tile=Path(
        hf_hub_download(
            repo_id="refiners/controlnet.sd15.tile",
            filename="model.safetensors",
            revision="48ced6ff8bfa873a8976fa467c3629a240643387",
        )
    ),
    esrgan=Path(
        hf_hub_download(
            repo_id="philz1337x/upscaler",
            filename="4x-UltraSharp.pth",
            revision="011deacac8270114eb7d2eeff4fe6fa9a837be70",
        )
    ),
    negative_embedding=Path(
        hf_hub_download(
            repo_id="philz1337x/embeddings",
            filename="JuggernautNegative-neg.pt",
            revision="203caa7e9cc2bc225031a4021f6ab1ded283454a",
        )
    ),
    negative_embedding_key="string_to_param.*",
    loras={
        "more_details": Path(
            hf_hub_download(
                repo_id="philz1337x/loras",
                filename="more_details.safetensors",
                revision="a3802c0280c0d00c2ab18d37454a8744c44e474e",
            )
        ),
        "sdxl_render": Path(
            hf_hub_download(
                repo_id="philz1337x/loras",
                filename="SDXLrender_v2.0.safetensors",
                revision="a3802c0280c0d00c2ab18d37454a8744c44e474e",
            )
        ),
    },
)

# Per-LoRA weights, passed to enhancer.upscale via loras_scale.
LORA_SCALES = {
    "more_details": 0.5,
    "sdxl_render": 1.0,
}

# Initialize the enhancer on the CPU.
DEVICE_CPU = torch.device("cpu")
DTYPE = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float32
enhancer = ESRGANUpscaler(checkpoints=CHECKPOINTS, device=DEVICE_CPU, dtype=DTYPE)

# "Move" the enhancer to the GPU; on ZeroGPU Spaces this transfer is handled by the spaces runtime.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
enhancer.to(device=DEVICE, dtype=DTYPE)


@spaces.GPU
def process(
    input_image: Image.Image,
    prompt: str = "masterpiece, best quality, highres",
    negative_prompt: str = "worst quality, low quality, normal quality",
    seed: int = 42,
    upscale_factor: float = 2,
    controlnet_scale: float = 0.6,
    controlnet_decay: float = 1.0,
    condition_scale: int = 6,
    tile_width: int = 112,
    tile_height: int = 144,
    denoise_strength: float = 0.35,
    num_inference_steps: int = 18,
    solver: str = "DDIM",
) -> tuple[Image.Image, Image.Image]:
    manual_seed(seed)

    solver_type: type[Solver] = getattr(solvers, solver)

    enhanced_image = enhancer.upscale(
        image=input_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        upscale_factor=upscale_factor,
        controlnet_scale=controlnet_scale,
        controlnet_scale_decay=controlnet_decay,
        condition_scale=condition_scale,
        tile_size=(tile_height, tile_width),
        denoise_strength=denoise_strength,
        num_inference_steps=num_inference_steps,
        loras_scale=LORA_SCALES,
        solver_type=solver_type,
    )

    # Return the (before, after) pair consumed by the ImageSlider output.
    return (input_image, enhanced_image)


with gr.Blocks() as demo:
    gr.HTML(TITLE)

    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil", label="Input Image")
            run_button = gr.ClearButton(components=None, value="Enhance Image")
        with gr.Column():
            output_slider = ImageSlider(label="Before / After")
            run_button.add(output_slider)

    with gr.Accordion("Advanced Options", open=False):
        prompt = gr.Textbox(
            label="Prompt",
            placeholder="masterpiece, best quality, highres",
        )
        negative_prompt = gr.Textbox(
            label="Negative Prompt",
            placeholder="worst quality, low quality, normal quality",
        )
        seed = gr.Slider(
            minimum=0,
            maximum=10_000,
            value=42,
            step=1,
            label="Seed",
        )
        upscale_factor = gr.Slider(
            minimum=1,
            maximum=4,
            value=2,
            step=0.2,
            label="Upscale Factor",
        )
        controlnet_scale = gr.Slider(
            minimum=0,
            maximum=1.5,
            value=0.6,
            step=0.1,
            label="ControlNet Scale",
        )
        controlnet_decay = gr.Slider(
            minimum=0.5,
            maximum=1,
            value=1.0,
            step=0.025,
            label="ControlNet Scale Decay",
        )
        condition_scale = gr.Slider(
            minimum=2,
            maximum=20,
            value=6,
            step=1,
            label="Condition Scale",
        )
        tile_width = gr.Slider(
            minimum=64,
            maximum=200,
            value=112,
            step=1,
            label="Latent Tile Width",
        )
        tile_height = gr.Slider(
            minimum=64,
            maximum=200,
            value=144,
            step=1,
            label="Latent Tile Height",
        )
        denoise_strength = gr.Slider(
            minimum=0,
            maximum=1,
            value=0.35,
            step=0.1,
            label="Denoise Strength",
        )
        num_inference_steps = gr.Slider(
            minimum=1,
            maximum=30,
            value=18,
            step=1,
            label="Number of Inference Steps",
        )
        solver = gr.Radio(
            choices=["DDIM", "DPMSolver"],
            value="DDIM",
            label="Solver",
        )

    run_button.click(
        fn=process,
        inputs=[
            input_image,
            prompt,
            negative_prompt,
            seed,
            upscale_factor,
            controlnet_scale,
            controlnet_decay,
            condition_scale,
            tile_width,
            tile_height,
            denoise_strength,
            num_inference_steps,
            solver,
        ],
        outputs=output_slider,
    )

    gr.Examples(
        examples=[
            "examples/kara-eads-L7EwHkq1B2s-unsplash.jpg",
            "examples/clarity_bird.webp",
            "examples/edgar-infocus-gJH8AqpiSEU-unsplash.jpg",
            "examples/jeremy-wallace-_XjW3oN8UOE-unsplash.jpg",
            "examples/karina-vorozheeva-rW-I87aPY5Y-unsplash.jpg",
            "examples/karographix-photography-hIaOPjYCEj4-unsplash.jpg",
            "examples/melissa-walker-horn-gtDYwUIr9Vg-unsplash.jpg",
            "examples/ryoji-iwata-X53e51WfjlE-unsplash.jpg",
            "examples/tadeusz-lakota-jggQZkITXng-unsplash.jpg",
        ],
        inputs=[input_image],
        outputs=output_slider,
        fn=process,
        cache_examples="lazy",
        run_on_click=False,
    )

demo.launch(share=False)
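
Note: process returns the (original, enhanced) image pair that feeds the ImageSlider, so the enhancer can also be driven without the web UI. Below is a minimal sketch, not part of the commit, assuming the module-level setup above has already executed (same file or an interactive session), that a CUDA GPU is available locally (outside ZeroGPU the spaces.GPU decorator should have no effect), and that the examples/ images from the Space repository are present; the output path is illustrative.

from PIL import Image

# Enhance one of the bundled example images with the default settings
# (default prompts, seed 42) and a 2x upscale, then save the result.
original, enhanced = process(
    Image.open("examples/clarity_bird.webp"),
    upscale_factor=2,
)
print(original.size, "->", enhanced.size)
enhanced.save("enhanced.png")  # illustrative output path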