Oysiyl committed
Commit 1f7ffd5 · verified · 1 parent: c9c58eb

Upload app.py

Files changed (1): app.py (+344, −0)
app.py ADDED
import os
import random
import sys
from typing import Sequence, Mapping, Any, Union

import torch
import gradio as gr
from PIL import Image
import numpy as np

# import spaces
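
# Pre-download the checkpoint, ControlNet, and VAE weights from the Hugging
# Face Hub into ComfyUI's local model folders.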
from huggingface_hub import hf_hub_download

hf_hub_download(repo_id="stable-diffusion-v1-5/stable-diffusion-v1-5", filename="v1-5-pruned-emaonly.ckpt", local_dir="models/checkpoints")
hf_hub_download(repo_id="Lykon/DreamShaper", filename="DreamShaper_3.32_baked_vae_clip_fix_half.safetensors", local_dir="models/checkpoints")
hf_hub_download(repo_id="latentcat/latentcat-controlnet", filename="models/control_v1p_sd15_brightness.safetensors", local_dir="models/controlnet")
hf_hub_download(repo_id="comfyanonymous/ControlNet-v1-1_fp16_safetensors", filename="control_v11f1e_sd15_tile_fp16.safetensors", local_dir="models/controlnet")
hf_hub_download(repo_id="Lykon/dreamshaper-7", filename="vae/diffusion_pytorch_model.fp16.safetensors", local_dir="models")
hf_hub_download(repo_id="stabilityai/sd-vae-ft-mse-original", filename="vae-ft-mse-840000-ema-pruned.safetensors", local_dir="models/vae")
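
# Helpers: read a node output by index, and locate/bootstrap the local
# ComfyUI installation.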

def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
    """Returns the value at the given index of a sequence or mapping.

    If the object is a sequence (like a list or string), returns the value at the given index.
    If the object is a mapping (like a dictionary), returns the value at the index-th key.

    Some nodes return a dictionary; in those cases, we look under the "result" key.

    Args:
        obj (Union[Sequence, Mapping]): The object to retrieve the value from.
        index (int): The index of the value to retrieve.

    Returns:
        Any: The value at the given index.

    Raises:
        IndexError: If the index is out of bounds for the object and the object is not a mapping.
    """
    try:
        return obj[index]
    except KeyError:
        return obj["result"][index]


def find_path(name: str, path: str = None) -> str:
    """
    Recursively looks at parent folders starting from the given path until it finds the given name.
    Returns the path as a string if found, or None otherwise.
    """
    # If no path is given, use the current working directory
    if path is None:
        path = os.getcwd()

    # Check if the current directory contains the name
    if name in os.listdir(path):
        path_name = os.path.join(path, name)
        print(f"{name} found: {path_name}")
        return path_name

    # Get the parent directory
    parent_directory = os.path.dirname(path)

    # If the parent directory is the same as the current directory, we've reached the root and stop the search
    if parent_directory == path:
        return None

    # Recursively call the function with the parent directory
    return find_path(name, parent_directory)


def add_comfyui_directory_to_sys_path() -> None:
    """
    Add 'ComfyUI' to the sys.path
    """
    comfyui_path = find_path("ComfyUI")
    if comfyui_path is not None and os.path.isdir(comfyui_path):
        sys.path.append(comfyui_path)
        print(f"'{comfyui_path}' added to sys.path")


def add_extra_model_paths() -> None:
    """
    Parse the optional extra_model_paths.yaml file and register the extra
    model search paths it defines.
    """
    try:
        from main import load_extra_path_config
    except ImportError:
        print(
            "Could not import load_extra_path_config from main.py. Looking in utils.extra_config instead."
        )
        from utils.extra_config import load_extra_path_config

    extra_model_paths = find_path("extra_model_paths.yaml")

    if extra_model_paths is not None:
        load_extra_path_config(extra_model_paths)
    else:
        print("Could not find the extra_model_paths config file.")


add_comfyui_directory_to_sys_path()
add_extra_model_paths()


def import_custom_nodes() -> None:
    """Find all custom nodes in the custom_nodes folder and add those node objects to NODE_CLASS_MAPPINGS.

    This function sets up a new asyncio event loop, initializes the PromptServer,
    creates a PromptQueue, and initializes the custom nodes.
    """
    import asyncio
    import execution
    from nodes import init_extra_nodes
    import server

    # Creating a new event loop and setting it as the default loop
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    # Creating an instance of PromptServer with the loop
    server_instance = server.PromptServer(loop)
    execution.PromptQueue(server_instance)

    # Initializing custom nodes
    init_extra_nodes()
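

# Instantiate the built-in nodes and load the checkpoint once at import time,
# so every request reuses the already-loaded model.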
from nodes import NODE_CLASS_MAPPINGS

checkpointloadersimple = NODE_CLASS_MAPPINGS["CheckpointLoaderSimple"]()
checkpointloadersimple_4 = checkpointloadersimple.load_checkpoint(
    ckpt_name="DreamShaper_3.32_baked_vae_clip_fix_half.safetensors"
)
emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
controlnetloader = NODE_CLASS_MAPPINGS["ControlNetLoader"]()
controlnetapplyadvanced = NODE_CLASS_MAPPINGS["ControlNetApplyAdvanced"]()
ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
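
# The ComfyQR pack and the tile preprocessor are custom nodes; they only
# appear in NODE_CLASS_MAPPINGS after import_custom_nodes() has run.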
import_custom_nodes()
comfy_qr_by_module_size = NODE_CLASS_MAPPINGS["comfy-qr-by-module-size"]()
tilepreprocessor = NODE_CLASS_MAPPINGS["TilePreprocessor"]()

from comfy import model_management

# Add all the models that load a safetensors file
model_loaders = [checkpointloadersimple_4]

# Check which models are valid and how to best load them
valid_models = [
    getattr(loader[0], "patcher", loader[0])
    for loader in model_loaders
    if not isinstance(loader[0], dict)
    and not isinstance(getattr(loader[0], "patcher", None), dict)
]

model_management.load_models_gpu(valid_models)
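

# generate_qr_code runs the exported ComfyUI workflow end to end: render a
# plain QR code for the URL, stylize it at 512x512 under brightness and tile
# ControlNets, then refine the decoded result at 1024x1024 with a second
# tile-controlled sampling pass.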
# @spaces.GPU(duration=60)
def generate_qr_code(prompt: str, url: str):
    # Drop the scheme from the URL; the QR node re-adds it through its
    # protocol="Https" setting, so keeping it here would encode it twice.
    if "https://" in url:
        url = url.replace("https://", "")
    if "http://" in url:
        url = url.replace("http://", "")

    with torch.inference_mode():

        # First pass starts from an empty 512x512 latent.
        emptylatentimage_5 = emptylatentimage.generate(
            width=512, height=512, batch_size=1
        )

        # Positive prompt conditioning.
        cliptextencode_6 = cliptextencode.encode(
            text=prompt,
            clip=get_value_at_index(checkpointloadersimple_4, 1),
        )

        # Negative prompt conditioning (fixed list of unwanted artifacts).
        cliptextencode_7 = cliptextencode.encode(
            text="ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft, closed eyes, text, logo",
            clip=get_value_at_index(checkpointloadersimple_4, 1),
        )

        # Brightness ControlNet; the nested "models/" in the name mirrors
        # where hf_hub_download placed the file under models/controlnet/.
        controlnetloader_10 = controlnetloader.load_controlnet(
            control_net_name="models/control_v1p_sd15_brightness.safetensors"
        )

        # Tile ControlNet for the first pass.
        controlnetloader_12 = controlnetloader.load_controlnet(
            control_net_name="control_v11f1e_sd15_tile_fp16.safetensors"
        )

        # Render the plain QR code that both ControlNets condition on.
        comfy_qr_by_module_size_15 = comfy_qr_by_module_size.generate_qr(
            protocol="Https",
            text=url,
            module_size=12,
            max_image_size=512,
            fill_hexcolor="#000000",
            back_hexcolor="#FFFFFF",
            error_correction="Medium",
            border=4,
            module_drawer="Square",
        )

        # The second pass upscales to 1024x1024 from a fresh latent.
        emptylatentimage_17 = emptylatentimage.generate(
            width=1024, height=1024, batch_size=1
        )

        # Tile ControlNet for the second pass.
        controlnetloader_19 = controlnetloader.load_controlnet(
            control_net_name="control_v11f1e_sd15_tile_fp16.safetensors"
        )

        # saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()

        # range(1): the exported workflow runs a single iteration.
        for q in range(1):
            # Apply the brightness ControlNet to the QR image.
            controlnetapplyadvanced_11 = controlnetapplyadvanced.apply_controlnet(
                strength=0.45,
                start_percent=0,
                end_percent=1,
                positive=get_value_at_index(cliptextencode_6, 0),
                negative=get_value_at_index(cliptextencode_7, 0),
                control_net=get_value_at_index(controlnetloader_10, 0),
                image=get_value_at_index(comfy_qr_by_module_size_15, 0),
                vae=get_value_at_index(checkpointloadersimple_4, 2),
            )

            # Preprocess the QR image for the tile ControlNet.
            tilepreprocessor_14 = tilepreprocessor.execute(
                pyrUp_iters=3,
                resolution=512,
                image=get_value_at_index(comfy_qr_by_module_size_15, 0),
            )

            # Chain the tile ControlNet on top of the brightness conditioning.
            controlnetapplyadvanced_13 = controlnetapplyadvanced.apply_controlnet(
                strength=0.45,
                start_percent=0,
                end_percent=1,
                positive=get_value_at_index(controlnetapplyadvanced_11, 0),
                negative=get_value_at_index(controlnetapplyadvanced_11, 1),
                control_net=get_value_at_index(controlnetloader_12, 0),
                image=get_value_at_index(tilepreprocessor_14, 0),
                vae=get_value_at_index(checkpointloadersimple_4, 2),
            )

            # First sampling pass at 512x512.
            ksampler_3 = ksampler.sample(
                seed=random.randint(1, 2**64 - 1),  # seeds are 64-bit; 2**64 itself is out of range
                steps=20,
                cfg=7,
                sampler_name="dpmpp_2m",
                scheduler="karras",
                denoise=1,
                model=get_value_at_index(checkpointloadersimple_4, 0),
                positive=get_value_at_index(controlnetapplyadvanced_13, 0),
                negative=get_value_at_index(controlnetapplyadvanced_13, 1),
                latent_image=get_value_at_index(emptylatentimage_5, 0),
            )

            vaedecode_8 = vaedecode.decode(
                samples=get_value_at_index(ksampler_3, 0),
                vae=get_value_at_index(checkpointloadersimple_4, 2),
            )

            # saveimage_9 = saveimage.save_images(
            #     filename_prefix="qr-new", images=get_value_at_index(vaedecode_8, 0)
            # )

            # Second pass: condition the tile ControlNet on the decoded
            # first-pass image and re-sample at 1024x1024.
            controlnetapplyadvanced_20 = controlnetapplyadvanced.apply_controlnet(
                strength=1,
                start_percent=0,
                end_percent=1,
                positive=get_value_at_index(cliptextencode_6, 0),
                negative=get_value_at_index(cliptextencode_7, 0),
                control_net=get_value_at_index(controlnetloader_19, 0),
                image=get_value_at_index(vaedecode_8, 0),
                vae=get_value_at_index(checkpointloadersimple_4, 2),
            )

            ksampler_18 = ksampler.sample(
                seed=random.randint(1, 2**64 - 1),
                steps=20,
                cfg=7,
                sampler_name="dpmpp_2m",
                scheduler="karras",
                denoise=1,
                model=get_value_at_index(checkpointloadersimple_4, 0),
                positive=get_value_at_index(controlnetapplyadvanced_20, 0),
                negative=get_value_at_index(controlnetapplyadvanced_20, 1),
                latent_image=get_value_at_index(emptylatentimage_17, 0),
            )

            vaedecode_21 = vaedecode.decode(
                samples=get_value_at_index(ksampler_18, 0),
                vae=get_value_at_index(checkpointloadersimple_4, 2),
            )

            # saveimage_22 = saveimage.save_images(
            #     filename_prefix="qr-new-improved",
            #     images=get_value_at_index(vaedecode_21, 0),
            # )

            # Convert the torch tensor to a PIL Image
            image_tensor = get_value_at_index(vaedecode_21, 0)
            # Clamp to [0, 1], then scale to [0, 255] uint8 to avoid wraparound
            image_np = (image_tensor.cpu().numpy().clip(0.0, 1.0) * 255).astype(np.uint8)
            # Remove the batch dimension; shape becomes (1024, 1024, 3)
            image_np = image_np[0]
            pil_image = Image.fromarray(image_np)
            return pil_image
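

# One possible direct call, bypassing the UI (e.g. from a notebook);
# "https://example.com" is just a placeholder URL:
#     image = generate_qr_code("a snowy mountain village", "https://example.com")
#     image.save("qr_art.png")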

if __name__ == "__main__":

    # Start the Gradio app
    with gr.Blocks() as app:
        # Add a title
        gr.Markdown("# QR Code Art Generator")

        with gr.Row():
            with gr.Column():
                # Inputs
                prompt_input = gr.Textbox(
                    label="Prompt",
                    placeholder="Enter your prompt here...",
                    value="some clothes spread on ropes, realistic, great details, out in the open air, sunny day, realistic, great details, absence of people, Detailed and Intricate, CGI, Photoshoot, rim light, 8k, 16k, ultra detail",
                )
                url_input = gr.Textbox(
                    label="URL for QR Code",
                    placeholder="Enter URL to convert to QR code...",
                    value="https://www.linkedin.com/in/dmytro-kisil/",
                )
                # The generate button
                generate_btn = gr.Button("Generate")

            with gr.Column():
                # The output image
                output_image = gr.Image(label="Generated QR Code Art")

        # Clicking the button runs the full two-pass generation
        generate_btn.click(
            fn=generate_qr_code,
            inputs=[prompt_input, url_input],
            outputs=[output_image],
        )

    app.launch(share=False, mcp_server=True)