Create app.py
app.py
ADDED
import random
import torch
import numpy as np
from PIL import Image
import gradio as gr
from nodes import NODE_CLASS_MAPPINGS
from totoro_extras import nodes_custom_sampler
from totoro_extras import nodes_flux

# Set device to GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
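# Note: the ComfyUI-style nodes below return model-patcher/wrapper objects, not
# raw torch modules; the framework's model management is assumed to handle
# weight placement on this device, so their outputs are not moved with .to().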

# Instantiate the node classes used by the pipeline
CheckpointLoaderSimple = NODE_CLASS_MAPPINGS["CheckpointLoaderSimple"]()
LoraLoader = NODE_CLASS_MAPPINGS["LoraLoader"]()
FluxGuidance = nodes_flux.NODE_CLASS_MAPPINGS["FluxGuidance"]()
RandomNoise = nodes_custom_sampler.NODE_CLASS_MAPPINGS["RandomNoise"]()
BasicGuider = nodes_custom_sampler.NODE_CLASS_MAPPINGS["BasicGuider"]()
KSamplerSelect = nodes_custom_sampler.NODE_CLASS_MAPPINGS["KSamplerSelect"]()
BasicScheduler = nodes_custom_sampler.NODE_CLASS_MAPPINGS["BasicScheduler"]()
SamplerCustomAdvanced = nodes_custom_sampler.NODE_CLASS_MAPPINGS["SamplerCustomAdvanced"]()
VAELoader = NODE_CLASS_MAPPINGS["VAELoader"]()
VAEDecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
EmptyLatentImage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()

# Load the all-in-one checkpoint (UNet, CLIP text encoders, and VAE)
with torch.inference_mode():
    unet, clip, vae = CheckpointLoaderSimple.load_checkpoint("flux1-dev-fp8-all-in-one.safetensors")

# Find the multiple of m that is closest to n
def closestNumber(n, m):
    q = int(n / m)
    n1 = m * q
    if (n * m) > 0:
        n2 = m * (q + 1)
    else:
        n2 = m * (q - 1)
    if abs(n - n1) < abs(n - n2):
        return n1
    return n2
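# e.g. closestNumber(1000, 16) -> 1008 and closestNumber(512, 16) -> 512, so
# arbitrary slider values snap to latent-friendly multiples of 16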

# Main generation function
@torch.inference_mode()
def generate(positive_prompt, width, height, seed, steps, sampler_name, scheduler, guidance, lora_strength_model, lora_strength_clip):
    global unet, clip
    # Seed 0 means "randomize"; the upper bound is 2**64 - 1
    if seed == 0:
        seed = random.randint(0, 18446744073709551615)
    print(f"Using seed: {seed}")

    # Patch the base model and text encoder with the realism LoRA
    unet_lora, clip_lora = LoraLoader.load_lora(unet, clip, "flux_realism_lora.safetensors", lora_strength_model, lora_strength_clip)

    # Encode prompt and apply guidance
    cond, pooled = clip_lora.encode_from_tokens(clip_lora.tokenize(positive_prompt), return_pooled=True)
    cond = [[cond, {"pooled_output": pooled}]]
    cond = FluxGuidance.append(cond, guidance)[0]
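    # (ComfyUI-style conditioning is a list of [embedding, options] pairs; the
    # pooled projection and the guidance value travel in the options dict)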
62 |
+
|
63 |
+
# Generate noise and move it to the GPU
|
64 |
+
noise = RandomNoise.get_noise(seed)[0].to(device)
|

    # Set up the guider and sampler
    guider = BasicGuider.get_guider(unet_lora, cond)[0]
    sampler = KSamplerSelect.get_sampler(sampler_name)[0]
69 |
+
|
70 |
+
# Generate sigmas and latent image
|
71 |
+
sigmas = BasicScheduler.get_sigmas(unet_lora, scheduler, steps, 1.0)[0]
|
72 |
+
latent_image = EmptyLatentImage.generate(closestNumber(width, 16), closestNumber(height, 16))[0].to(device)
|
73 |
+
|
74 |
+
# Perform sampling
|
75 |
+
sample, sample_denoised = SamplerCustomAdvanced.sample(noise, guider, sampler, sigmas, latent_image)
|
76 |
+
|
77 |
+
# Decode the latent image to a regular image
|
78 |
+
decoded = VAEDecode.decode(vae, sample)[0].detach().cpu()
|
79 |
+
|
80 |
+
# Convert to image and save
|
81 |
+
output_image = Image.fromarray(np.array(decoded * 255, dtype=np.uint8)[0])
|
82 |
+
output_image.save("/content/flux.png")
|
83 |
+
return "/content/flux.png"
|
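
# The "/content" output path assumes a Colab-style filesystem; adjust it when
# running elsewhere.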

# Set up the Gradio interface
with gr.Blocks(analytics_enabled=False) as demo:
    with gr.Row():
        with gr.Column():
            positive_prompt = gr.Textbox(lines=3, interactive=True, value="cute anime girl with massive fluffy fennec ears and a big fluffy tail blonde messy long hair blue eyes wearing a maid outfit with a long black dress with a gold leaf pattern and a white apron eating a slice of an apple pie in the kitchen of an old dark victorian mansion with a bright window and very expensive stuff everywhere", label="Prompt")
            width = gr.Slider(minimum=256, maximum=2048, value=1024, step=16, label="width")
            height = gr.Slider(minimum=256, maximum=2048, value=1024, step=16, label="height")
            seed = gr.Slider(minimum=0, maximum=18446744073709551615, value=0, step=1, label="seed (0=random)")
            steps = gr.Slider(minimum=4, maximum=50, value=20, step=1, label="steps")
            guidance = gr.Slider(minimum=0, maximum=20, value=3.5, step=0.5, label="guidance")
            lora_strength_model = gr.Slider(minimum=0, maximum=1, value=1.0, step=0.1, label="lora_strength_model")
            lora_strength_clip = gr.Slider(minimum=0, maximum=1, value=1.0, step=0.1, label="lora_strength_clip")
            sampler_name = gr.Dropdown(["euler", "heun", "heunpp2", "dpm_2", "lms", "dpmpp_2m", "ipndm", "deis", "ddim", "uni_pc", "uni_pc_bh2"], label="sampler_name", value="euler")
            scheduler = gr.Dropdown(["normal", "sgm_uniform", "simple", "ddim_uniform"], label="scheduler", value="simple")
            generate_button = gr.Button("Generate")
        with gr.Column():
            output_image = gr.Image(label="Generated image", interactive=False)

    generate_button.click(fn=generate, inputs=[positive_prompt, width, height, seed, steps, sampler_name, scheduler, guidance, lora_strength_model, lora_strength_clip], outputs=output_image)

# Launch the Gradio interface
demo.queue().launch(inline=False, share=True, debug=True)
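
# share=True asks Gradio to create a temporary public link, which is how the
# UI is reached when this runs in a hosted notebook.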