Update app.py
app.py
CHANGED
@@ -6,6 +6,7 @@ import torch
 import gradio as gr
 from PIL import Image
 from huggingface_hub import hf_hub_download
+import spaces
 
 hf_hub_download(repo_id="black-forest-labs/FLUX.1-Redux-dev", filename="flux1-redux-dev.safetensors", local_dir="models/style_models")
 hf_hub_download(repo_id="black-forest-labs/FLUX.1-Depth-dev", filename="flux1-depth-dev.safetensors", local_dir="models/diffusion_models")
@@ -88,72 +89,73 @@ from nodes import (
 import_custom_nodes()
 
 # Global variables for preloaded models and constants
-with torch.inference_mode():
-    # Initialize constants
-    [… 61 further indented lines, not preserved in this rendering of the diff …]
+#with torch.inference_mode():
+# Initialize constants
+intconstant = NODE_CLASS_MAPPINGS["INTConstant"]()
+CONST_1024 = intconstant.get_value(value=1024)
+
+# Load CLIP
+dualcliploader = DualCLIPLoader()
+CLIP_MODEL = dualcliploader.load_clip(
+    clip_name1="t5/t5xxl_fp16.safetensors",
+    clip_name2="clip_l.safetensors",
+    type="flux",
+)
+
+# Load VAE
+vaeloader = VAELoader()
+VAE_MODEL = vaeloader.load_vae(vae_name="FLUX1/ae.safetensors")
+
+# Load UNET
+unetloader = UNETLoader()
+UNET_MODEL = unetloader.load_unet(
+    unet_name="flux1-depth-dev.safetensors", weight_dtype="default"
+)
+
+# Load CLIP Vision
+clipvisionloader = CLIPVisionLoader()
+CLIP_VISION_MODEL = clipvisionloader.load_clip(
+    clip_name="sigclip_vision_patch14_384.safetensors"
+)
+
+# Load Style Model
+stylemodelloader = StyleModelLoader()
+STYLE_MODEL = stylemodelloader.load_style_model(
+    style_model_name="flux1-redux-dev.safetensors"
+)
+
+# Initialize samplers
+ksamplerselect = NODE_CLASS_MAPPINGS["KSamplerSelect"]()
+SAMPLER = ksamplerselect.get_sampler(sampler_name="euler")
+
+# Initialize depth model
+cr_clip_input_switch = NODE_CLASS_MAPPINGS["CR Clip Input Switch"]()
+downloadandloaddepthanythingv2model = NODE_CLASS_MAPPINGS["DownloadAndLoadDepthAnythingV2Model"]()
+DEPTH_MODEL = downloadandloaddepthanythingv2model.loadmodel(
+    model="depth_anything_v2_vitl_fp32.safetensors"
+)
+cliptextencode = CLIPTextEncode()
+loadimage = LoadImage()
+vaeencode = VAEEncode()
+fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
+instructpixtopixconditioning = NODE_CLASS_MAPPINGS["InstructPixToPixConditioning"]()
+clipvisionencode = CLIPVisionEncode()
+stylemodelapplyadvanced = NODE_CLASS_MAPPINGS["StyleModelApplyAdvanced"]()
+emptylatentimage = EmptyLatentImage()
+basicguider = NODE_CLASS_MAPPINGS["BasicGuider"]()
+basicscheduler = NODE_CLASS_MAPPINGS["BasicScheduler"]()
+randomnoise = NODE_CLASS_MAPPINGS["RandomNoise"]()
+samplercustomadvanced = NODE_CLASS_MAPPINGS["SamplerCustomAdvanced"]()
+vaedecode = VAEDecode()
+cr_text = NODE_CLASS_MAPPINGS["CR Text"]()
+saveimage = SaveImage()
+getimagesizeandcount = NODE_CLASS_MAPPINGS["GetImageSizeAndCount"]()
+depthanything_v2 = NODE_CLASS_MAPPINGS["DepthAnything_V2"]()
+imageresize = NODE_CLASS_MAPPINGS["ImageResize+"]()
+
+@spaces.GPU
 def generate_image(prompt: str, structure_image: str, depth_strength: float, style_image: str, style_strength: float, progress=gr.Progress(track_tqdm=True)) -> str:
     """Main generation function that processes inputs and returns the path to the generated image."""
-
     with torch.inference_mode():
         # Set up CLIP
         clip_switch = cr_clip_input_switch.switch(
|