import sys
import os
from pathlib import Path
import gc
import traceback
# Add the StableCascade and CSD directories to the Python path
app_dir = Path(__file__).parent
sys.path.extend([
    str(app_dir),
    str(app_dir / "third_party" / "StableCascade"),
    str(app_dir / "third_party" / "CSD"),
])
import yaml
import torch
from tqdm import tqdm
from accelerate.utils import set_module_tensor_to_device
import torch.nn.functional as F
import torchvision.transforms as T
from lang_sam import LangSAM
from inference.utils import *  # star import supplies helpers used below, e.g. calculate_latent_sizes and resize_image
from core.utils import load_or_fail
from train import WurstCoreC, WurstCoreB
from gdf_rbm import RBM
from stage_c_rbm import StageCRBM
from utils import WurstCoreCRBM
from gdf.schedulers import CosineSchedule
from gdf import VPScaler, CosineTNoiseCond, DDPMSampler, P2LossWeight, AdaptiveLossWeight
from gdf.targets import EpsilonTarget
import PIL.Image  # import the Image submodule explicitly; a bare "import PIL" may not expose PIL.Image
# Allow TF32 for matmul and cuDNN (faster on Ampere+ GPUs with negligible precision loss)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
# Device configuration
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Flag for low VRAM usage
low_vram = True # Set to True to enable low VRAM optimizations
# Free GPU memory: collect Python garbage first so dangling tensor
# references are released before the CUDA caching allocator is emptied
def clear_gpu_cache():
    gc.collect()
    torch.cuda.empty_cache()
# Helpers to move a model between CPU and GPU
def to_cpu(model):
    return model.cpu()

def to_gpu(model):
    return model.cuda()
# Bulk offloading helper, defined only when low-VRAM mode is enabled
if low_vram:
    def models_to(model, device="cpu", excepts=None):
        """
        Move every nn.Module attribute of `model` to `device`,
        skipping attribute names listed in `excepts`.
        """
        for attr_name in dir(model):
            if attr_name.startswith('__') and attr_name.endswith('__'):
                continue  # skip special attributes
            attr_value = getattr(model, attr_name, None)
            if isinstance(attr_value, torch.nn.Module):
                if excepts and attr_name in excepts:
                    print(f"Skipping '{attr_name}'")
                    continue
                print(f"Moving '{attr_name}' to {device}")
                attr_value.to(device)
        clear_gpu_cache()
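# Example (hypothetical usage): park everything on the CPU except the modules
# the sampler touches, then bring the bundle back once sampling is done.
#   models_to(models_rbm, device="cpu", excepts=["generator", "previewer"])
#   ...run sampling...
#   models_to(models_rbm, device="cuda")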
# Load inference configurations for Stage C and Stage B
config_file = 'third_party/StableCascade/configs/inference/stage_c_3b.yaml'
with open(config_file, "r", encoding="utf-8") as file:
    loaded_config = yaml.safe_load(file)

config_file_b = 'third_party/StableCascade/configs/inference/stage_b_3b.yaml'
with open(config_file_b, "r", encoding="utf-8") as file:
    loaded_config_b = yaml.safe_load(file)  # keep the path and the parsed config in separate variables
def initialize_models():
    global models_rbm, models_b, extras, extras_b, core, core_b

    # Drop any existing models and free GPU memory before (re)loading
    models_rbm = None
    models_b = None
    extras = None
    extras_b = None
    clear_gpu_cache()

    # Initialize the Stage C (RBM variant) and Stage B cores
    core = WurstCoreCRBM(config_dict=loaded_config, device=device, training=False)
    core_b = WurstCoreB(config_dict=loaded_config_b, device=device, training=False)

    extras = core.setup_extras_pre()
    models = core.setup_models(extras)

    extras_b = core_b.setup_extras_pre()
    models_b = core_b.setup_models(extras_b, skip_clip=True)
    # Stage B reuses Stage C's tokenizer and text encoder
    models_b = WurstCoreB.Models(
        **{**models_b.to_dict(), 'tokenizer': models.tokenizer, 'text_model': models.text_model}
    )

    # Build the RBM Stage C generator from the checkpoint
    generator_rbm = StageCRBM()
    for param_name, param in load_or_fail(core.config.generator_checkpoint_path).items():
        set_module_tensor_to_device(generator_rbm, param_name, "cpu", value=param)

    generator_rbm = generator_rbm.to(getattr(torch, core.config.dtype)).to(device)
    generator_rbm = core.load_model(generator_rbm, 'generator')

    models_rbm = core.Models(
        effnet=models.effnet,
        previewer=models.previewer,
        generator=generator_rbm,
        generator_ema=models.generator_ema,
        tokenizer=models.tokenizer,
        text_model=models.text_model,
        image_model=models.image_model
    )

    # Set generators to eval mode and freeze their weights
    models_rbm.generator.to(device).eval().requires_grad_(False)
    models_b.generator.to(device).eval().requires_grad_(False)

    clear_gpu_cache()
def infer(style_description, ref_style_file, caption):
    # Note: style_description is currently unused; conditioning comes from
    # the caption and the reference style image.
    try:
        clear_gpu_cache()  # Clear GPU cache before inference

        # Ensure models are on the correct device
        models_rbm.to(device)
        models_b.to(device)

        height = 1024
        width = 1024
        batch_size = 1
        output_file = 'output.png'

        stage_c_latent_shape, stage_b_latent_shape = calculate_latent_sizes(height, width, batch_size=batch_size)

        # Load and preprocess the reference style image
        ref_style = resize_image(PIL.Image.open(ref_style_file).convert("RGB")).unsqueeze(0).expand(batch_size, -1, -1, -1).to(device)

        batch = {'captions': [caption] * batch_size}
        batch['style'] = ref_style

        # EfficientNet embedding of the style image, used for style guidance
        x0_style_forward = models_rbm.effnet(extras.effnet_preprocess(ref_style))

        conditions = core.get_conditions(batch, models_rbm, extras, is_eval=True, is_unconditional=False, eval_image_embeds=True, eval_style=True, eval_csd=False)
        unconditions = core.get_conditions(batch, models_rbm, extras, is_eval=True, is_unconditional=True, eval_image_embeds=False)
        conditions_b = core_b.get_conditions(batch, models_b, extras_b, is_eval=True, is_unconditional=False)
        unconditions_b = core_b.get_conditions(batch, models_b, extras_b, is_eval=True, is_unconditional=True)

        if low_vram:
            # Offload non-essential models to CPU for memory savings
            models_to(models_rbm, device="cpu", excepts=["generator", "previewer"])

        # Stage C reverse process
        with torch.cuda.amp.autocast():
            sampling_c = extras.gdf.sample(
                models_rbm.generator, conditions, stage_c_latent_shape,
                unconditions, device=device,
                **extras.sampling_configs,
                x0_style_forward=x0_style_forward,
                apply_pushforward=False, tau_pushforward=8,
                num_iter=3, eta=0.1, tau=20, eval_csd=True,
                extras=extras, models=models_rbm,
                lam_style=1, lam_txt_alignment=1.0,
                use_ddim_sampler=True,
            )
            # Run the sampler to completion; sampled_c holds the final latent
            for (sampled_c, _, _) in tqdm(sampling_c, total=extras.sampling_configs['timesteps']):
                pass

        clear_gpu_cache()  # Clear cache between stages

        # Ensure models_b is on the correct device
        models_b.to(device)

        # Stage B reverse process
        with torch.no_grad(), torch.cuda.amp.autocast(dtype=torch.bfloat16):
            conditions_b['effnet'] = sampled_c
            unconditions_b['effnet'] = torch.zeros_like(sampled_c)

            sampling_b = extras_b.gdf.sample(
                models_b.generator, conditions_b, stage_b_latent_shape,
                unconditions_b, device=device, **extras_b.sampling_configs,
            )
            # Run the sampler to completion; sampled_b holds the final latent
            for (sampled_b, _, _) in tqdm(sampling_b, total=extras_b.sampling_configs['timesteps']):
                pass

            sampled = models_b.stage_a.decode(sampled_b).float()

        # Post-process and save the image
        sampled = sampled.cpu()  # Move to CPU before processing

        # Ensure the tensor is in [C, H, W] format
        if sampled.dim() == 4 and sampled.size(0) == 1:
            sampled = sampled.squeeze(0)

        if sampled.dim() == 3 and sampled.shape[0] == 3:
            sampled_image = T.ToPILImage()(sampled)  # Convert tensor to PIL image
            sampled_image.save(output_file)  # Save the image as a PNG
        else:
            raise ValueError(f"Expected tensor of shape [3, H, W] but got {sampled.shape}")

    except Exception as e:
        print(f"An error occurred during inference: {e}")
        traceback.print_exc()  # Print the full traceback
        return None
    finally:
        clear_gpu_cache()  # Always clear cache after inference

    return output_file  # Path to the saved image
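# Example usage (hypothetical inputs), e.g. from a Python shell:
#   initialize_models()
#   infer("watercolor", "ref_style.png", "a lighthouse at dusk")
#   # -> writes 'output.png' and returns its path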
import gradio as gr
def gradio_interface(style_description, ref_style_file, caption):
    return infer(style_description, ref_style_file, caption)
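# Load all model weights once at startup; infer() depends on the module-level
# globals created here, so this must run before the first request is served.
initialize_models()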
gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Style Description"),
        gr.Image(label="Reference Style Image", type="filepath"),
        gr.Textbox(label="Caption"),
    ],
    outputs=[gr.Image(label="Generated Image")],
).launch()