import os
import sys

import comfy.samplers
import comfy.sd
import warnings
from segment_anything import sam_model_registry
from io import BytesIO
import piexif
import zipfile
import re
import impact.wildcards

from impact.utils import *
import impact.core as core
from impact.core import SEG
from impact.config import latent_letter_path
from nodes import MAX_RESOLUTION
from PIL import Image, ImageOps
import numpy as np
import hashlib
import json
import safetensors.torch
from PIL.PngImagePlugin import PngInfo
import comfy.model_management
import base64
import impact.wildcards as wildcards
from . import hooks
from . import utils
import inspect

# ComfyUI core modules referenced throughout this file
import torch
import folder_paths
import nodes

try:
    from comfy_extras import nodes_differential_diffusion
except Exception:
    print("\n#############################################\n[Impact Pack] ComfyUI is an outdated version.\n#############################################\n")
    raise Exception("[Impact Pack] ComfyUI is an outdated version.")

warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated')

model_path = folder_paths.models_dir

# folder_paths.supported_pt_extensions
add_folder_path_and_extensions("mmdets_bbox", [os.path.join(model_path, "mmdets", "bbox")], folder_paths.supported_pt_extensions)
add_folder_path_and_extensions("mmdets_segm", [os.path.join(model_path, "mmdets", "segm")], folder_paths.supported_pt_extensions)
add_folder_path_and_extensions("mmdets", [os.path.join(model_path, "mmdets")], folder_paths.supported_pt_extensions)
add_folder_path_and_extensions("sams", [os.path.join(model_path, "sams")], folder_paths.supported_pt_extensions)
add_folder_path_and_extensions("onnx", [os.path.join(model_path, "onnx")], {'.onnx'})
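# Once these folders are registered, model files are enumerated and resolved through
# the standard folder_paths API used by the loader nodes below. Illustrative sketch
# (hypothetical filename; not executed here):
#
#   sam_files = folder_paths.get_filename_list("sams")           # e.g. ['sam_vit_b_01ec64.pth']
#   full_path = folder_paths.get_full_path("sams", sam_files[0])
#
# `add_folder_path_and_extensions` itself comes from `impact.utils` (star-imported above).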
# Nodes

class ONNXDetectorProvider:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model_name": (folder_paths.get_filename_list("onnx"), )}}

    RETURN_TYPES = ("BBOX_DETECTOR", )
    FUNCTION = "load_onnx"

    CATEGORY = "ImpactPack"

    def load_onnx(self, model_name):
        model = folder_paths.get_full_path("onnx", model_name)
        return (core.ONNXDetector(model), )

class CLIPSegDetectorProvider:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "text": ("STRING", {"multiline": False, "tooltip": "Enter the targets to be detected, separated by commas"}),
                    "blur": ("FLOAT", {"min": 0, "max": 15, "step": 0.1, "default": 7, "tooltip": "Blurs the detected mask"}),
                    "threshold": ("FLOAT", {"min": 0, "max": 1, "step": 0.05, "default": 0.4, "tooltip": "Detects only areas with confidence above the threshold."}),
                    "dilation_factor": ("INT", {"min": 0, "max": 10, "step": 1, "default": 4, "tooltip": "Dilates the detected mask."}),
                }}

    RETURN_TYPES = ("BBOX_DETECTOR", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    DESCRIPTION = "Provides a detection function using CLIPSeg, which generates masks based on text prompts.\nTo use this node, the CLIPSeg custom node must be installed."

    def doit(self, text, blur, threshold, dilation_factor):
        if "CLIPSeg" in nodes.NODE_CLASS_MAPPINGS:
            return (core.BBoxDetectorBasedOnCLIPSeg(text, blur, threshold, dilation_factor), )
        else:
            print("[ERROR] CLIPSegToBboxDetector: CLIPSeg custom node isn't installed. You must install the biegert/ComfyUI-CLIPSeg extension to use this node.")

class SAMLoader:
    @classmethod
    def INPUT_TYPES(cls):
        models = [x for x in folder_paths.get_filename_list("sams") if 'hq' not in x]

        if 'ESAM_ModelLoader_Zho' in nodes.NODE_CLASS_MAPPINGS:
            models.append('ESAM')

        return {
            "required": {
                "model_name": (models, {"tooltip": "The detection accuracy varies depending on the SAM model. ESAM can only be used if ComfyUI-YoloWorld-EfficientSAM is installed."}),
                "device_mode": (["AUTO", "Prefer GPU", "CPU"], {"tooltip": "AUTO: Only applicable when a GPU is available. It temporarily loads the SAM_MODEL into VRAM only while the detection function is used.\n"
                                                                           "Prefer GPU: Tries to keep the SAM_MODEL on the GPU whenever possible. This can be used when there is sufficient VRAM available.\n"
                                                                           "CPU: Always loads only on the CPU."}),
            }
        }

    RETURN_TYPES = ("SAM_MODEL", )
    FUNCTION = "load_model"

    CATEGORY = "ImpactPack"

    DESCRIPTION = "Load the SAM (Segment Anything) model. This can be used in places that utilize SAM detection functionality, such as SAMDetector or SimpleDetector.\nThe SAM detection functionality in Impact Pack must use the SAM_MODEL loaded through this node."

    def load_model(self, model_name, device_mode="AUTO"):
        if model_name == 'ESAM':
            if 'ESAM_ModelLoader_Zho' not in nodes.NODE_CLASS_MAPPINGS:
                try_install_custom_node('https://github.com/ZHO-ZHO-ZHO/ComfyUI-YoloWorld-EfficientSAM',
                                        "To use the 'ESAM' model, the 'ComfyUI-YoloWorld-EfficientSAM' extension is required.")
                raise Exception("'ComfyUI-YoloWorld-EfficientSAM' node isn't installed.")

            esam_loader = nodes.NODE_CLASS_MAPPINGS['ESAM_ModelLoader_Zho']()

            if device_mode == 'CPU':
                esam = esam_loader.load_esam_model('CPU')[0]
            else:
                device_mode = 'CUDA'
                esam = esam_loader.load_esam_model('CUDA')[0]

            sam_obj = core.ESAMWrapper(esam, device_mode)
            esam.sam_wrapper = sam_obj

            print(f"Loaded EfficientSAM model (device:{device_mode})")
            return (esam, )

        modelname = folder_paths.get_full_path("sams", model_name)

        if 'vit_h' in model_name:
            model_kind = 'vit_h'
        elif 'vit_l' in model_name:
            model_kind = 'vit_l'
        else:
            model_kind = 'vit_b'

        sam = sam_model_registry[model_kind](checkpoint=modelname)
        size = os.path.getsize(modelname)
        safe_to = core.SafeToGPU(size)

        # 'Prefer GPU' loads the model onto the GPU immediately;
        # 'AUTO' moves it to VRAM lazily, only while detection runs.
        device = comfy.model_management.get_torch_device() if device_mode == "Prefer GPU" else "CPU"

        if device_mode == "Prefer GPU":
            safe_to.to_device(sam, device)

        is_auto_mode = device_mode == "AUTO"

        sam_obj = core.SAMWrapper(sam, is_auto_mode=is_auto_mode, safe_to_gpu=safe_to)
        sam.sam_wrapper = sam_obj

        print(f"Loaded SAM model: {modelname} (device:{device_mode})")
        return (sam, )

class ONNXDetectorForEach:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "onnx_detector": ("ONNX_DETECTOR",),
                    "image": ("IMAGE",),
                    "threshold": ("FLOAT", {"default": 0.8, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
                    "crop_factor": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 100, "step": 0.1}),
                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
                }}

    RETURN_TYPES = ("SEGS", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detector"

    OUTPUT_NODE = True

    def doit(self, onnx_detector, image, threshold, dilation, crop_factor, drop_size):
        segs = onnx_detector.detect(image, threshold, dilation, crop_factor, drop_size)
        return (segs, )

class DetailerForEach:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "image": ("IMAGE", ),
                    "segs": ("SEGS", ),
                    "model": ("MODEL", {"tooltip": "If the `ImpactDummyInput` is connected to the model, the inference stage is skipped."}),
                    "clip": ("CLIP",),
                    "vae": ("VAE",),
                    "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}),
                    "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                    "scheduler": (core.SCHEDULERS,),
                    "positive": ("CONDITIONING",),
                    "negative": ("CONDITIONING",),
                    "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}),
                    "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
                    "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                    "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
                },
                "optional": {
                    "detailer_hook": ("DETAILER_HOOK",),
                    "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}),
                    "scheduler_func_opt": ("SCHEDULER_FUNC",),
                    "tiled_encode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "tiled_decode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                }}

    RETURN_TYPES = ("IMAGE", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    DESCRIPTION = "Enhances details by inpainting each region in the detected-area bundle (SEGS), after enlarging each region based on the guide size."

    @staticmethod
    def get_core_module():
        return core

    @staticmethod
    def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler,
                  positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard_opt=None, detailer_hook=None,
                  refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None,
                  cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False):

        if len(image) > 1:
            raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.')

        image = image.clone()
        enhanced_alpha_list = []
        enhanced_list = []
        cropped_list = []
        cnet_pil_list = []

        segs = core.segs_scale_match(segs, image.shape)
        new_segs = []

        wildcard_concat_mode = None
        if wildcard_opt is not None:
            if wildcard_opt.startswith('[CONCAT]'):
                wildcard_concat_mode = 'concat'
                wildcard_opt = wildcard_opt[8:]
            wmode, wildcard_chooser = wildcards.process_wildcard_for_segs(wildcard_opt)
        else:
            wmode, wildcard_chooser = None, None

        if wmode in ['ASC', 'DSC', 'ASC-SIZE', 'DSC-SIZE']:
            if wmode == 'ASC':
                ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[0], x.bbox[1]))
            elif wmode == 'DSC':
                ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[0], x.bbox[1]), reverse=True)
            elif wmode == 'ASC-SIZE':
                ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[2]-x.bbox[0]) * (x.bbox[3]-x.bbox[1]))
            else:  # wmode == 'DSC-SIZE'
                ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[2]-x.bbox[0]) * (x.bbox[3]-x.bbox[1]), reverse=True)
        else:
            ordered_segs = segs[1]
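        # Summary of the wildcard control tokens handled by this function (behavior as
        # implemented here; the full grammar lives in impact.wildcards):
        #   [CONCAT]               - prefix: concatenate the wildcard prompt with the
        #                            existing one instead of replacing it.
        #   [ASC] / [DSC]          - process segments ordered by bbox position.
        #   [ASC-SIZE] / [DSC-SIZE] - process segments ordered by bbox area.
        #   [SKIP] / [STOP]        - per-segment: skip that segment / stop processing
        #                            entirely (checked inside the loop below).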
        if not (isinstance(model, str) and model == "DUMMY") and noise_mask_feather > 0 and 'denoise_mask_function' not in model.model_options:
            model = nodes_differential_diffusion.DifferentialDiffusion().apply(model)[0]

        for i, seg in enumerate(ordered_segs):
            cropped_image = crop_ndarray4(image.cpu().numpy(), seg.crop_region)  # never use seg.cropped_image: crop from the live image so overlapping areas reflect earlier pastes
            cropped_image = to_tensor(cropped_image)
            mask = to_tensor(seg.cropped_mask)
            mask = tensor_gaussian_blur_mask(mask, feather)

            is_mask_all_zeros = (seg.cropped_mask == 0).all().item()
            if is_mask_all_zeros:
                print("Detailer: segment skip [empty mask]")
                continue

            if noise_mask:
                cropped_mask = seg.cropped_mask
            else:
                cropped_mask = None

            if wildcard_chooser is not None and wmode != "LAB":
                seg_seed, wildcard_item = wildcard_chooser.get(seg)
            elif wildcard_chooser is not None and wmode == "LAB":
                seg_seed, wildcard_item = None, wildcard_chooser.get(seg)
            else:
                seg_seed, wildcard_item = None, None

            seg_seed = seed + i if seg_seed is None else seg_seed

            if not isinstance(positive, str):
                cropped_positive = [
                    [condition, {
                        k: core.crop_condition_mask(v, image, seg.crop_region) if k == "mask" else v
                        for k, v in details.items()
                    }]
                    for condition, details in positive
                ]
            else:
                cropped_positive = positive

            if not isinstance(negative, str):
                cropped_negative = [
                    [condition, {
                        k: core.crop_condition_mask(v, image, seg.crop_region) if k == "mask" else v
                        for k, v in details.items()
                    }]
                    for condition, details in negative
                ]
            else:
                # The negative conditioning is a placeholder (e.g., for models such as FLUX.1 that don't use one)
                cropped_negative = negative
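            # Shape assumed for ComfyUI CONDITIONING by the comprehensions above:
            # a list of [cond_tensor, options_dict] pairs, where options_dict may carry
            # a "mask" entry for area-restricted conditioning. Illustrative sketch
            # (hypothetical values, not executed here):
            #
            #   positive = [[cond, {"mask": full_mask, "pooled_output": pooled}], ...]
            #
            # Only the "mask" entry is re-cropped to the segment's crop_region; every
            # other option is passed through unchanged.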
            if wildcard_item and wildcard_item.strip() == '[SKIP]':
                continue

            if wildcard_item and wildcard_item.strip() == '[STOP]':
                break

            orig_cropped_image = cropped_image.clone()

            if not (isinstance(model, str) and model == "DUMMY"):
                enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size,
                                                                seg.bbox, seg_seed, steps, cfg, sampler_name, scheduler,
                                                                cropped_positive, cropped_negative, denoise, cropped_mask, force_inpaint,
                                                                wildcard_opt=wildcard_item, wildcard_opt_concat_mode=wildcard_concat_mode,
                                                                detailer_hook=detailer_hook,
                                                                refiner_ratio=refiner_ratio, refiner_model=refiner_model,
                                                                refiner_clip=refiner_clip, refiner_positive=refiner_positive,
                                                                refiner_negative=refiner_negative, control_net_wrapper=seg.control_net_wrapper,
                                                                cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather,
                                                                scheduler_func=scheduler_func_opt, vae_tiled_encode=tiled_encode,
                                                                vae_tiled_decode=tiled_decode)
            else:
                enhanced_image = cropped_image
                cnet_pils = None

            if cnet_pils is not None:
                cnet_pil_list.extend(cnet_pils)

            if enhanced_image is not None:
                # Don't composite in latent space: converting to latent caused poor quality.
                # Paste in pixel space instead.
                image = image.cpu()
                enhanced_image = enhanced_image.cpu()
                tensor_paste(image, enhanced_image, (seg.crop_region[0], seg.crop_region[1]), mask)  # NOTE: this paste also affects `cropped_image`
                enhanced_list.append(enhanced_image)

                if detailer_hook is not None:
                    image = detailer_hook.post_paste(image)

            if enhanced_image is not None:
                # Convert the enhanced image to RGBA and attach the mask as its alpha channel
                enhanced_image_alpha = tensor_convert_rgba(enhanced_image)
                new_seg_image = enhanced_image.numpy()  # alpha should not be applied to seg_image

                # Apply the mask
                mask = tensor_resize(mask, *tensor_get_size(enhanced_image))
                tensor_putalpha(enhanced_image_alpha, mask)
                enhanced_alpha_list.append(enhanced_image_alpha)
            else:
                new_seg_image = None

            cropped_list.append(orig_cropped_image)  # NOTE: don't use `cropped_image` (it was modified by the paste above)

            new_seg = SEG(new_seg_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
            new_segs.append(new_seg)

        image_tensor = tensor_convert_rgb(image)

        cropped_list.sort(key=lambda x: x.shape, reverse=True)
        enhanced_list.sort(key=lambda x: x.shape, reverse=True)
        enhanced_alpha_list.sort(key=lambda x: x.shape, reverse=True)

        return image_tensor, cropped_list, enhanced_list, enhanced_alpha_list, cnet_pil_list, (segs[0], new_segs)
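
    # `do_detail` returns a 6-tuple, consumed by `doit` below and by the Pipe/FaceDetailer variants:
    #   (full_image_tensor, cropped_list, enhanced_list, enhanced_alpha_list,
    #    cnet_pil_list, new_segs)
    # where `new_segs` is a SEGS value: (original_segs_header, list_of_SEG).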
    def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name,
             scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, cycle=1,
             detailer_hook=None, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None,
             tiled_encode=False, tiled_decode=False):

        enhanced_img, *_ = \
            DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps,
                                      cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask,
                                      force_inpaint, wildcard, detailer_hook,
                                      cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather,
                                      scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode)

        return (enhanced_img, )

class DetailerForEachPipe:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "image": ("IMAGE", ),
                    "segs": ("SEGS", ),
                    "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}),
                    "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                    "scheduler": (core.SCHEDULERS,),
                    "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}),
                    "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
                    "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "basic_pipe": ("BASIC_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the basic_pipe, the inference stage is skipped."}),
                    "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                    "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}),
                    "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
                },
                "optional": {
                    "detailer_hook": ("DETAILER_HOOK",),
                    "refiner_basic_pipe_opt": ("BASIC_PIPE",),
                    "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}),
                    "scheduler_func_opt": ("SCHEDULER_FUNC",),
                    "tiled_encode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "tiled_decode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                }}

    RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE")
    RETURN_NAMES = ("image", "segs", "basic_pipe", "cnet_images")
    OUTPUT_IS_LIST = (False, False, False, True)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    DESCRIPTION = DetailerForEach.DESCRIPTION

    def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler,
             denoise, feather, noise_mask, force_inpaint, basic_pipe, wildcard,
             refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None,
             cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None,
             tiled_encode=False, tiled_decode=False):

        if len(image) > 1:
            raise Exception('[Impact Pack] ERROR: DetailerForEachPipe does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.')

        model, clip, vae, positive, negative = basic_pipe

        if refiner_basic_pipe_opt is None:
            refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None
        else:
            refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt

        enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \
            DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg,
                                      sampler_name, scheduler, positive, negative, denoise, feather, noise_mask,
                                      force_inpaint, wildcard, detailer_hook,
                                      refiner_ratio=refiner_ratio, refiner_model=refiner_model,
                                      refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative,
                                      cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt,
                                      tiled_encode=tiled_encode, tiled_decode=tiled_decode)

        # set fallback image
        if len(cnet_pil_list) == 0:
            cnet_pil_list = [empty_pil_tensor()]

        return enhanced_img, new_segs, basic_pipe, cnet_pil_list
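
# BASIC_PIPE layout, as unpacked in the node above:
#   (model, clip, vae, positive, negative)
# A refiner pipe uses the same layout; its vae slot is ignored here (the `_` above).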

class FaceDetailer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "image": ("IMAGE", ),
                    "model": ("MODEL", {"tooltip": "If the `ImpactDummyInput` is connected to the model, the inference stage is skipped."}),
                    "clip": ("CLIP",),
                    "vae": ("VAE",),
                    "guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}),
                    "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                    "scheduler": (core.SCHEDULERS,),
                    "positive": ("CONDITIONING",),
                    "negative": ("CONDITIONING",),
                    "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}),
                    "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
                    "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "bbox_dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
                    "bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}),
                    "sam_detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"],),
                    "sam_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
                    "sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}),
                    "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "sam_mask_hint_use_negative": (["False", "Small", "Outter"],),
                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
                    "bbox_detector": ("BBOX_DETECTOR", ),
                    "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                    "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
                },
                "optional": {
                    "sam_model_opt": ("SAM_MODEL", ),
                    "segm_detector_opt": ("SEGM_DETECTOR", ),
                    "detailer_hook": ("DETAILER_HOOK",),
                    "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}),
                    "scheduler_func_opt": ("SCHEDULER_FUNC",),
                    "tiled_encode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "tiled_decode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                }}

    RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", "IMAGE")
    RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe", "cnet_images")
    OUTPUT_IS_LIST = (False, True, True, False, False, True)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Simple"

    DESCRIPTION = "This node enhances details by automatically detecting specific objects in the input image using detection models (bbox, segm, sam) and regenerating each detected area after enlarging it based on the guide size.\nAlthough this node is specialized to simplify the commonly used facial detail enhancement workflow, it can also be used for various automatic inpainting purposes depending on the detection model."

    @staticmethod
    def enhance_face(image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler,
                     positive, negative, denoise, feather, noise_mask, force_inpaint,
                     bbox_threshold, bbox_dilation, bbox_crop_factor,
                     sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold,
                     sam_mask_hint_use_negative, drop_size,
                     bbox_detector, segm_detector=None, sam_model_opt=None, wildcard_opt=None, detailer_hook=None,
                     refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, cycle=1,
                     inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False):

        # If the CLIPSeg prompt is empty, fall back to 'face' as the default detection target
        bbox_detector.setAux('face')
        segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size, detailer_hook=detailer_hook)
        bbox_detector.setAux(None)

        # bbox + sam combination
        if sam_model_opt is not None:
            sam_mask = core.make_sam_mask(sam_model_opt, segs, image, sam_detection_hint, sam_dilation,
                                          sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold,
                                          sam_mask_hint_use_negative, )
            segs = core.segs_bitwise_and_mask(segs, sam_mask)

        elif segm_detector is not None:
            segm_segs = segm_detector.detect(image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size)

            if (hasattr(segm_detector, 'override_bbox_by_segm') and segm_detector.override_bbox_by_segm and
                    not (detailer_hook is not None and not hasattr(detailer_hook, 'override_bbox_by_segm'))):
                segs = segm_segs
            else:
                segm_mask = core.segs_to_combined_mask(segm_segs)
                segs = core.segs_bitwise_and_mask(segs, segm_mask)

        if len(segs[1]) > 0:
            enhanced_img, _, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \
                DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg,
                                          sampler_name, scheduler, positive, negative, denoise, feather, noise_mask,
                                          force_inpaint, wildcard_opt, detailer_hook,
                                          refiner_ratio=refiner_ratio, refiner_model=refiner_model,
                                          refiner_clip=refiner_clip, refiner_positive=refiner_positive,
                                          refiner_negative=refiner_negative,
                                          cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather,
                                          scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode)
        else:
            enhanced_img = image
            cropped_enhanced = []
            cropped_enhanced_alpha = []
            cnet_pil_list = []

        # Mask Generator
        mask = core.segs_to_combined_mask(segs)

        if len(cropped_enhanced) == 0:
            cropped_enhanced = [empty_pil_tensor()]

        if len(cropped_enhanced_alpha) == 0:
            cropped_enhanced_alpha = [empty_pil_tensor()]

        if len(cnet_pil_list) == 0:
            cnet_pil_list = [empty_pil_tensor()]

        return enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list

    def doit(self, image, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler,
             positive, negative, denoise, feather, noise_mask, force_inpaint,
             bbox_threshold, bbox_dilation, bbox_crop_factor,
             sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold,
             sam_mask_hint_use_negative, drop_size, bbox_detector, wildcard, cycle=1,
             sam_model_opt=None, segm_detector_opt=None, detailer_hook=None, inpaint_model=False, noise_mask_feather=0,
             scheduler_func_opt=None, tiled_encode=False, tiled_decode=False):

        result_img = None
        result_mask = None
        result_cropped_enhanced = []
        result_cropped_enhanced_alpha = []
        result_cnet_images = []

        if len(image) > 1:
            print("[Impact Pack] WARN: FaceDetailer is not designed for video detailing. If you intend to perform video detailing, please use Detailer For AnimateDiff instead.")

        for i, single_image in enumerate(image):
            enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face(
                single_image.unsqueeze(0), model, clip, vae, guide_size, guide_size_for, max_size, seed + i, steps, cfg, sampler_name, scheduler,
                positive, negative, denoise, feather, noise_mask, force_inpaint,
                bbox_threshold, bbox_dilation, bbox_crop_factor,
                sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold,
                sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector_opt, sam_model_opt, wildcard, detailer_hook,
                cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt,
                tiled_encode=tiled_encode, tiled_decode=tiled_decode)

            result_img = torch.cat((result_img, enhanced_img), dim=0) if result_img is not None else enhanced_img
            result_mask = torch.cat((result_mask, mask), dim=0) if result_mask is not None else mask
            result_cropped_enhanced.extend(cropped_enhanced)
            result_cropped_enhanced_alpha.extend(cropped_enhanced_alpha)
            result_cnet_images.extend(cnet_pil_list)

        pipe = (model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, None, None, None, None)
        return result_img, result_cropped_enhanced, result_cropped_enhanced_alpha, result_mask, pipe, result_cnet_images
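
# DETAILER_PIPE layout, as constructed above and unpacked by FaceDetailerPipe below:
#   (model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt,
#    sam_model_opt, detailer_hook, refiner_model, refiner_clip, refiner_positive,
#    refiner_negative)
# FaceDetailer itself has no refiner inputs, so the last four slots are None here.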

class LatentPixelScale:
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "samples": ("LATENT", ),
                    "scale_method": (s.upscale_methods,),
                    "scale_factor": ("FLOAT", {"default": 1.5, "min": 0.1, "max": 10000, "step": 0.1}),
                    "vae": ("VAE", ),
                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                },
                "optional": {
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                }}

    RETURN_TYPES = ("LATENT", "IMAGE")
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, samples, scale_method, scale_factor, vae, use_tiled_vae, upscale_model_opt=None):
        if upscale_model_opt is None:
            latimg = core.latent_upscale_on_pixel_space2(samples, scale_method, scale_factor, vae, use_tile=use_tiled_vae)
        else:
            latimg = core.latent_upscale_on_pixel_space_with_model2(samples, scale_method, upscale_model_opt, scale_factor, vae, use_tile=use_tiled_vae)
        return latimg

class NoiseInjectionDetailerHookProvider:
    schedules = ["skip_start", "from_start"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "schedule_for_cycle": (s.schedules,),
                    "source": (["CPU", "GPU"],),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "start_strength": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 200.0, "step": 0.01}),
                    "end_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}),
                }}

    RETURN_TYPES = ("DETAILER_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    def doit(self, schedule_for_cycle, source, seed, start_strength, end_strength):
        try:
            hook = hooks.InjectNoiseHookForDetailer(source, seed, start_strength, end_strength,
                                                    from_start=('from_start' in schedule_for_cycle))
            return (hook, )
        except Exception as e:
            print("[ERROR] NoiseInjectionDetailerHookProvider: the 'ComfyUI Noise' custom node isn't installed. You must install the 'BlenderNeko/ComfyUI Noise' extension to use this node.")
            print(f"\t{e}")

# class CustomNoiseDetailerHookProvider:
#     @classmethod
#     def INPUT_TYPES(s):
#         return {"required": {
#                     "noise": ("NOISE",)},
#                }
#
#     RETURN_TYPES = ("DETAILER_HOOK",)
#     FUNCTION = "doit"
#
#     CATEGORY = "ImpactPack/Detailer"
#
#     def doit(self, noise):
#         hook = hooks.CustomNoiseDetailerHookProvider(noise)
#         return (hook, )

class VariationNoiseDetailerHookProvider:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01})}
                }

    RETURN_TYPES = ("DETAILER_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    def doit(self, seed, strength):
        hook = hooks.VariationNoiseDetailerHookProvider(seed, strength)
        return (hook, )

class UnsamplerDetailerHookProvider:
    schedules = ["skip_start", "from_start"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                     "steps": ("INT", {"default": 25, "min": 1, "max": 10000}),
                     "start_end_at_step": ("INT", {"default": 21, "min": 0, "max": 10000}),
                     "end_end_at_step": ("INT", {"default": 24, "min": 0, "max": 10000}),
                     "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
                     "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                     "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                     "normalize": (["disable", "enable"], ),
                     "positive": ("CONDITIONING", ),
                     "negative": ("CONDITIONING", ),
                     "schedule_for_cycle": (s.schedules,),
                     }}

    RETURN_TYPES = ("DETAILER_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    def doit(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name,
             scheduler, normalize, positive, negative, schedule_for_cycle):
        try:
            hook = hooks.UnsamplerDetailerHook(model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name,
                                               scheduler, normalize, positive, negative,
                                               from_start=('from_start' in schedule_for_cycle))
            return (hook, )
        except Exception as e:
            print("[ERROR] UnsamplerDetailerHookProvider: the 'ComfyUI Noise' custom node isn't installed. You must install the 'BlenderNeko/ComfyUI Noise' extension to use this node.")
            print(f"\t{e}")

class DenoiseSchedulerDetailerHookProvider:
    schedules = ["simple"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "schedule_for_cycle": (s.schedules,),
                    "target_denoise": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
                }}

    RETURN_TYPES = ("DETAILER_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    def doit(self, schedule_for_cycle, target_denoise):
        hook = hooks.SimpleDetailerDenoiseSchedulerHook(target_denoise)
        return (hook, )

class CoreMLDetailerHookProvider:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"mode": (["512x512", "768x768", "512x768", "768x512"], )}, }

    RETURN_TYPES = ("DETAILER_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    def doit(self, mode):
        hook = hooks.CoreMLHook(mode)
        return (hook, )

class CustomSamplerDetailerHookProvider:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "sampler": ("SAMPLER", ),
                }}

    RETURN_TYPES = ("DETAILER_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    DESCRIPTION = "Applies a hook that lets you use a custom sampler in the Detailer nodes. When using `DetailerHookCombine`, the sampler from the first hook is applied."

    def doit(self, sampler):
        hook = hooks.CustomSamplerDetailerHookProvider(sampler)
        return (hook, )

class CfgScheduleHookProvider:
    schedules = ["simple"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "schedule_for_iteration": (s.schedules,),
                    "target_cfg": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0}),
                }}

    RETURN_TYPES = ("PK_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, schedule_for_iteration, target_cfg):
        hook = None
        if schedule_for_iteration == "simple":
            hook = hooks.SimpleCfgScheduleHook(target_cfg)
        return (hook, )

class UnsamplerHookProvider:
    schedules = ["simple"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"model": ("MODEL",),
                     "steps": ("INT", {"default": 25, "min": 1, "max": 10000}),
                     "start_end_at_step": ("INT", {"default": 21, "min": 0, "max": 10000}),
                     "end_end_at_step": ("INT", {"default": 24, "min": 0, "max": 10000}),
                     "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
                     "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                     "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                     "normalize": (["disable", "enable"], ),
                     "positive": ("CONDITIONING", ),
                     "negative": ("CONDITIONING", ),
                     "schedule_for_iteration": (s.schedules,),
                     }}

    RETURN_TYPES = ("PK_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name,
             scheduler, normalize, positive, negative, schedule_for_iteration):
        try:
            hook = None
            if schedule_for_iteration == "simple":
                hook = hooks.UnsamplerHook(model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name,
                                           scheduler, normalize, positive, negative)
            return (hook, )
        except Exception as e:
            print("[ERROR] UnsamplerHookProvider: the 'ComfyUI Noise' custom node isn't installed. You must install the 'BlenderNeko/ComfyUI Noise' extension to use this node.")
            print(f"\t{e}")

class NoiseInjectionHookProvider:
    schedules = ["simple"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "schedule_for_iteration": (s.schedules,),
                    "source": (["CPU", "GPU"],),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "start_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}),
                    "end_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}),
                }}

    RETURN_TYPES = ("PK_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, schedule_for_iteration, source, seed, start_strength, end_strength):
        try:
            hook = None
            if schedule_for_iteration == "simple":
                hook = hooks.InjectNoiseHook(source, seed, start_strength, end_strength)
            return (hook, )
        except Exception as e:
            print("[ERROR] NoiseInjectionHookProvider: the 'ComfyUI Noise' custom node isn't installed. You must install the 'BlenderNeko/ComfyUI Noise' extension to use this node.")
            print(f"\t{e}")

class DenoiseScheduleHookProvider:
    schedules = ["simple"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "schedule_for_iteration": (s.schedules,),
                    "target_denoise": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.01}),
                }}

    RETURN_TYPES = ("PK_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, schedule_for_iteration, target_denoise):
        hook = None
        if schedule_for_iteration == "simple":
            hook = hooks.SimpleDenoiseScheduleHook(target_denoise)
        return (hook, )

class StepsScheduleHookProvider:
    schedules = ["simple"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "schedule_for_iteration": (s.schedules,),
                    "target_steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                }}

    RETURN_TYPES = ("PK_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, schedule_for_iteration, target_steps):
        hook = None
        if schedule_for_iteration == "simple":
            hook = hooks.SimpleStepsScheduleHook(target_steps)
        return (hook, )

class DetailerHookCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "hook1": ("DETAILER_HOOK",),
                    "hook2": ("DETAILER_HOOK",),
                }}

    RETURN_TYPES = ("DETAILER_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, hook1, hook2):
        hook = hooks.DetailerHookCombine(hook1, hook2)
        return (hook, )

class PixelKSampleHookCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "hook1": ("PK_HOOK",),
                    "hook2": ("PK_HOOK",),
                }}

    RETURN_TYPES = ("PK_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, hook1, hook2):
        hook = hooks.PixelKSampleHookCombine(hook1, hook2)
        return (hook, )

class PixelTiledKSampleUpscalerProvider:
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "scale_method": (s.upscale_methods,),
                    "model": ("MODEL",),
                    "vae": ("VAE",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}),
                    "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}),
                    "tiling_strategy": (["random", "padded", 'simple'], ),
                },
                "optional": {
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                    "pk_hook_opt": ("PK_HOOK", ),
                    "tile_cnet_opt": ("CONTROL_NET", ),
                    "tile_cnet_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "overlap": ("INT", {"default": 64, "min": 0, "max": 4096, "step": 32}),
                }}

    RETURN_TYPES = ("UPSCALER",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise, tile_width, tile_height, tiling_strategy, upscale_model_opt=None,
             pk_hook_opt=None, tile_cnet_opt=None, tile_cnet_strength=1.0, overlap=64):
        if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS:
            upscaler = core.PixelTiledKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise,
                                                      tile_width, tile_height, tiling_strategy, upscale_model_opt, pk_hook_opt, tile_cnet_opt,
                                                      tile_size=max(tile_width, tile_height), tile_cnet_strength=tile_cnet_strength, overlap=overlap)
            return (upscaler, )
        else:
            utils.try_install_custom_node('https://github.com/BlenderNeko/ComfyUI_TiledKSampler',
                                          "To use the 'PixelTiledKSampleUpscalerProvider' node, the 'BlenderNeko/ComfyUI_TiledKSampler' extension is required.")
            raise Exception("[ERROR] PixelTiledKSampleUpscalerProvider: the ComfyUI_TiledKSampler custom node isn't installed. You must install the BlenderNeko/ComfyUI_TiledKSampler extension to use this node.")

class PixelTiledKSampleUpscalerProviderPipe:
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "scale_method": (s.upscale_methods,),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}),
                    "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}),
                    "tiling_strategy": (["random", "padded", 'simple'], ),
                    "basic_pipe": ("BASIC_PIPE",)
                },
                "optional": {
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                    "pk_hook_opt": ("PK_HOOK", ),
                    "tile_cnet_opt": ("CONTROL_NET", ),
                    "tile_cnet_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                }}

    RETURN_TYPES = ("UPSCALER",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, scale_method, seed, steps, cfg, sampler_name, scheduler, denoise, tile_width, tile_height, tiling_strategy, basic_pipe, upscale_model_opt=None, pk_hook_opt=None,
             tile_cnet_opt=None, tile_cnet_strength=1.0):
        if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS:
            model, _, vae, positive, negative = basic_pipe
            upscaler = core.PixelTiledKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise,
                                                      tile_width, tile_height, tiling_strategy, upscale_model_opt, pk_hook_opt, tile_cnet_opt,
                                                      tile_size=max(tile_width, tile_height), tile_cnet_strength=tile_cnet_strength)
            return (upscaler, )
        else:
            print("[ERROR] PixelTiledKSampleUpscalerProviderPipe: the ComfyUI_TiledKSampler custom node isn't installed. You must install the BlenderNeko/ComfyUI_TiledKSampler extension to use this node.")

class PixelKSampleUpscalerProvider:
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "scale_method": (s.upscale_methods,),
                    "model": ("MODEL",),
                    "vae": ("VAE",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (core.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}),
                },
                "optional": {
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                    "pk_hook_opt": ("PK_HOOK", ),
                    "scheduler_func_opt": ("SCHEDULER_FUNC",),
                }}

    RETURN_TYPES = ("UPSCALER",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise,
             use_tiled_vae, upscale_model_opt=None, pk_hook_opt=None, tile_size=512, scheduler_func_opt=None):
        upscaler = core.PixelKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler,
                                             positive, negative, denoise, use_tiled_vae, upscale_model_opt, pk_hook_opt,
                                             tile_size=tile_size, scheduler_func=scheduler_func_opt)
        return (upscaler, )

class PixelKSampleUpscalerProviderPipe(PixelKSampleUpscalerProvider):
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "scale_method": (s.upscale_methods,),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (core.SCHEDULERS, ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "basic_pipe": ("BASIC_PIPE",),
                    "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}),
                },
                "optional": {
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                    "pk_hook_opt": ("PK_HOOK", ),
                    "scheduler_func_opt": ("SCHEDULER_FUNC",),
                    "tile_cnet_opt": ("CONTROL_NET", ),
                    "tile_cnet_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                }}

    RETURN_TYPES = ("UPSCALER",)
    FUNCTION = "doit_pipe"

    CATEGORY = "ImpactPack/Upscale"

    def doit_pipe(self, scale_method, seed, steps, cfg, sampler_name, scheduler, denoise,
                  use_tiled_vae, basic_pipe, upscale_model_opt=None, pk_hook_opt=None,
                  tile_size=512, scheduler_func_opt=None, tile_cnet_opt=None, tile_cnet_strength=1.0):
        model, _, vae, positive, negative = basic_pipe
        upscaler = core.PixelKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler,
                                             positive, negative, denoise, use_tiled_vae, upscale_model_opt, pk_hook_opt,
                                             tile_size=tile_size, scheduler_func=scheduler_func_opt,
                                             tile_cnet_opt=tile_cnet_opt, tile_cnet_strength=tile_cnet_strength)
        return (upscaler, )

class TwoSamplersForMaskUpscalerProvider:
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "scale_method": (s.upscale_methods,),
                    "full_sample_schedule": (
                        ["none", "interleave1", "interleave2", "interleave3",
                         "last1", "last2",
                         "interleave1+last1", "interleave2+last1", "interleave3+last1",
                         ],),
                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "base_sampler": ("KSAMPLER", ),
                    "mask_sampler": ("KSAMPLER", ),
                    "mask": ("MASK", ),
                    "vae": ("VAE",),
                    "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}),
                },
                "optional": {
                    "full_sampler_opt": ("KSAMPLER",),
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                    "pk_hook_base_opt": ("PK_HOOK", ),
                    "pk_hook_mask_opt": ("PK_HOOK", ),
                    "pk_hook_full_opt": ("PK_HOOK", ),
                }}

    RETURN_TYPES = ("UPSCALER", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, scale_method, full_sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae,
             full_sampler_opt=None, upscale_model_opt=None,
             pk_hook_base_opt=None, pk_hook_mask_opt=None, pk_hook_full_opt=None, tile_size=512):
        upscaler = core.TwoSamplersForMaskUpscaler(scale_method, full_sample_schedule, use_tiled_vae,
                                                   base_sampler, mask_sampler, mask, vae, full_sampler_opt, upscale_model_opt,
                                                   pk_hook_base_opt, pk_hook_mask_opt, pk_hook_full_opt, tile_size=tile_size)
        return (upscaler, )

class TwoSamplersForMaskUpscalerProviderPipe:
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "scale_method": (s.upscale_methods,),
                    "full_sample_schedule": (
                        ["none", "interleave1", "interleave2", "interleave3",
                         "last1", "last2",
                         "interleave1+last1", "interleave2+last1", "interleave3+last1",
                         ],),
                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "base_sampler": ("KSAMPLER", ),
                    "mask_sampler": ("KSAMPLER", ),
                    "mask": ("MASK", ),
                    "basic_pipe": ("BASIC_PIPE",),
                    "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}),
                },
                "optional": {
                    "full_sampler_opt": ("KSAMPLER",),
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                    "pk_hook_base_opt": ("PK_HOOK", ),
                    "pk_hook_mask_opt": ("PK_HOOK", ),
                    "pk_hook_full_opt": ("PK_HOOK", ),
                }}

    RETURN_TYPES = ("UPSCALER", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, scale_method, full_sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, basic_pipe,
             full_sampler_opt=None, upscale_model_opt=None,
             pk_hook_base_opt=None, pk_hook_mask_opt=None, pk_hook_full_opt=None, tile_size=512):
        mask = make_2d_mask(mask)

        _, _, vae, _, _ = basic_pipe
        upscaler = core.TwoSamplersForMaskUpscaler(scale_method, full_sample_schedule, use_tiled_vae,
                                                   base_sampler, mask_sampler, mask, vae, full_sampler_opt, upscale_model_opt,
                                                   pk_hook_base_opt, pk_hook_mask_opt, pk_hook_full_opt, tile_size=tile_size)
        return (upscaler, )

class IterativeLatentUpscale:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "samples": ("LATENT", ),
                    "upscale_factor": ("FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}),
                    "steps": ("INT", {"default": 3, "min": 1, "max": 10000, "step": 1}),
                    "temp_prefix": ("STRING", {"default": ""}),
                    "upscaler": ("UPSCALER",),
                    "step_mode": (["simple", "geometric"], {"default": "simple"})
                },
                "hidden": {"unique_id": "UNIQUE_ID"},
                }

    RETURN_TYPES = ("LATENT", "VAE")
    RETURN_NAMES = ("latent", "vae")
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, samples, upscale_factor, steps, temp_prefix, upscaler, step_mode="simple", unique_id=None):
        w = samples['samples'].shape[3] * 8  # image width (latent width x 8)
        h = samples['samples'].shape[2] * 8  # image height (latent height x 8)

        if temp_prefix == "":
            temp_prefix = None

        if step_mode == "geometric":
            upscale_factor_unit = pow(upscale_factor, 1.0 / steps)
        else:  # simple
            upscale_factor_unit = max(0, (upscale_factor - 1.0) / steps)
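        # Worked example: upscale_factor=2.0, steps=3
        #   geometric: unit = 2.0 ** (1/3) ~= 1.2599 -> target scales ~= 1.26, 1.59, 2.00 (multiplicative)
        #   simple:    unit = (2.0 - 1.0) / 3 ~= 0.3333 -> target scales ~= 1.33, 1.67, 2.00 (additive)
        # The loop below performs steps-1 intermediate upscales; the final block snaps to upscale_factor.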
        current_latent = samples
        noise_mask = current_latent.get('noise_mask')
        scale = 1

        for i in range(steps - 1):
            if step_mode == "geometric":
                scale *= upscale_factor_unit
            else:  # simple
                scale += upscale_factor_unit

            new_w = w * scale
            new_h = h * scale
            core.update_node_status(unique_id, f"{i+1}/{steps} steps | x{scale:.2f}", (i+1)/steps)
            print(f"IterativeLatentUpscale[{i+1}/{steps}]: {new_w:.1f}x{new_h:.1f} (scale:{scale:.2f}) ")
            step_info = i, steps
            current_latent = upscaler.upscale_shape(step_info, current_latent, new_w, new_h, temp_prefix)
            if noise_mask is not None:
                current_latent['noise_mask'] = noise_mask

        if scale < upscale_factor:
            new_w = w * upscale_factor
            new_h = h * upscale_factor
            core.update_node_status(unique_id, f"Final step | x{upscale_factor:.2f}", 1.0)
            print(f"IterativeLatentUpscale[Final]: {new_w:.1f}x{new_h:.1f} (scale:{upscale_factor:.2f}) ")
            step_info = steps - 1, steps
            current_latent = upscaler.upscale_shape(step_info, current_latent, new_w, new_h, temp_prefix)

        core.update_node_status(unique_id, "", None)

        return current_latent, upscaler.vae

class IterativeImageUpscale:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "pixels": ("IMAGE", ),
                    "upscale_factor": ("FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}),
                    "steps": ("INT", {"default": 3, "min": 1, "max": 10000, "step": 1}),
                    "temp_prefix": ("STRING", {"default": ""}),
                    "upscaler": ("UPSCALER",),
                    "vae": ("VAE",),
                    "step_mode": (["simple", "geometric"], {"default": "simple"})
                },
                "hidden": {"unique_id": "UNIQUE_ID"}
                }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, pixels, upscale_factor, steps, temp_prefix, upscaler, vae, step_mode="simple", unique_id=None):
        if temp_prefix == "":
            temp_prefix = None

        core.update_node_status(unique_id, "VAEEncode (first)", 0)
        if upscaler.is_tiled:
            encoder = nodes.VAEEncodeTiled()
            if 'overlap' in inspect.signature(encoder.encode).parameters:
                latent = encoder.encode(vae, pixels, upscaler.tile_size, overlap=upscaler.overlap)[0]
            else:
                latent = encoder.encode(vae, pixels, upscaler.tile_size)[0]
        else:
            latent = nodes.VAEEncode().encode(vae, pixels)[0]

        refined_latent = IterativeLatentUpscale().doit(latent, upscale_factor, steps, temp_prefix, upscaler, step_mode, unique_id)

        core.update_node_status(unique_id, "VAEDecode (final)", 1.0)
        if upscaler.is_tiled:
            pixels = nodes.VAEDecodeTiled().decode(vae, refined_latent[0], upscaler.tile_size)[0]
        else:
            pixels = nodes.VAEDecode().decode(vae, refined_latent[0])[0]

        core.update_node_status(unique_id, "", None)

        return (pixels, )
class FaceDetailerPipe: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"image": ("IMAGE", ), | |
"detailer_pipe": ("DETAILER_PIPE", {"tooltip": "If the `ImpactDummyInput` is connected to the model in the detailer_pipe, the inference stage is skipped."}), | |
"guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), | |
"guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}), | |
"max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), | |
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), | |
"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), | |
"cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), | |
"sampler_name": (comfy.samplers.KSampler.SAMPLERS,), | |
"scheduler": (core.SCHEDULERS,), | |
"denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), | |
"feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), | |
"noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), | |
"force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), | |
"bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), | |
"bbox_dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}), | |
"bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), | |
"sam_detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"],), | |
"sam_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), | |
"sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}), | |
"sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}), | |
"sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), | |
"sam_mask_hint_use_negative": (["False", "Small", "Outter"],), | |
"drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), | |
"refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), | |
"cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), | |
}, | |
"optional": { | |
"inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), | |
"noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), | |
"scheduler_func_opt": ("SCHEDULER_FUNC",), | |
"tiled_encode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), | |
"tiled_decode": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), | |
} | |
} | |
RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", "IMAGE") | |
RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe", "cnet_images") | |
OUTPUT_IS_LIST = (False, True, True, False, False, True) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Simple" | |
DESCRIPTION = FaceDetailer.DESCRIPTION | |
def doit(self, image, detailer_pipe, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, | |
denoise, feather, noise_mask, force_inpaint, bbox_threshold, bbox_dilation, bbox_crop_factor, | |
sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, | |
sam_mask_hint_threshold, sam_mask_hint_use_negative, drop_size, refiner_ratio=None, | |
cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, | |
tiled_encode=False, tiled_decode=False): | |
result_img = None | |
result_mask = None | |
result_cropped_enhanced = [] | |
result_cropped_enhanced_alpha = [] | |
result_cnet_images = [] | |
if len(image) > 1: | |
print(f"[Impact Pack] WARN: FaceDetailer is not a node designed for video detailing. If you intend to perform video detailing, please use Detailer For AnimateDiff.") | |
model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector, sam_model_opt, detailer_hook, \ | |
refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe | |
for i, single_image in enumerate(image): | |
enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face( | |
single_image.unsqueeze(0), model, clip, vae, guide_size, guide_size_for, max_size, seed + i, steps, cfg, sampler_name, scheduler, | |
positive, negative, denoise, feather, noise_mask, force_inpaint, | |
bbox_threshold, bbox_dilation, bbox_crop_factor, | |
sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold, | |
sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector, sam_model_opt, wildcard, detailer_hook, | |
refiner_ratio=refiner_ratio, refiner_model=refiner_model, | |
refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative, | |
cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt, | |
tiled_encode=tiled_encode, tiled_decode=tiled_decode) | |
result_img = torch.cat((result_img, enhanced_img), dim=0) if result_img is not None else enhanced_img | |
result_mask = torch.cat((result_mask, mask), dim=0) if result_mask is not None else mask | |
result_cropped_enhanced.extend(cropped_enhanced) | |
result_cropped_enhanced_alpha.extend(cropped_enhanced_alpha) | |
result_cnet_images.extend(cnet_pil_list) | |
if len(result_cropped_enhanced) == 0: | |
result_cropped_enhanced = [empty_pil_tensor()] | |
if len(result_cropped_enhanced_alpha) == 0: | |
result_cropped_enhanced_alpha = [empty_pil_tensor()] | |
if len(result_cnet_images) == 0: | |
result_cnet_images = [empty_pil_tensor()] | |
return result_img, result_cropped_enhanced, result_cropped_enhanced_alpha, result_mask, detailer_pipe, result_cnet_images | |
class MaskDetailerPipe: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"image": ("IMAGE", ), | |
"mask": ("MASK", ), | |
"basic_pipe": ("BASIC_PIPE",), | |
"guide_size": ("FLOAT", {"default": 512, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), | |
"guide_size_for": ("BOOLEAN", {"default": True, "label_on": "mask bbox", "label_off": "crop region"}), | |
"max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}), | |
"mask_mode": ("BOOLEAN", {"default": True, "label_on": "masked only", "label_off": "whole"}), | |
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), | |
"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), | |
"cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), | |
"sampler_name": (comfy.samplers.KSampler.SAMPLERS,), | |
"scheduler": (core.SCHEDULERS,), | |
"denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}), | |
"feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}), | |
"crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}), | |
"drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}), | |
"refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}), | |
"batch_size": ("INT", {"default": 1, "min": 1, "max": 100}), | |
"cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}), | |
}, | |
"optional": { | |
"refiner_basic_pipe_opt": ("BASIC_PIPE", ), | |
"detailer_hook": ("DETAILER_HOOK",), | |
"inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), | |
"noise_mask_feather": ("INT", {"default": 20, "min": 0, "max": 100, "step": 1}), | |
"bbox_fill": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}), | |
"contour_fill": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}), | |
"scheduler_func_opt": ("SCHEDULER_FUNC",), | |
} | |
} | |
RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "BASIC_PIPE", "BASIC_PIPE") | |
RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "basic_pipe", "refiner_basic_pipe_opt") | |
OUTPUT_IS_LIST = (False, True, True, False, False) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Detailer" | |
DESCRIPTION = "" | |
def doit(self, image, mask, basic_pipe, guide_size, guide_size_for, max_size, mask_mode, | |
seed, steps, cfg, sampler_name, scheduler, denoise, | |
feather, crop_factor, drop_size, refiner_ratio, batch_size, cycle=1, | |
refiner_basic_pipe_opt=None, detailer_hook=None, inpaint_model=False, noise_mask_feather=0, | |
bbox_fill=False, contour_fill=True, scheduler_func_opt=None): | |
if len(image) > 1: | |
raise Exception('[Impact Pack] ERROR: MaskDetailer does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') | |
model, clip, vae, positive, negative = basic_pipe | |
if refiner_basic_pipe_opt is None: | |
refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None | |
else: | |
refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt | |
# create segs | |
if mask is not None: | |
mask = make_2d_mask(mask) | |
segs = core.mask_to_segs(mask, False, crop_factor, bbox_fill, drop_size, is_contour=contour_fill) | |
else: | |
segs = ((image.shape[1], image.shape[2]), []) | |
enhanced_img_batch = None | |
cropped_enhanced_list = [] | |
cropped_enhanced_alpha_list = [] | |
for i in range(batch_size): | |
if mask is not None: | |
enhanced_img, _, cropped_enhanced, cropped_enhanced_alpha, _, _ = \ | |
DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed+i, steps, | |
cfg, sampler_name, scheduler, positive, negative, denoise, feather, mask_mode, | |
force_inpaint=True, wildcard_opt=None, detailer_hook=detailer_hook, | |
refiner_ratio=refiner_ratio, refiner_model=refiner_model, refiner_clip=refiner_clip, | |
refiner_positive=refiner_positive, refiner_negative=refiner_negative, | |
cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, scheduler_func_opt=scheduler_func_opt) | |
else: | |
enhanced_img, cropped_enhanced, cropped_enhanced_alpha = image, [], [] | |
if enhanced_img_batch is None: | |
enhanced_img_batch = enhanced_img | |
else: | |
enhanced_img_batch = torch.cat((enhanced_img_batch, enhanced_img), dim=0) | |
cropped_enhanced_list += cropped_enhanced | |
cropped_enhanced_alpha_list += cropped_enhanced_alpha | |
# set fallback image | |
if len(cropped_enhanced_list) == 0: | |
cropped_enhanced_list = [empty_pil_tensor()] | |
if len(cropped_enhanced_alpha_list) == 0: | |
cropped_enhanced_alpha_list = [empty_pil_tensor()] | |
return enhanced_img_batch, cropped_enhanced_list, cropped_enhanced_alpha_list, basic_pipe, refiner_basic_pipe_opt | |
class DetailerForEachTest(DetailerForEach): | |
RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE") | |
RETURN_NAMES = ("image", "cropped", "cropped_refined", "cropped_refined_alpha", "cnet_images") | |
OUTPUT_IS_LIST = (False, True, True, True, True) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Detailer" | |
def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, | |
scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, detailer_hook=None, | |
cycle=1, inpaint_model=False, noise_mask_feather=0, scheduler_func_opt=None, tiled_encode=False, tiled_decode=False): | |
if len(image) > 1: | |
raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') | |
enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ | |
DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, | |
cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, | |
force_inpaint, wildcard, detailer_hook, | |
cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, | |
scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode) | |
# set fallback image | |
if len(cropped) == 0: | |
cropped = [empty_pil_tensor()] | |
if len(cropped_enhanced) == 0: | |
cropped_enhanced = [empty_pil_tensor()] | |
if len(cropped_enhanced_alpha) == 0: | |
cropped_enhanced_alpha = [empty_pil_tensor()] | |
if len(cnet_pil_list) == 0: | |
cnet_pil_list = [empty_pil_tensor()] | |
return enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list | |
class DetailerForEachTestPipe(DetailerForEachPipe): | |
RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", ) | |
RETURN_NAMES = ("image", "segs", "basic_pipe", "cropped", "cropped_refined", "cropped_refined_alpha", 'cnet_images') | |
OUTPUT_IS_LIST = (False, False, False, True, True, True, True) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Detailer" | |
DESCRIPTION = DetailerForEach.DESCRIPTION | |
def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, | |
denoise, feather, noise_mask, force_inpaint, basic_pipe, wildcard, cycle=1, | |
refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None, inpaint_model=False, noise_mask_feather=0, | |
scheduler_func_opt=None, tiled_encode=False, tiled_decode=False): | |
if len(image) > 1: | |
raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') | |
model, clip, vae, positive, negative = basic_pipe | |
if refiner_basic_pipe_opt is None: | |
refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None | |
else: | |
refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt | |
enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ | |
DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, | |
sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, | |
force_inpaint, wildcard, detailer_hook, | |
refiner_ratio=refiner_ratio, refiner_model=refiner_model, | |
refiner_clip=refiner_clip, refiner_positive=refiner_positive, | |
refiner_negative=refiner_negative, | |
cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather, | |
scheduler_func_opt=scheduler_func_opt, tiled_encode=tiled_encode, tiled_decode=tiled_decode) | |
# set fallback image | |
if len(cropped) == 0: | |
cropped = [empty_pil_tensor()] | |
if len(cropped_enhanced) == 0: | |
cropped_enhanced = [empty_pil_tensor()] | |
if len(cropped_enhanced_alpha) == 0: | |
cropped_enhanced_alpha = [empty_pil_tensor()] | |
if len(cnet_pil_list) == 0: | |
cnet_pil_list = [empty_pil_tensor()] | |
return enhanced_img, new_segs, basic_pipe, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list | |
class SegsBitwiseAndMask: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"segs": ("SEGS",), | |
"mask": ("MASK",), | |
} | |
} | |
RETURN_TYPES = ("SEGS",) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Operation" | |
def doit(self, segs, mask): | |
return (core.segs_bitwise_and_mask(segs, mask), ) | |
class SegsBitwiseAndMaskForEach: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"segs": ("SEGS",), | |
"masks": ("MASK",), | |
} | |
} | |
RETURN_TYPES = ("SEGS",) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Operation" | |
def doit(self, segs, masks): | |
return (core.apply_mask_to_each_seg(segs, masks), ) | |
class BitwiseAndMaskForEach: | |
def INPUT_TYPES(s): | |
return {"required": | |
{ | |
"base_segs": ("SEGS",), | |
"mask_segs": ("SEGS",), | |
} | |
} | |
RETURN_TYPES = ("SEGS",) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Operation" | |
DESCRIPTION = "Retains only the overlapping areas between the masks included in base_segs and the mask regions of mask_segs. SEGS with no overlapping mask areas are filtered out." | |
def doit(self, base_segs, mask_segs): | |
mask = core.segs_to_combined_mask(mask_segs) | |
mask = make_3d_mask(mask) | |
return SegsBitwiseAndMask().doit(base_segs, mask) | |
class SubtractMaskForEach: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"base_segs": ("SEGS",), | |
"mask_segs": ("SEGS",), | |
} | |
} | |
RETURN_TYPES = ("SEGS",) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Operation" | |
DESCRIPTION = "Removes only the overlapping areas between the masks included in base_segs and the mask regions of mask_segs. SEGS with no overlapping mask areas are filtered out." | |
def doit(self, base_segs, mask_segs): | |
mask = core.segs_to_combined_mask(mask_segs) | |
mask = make_3d_mask(mask) | |
return (core.segs_bitwise_subtract_mask(base_segs, mask), ) | |
class ToBinaryMask: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"mask": ("MASK",), | |
"threshold": ("INT", {"default": 20, "min": 1, "max": 255}), | |
} | |
} | |
RETURN_TYPES = ("MASK",) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Operation" | |
def doit(self, mask, threshold): | |
mask = to_binary_mask(mask, threshold/255.0) | |
return (mask,) | |
class FlattenMask: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"masks": ("MASK",), | |
} | |
} | |
RETURN_TYPES = ("MASK",) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Operation" | |
def doit(self, masks): | |
masks = utils.make_3d_mask(masks) | |
masks = utils.flatten_mask(masks) | |
return (masks,) | |
class BitwiseAndMask: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"mask1": ("MASK",), | |
"mask2": ("MASK",), | |
} | |
} | |
RETURN_TYPES = ("MASK",) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Operation" | |
def doit(self, mask1, mask2): | |
mask = bitwise_and_masks(mask1, mask2) | |
return (mask,) | |
class SubtractMask: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"mask1": ("MASK", ), | |
"mask2": ("MASK", ), | |
} | |
} | |
RETURN_TYPES = ("MASK",) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Operation" | |
def doit(self, mask1, mask2): | |
mask = subtract_masks(mask1, mask2) | |
return (mask,) | |
class AddMask: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"mask1": ("MASK",), | |
"mask2": ("MASK",), | |
} | |
} | |
RETURN_TYPES = ("MASK",) | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Operation" | |
def doit(self, mask1, mask2): | |
mask = add_masks(mask1, mask2) | |
return (mask,) | |
import nodes | |
def get_image_hash(arr): | |
split_index1 = arr.shape[0] // 2 | |
split_index2 = arr.shape[1] // 2 | |
part1 = arr[:split_index1, :split_index2] | |
part2 = arr[:split_index1, split_index2:] | |
part3 = arr[split_index1:, :split_index2] | |
part4 = arr[split_index1:, split_index2:] | |
# sum each quadrant | |
sum1 = np.sum(part1) | |
sum2 = np.sum(part2) | |
sum3 = np.sum(part3) | |
sum4 = np.sum(part4) | |
return hash((sum1, sum2, sum3, sum4)) | |
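# Minimal sketch of the behavior above (illustrative values only): the hash is a | |
# coarse content check built from the four quadrant sums, not a pixel-exact digest. | |
# >>> arr = np.arange(16).reshape(4, 4) | |
# >>> get_image_hash(arr) == get_image_hash(arr.copy()) | |
# True | |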
def get_file_item(base_type, path): | |
path_type = base_type | |
if path == "[output]": | |
path_type = "output" | |
path = path[:-9] | |
elif path == "[input]": | |
path_type = "input" | |
path = path[:-8] | |
elif path == "[temp]": | |
path_type = "temp" | |
path = path[:-7] | |
subfolder = os.path.dirname(path) | |
filename = os.path.basename(path) | |
return { | |
"filename": filename, | |
"subfolder": subfolder, | |
"type": path_type | |
} | |
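# Example of the annotated-path convention handled above (hypothetical path): | |
# get_file_item("input", "subdir/img.png [output]") | |
#   -> {"filename": "img.png", "subfolder": "subdir", "type": "output"} | |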
class MaskRectArea: | |
# Creates a rectangular mask from percentage coordinates (0-100) stored in the node's properties. | |
def __init__(self): | |
pass | |
def INPUT_TYPES(cls): | |
return { | |
"required": { | |
}, | |
"hidden": {"extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"} | |
} | |
RETURN_TYPES = ("MASK",) | |
CATEGORY = "ImpactPack/Operation" | |
FUNCTION = "create_mask" | |
def create_mask(self, extra_pnginfo, unique_id, **kwargs): | |
# search for node | |
node_found = False | |
for node in extra_pnginfo["workflow"]["nodes"]: | |
if str(node["id"]) == unique_id: | |
min_x = node["properties"].get("x", 0) / 100 | |
min_y = node["properties"].get("y", 0) / 100 | |
width = node["properties"].get("w", 0) / 100 | |
height = node["properties"].get("h", 0) / 100 | |
blur_radius = node["properties"].get("blur_radius", 0) | |
node_found = True | |
break | |
if not node_found: | |
raise ValueError(f"No node found with unique_id {unique_id}.") | |
# Create a mask with standard resolution (e.g., 512x512) | |
resolution = 512 | |
mask = torch.zeros((resolution, resolution)) | |
# Calculate pixel coordinates | |
min_x_px = int(min_x * resolution) | |
min_y_px = int(min_y * resolution) | |
max_x_px = int((min_x + width) * resolution) | |
max_y_px = int((min_y + height) * resolution) | |
# Draw the rectangle on the mask | |
mask[min_y_px:max_y_px, min_x_px:max_x_px] = 1 | |
# Apply blur if the radius is greater than 0 | |
if blur_radius > 0: | |
dx = blur_radius * 2 + 1 | |
dy = blur_radius * 2 + 1 | |
# Convert the mask to a format compatible with OpenCV (numpy array) | |
mask_np = mask.cpu().numpy().astype("float32") | |
# Apply Gaussian Blur | |
blurred_mask = cv2.GaussianBlur(mask_np, (dx, dy), 0) | |
# Convert back to tensor | |
mask = torch.from_numpy(blurred_mask) | |
# Return the mask as a tensor with an added batch dimension | |
return (mask.unsqueeze(0),) | |
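# Example of the percent-to-pixel mapping above (hypothetical property values): | |
# properties {"x": 25, "y": 25, "w": 50, "h": 50} yield a 512x512 mask with | |
# mask[128:384, 128:384] = 1, i.e. a centered rectangle spanning half of each axis. | |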
class MaskRectAreaAdvanced: | |
# Creates a rectangular mask from pixel coordinates relative to the stored image size. | |
def __init__(self): | |
pass | |
def INPUT_TYPES(cls): | |
return { | |
"required": { | |
}, | |
"hidden": {"extra_pnginfo": "EXTRA_PNGINFO", "unique_id": "UNIQUE_ID"} | |
} | |
RETURN_TYPES = ("MASK",) | |
CATEGORY = "ImpactPack/Operation" | |
FUNCTION = "create_mask_advanced" | |
def create_mask_advanced(self, extra_pnginfo, unique_id, **kwargs): | |
# search for node | |
node_found = False | |
for node in extra_pnginfo["workflow"]["nodes"]: | |
if node["id"] == int(unique_id): | |
min_x = node["properties"]["x"] | |
min_y = node["properties"]["y"] | |
width = node["properties"]["w"] | |
height = node["properties"]["h"] | |
image_width = node["properties"]["width"] | |
image_height = node["properties"]["height"] | |
blur_radius = node["properties"]["blur_radius"] | |
node_found = True | |
break | |
if not node_found: | |
raise ValueError(f"No node found with unique_id {unique_id}.") | |
# Calculate maximum coordinates | |
max_x = min_x + width | |
max_y = min_y + height | |
# Create a mask with the image dimensions | |
mask = torch.zeros((image_height, image_width)) | |
# Draw the rectangle on the mask | |
mask[int(min_y):int(max_y), int(min_x):int(max_x)] = 1 | |
# Apply blur if the radius is greater than 0 | |
if blur_radius > 0: | |
dx = blur_radius * 2 + 1 | |
dy = blur_radius * 2 + 1 | |
# Convert the mask to a format compatible with OpenCV (numpy array) | |
mask_np = mask.cpu().numpy().astype("float32") | |
# Apply Gaussian Blur | |
blurred_mask = cv2.GaussianBlur(mask_np, (dx, dy), 0) | |
# Convert back to tensor | |
mask = torch.from_numpy(blurred_mask) | |
# Return the mask as a tensor with an added batch dimension | |
return (mask.unsqueeze(0),) | |
class ImageReceiver: | |
def INPUT_TYPES(s): | |
input_dir = folder_paths.get_input_directory() | |
files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] | |
return {"required": { | |
"image": (sorted(files), ), | |
"link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), | |
"save_to_workflow": ("BOOLEAN", {"default": False}), | |
"image_data": ("STRING", {"multiline": False}), | |
"trigger_always": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), | |
}, | |
} | |
FUNCTION = "doit" | |
RETURN_TYPES = ("IMAGE", "MASK") | |
CATEGORY = "ImpactPack/Util" | |
def doit(self, image, link_id, save_to_workflow, image_data, trigger_always): | |
if save_to_workflow: | |
try: | |
image_data = base64.b64decode(image_data.split(",")[1]) | |
i = Image.open(BytesIO(image_data)) | |
i = ImageOps.exif_transpose(i) | |
image = i.convert("RGB") | |
image = np.array(image).astype(np.float32) / 255.0 | |
image = torch.from_numpy(image)[None,] | |
if 'A' in i.getbands(): | |
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 | |
mask = 1. - torch.from_numpy(mask) | |
else: | |
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") | |
return (image, mask.unsqueeze(0)) | |
except Exception as e: | |
print(f"[WARN] ComfyUI-Impact-Pack: ImageReceiver - invalid 'image_data' ({e})") | |
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") | |
return (empty_pil_tensor(64, 64), mask.unsqueeze(0), ) | |
else: | |
return nodes.LoadImage().load_image(image) | |
def VALIDATE_INPUTS(s, image, link_id, save_to_workflow, image_data, trigger_always): | |
if (image != '#DATA' and not folder_paths.exists_annotated_filepath(image)) or image.startswith("/") or ".." in image: | |
return "Invalid image file: {}".format(image) | |
return True | |
def IS_CHANGED(s, image, link_id, save_to_workflow, image_data, trigger_always): | |
if trigger_always: | |
return float("NaN") | |
else: | |
if save_to_workflow: | |
return hash(image_data) | |
else: | |
return hash(image) | |
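# Note: returning float("NaN") from IS_CHANGED forces re-execution on every run, | |
# because NaN never compares equal to the previously cached value (NaN != NaN). | |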
from server import PromptServer | |
class ImageSender(nodes.PreviewImage): | |
def INPUT_TYPES(s): | |
return {"required": { | |
"images": ("IMAGE", ), | |
"filename_prefix": ("STRING", {"default": "ImgSender"}), | |
"link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), }, | |
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, | |
} | |
OUTPUT_NODE = True | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Util" | |
def doit(self, images, filename_prefix="ImgSender", link_id=0, prompt=None, extra_pnginfo=None): | |
result = nodes.PreviewImage().save_images(images, filename_prefix, prompt, extra_pnginfo) | |
PromptServer.instance.send_sync("img-send", {"link_id": link_id, "images": result['ui']['images']}) | |
return result | |
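# ImageSender and ImageReceiver are paired by 'link_id': the "img-send" message | |
# above carries the saved preview entries, and the pack's frontend extension (not | |
# shown here) is expected to deliver them to ImageReceiver nodes with the same id. | |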
class LatentReceiver: | |
def __init__(self): | |
self.input_dir = folder_paths.get_input_directory() | |
self.type = "input" | |
def INPUT_TYPES(s): | |
def check_file_extension(x): | |
return x.endswith(".latent") or x.endswith(".latent.png") | |
input_dir = folder_paths.get_input_directory() | |
files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and check_file_extension(f)] | |
return {"required": { | |
"latent": (sorted(files), ), | |
"link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), | |
"trigger_always": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), | |
}, | |
} | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Util" | |
RETURN_TYPES = ("LATENT",) | |
def load_preview_latent(image_path): | |
if not os.path.exists(image_path): | |
return None | |
image = Image.open(image_path) | |
if "exif" not in image.info: | |
return None | |
exif_data = piexif.load(image.info["exif"]) | |
if piexif.ExifIFD.UserComment in exif_data["Exif"]: | |
compressed_data = exif_data["Exif"][piexif.ExifIFD.UserComment] | |
compressed_data_io = BytesIO(compressed_data) | |
with zipfile.ZipFile(compressed_data_io, mode='r') as archive: | |
tensor_bytes = archive.read("latent") | |
tensor = safetensors.torch.load(tensor_bytes) | |
return {"samples": tensor['latent_tensor']} | |
return None | |
def parse_filename(self, filename): | |
pattern = r"^(.*)/(.*?)\[(.*)\]\s*$" | |
match = re.match(pattern, filename) | |
if match: | |
subfolder = match.group(1) | |
filename = match.group(2).rstrip() | |
file_type = match.group(3) | |
else: | |
subfolder = '' | |
file_type = self.type | |
return {'filename': filename, 'subfolder': subfolder, 'type': file_type} | |
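# Example for parse_filename above (hypothetical name): | |
# "latents/LatentSender_00001_.latent.png [temp]" | |
#   -> {'filename': 'LatentSender_00001_.latent.png', 'subfolder': 'latents', 'type': 'temp'} | |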
def doit(self, **kwargs): | |
if 'latent' not in kwargs: | |
return ({'samples': torch.zeros([1, 4, 8, 8])}, ) | |
latent = kwargs['latent'] | |
latent_name = latent | |
latent_path = folder_paths.get_annotated_filepath(latent_name) | |
if latent.endswith(".latent"): | |
latent = safetensors.torch.load_file(latent_path, device="cpu") | |
multiplier = 1.0 | |
if "latent_format_version_0" not in latent: | |
multiplier = 1.0 / 0.18215 | |
samples = {"samples": latent["latent_tensor"].float() * multiplier} | |
else: | |
samples = LatentReceiver.load_preview_latent(latent_path) | |
if samples is None: | |
samples = {'samples': torch.zeros([1, 4, 8, 8])} | |
preview = self.parse_filename(latent_name) | |
return { | |
'ui': {"images": [preview]}, | |
'result': (samples, ) | |
} | |
def IS_CHANGED(s, latent, link_id, trigger_always): | |
if trigger_always: | |
return float("NaN") | |
else: | |
image_path = folder_paths.get_annotated_filepath(latent) | |
m = hashlib.sha256() | |
with open(image_path, 'rb') as f: | |
m.update(f.read()) | |
return m.digest().hex() | |
def VALIDATE_INPUTS(s, latent, link_id, trigger_always): | |
if not folder_paths.exists_annotated_filepath(latent) or latent.startswith("/") or ".." in latent: | |
return "Invalid latent file: {}".format(latent) | |
return True | |
class LatentSender(nodes.SaveLatent): | |
def __init__(self): | |
super().__init__() | |
self.output_dir = folder_paths.get_temp_directory() | |
self.type = "temp" | |
def INPUT_TYPES(s): | |
return {"required": { | |
"samples": ("LATENT", ), | |
"filename_prefix": ("STRING", {"default": "latents/LatentSender"}), | |
"link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), | |
"preview_method": (["Latent2RGB-FLUX.1", | |
"Latent2RGB-SDXL", "Latent2RGB-SD15", "Latent2RGB-SD3", | |
"Latent2RGB-SD-X4", "Latent2RGB-Playground-2.5", | |
"Latent2RGB-SC-Prior", "Latent2RGB-SC-B", | |
"Latent2RGB-LTXV", | |
"TAEF1", "TAESDXL", "TAESD15", "TAESD3"],) | |
}, | |
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, | |
} | |
OUTPUT_NODE = True | |
RETURN_TYPES = () | |
FUNCTION = "doit" | |
CATEGORY = "ImpactPack/Util" | |
def save_to_file(tensor_bytes, prompt, extra_pnginfo, image, image_path): | |
compressed_data = BytesIO() | |
with zipfile.ZipFile(compressed_data, mode='w') as archive: | |
archive.writestr("latent", tensor_bytes) | |
image = image.copy() | |
exif_data = {"Exif": {piexif.ExifIFD.UserComment: compressed_data.getvalue()}} | |
metadata = PngInfo() | |
if prompt is not None: | |
metadata.add_text("prompt", json.dumps(prompt)) | |
if extra_pnginfo is not None: | |
for x in extra_pnginfo: | |
metadata.add_text(x, json.dumps(extra_pnginfo[x])) | |
exif_bytes = piexif.dump(exif_data) | |
image.save(image_path, format='png', exif=exif_bytes, pnginfo=metadata, optimize=True) | |
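# The layout written above mirrors LatentReceiver.load_preview_latent: the | |
# safetensors bytes are stored as a single "latent" entry of a zip archive | |
# embedded in the PNG's EXIF UserComment, so a .latent.png file doubles as | |
# both a viewable preview and a loadable latent. | |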
def prepare_preview(latent_tensor, preview_method): | |
from comfy.cli_args import LatentPreviewMethod | |
import comfy.latent_formats as latent_formats | |
lower_bound = 128 | |
upper_bound = 256 | |
if preview_method == "Latent2RGB-SD15": | |
latent_format = latent_formats.SD15() | |
method = LatentPreviewMethod.Latent2RGB | |
elif preview_method == "Latent2RGB-SDXL": | |
latent_format = latent_formats.SDXL() | |
method = LatentPreviewMethod.Latent2RGB | |
elif preview_method == "Latent2RGB-SD3": | |
latent_format = latent_formats.SD3() | |
method = LatentPreviewMethod.Latent2RGB | |
elif preview_method == "Latent2RGB-SD-X4": | |
latent_format = latent_formats.SD_X4() | |
method = LatentPreviewMethod.Latent2RGB | |
elif preview_method == "Latent2RGB-Playground-2.5": | |
latent_format = latent_formats.SDXL_Playground_2_5() | |
method = LatentPreviewMethod.Latent2RGB | |
elif preview_method == "Latent2RGB-SC-Prior": | |
latent_format = latent_formats.SC_Prior() | |
method = LatentPreviewMethod.Latent2RGB | |
elif preview_method == "Latent2RGB-SC-B": | |
latent_format = latent_formats.SC_B() | |
method = LatentPreviewMethod.Latent2RGB | |
elif preview_method == "Latent2RGB-FLUX.1": | |
latent_format = latent_formats.Flux() | |
method = LatentPreviewMethod.Latent2RGB | |
elif preview_method == "Latent2RGB-LTXV": | |
latent_format = latent_formats.LTXV() | |
method = LatentPreviewMethod.Latent2RGB | |
else: | |
print(f"[Impact Pack] LatentSender: '{preview_method}' is unsupported preview method.") | |
latent_format = latent_formats.SD15() | |
method = LatentPreviewMethod.Latent2RGB | |
previewer = core.get_previewer("cpu", latent_format=latent_format, force=True, method=method) | |
image = previewer.decode_latent_to_preview(latent_tensor) | |
min_size = min(image.size[0], image.size[1]) | |
max_size = max(image.size[0], image.size[1]) | |
scale_factor = 1 | |
if max_size > upper_bound: | |
scale_factor = upper_bound/max_size | |
# prevent too small preview | |
if min_size*scale_factor < lower_bound: | |
scale_factor = lower_bound/min_size | |
w = int(image.size[0] * scale_factor) | |
h = int(image.size[1] * scale_factor) | |
image = image.resize((w, h), resample=Image.NEAREST) | |
return LatentSender.attach_format_text(image) | |
def attach_format_text(image): | |
width_a, height_a = image.size | |
letter_image = Image.open(latent_letter_path) | |
width_b, height_b = letter_image.size | |
new_width = max(width_a, width_b) | |
new_height = height_a + height_b | |
new_image = Image.new('RGB', (new_width, new_height), (0, 0, 0)) | |
offset_x = (new_width - width_b) // 2 | |
offset_y = (height_a + (new_height - height_a - height_b) // 2) | |
new_image.paste(letter_image, (offset_x, offset_y)) | |
new_image.paste(image, (0, 0)) | |
return new_image | |
def doit(self, samples, filename_prefix="latents/LatentSender", link_id=0, preview_method="Latent2RGB-SDXL", prompt=None, extra_pnginfo=None): | |
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) | |
# generate the preview image | |
preview = LatentSender.prepare_preview(samples['samples'], preview_method) | |
# embed the latent and metadata into the preview PNG for latent sharing | |
file = f"{filename}_{counter:05}_.latent.png" | |
fullpath = os.path.join(full_output_folder, file) | |
output = {"latent_tensor": samples["samples"]} | |
tensor_bytes = safetensors.torch.save(output) | |
LatentSender.save_to_file(tensor_bytes, prompt, extra_pnginfo, preview, fullpath) | |
latent_path = { | |
'filename': file, | |
'subfolder': subfolder, | |
'type': self.type | |
} | |
PromptServer.instance.send_sync("latent-send", {"link_id": link_id, "images": [latent_path]}) | |
return {'ui': {'images': [latent_path]}} | |
class ImpactWildcardProcessor: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"wildcard_text": ("STRING", {"multiline": True, "dynamicPrompts": False, "tooltip": "Enter a prompt using wildcard syntax."}), | |
"populated_text": ("STRING", {"multiline": True, "dynamicPrompts": False, "tooltip": "The actual value passed during the execution of 'ImpactWildcardProcessor' is what is shown here. The behavior varies slightly depending on the mode. Wildcard syntax can also be used in 'populated_text'."}), | |
"mode": (["populate", "fixed", "reproduce"], {"default": "populate", "tooltip": | |
"populate: Before running the workflow, it overwrites the existing value of 'populated_text' with the prompt processed from 'wildcard_text'. In this mode, 'populated_text' cannot be edited.\n" | |
"fixed: Ignores wildcard_text and keeps 'populated_text' as is. You can edit 'populated_text' in this mode.\n" | |
"reproduce: This mode operates as 'fixed' mode only once for reproduction, and then it switches to 'populate' mode." | |
}), | |
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Determines the random seed to be used for wildcard processing."}), | |
"Select to add Wildcard": (["Select the Wildcard to add to the text"],), | |
}, | |
} | |
CATEGORY = "ImpactPack/Prompt" | |
DESCRIPTION = ("The 'ImpactWildcardProcessor' processes text prompts written in wildcard syntax and outputs the processed text prompt.\n\n" | |
"TIP: Before the workflow is executed, the processing result of 'wildcard_text' is displayed in 'populated_text', and the populated text is saved along with the workflow. If you want to use a seed converted as input, write the prompt directly in 'populated_text' instead of 'wildcard_text', and set the mode to 'fixed'.") | |
RETURN_TYPES = ("STRING", ) | |
RETURN_NAMES = ("processed text",) | |
FUNCTION = "doit" | |
def process(**kwargs): | |
return impact.wildcards.process(**kwargs) | |
def doit(self, *args, **kwargs): | |
populated_text = ImpactWildcardProcessor.process(text=kwargs['populated_text'], seed=kwargs['seed']) | |
return (populated_text, ) | |
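# Minimal wildcard example (assuming the pack's standard syntax; values illustrative): | |
# wildcard_text  = "a photo of a {red|blue|green} car, __quality__" | |
# populated_text = "a photo of a blue car, masterpiece"  # one seed-dependent expansion | |
# {a|b|c} picks one alternative by seed; __name__ expands an entry from a wildcard file. | |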
class ImpactWildcardEncode: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"model": ("MODEL",), | |
"clip": ("CLIP",), | |
"wildcard_text": ("STRING", {"multiline": True, "dynamicPrompts": False, "tooltip": "Enter a prompt using wildcard syntax."}), | |
"populated_text": ("STRING", {"multiline": True, "dynamicPrompts": False, "tooltip": "The actual value passed during the execution of 'ImpactWildcardEncode' is what is shown here. The behavior varies slightly depending on the mode. Wildcard syntax can also be used in 'populated_text'."}), | |
"mode": (["populate", "fixed", "reproduce"], {"tooltip": | |
"populate: Before running the workflow, it overwrites the existing value of 'populated_text' with the prompt processed from 'wildcard_text'. In this mode, 'populated_text' cannot be edited.\n" | |
"fixed: Ignores wildcard_text and keeps 'populated_text' as is. You can edit 'populated_text' in this mode\n." | |
"reproduce: This mode operates as 'fixed' mode only once for reproduction, and then it switches to 'populate' mode."}), | |
"Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"), ), | |
"Select to add Wildcard": (["Select the Wildcard to add to the text"], ), | |
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "Determines the random seed to be used for wildcard processing."}), | |
}, | |
} | |
CATEGORY = "ImpactPack/Prompt" | |
DESCRIPTION = ("The 'ImpactWildcardEncode' node processes text prompts written in wildcard syntax and outputs them as conditioning. It also supports LoRA syntax, with the applied LoRA reflected in the model's output.\n\n" | |
"TIP1: Before the workflow is executed, the processing result of 'wildcard_text' is displayed in 'populated_text', and the populated text is saved along with the workflow. If you want to use a seed converted as input, write the prompt directly in 'populated_text' instead of 'wildcard_text', and set the mode to 'fixed'.\n" | |
"TIP2: If the 'Inspire Pack' is installed, LBW(LoRA Block Weight) syntax can also be applied.") | |
RETURN_TYPES = ("MODEL", "CLIP", "CONDITIONING", "STRING") | |
RETURN_NAMES = ("model", "clip", "conditioning", "populated_text") | |
FUNCTION = "doit" | |
def process_with_loras(**kwargs): | |
return impact.wildcards.process_with_loras(**kwargs) | |
def get_wildcard_list(): | |
return impact.wildcards.get_wildcard_list() | |
def doit(self, *args, **kwargs): | |
populated = kwargs['populated_text'] | |
processed = [] | |
model, clip, conditioning = impact.wildcards.process_with_loras(wildcard_opt=populated, model=kwargs['model'], clip=kwargs['clip'], seed=kwargs['seed'], processed=processed) | |
return model, clip, conditioning, processed[0] | |
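# Sketch of the LoRA syntax mentioned in DESCRIPTION (assumed standard form): | |
# populated_text = "<lora:detail_tweaker:0.8> a portrait, {studio|natural} light" | |
# process_with_loras strips the <lora:...> tags, applies each LoRA to model/clip, | |
# and encodes the remaining prompt into the returned conditioning. | |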
class ImpactSchedulerAdapter: | |
def INPUT_TYPES(s): | |
return {"required": { | |
"scheduler": (comfy.samplers.KSampler.SCHEDULERS, {"defaultInput": True, }), | |
"extra_scheduler": (['None', 'AYS SDXL', 'AYS SD1', 'AYS SVD', 'GITS[coeff=1.2]', 'LTXV[default]', 'OSS FLUX', 'OSS Wan'],), | |
}} | |
CATEGORY = "ImpactPack/Util" | |
RETURN_TYPES = (core.SCHEDULERS,) | |
RETURN_NAMES = ("scheduler",) | |
FUNCTION = "doit" | |
def doit(self, scheduler, extra_scheduler): | |
if extra_scheduler != 'None': | |
return (extra_scheduler,) | |
return (scheduler,) | |
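# Example: with scheduler="karras" and extra_scheduler="AYS SDXL", the adapter | |
# returns "AYS SDXL"; with extra_scheduler="None" it passes "karras" through. | |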