import os
import sys
import comfy.samplers
import comfy.sd
import warnings
from segment_anything import sam_model_registry
from io import BytesIO
import piexif
import zipfile
import re
import impact.wildcards
from impact.utils import *
import impact.core as core
from impact.core import SEG
from impact.config import MAX_RESOLUTION, latent_letter_path
from PIL import Image, ImageOps
import numpy as np
import hashlib
import json
import safetensors.torch
from PIL.PngImagePlugin import PngInfo
import comfy.model_management
import base64
import impact.wildcards as wildcards
from . import hooks

# ComfyUI runtime modules referenced throughout this file
import torch
import folder_paths
import nodes

warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated')

model_path = folder_paths.models_dir

# folder_paths.supported_pt_extensions
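# Helper assumed from the upstream Impact Pack sources (a sketch, not verbatim):
# registers an extra model search folder with ComfyUI's folder_paths and merges
# the allowed file extensions, so the calls below are runnable.
def add_folder_path_and_extensions(folder_name, full_folder_paths, extensions):
    for full_folder_path in full_folder_paths:
        folder_paths.add_model_folder_path(folder_name, full_folder_path)
    if folder_name in folder_paths.folder_names_and_paths:
        current_paths, current_extensions = folder_paths.folder_names_and_paths[folder_name]
        updated_extensions = current_extensions | extensions
        folder_paths.folder_names_and_paths[folder_name] = (current_paths, updated_extensions)
    else:
        folder_paths.folder_names_and_paths[folder_name] = (full_folder_paths, extensions)
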
add_folder_path_and_extensions("mmdets_bbox", [os.path.join(model_path, "mmdets", "bbox")], folder_paths.supported_pt_extensions)
add_folder_path_and_extensions("mmdets_segm", [os.path.join(model_path, "mmdets", "segm")], folder_paths.supported_pt_extensions)
add_folder_path_and_extensions("mmdets", [os.path.join(model_path, "mmdets")], folder_paths.supported_pt_extensions)
add_folder_path_and_extensions("sams", [os.path.join(model_path, "sams")], folder_paths.supported_pt_extensions)
add_folder_path_and_extensions("onnx", [os.path.join(model_path, "onnx")], {'.onnx'})


# Nodes
class ONNXDetectorProvider:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"model_name": (folder_paths.get_filename_list("onnx"), )}}

    RETURN_TYPES = ("BBOX_DETECTOR", )
    FUNCTION = "load_onnx"

    CATEGORY = "ImpactPack"

    def load_onnx(self, model_name):
        model = folder_paths.get_full_path("onnx", model_name)
        return (core.ONNXDetector(model), )

class CLIPSegDetectorProvider:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "text": ("STRING", {"multiline": False}),
                    "blur": ("FLOAT", {"min": 0, "max": 15, "step": 0.1, "default": 7}),
                    "threshold": ("FLOAT", {"min": 0, "max": 1, "step": 0.05, "default": 0.4}),
                    "dilation_factor": ("INT", {"min": 0, "max": 10, "step": 1, "default": 4}),
                   }}

    RETURN_TYPES = ("BBOX_DETECTOR", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Util"

    def doit(self, text, blur, threshold, dilation_factor):
        if "CLIPSeg" in nodes.NODE_CLASS_MAPPINGS:
            return (core.BBoxDetectorBasedOnCLIPSeg(text, blur, threshold, dilation_factor), )
        else:
            print("[ERROR] CLIPSegDetectorProvider: The CLIPSeg custom node isn't installed. You must install the biegert/ComfyUI-CLIPSeg extension to use this node.")

class SAMLoader:
    @classmethod
    def INPUT_TYPES(cls):
        models = [x for x in folder_paths.get_filename_list("sams") if 'hq' not in x]
        return {
            "required": {
                "model_name": (models, ),
                "device_mode": (["AUTO", "Prefer GPU", "CPU"],),
            }
        }

    RETURN_TYPES = ("SAM_MODEL", )
    FUNCTION = "load_model"

    CATEGORY = "ImpactPack"

    def load_model(self, model_name, device_mode="AUTO"):
        modelname = folder_paths.get_full_path("sams", model_name)

        if 'vit_h' in model_name:
            model_kind = 'vit_h'
        elif 'vit_l' in model_name:
            model_kind = 'vit_l'
        else:
            model_kind = 'vit_b'

        sam = sam_model_registry[model_kind](checkpoint=modelname)
        size = os.path.getsize(modelname)
        sam.safe_to = core.SafeToGPU(size)

        # Unless the user explicitly asks for CPU, use the GPU device.
        device = comfy.model_management.get_torch_device() if device_mode == "Prefer GPU" else "CPU"

        if device_mode == "Prefer GPU":
            sam.safe_to.to_device(sam, device)

        sam.is_auto_mode = device_mode == "AUTO"

        print(f"Loaded SAM model: {modelname} (device:{device_mode})")
        return (sam, )

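# Usage sketch (assumption, not part of the node-graph API): loading a SAM model
# headlessly. 'sam_vit_b_01ec64.pth' is the stock ViT-B checkpoint name and is
# assumed to be present under models/sams.
def _example_load_sam():
    sam, = SAMLoader().load_model("sam_vit_b_01ec64.pth", device_mode="AUTO")
    # In AUTO mode the model stays on CPU until detection actually runs.
    return sam
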
class ONNXDetectorForEach:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "onnx_detector": ("ONNX_DETECTOR",),
                    "image": ("IMAGE",),
                    "threshold": ("FLOAT", {"default": 0.8, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
                    "crop_factor": ("FLOAT", {"default": 1.0, "min": 0.5, "max": 100, "step": 0.1}),
                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
                   }}

    RETURN_TYPES = ("SEGS", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detector"

    OUTPUT_NODE = True

    def doit(self, onnx_detector, image, threshold, dilation, crop_factor, drop_size):
        segs = onnx_detector.detect(image, threshold, dilation, crop_factor, drop_size)
        return (segs, )

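# Usage sketch (assumption): chaining the provider and detector nodes outside the
# graph. 'face_detect.onnx' is a hypothetical model file under models/onnx;
# `image` is a [B, H, W, C] float tensor as ComfyUI passes between nodes.
def _example_onnx_detection(image):
    detector, = ONNXDetectorProvider().load_onnx("face_detect.onnx")
    segs, = ONNXDetectorForEach().doit(detector, image, threshold=0.8, dilation=10,
                                       crop_factor=1.0, drop_size=10)
    return segs  # SEGS: ((h, w), [SEG, ...])
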
class DetailerForEach:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "image": ("IMAGE", ),
                    "segs": ("SEGS", ),
                    "model": ("MODEL",),
                    "clip": ("CLIP",),
                    "vae": ("VAE",),
                    "guide_size": ("FLOAT", {"default": 384, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}),
                    "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
                    "positive": ("CONDITIONING",),
                    "negative": ("CONDITIONING",),
                    "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}),
                    "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
                    "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                    "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
                   },
                "optional": {
                    "detailer_hook": ("DETAILER_HOOK",),
                    "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "noise_mask_feather": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}),
                   }}

    RETURN_TYPES = ("IMAGE", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    @staticmethod
    def do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg,
                  sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint,
                  wildcard_opt=None, detailer_hook=None,
                  refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None,
                  refiner_negative=None, cycle=1, inpaint_model=False, noise_mask_feather=0):

        if len(image) > 1:
            raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.')

        image = image.clone()
        enhanced_alpha_list = []
        enhanced_list = []
        cropped_list = []
        cnet_pil_list = []

        segs = core.segs_scale_match(segs, image.shape)
        new_segs = []

        wildcard_concat_mode = None
        if wildcard_opt is not None:
            if wildcard_opt.startswith('[CONCAT]'):
                wildcard_concat_mode = 'concat'
                wildcard_opt = wildcard_opt[8:]
            wmode, wildcard_chooser = wildcards.process_wildcard_for_segs(wildcard_opt)
        else:
            wmode, wildcard_chooser = None, None

        if wmode in ['ASC', 'DSC']:
            if wmode == 'ASC':
                ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[0], x.bbox[1]))
            else:
                ordered_segs = sorted(segs[1], key=lambda x: (x.bbox[0], x.bbox[1]), reverse=True)
        else:
            ordered_segs = segs[1]

        for i, seg in enumerate(ordered_segs):
            cropped_image = seg.cropped_image if seg.cropped_image is not None \
                                              else crop_ndarray4(image.numpy(), seg.crop_region)
            cropped_image = to_tensor(cropped_image)
            mask = to_tensor(seg.cropped_mask)
            mask = tensor_gaussian_blur_mask(mask, feather)

            is_mask_all_zeros = (seg.cropped_mask == 0).all().item()
            if is_mask_all_zeros:
                print("Detailer: segment skip [empty mask]")
                continue

            if noise_mask:
                cropped_mask = seg.cropped_mask
            else:
                cropped_mask = None

            if wildcard_chooser is not None and wmode != "LAB":
                seg_seed, wildcard_item = wildcard_chooser.get(seg)
            elif wildcard_chooser is not None and wmode == "LAB":
                seg_seed, wildcard_item = None, wildcard_chooser.get(seg)
            else:
                seg_seed, wildcard_item = None, None

            seg_seed = seed + i if seg_seed is None else seg_seed

            enhanced_image, cnet_pils = core.enhance_detail(cropped_image, model, clip, vae, guide_size, guide_size_for_bbox, max_size,
                                                            seg.bbox, seg_seed, steps, cfg, sampler_name, scheduler,
                                                            positive, negative, denoise, cropped_mask, force_inpaint,
                                                            wildcard_opt=wildcard_item, wildcard_opt_concat_mode=wildcard_concat_mode,
                                                            detailer_hook=detailer_hook,
                                                            refiner_ratio=refiner_ratio, refiner_model=refiner_model,
                                                            refiner_clip=refiner_clip, refiner_positive=refiner_positive,
                                                            refiner_negative=refiner_negative, control_net_wrapper=seg.control_net_wrapper,
                                                            cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather)

            if cnet_pils is not None:
                cnet_pil_list.extend(cnet_pils)

            if enhanced_image is not None:
                # don't use a latent composite -> converting to latent caused poor quality;
                # paste in image space instead
                image = image.cpu()
                enhanced_image = enhanced_image.cpu()
                tensor_paste(image, enhanced_image, (seg.crop_region[0], seg.crop_region[1]), mask)
                enhanced_list.append(enhanced_image)

                if detailer_hook is not None:
                    detailer_hook.post_paste(image)

                # Convert the enhanced tensor to RGBA mode
                enhanced_image_alpha = tensor_convert_rgba(enhanced_image)
                new_seg_image = enhanced_image.numpy()  # alpha should not be applied to seg_image

                # Apply the mask as the alpha channel
                mask = tensor_resize(mask, *tensor_get_size(enhanced_image))
                tensor_putalpha(enhanced_image_alpha, mask)
                enhanced_alpha_list.append(enhanced_image_alpha)
            else:
                new_seg_image = None

            cropped_list.append(cropped_image)

            new_seg = SEG(new_seg_image, seg.cropped_mask, seg.confidence, seg.crop_region, seg.bbox, seg.label, seg.control_net_wrapper)
            new_segs.append(new_seg)

        image_tensor = tensor_convert_rgb(image)

        cropped_list.sort(key=lambda x: x.shape, reverse=True)
        enhanced_list.sort(key=lambda x: x.shape, reverse=True)
        enhanced_alpha_list.sort(key=lambda x: x.shape, reverse=True)

        return image_tensor, cropped_list, enhanced_list, enhanced_alpha_list, cnet_pil_list, (segs[0], new_segs)

    def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name,
             scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, cycle=1,
             detailer_hook=None, inpaint_model=False, noise_mask_feather=0):

        enhanced_img, *_ = \
            DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps,
                                      cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask,
                                      force_inpaint, wildcard, detailer_hook,
                                      cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather)

        return (enhanced_img, )

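# Usage sketch (assumption): running the detailer headlessly on pre-detected SEGS.
# `model`, `clip`, `vae`, `positive`, `negative` follow the usual ComfyUI types;
# the keyword values shown are illustrative defaults, not prescribed settings.
def _example_detail(image, segs, model, clip, vae, positive, negative):
    enhanced, = DetailerForEach().doit(image, segs, model, clip, vae,
                                       guide_size=384, guide_size_for=True, max_size=1024,
                                       seed=0, steps=20, cfg=8.0,
                                       sampler_name="euler", scheduler="normal",
                                       positive=positive, negative=negative,
                                       denoise=0.5, feather=5, noise_mask=True,
                                       force_inpaint=True, wildcard="")
    return enhanced
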
class DetailerForEachPipe:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "image": ("IMAGE", ),
                    "segs": ("SEGS", ),
                    "guide_size": ("FLOAT", {"default": 384, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}),
                    "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
                    "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}),
                    "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
                    "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "basic_pipe": ("BASIC_PIPE", ),
                    "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                    "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}),
                    "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
                   },
                "optional": {
                    "detailer_hook": ("DETAILER_HOOK",),
                    "refiner_basic_pipe_opt": ("BASIC_PIPE",),
                    "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "noise_mask_feather": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}),
                   }}

    RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE")
    RETURN_NAMES = ("image", "segs", "basic_pipe", "cnet_images")
    OUTPUT_IS_LIST = (False, False, False, True)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler,
             denoise, feather, noise_mask, force_inpaint, basic_pipe, wildcard,
             refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None,
             cycle=1, inpaint_model=False, noise_mask_feather=0):

        if len(image) > 1:
            raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.')

        model, clip, vae, positive, negative = basic_pipe

        if refiner_basic_pipe_opt is None:
            refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None
        else:
            refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt

        enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \
            DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg,
                                      sampler_name, scheduler, positive, negative, denoise, feather, noise_mask,
                                      force_inpaint, wildcard, detailer_hook,
                                      refiner_ratio=refiner_ratio, refiner_model=refiner_model,
                                      refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative,
                                      cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather)

        # set fallback image
        if len(cnet_pil_list) == 0:
            cnet_pil_list = [empty_pil_tensor()]

        return (enhanced_img, new_segs, basic_pipe, cnet_pil_list)

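# Sketch (assumption): a BASIC_PIPE is just the 5-tuple unpacked above, so one can
# be assembled by hand for headless use of the pipe variant.
def _example_detail_with_pipe(image, segs, model, clip, vae, positive, negative):
    basic_pipe = (model, clip, vae, positive, negative)
    img, out_segs, pipe, cnet_images = DetailerForEachPipe().doit(
        image, segs, 384, True, 1024, 0, 20, 8.0, "euler", "normal",
        0.5, 5, True, True, basic_pipe, "", refiner_ratio=0.2)
    return img
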
class FaceDetailer:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "image": ("IMAGE", ),
                    "model": ("MODEL",),
                    "clip": ("CLIP",),
                    "vae": ("VAE",),
                    "guide_size": ("FLOAT", {"default": 384, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}),
                    "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
                    "positive": ("CONDITIONING",),
                    "negative": ("CONDITIONING",),
                    "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}),
                    "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
                    "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "force_inpaint": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "bbox_dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
                    "bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}),
                    "sam_detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"],),
                    "sam_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
                    "sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}),
                    "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "sam_mask_hint_use_negative": (["False", "Small", "Outter"],),
                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
                    "bbox_detector": ("BBOX_DETECTOR", ),
                    "wildcard": ("STRING", {"multiline": True, "dynamicPrompts": False}),
                    "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
                   },
                "optional": {
                    "sam_model_opt": ("SAM_MODEL", ),
                    "segm_detector_opt": ("SEGM_DETECTOR", ),
                    "detailer_hook": ("DETAILER_HOOK",),
                    "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "noise_mask_feather": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}),
                   }}

    RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", "IMAGE")
    RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe", "cnet_images")
    OUTPUT_IS_LIST = (False, True, True, False, False, True)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Simple"

    @staticmethod
    def enhance_face(image, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg, sampler_name, scheduler,
                     positive, negative, denoise, feather, noise_mask, force_inpaint,
                     bbox_threshold, bbox_dilation, bbox_crop_factor,
                     sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold,
                     sam_mask_hint_use_negative, drop_size,
                     bbox_detector, segm_detector=None, sam_model_opt=None, wildcard_opt=None, detailer_hook=None,
                     refiner_ratio=None, refiner_model=None, refiner_clip=None, refiner_positive=None, refiner_negative=None, cycle=1,
                     inpaint_model=False, noise_mask_feather=0):

        # use 'face' as the default detection prompt when CLIPSeg runs with an empty prompt
        bbox_detector.setAux('face')
        segs = bbox_detector.detect(image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size, detailer_hook=detailer_hook)
        bbox_detector.setAux(None)

        # bbox + sam combination
        if sam_model_opt is not None:
            sam_mask = core.make_sam_mask(sam_model_opt, segs, image, sam_detection_hint, sam_dilation,
                                          sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold,
                                          sam_mask_hint_use_negative, )
            segs = core.segs_bitwise_and_mask(segs, sam_mask)
        elif segm_detector is not None:
            segm_segs = segm_detector.detect(image, bbox_threshold, bbox_dilation, bbox_crop_factor, drop_size)

            if (hasattr(segm_detector, 'override_bbox_by_segm') and segm_detector.override_bbox_by_segm and
                    not (detailer_hook is not None and not hasattr(detailer_hook, 'override_bbox_by_segm'))):
                segs = segm_segs
            else:
                segm_mask = core.segs_to_combined_mask(segm_segs)
                segs = core.segs_bitwise_and_mask(segs, segm_mask)

        if len(segs[1]) > 0:
            enhanced_img, _, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \
                DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for_bbox, max_size, seed, steps, cfg,
                                          sampler_name, scheduler, positive, negative, denoise, feather, noise_mask,
                                          force_inpaint, wildcard_opt, detailer_hook,
                                          refiner_ratio=refiner_ratio, refiner_model=refiner_model,
                                          refiner_clip=refiner_clip, refiner_positive=refiner_positive,
                                          refiner_negative=refiner_negative,
                                          cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather)
        else:
            enhanced_img = image
            cropped_enhanced = []
            cropped_enhanced_alpha = []
            cnet_pil_list = []

        # Mask Generator
        mask = core.segs_to_combined_mask(segs)

        if len(cropped_enhanced) == 0:
            cropped_enhanced = [empty_pil_tensor()]

        if len(cropped_enhanced_alpha) == 0:
            cropped_enhanced_alpha = [empty_pil_tensor()]

        if len(cnet_pil_list) == 0:
            cnet_pil_list = [empty_pil_tensor()]

        return enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list

    def doit(self, image, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler,
             positive, negative, denoise, feather, noise_mask, force_inpaint,
             bbox_threshold, bbox_dilation, bbox_crop_factor,
             sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold,
             sam_mask_hint_use_negative, drop_size, bbox_detector, wildcard, cycle=1,
             sam_model_opt=None, segm_detector_opt=None, detailer_hook=None, inpaint_model=False, noise_mask_feather=0):

        result_img = None
        result_mask = None
        result_cropped_enhanced = []
        result_cropped_enhanced_alpha = []
        result_cnet_images = []

        if len(image) > 1:
            print("[Impact Pack] WARN: FaceDetailer is not designed for video detailing. If you intend to detail video frames, please use 'Detailer For AnimateDiff' instead.")

        for i, single_image in enumerate(image):
            enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face(
                single_image.unsqueeze(0), model, clip, vae, guide_size, guide_size_for, max_size, seed + i, steps, cfg, sampler_name, scheduler,
                positive, negative, denoise, feather, noise_mask, force_inpaint,
                bbox_threshold, bbox_dilation, bbox_crop_factor,
                sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold,
                sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector_opt, sam_model_opt, wildcard, detailer_hook,
                cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather)

            result_img = torch.cat((result_img, enhanced_img), dim=0) if result_img is not None else enhanced_img
            result_mask = torch.cat((result_mask, mask), dim=0) if result_mask is not None else mask
            result_cropped_enhanced.extend(cropped_enhanced)
            result_cropped_enhanced_alpha.extend(cropped_enhanced_alpha)
            result_cnet_images.extend(cnet_pil_list)

        pipe = (model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector_opt, sam_model_opt, detailer_hook, None, None, None, None)
        return result_img, result_cropped_enhanced, result_cropped_enhanced_alpha, result_mask, pipe, result_cnet_images

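# Usage sketch (assumption): a minimal face-detailing pass wired up headlessly.
# `bbox_detector` would come from a detector provider node (shipped separately);
# all argument values are illustrative, following the INPUT_TYPES defaults above.
def _example_face_detail(image, model, clip, vae, positive, negative, bbox_detector):
    img, cropped, cropped_alpha, mask, pipe, cnet = FaceDetailer().doit(
        image, model, clip, vae, 384, True, 1024, 0, 20, 8.0, "euler", "normal",
        positive, negative, 0.5, 5, True, True,
        0.5, 10, 3.0,                      # bbox_threshold / bbox_dilation / bbox_crop_factor
        "center-1", 0, 0.93, 0, 0.7,       # SAM hint settings (SAM model itself omitted)
        "False", 10, bbox_detector, "")    # sam_mask_hint_use_negative, drop_size, wildcard
    return img, mask
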
class LatentPixelScale:
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "samples": ("LATENT", ),
                    "scale_method": (s.upscale_methods,),
                    "scale_factor": ("FLOAT", {"default": 1.5, "min": 0.1, "max": 10000, "step": 0.1}),
                    "vae": ("VAE", ),
                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                   },
                "optional": {
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                   }}

    RETURN_TYPES = ("LATENT", "IMAGE")
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, samples, scale_method, scale_factor, vae, use_tiled_vae, upscale_model_opt=None):
        if upscale_model_opt is None:
            latimg = core.latent_upscale_on_pixel_space2(samples, scale_method, scale_factor, vae, use_tile=use_tiled_vae)
        else:
            latimg = core.latent_upscale_on_pixel_space_with_model2(samples, scale_method, upscale_model_opt, scale_factor, vae, use_tile=use_tiled_vae)
        return latimg

class NoiseInjectionDetailerHookProvider:
    schedules = ["skip_start", "from_start"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "schedule_for_cycle": (s.schedules,),
                    "source": (["CPU", "GPU"],),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "start_strength": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 200.0, "step": 0.01}),
                    "end_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}),
                   }}

    RETURN_TYPES = ("DETAILER_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    def doit(self, schedule_for_cycle, source, seed, start_strength, end_strength):
        try:
            hook = hooks.InjectNoiseHookForDetailer(source, seed, start_strength, end_strength,
                                                    from_start=('from_start' in schedule_for_cycle))
            return (hook, )
        except Exception as e:
            print("[ERROR] NoiseInjectionDetailerHookProvider: The 'ComfyUI Noise' custom node isn't installed. You must install the 'BlenderNeko/ComfyUI Noise' extension to use this node.")
            print(f"\t{e}")

class UnsamplerDetailerHookProvider:
    schedules = ["skip_start", "from_start"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "model": ("MODEL",),
                    "steps": ("INT", {"default": 25, "min": 1, "max": 10000}),
                    "start_end_at_step": ("INT", {"default": 21, "min": 0, "max": 10000}),
                    "end_end_at_step": ("INT", {"default": 24, "min": 0, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "normalize": (["disable", "enable"], ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "schedule_for_cycle": (s.schedules,),
                   }}

    RETURN_TYPES = ("DETAILER_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    def doit(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name,
             scheduler, normalize, positive, negative, schedule_for_cycle):
        try:
            hook = hooks.UnsamplerDetailerHook(model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name,
                                               scheduler, normalize, positive, negative,
                                               from_start=('from_start' in schedule_for_cycle))
            return (hook, )
        except Exception as e:
            print("[ERROR] UnsamplerDetailerHookProvider: The 'ComfyUI Noise' custom node isn't installed. You must install the 'BlenderNeko/ComfyUI Noise' extension to use this node.")
            print(f"\t{e}")

class DenoiseSchedulerDetailerHookProvider:
    schedules = ["simple"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "schedule_for_cycle": (s.schedules,),
                    "target_denoise": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
                   }}

    RETURN_TYPES = ("DETAILER_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    def doit(self, schedule_for_cycle, target_denoise):
        hook = hooks.SimpleDetailerDenoiseSchedulerHook(target_denoise)
        return (hook, )

class CoreMLDetailerHookProvider:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"mode": (["512x512", "768x768", "512x768", "768x512"], )}, }

    RETURN_TYPES = ("DETAILER_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    def doit(self, mode):
        hook = hooks.CoreMLHook(mode)
        return (hook, )

class CfgScheduleHookProvider:
    schedules = ["simple"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "schedule_for_iteration": (s.schedules,),
                    "target_cfg": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0}),
                   }}

    RETURN_TYPES = ("PK_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, schedule_for_iteration, target_cfg):
        hook = None
        if schedule_for_iteration == "simple":
            hook = hooks.SimpleCfgScheduleHook(target_cfg)
        return (hook, )

class UnsamplerHookProvider:
    schedules = ["simple"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "model": ("MODEL",),
                    "steps": ("INT", {"default": 25, "min": 1, "max": 10000}),
                    "start_end_at_step": ("INT", {"default": 21, "min": 0, "max": 10000}),
                    "end_end_at_step": ("INT", {"default": 24, "min": 0, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "normalize": (["disable", "enable"], ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "schedule_for_iteration": (s.schedules,),
                   }}

    RETURN_TYPES = ("PK_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name,
             scheduler, normalize, positive, negative, schedule_for_iteration):
        try:
            hook = None
            if schedule_for_iteration == "simple":
                hook = hooks.UnsamplerHook(model, steps, start_end_at_step, end_end_at_step, cfg, sampler_name,
                                           scheduler, normalize, positive, negative)
            return (hook, )
        except Exception as e:
            print("[ERROR] UnsamplerHookProvider: The 'ComfyUI Noise' custom node isn't installed. You must install the 'BlenderNeko/ComfyUI Noise' extension to use this node.")
            print(f"\t{e}")

class NoiseInjectionHookProvider:
    schedules = ["simple"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "schedule_for_iteration": (s.schedules,),
                    "source": (["CPU", "GPU"],),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "start_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}),
                    "end_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}),
                   }}

    RETURN_TYPES = ("PK_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, schedule_for_iteration, source, seed, start_strength, end_strength):
        try:
            hook = None
            if schedule_for_iteration == "simple":
                hook = hooks.InjectNoiseHook(source, seed, start_strength, end_strength)
            return (hook, )
        except Exception as e:
            print("[ERROR] NoiseInjectionHookProvider: The 'ComfyUI Noise' custom node isn't installed. You must install the 'BlenderNeko/ComfyUI Noise' extension to use this node.")
            print(f"\t{e}")

class DenoiseScheduleHookProvider:
    schedules = ["simple"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "schedule_for_iteration": (s.schedules,),
                    "target_denoise": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.01}),
                   }}

    RETURN_TYPES = ("PK_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, schedule_for_iteration, target_denoise):
        hook = None
        if schedule_for_iteration == "simple":
            hook = hooks.SimpleDenoiseScheduleHook(target_denoise)
        return (hook, )

class StepsScheduleHookProvider:
    schedules = ["simple"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "schedule_for_iteration": (s.schedules,),
                    "target_steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                   }}

    RETURN_TYPES = ("PK_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, schedule_for_iteration, target_steps):
        hook = None
        if schedule_for_iteration == "simple":
            hook = hooks.SimpleStepsScheduleHook(target_steps)
        return (hook, )

class DetailerHookCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "hook1": ("DETAILER_HOOK",),
                    "hook2": ("DETAILER_HOOK",),
                   }}

    RETURN_TYPES = ("DETAILER_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, hook1, hook2):
        hook = hooks.DetailerHookCombine(hook1, hook2)
        return (hook, )

class PixelKSampleHookCombine:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "hook1": ("PK_HOOK",),
                    "hook2": ("PK_HOOK",),
                   }}

    RETURN_TYPES = ("PK_HOOK",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, hook1, hook2):
        hook = hooks.PixelKSampleHookCombine(hook1, hook2)
        return (hook, )

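# Usage sketch (assumption): schedule hooks are typically chained so several
# sampling parameters ramp across iterative-upscale iterations at once.
def _example_combined_hook():
    cfg_hook, = CfgScheduleHookProvider().doit("simple", target_cfg=3.0)
    denoise_hook, = DenoiseScheduleHookProvider().doit("simple", target_denoise=0.2)
    combined, = PixelKSampleHookCombine().doit(cfg_hook, denoise_hook)
    return combined  # pass as pk_hook_opt to an upscaler provider below
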
class PixelTiledKSampleUpscalerProvider:
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "scale_method": (s.upscale_methods,),
                    "model": ("MODEL",),
                    "vae": ("VAE",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}),
                    "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}),
                    "tiling_strategy": (["random", "padded", "simple"], ),
                   },
                "optional": {
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                    "pk_hook_opt": ("PK_HOOK", ),
                   }}

    RETURN_TYPES = ("UPSCALER",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise,
             tile_width, tile_height, tiling_strategy, upscale_model_opt=None, pk_hook_opt=None):
        if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS:
            upscaler = core.PixelTiledKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler,
                                                      positive, negative, denoise, tile_width, tile_height, tiling_strategy,
                                                      upscale_model_opt, pk_hook_opt, tile_size=max(tile_width, tile_height))
            return (upscaler, )
        else:
            print("[ERROR] PixelTiledKSampleUpscalerProvider: The ComfyUI_TiledKSampler custom node isn't installed. You must install the BlenderNeko/ComfyUI_TiledKSampler extension to use this node.")

class PixelTiledKSampleUpscalerProviderPipe:
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "scale_method": (s.upscale_methods,),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "tile_width": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}),
                    "tile_height": ("INT", {"default": 512, "min": 320, "max": MAX_RESOLUTION, "step": 64}),
                    "tiling_strategy": (["random", "padded", "simple"], ),
                    "basic_pipe": ("BASIC_PIPE",)
                   },
                "optional": {
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                    "pk_hook_opt": ("PK_HOOK", ),
                   }}

    RETURN_TYPES = ("UPSCALER",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, scale_method, seed, steps, cfg, sampler_name, scheduler, denoise,
             tile_width, tile_height, tiling_strategy, basic_pipe, upscale_model_opt=None, pk_hook_opt=None):
        if "BNK_TiledKSampler" in nodes.NODE_CLASS_MAPPINGS:
            model, _, vae, positive, negative = basic_pipe
            upscaler = core.PixelTiledKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler,
                                                      positive, negative, denoise, tile_width, tile_height, tiling_strategy,
                                                      upscale_model_opt, pk_hook_opt, tile_size=max(tile_width, tile_height))
            return (upscaler, )
        else:
            print("[ERROR] PixelTiledKSampleUpscalerProviderPipe: The ComfyUI_TiledKSampler custom node isn't installed. You must install the BlenderNeko/ComfyUI_TiledKSampler extension to use this node.")

class PixelKSampleUpscalerProvider:
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "scale_method": (s.upscale_methods,),
                    "model": ("MODEL",),
                    "vae": ("VAE",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "positive": ("CONDITIONING", ),
                    "negative": ("CONDITIONING", ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}),
                   },
                "optional": {
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                    "pk_hook_opt": ("PK_HOOK", ),
                   }}

    RETURN_TYPES = ("UPSCALER",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler, positive, negative, denoise,
             use_tiled_vae, upscale_model_opt=None, pk_hook_opt=None, tile_size=512):
        upscaler = core.PixelKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler,
                                             positive, negative, denoise, use_tiled_vae, upscale_model_opt, pk_hook_opt,
                                             tile_size=tile_size)
        return (upscaler, )

class PixelKSampleUpscalerProviderPipe(PixelKSampleUpscalerProvider):
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "scale_method": (s.upscale_methods,),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "basic_pipe": ("BASIC_PIPE",),
                    "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}),
                   },
                "optional": {
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                    "pk_hook_opt": ("PK_HOOK", ),
                   }}

    RETURN_TYPES = ("UPSCALER",)
    FUNCTION = "doit_pipe"

    CATEGORY = "ImpactPack/Upscale"

    def doit_pipe(self, scale_method, seed, steps, cfg, sampler_name, scheduler, denoise,
                  use_tiled_vae, basic_pipe, upscale_model_opt=None, pk_hook_opt=None, tile_size=512):
        model, _, vae, positive, negative = basic_pipe
        upscaler = core.PixelKSampleUpscaler(scale_method, model, vae, seed, steps, cfg, sampler_name, scheduler,
                                             positive, negative, denoise, use_tiled_vae, upscale_model_opt, pk_hook_opt,
                                             tile_size=tile_size)
        return (upscaler, )

class TwoSamplersForMaskUpscalerProvider:
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "scale_method": (s.upscale_methods,),
                    "full_sample_schedule": (
                        ["none", "interleave1", "interleave2", "interleave3",
                         "last1", "last2",
                         "interleave1+last1", "interleave2+last1", "interleave3+last1",
                         ],),
                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "base_sampler": ("KSAMPLER", ),
                    "mask_sampler": ("KSAMPLER", ),
                    "mask": ("MASK", ),
                    "vae": ("VAE",),
                    "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}),
                   },
                "optional": {
                    "full_sampler_opt": ("KSAMPLER",),
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                    "pk_hook_base_opt": ("PK_HOOK", ),
                    "pk_hook_mask_opt": ("PK_HOOK", ),
                    "pk_hook_full_opt": ("PK_HOOK", ),
                   }}

    RETURN_TYPES = ("UPSCALER", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, scale_method, full_sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, vae,
             full_sampler_opt=None, upscale_model_opt=None,
             pk_hook_base_opt=None, pk_hook_mask_opt=None, pk_hook_full_opt=None, tile_size=512):
        upscaler = core.TwoSamplersForMaskUpscaler(scale_method, full_sample_schedule, use_tiled_vae,
                                                   base_sampler, mask_sampler, mask, vae, full_sampler_opt, upscale_model_opt,
                                                   pk_hook_base_opt, pk_hook_mask_opt, pk_hook_full_opt, tile_size=tile_size)
        return (upscaler, )

class TwoSamplersForMaskUpscalerProviderPipe:
    upscale_methods = ["nearest-exact", "bilinear", "lanczos", "area"]

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "scale_method": (s.upscale_methods,),
                    "full_sample_schedule": (
                        ["none", "interleave1", "interleave2", "interleave3",
                         "last1", "last2",
                         "interleave1+last1", "interleave2+last1", "interleave3+last1",
                         ],),
                    "use_tiled_vae": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "base_sampler": ("KSAMPLER", ),
                    "mask_sampler": ("KSAMPLER", ),
                    "mask": ("MASK", ),
                    "basic_pipe": ("BASIC_PIPE",),
                    "tile_size": ("INT", {"default": 512, "min": 320, "max": 4096, "step": 64}),
                   },
                "optional": {
                    "full_sampler_opt": ("KSAMPLER",),
                    "upscale_model_opt": ("UPSCALE_MODEL", ),
                    "pk_hook_base_opt": ("PK_HOOK", ),
                    "pk_hook_mask_opt": ("PK_HOOK", ),
                    "pk_hook_full_opt": ("PK_HOOK", ),
                   }}

    RETURN_TYPES = ("UPSCALER", )
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, scale_method, full_sample_schedule, use_tiled_vae, base_sampler, mask_sampler, mask, basic_pipe,
             full_sampler_opt=None, upscale_model_opt=None,
             pk_hook_base_opt=None, pk_hook_mask_opt=None, pk_hook_full_opt=None, tile_size=512):
        mask = make_2d_mask(mask)

        _, _, vae, _, _ = basic_pipe
        upscaler = core.TwoSamplersForMaskUpscaler(scale_method, full_sample_schedule, use_tiled_vae,
                                                   base_sampler, mask_sampler, mask, vae, full_sampler_opt, upscale_model_opt,
                                                   pk_hook_base_opt, pk_hook_mask_opt, pk_hook_full_opt, tile_size=tile_size)
        return (upscaler, )

class IterativeLatentUpscale:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "samples": ("LATENT", ),
                    "upscale_factor": ("FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}),
                    "steps": ("INT", {"default": 3, "min": 1, "max": 10000, "step": 1}),
                    "temp_prefix": ("STRING", {"default": ""}),
                    "upscaler": ("UPSCALER",)
                   },
                "hidden": {"unique_id": "UNIQUE_ID"},
                }

    RETURN_TYPES = ("LATENT", "VAE")
    RETURN_NAMES = ("latent", "vae")
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, samples, upscale_factor, steps, temp_prefix, upscaler, unique_id):
        w = samples['samples'].shape[3] * 8  # image width (latent width * 8)
        h = samples['samples'].shape[2] * 8  # image height (latent height * 8)

        if temp_prefix == "":
            temp_prefix = None

        upscale_factor_unit = max(0, (upscale_factor - 1.0) / steps)
        current_latent = samples
        scale = 1

        for i in range(steps - 1):
            scale += upscale_factor_unit
            new_w = w * scale
            new_h = h * scale
            core.update_node_status(unique_id, f"{i+1}/{steps} steps | x{scale:.2f}", (i+1)/steps)
            print(f"IterativeLatentUpscale[{i+1}/{steps}]: {new_w:.1f}x{new_h:.1f} (scale:{scale:.2f}) ")
            step_info = i, steps
            current_latent = upscaler.upscale_shape(step_info, current_latent, new_w, new_h, temp_prefix)

        if scale < upscale_factor:
            new_w = w * upscale_factor
            new_h = h * upscale_factor
            core.update_node_status(unique_id, f"Final step | x{upscale_factor:.2f}", 1.0)
            print(f"IterativeLatentUpscale[Final]: {new_w:.1f}x{new_h:.1f} (scale:{upscale_factor:.2f}) ")
            step_info = steps - 1, steps
            current_latent = upscaler.upscale_shape(step_info, current_latent, new_w, new_h, temp_prefix)

        core.update_node_status(unique_id, "", None)

        return (current_latent, upscaler.vae)

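# Worked example of the scale schedule above (plain arithmetic, no new behavior):
# with upscale_factor=1.5 and steps=3, upscale_factor_unit = 0.5/3 ~ 0.167, so the
# loop samples at x1.17 and x1.33, and the final step lands exactly on x1.50.
def _example_scale_schedule(upscale_factor=1.5, steps=3):
    unit = max(0, (upscale_factor - 1.0) / steps)
    scales = [1 + unit * (i + 1) for i in range(steps - 1)] + [upscale_factor]
    return scales  # e.g. [1.1666..., 1.3333..., 1.5]
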
class IterativeImageUpscale:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "pixels": ("IMAGE", ),
                    "upscale_factor": ("FLOAT", {"default": 1.5, "min": 1, "max": 10000, "step": 0.1}),
                    "steps": ("INT", {"default": 3, "min": 1, "max": 10000, "step": 1}),
                    "temp_prefix": ("STRING", {"default": ""}),
                    "upscaler": ("UPSCALER",),
                    "vae": ("VAE",),
                   },
                "hidden": {"unique_id": "UNIQUE_ID"}
                }

    RETURN_TYPES = ("IMAGE",)
    RETURN_NAMES = ("image",)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Upscale"

    def doit(self, pixels, upscale_factor, steps, temp_prefix, upscaler, vae, unique_id):
        if temp_prefix == "":
            temp_prefix = None

        core.update_node_status(unique_id, "VAEEncode (first)", 0)
        if upscaler.is_tiled:
            latent = nodes.VAEEncodeTiled().encode(vae, pixels, upscaler.tile_size)[0]
        else:
            latent = nodes.VAEEncode().encode(vae, pixels)[0]

        refined_latent = IterativeLatentUpscale().doit(latent, upscale_factor, steps, temp_prefix, upscaler, unique_id)

        core.update_node_status(unique_id, "VAEDecode (final)", 1.0)
        if upscaler.is_tiled:
            pixels = nodes.VAEDecodeTiled().decode(vae, refined_latent[0], upscaler.tile_size)[0]
        else:
            pixels = nodes.VAEDecode().decode(vae, refined_latent[0])[0]

        core.update_node_status(unique_id, "", None)

        return (pixels, )

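# Usage sketch (assumption): pairing an UPSCALER from PixelKSampleUpscalerProvider
# with IterativeImageUpscale. `unique_id` only matters for progress display in the
# ComfyUI front end; any string works headlessly.
def _example_iterative_upscale(pixels, model, vae, positive, negative):
    upscaler, = PixelKSampleUpscalerProvider().doit(
        "lanczos", model, vae, 0, 20, 8.0, "euler", "normal",
        positive, negative, denoise=0.4, use_tiled_vae=False)
    upscaled, = IterativeImageUpscale().doit(pixels, 1.5, 3, "", upscaler, vae, unique_id="0")
    return upscaled
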
class FaceDetailerPipe:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "image": ("IMAGE", ),
                    "detailer_pipe": ("DETAILER_PIPE",),
                    "guide_size": ("FLOAT", {"default": 384, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "bbox", "label_off": "crop_region"}),
                    "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
                    "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}),
                    "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
                    "noise_mask": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
                    "force_inpaint": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "bbox_threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "bbox_dilation": ("INT", {"default": 10, "min": -512, "max": 512, "step": 1}),
                    "bbox_crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}),
                    "sam_detection_hint": (["center-1", "horizontal-2", "vertical-2", "rect-4", "diamond-4", "mask-area", "mask-points", "mask-point-bbox", "none"],),
                    "sam_dilation": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
                    "sam_threshold": ("FLOAT", {"default": 0.93, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "sam_bbox_expansion": ("INT", {"default": 0, "min": 0, "max": 1000, "step": 1}),
                    "sam_mask_hint_threshold": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}),
                    "sam_mask_hint_use_negative": (["False", "Small", "Outter"],),
                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
                    "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}),
                    "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
                   },
                "optional": {
                    "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "noise_mask_feather": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}),
                   }}

    RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK", "DETAILER_PIPE", "IMAGE")
    RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "mask", "detailer_pipe", "cnet_images")
    OUTPUT_IS_LIST = (False, True, True, False, False, True)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Simple"

    def doit(self, image, detailer_pipe, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler,
             denoise, feather, noise_mask, force_inpaint, bbox_threshold, bbox_dilation, bbox_crop_factor,
             sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion,
             sam_mask_hint_threshold, sam_mask_hint_use_negative, drop_size, refiner_ratio=None,
             cycle=1, inpaint_model=False, noise_mask_feather=0):

        result_img = None
        result_mask = None
        result_cropped_enhanced = []
        result_cropped_enhanced_alpha = []
        result_cnet_images = []

        if len(image) > 1:
            print("[Impact Pack] WARN: FaceDetailer is not designed for video detailing. If you intend to detail video frames, please use 'Detailer For AnimateDiff' instead.")

        model, clip, vae, positive, negative, wildcard, bbox_detector, segm_detector, sam_model_opt, detailer_hook, \
            refiner_model, refiner_clip, refiner_positive, refiner_negative = detailer_pipe

        for i, single_image in enumerate(image):
            enhanced_img, cropped_enhanced, cropped_enhanced_alpha, mask, cnet_pil_list = FaceDetailer.enhance_face(
                single_image.unsqueeze(0), model, clip, vae, guide_size, guide_size_for, max_size, seed + i, steps, cfg, sampler_name, scheduler,
                positive, negative, denoise, feather, noise_mask, force_inpaint,
                bbox_threshold, bbox_dilation, bbox_crop_factor,
                sam_detection_hint, sam_dilation, sam_threshold, sam_bbox_expansion, sam_mask_hint_threshold,
                sam_mask_hint_use_negative, drop_size, bbox_detector, segm_detector, sam_model_opt, wildcard, detailer_hook,
                refiner_ratio=refiner_ratio, refiner_model=refiner_model,
                refiner_clip=refiner_clip, refiner_positive=refiner_positive, refiner_negative=refiner_negative,
                cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather)

            result_img = torch.cat((result_img, enhanced_img), dim=0) if result_img is not None else enhanced_img
            result_mask = torch.cat((result_mask, mask), dim=0) if result_mask is not None else mask
            result_cropped_enhanced.extend(cropped_enhanced)
            result_cropped_enhanced_alpha.extend(cropped_enhanced_alpha)
            result_cnet_images.extend(cnet_pil_list)

        if len(result_cropped_enhanced) == 0:
            result_cropped_enhanced = [empty_pil_tensor()]

        if len(result_cropped_enhanced_alpha) == 0:
            result_cropped_enhanced_alpha = [empty_pil_tensor()]

        if len(result_cnet_images) == 0:
            result_cnet_images = [empty_pil_tensor()]

        return result_img, result_cropped_enhanced, result_cropped_enhanced_alpha, result_mask, detailer_pipe, result_cnet_images

class MaskDetailerPipe:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
                    "image": ("IMAGE", ),
                    "mask": ("MASK", ),
                    "basic_pipe": ("BASIC_PIPE",),
                    "guide_size": ("FLOAT", {"default": 384, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "guide_size_for": ("BOOLEAN", {"default": True, "label_on": "mask bbox", "label_off": "crop region"}),
                    "max_size": ("FLOAT", {"default": 1024, "min": 64, "max": nodes.MAX_RESOLUTION, "step": 8}),
                    "mask_mode": ("BOOLEAN", {"default": True, "label_on": "masked only", "label_off": "whole"}),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
                    "scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
                    "denoise": ("FLOAT", {"default": 0.5, "min": 0.0001, "max": 1.0, "step": 0.01}),
                    "feather": ("INT", {"default": 5, "min": 0, "max": 100, "step": 1}),
                    "crop_factor": ("FLOAT", {"default": 3.0, "min": 1.0, "max": 10, "step": 0.1}),
                    "drop_size": ("INT", {"min": 1, "max": MAX_RESOLUTION, "step": 1, "default": 10}),
                    "refiner_ratio": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0}),
                    "batch_size": ("INT", {"default": 1, "min": 1, "max": 100}),
                    "cycle": ("INT", {"default": 1, "min": 1, "max": 10, "step": 1}),
                   },
                "optional": {
                    "refiner_basic_pipe_opt": ("BASIC_PIPE", ),
                    "detailer_hook": ("DETAILER_HOOK",),
                    "inpaint_model": ("BOOLEAN", {"default": False, "label_on": "enabled", "label_off": "disabled"}),
                    "noise_mask_feather": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}),
                   }}

    RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "BASIC_PIPE", "BASIC_PIPE")
    RETURN_NAMES = ("image", "cropped_refined", "cropped_enhanced_alpha", "basic_pipe", "refiner_basic_pipe_opt")
    OUTPUT_IS_LIST = (False, True, True, False, False)
    FUNCTION = "doit"

    CATEGORY = "ImpactPack/Detailer"

    def doit(self, image, mask, basic_pipe, guide_size, guide_size_for, max_size, mask_mode,
             seed, steps, cfg, sampler_name, scheduler, denoise,
             feather, crop_factor, drop_size, refiner_ratio, batch_size, cycle=1,
             refiner_basic_pipe_opt=None, detailer_hook=None, inpaint_model=False, noise_mask_feather=0):

        if len(image) > 1:
            raise Exception('[Impact Pack] ERROR: MaskDetailer does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.')

        model, clip, vae, positive, negative = basic_pipe

        if refiner_basic_pipe_opt is None:
            refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None
        else:
            refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt

        # create segs
        if mask is not None:
            mask = make_2d_mask(mask)
            segs = core.mask_to_segs(mask, False, crop_factor, False, drop_size)
        else:
            segs = ((image.shape[1], image.shape[2]), [])

        enhanced_img_batch = None
        cropped_enhanced_list = []
        cropped_enhanced_alpha_list = []

        for i in range(batch_size):
            if mask is not None:
                enhanced_img, _, cropped_enhanced, cropped_enhanced_alpha, _, _ = \
                    DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed + i, steps,
                                              cfg, sampler_name, scheduler, positive, negative, denoise, feather, mask_mode,
                                              force_inpaint=True, wildcard_opt=None, detailer_hook=detailer_hook,
                                              refiner_ratio=refiner_ratio, refiner_model=refiner_model, refiner_clip=refiner_clip,
                                              refiner_positive=refiner_positive, refiner_negative=refiner_negative,
                                              cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather)
            else:
                enhanced_img, cropped_enhanced, cropped_enhanced_alpha = image, [], []

            if enhanced_img_batch is None:
                enhanced_img_batch = enhanced_img
            else:
                enhanced_img_batch = torch.cat((enhanced_img_batch, enhanced_img), dim=0)

            cropped_enhanced_list += cropped_enhanced
            cropped_enhanced_alpha_list += cropped_enhanced_alpha

        # set fallback image
        if len(cropped_enhanced_list) == 0:
            cropped_enhanced_list = [empty_pil_tensor()]

        if len(cropped_enhanced_alpha_list) == 0:
            cropped_enhanced_alpha_list = [empty_pil_tensor()]

        return enhanced_img_batch, cropped_enhanced_list, cropped_enhanced_alpha_list, basic_pipe, refiner_basic_pipe_opt

| class DetailerForEachTest(DetailerForEach): | |
| RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "IMAGE", "IMAGE") | |
| RETURN_NAMES = ("image", "cropped", "cropped_refined", "cropped_refined_alpha", "cnet_images") | |
| OUTPUT_IS_LIST = (False, True, True, True, True) | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Detailer" | |
| def doit(self, image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, | |
| scheduler, positive, negative, denoise, feather, noise_mask, force_inpaint, wildcard, detailer_hook=None, | |
| cycle=1, inpaint_model=False, noise_mask_feather=0): | |
| if len(image) > 1: | |
| raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') | |
| enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ | |
| DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, | |
| cfg, sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, | |
| force_inpaint, wildcard, detailer_hook, | |
| cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather) | |
| # set fallback image | |
| if len(cropped) == 0: | |
| cropped = [empty_pil_tensor()] | |
| if len(cropped_enhanced) == 0: | |
| cropped_enhanced = [empty_pil_tensor()] | |
| if len(cropped_enhanced_alpha) == 0: | |
| cropped_enhanced_alpha = [empty_pil_tensor()] | |
| if len(cnet_pil_list) == 0: | |
| cnet_pil_list = [empty_pil_tensor()] | |
| return enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list | |
| class DetailerForEachTestPipe(DetailerForEachPipe): | |
| RETURN_TYPES = ("IMAGE", "SEGS", "BASIC_PIPE", "IMAGE", "IMAGE", "IMAGE", "IMAGE", ) | |
| RETURN_NAMES = ("image", "segs", "basic_pipe", "cropped", "cropped_refined", "cropped_refined_alpha", 'cnet_images') | |
| OUTPUT_IS_LIST = (False, False, False, True, True, True, True) | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Detailer" | |
| def doit(self, image, segs, guide_size, guide_size_for, max_size, seed, steps, cfg, sampler_name, scheduler, | |
| denoise, feather, noise_mask, force_inpaint, basic_pipe, wildcard, cycle=1, | |
| refiner_ratio=None, detailer_hook=None, refiner_basic_pipe_opt=None, inpaint_model=False, noise_mask_feather=0): | |
| if len(image) > 1: | |
| raise Exception('[Impact Pack] ERROR: DetailerForEach does not allow image batches.\nPlease refer to https://github.com/ltdrdata/ComfyUI-extension-tutorials/blob/Main/ComfyUI-Impact-Pack/tutorial/batching-detailer.md for more information.') | |
| model, clip, vae, positive, negative = basic_pipe | |
| if refiner_basic_pipe_opt is None: | |
| refiner_model, refiner_clip, refiner_positive, refiner_negative = None, None, None, None | |
| else: | |
| refiner_model, refiner_clip, _, refiner_positive, refiner_negative = refiner_basic_pipe_opt | |
| enhanced_img, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list, new_segs = \ | |
| DetailerForEach.do_detail(image, segs, model, clip, vae, guide_size, guide_size_for, max_size, seed, steps, cfg, | |
| sampler_name, scheduler, positive, negative, denoise, feather, noise_mask, | |
| force_inpaint, wildcard, detailer_hook, | |
| refiner_ratio=refiner_ratio, refiner_model=refiner_model, | |
| refiner_clip=refiner_clip, refiner_positive=refiner_positive, | |
| refiner_negative=refiner_negative, | |
| cycle=cycle, inpaint_model=inpaint_model, noise_mask_feather=noise_mask_feather) | |
| # set fallback image | |
| if len(cropped) == 0: | |
| cropped = [empty_pil_tensor()] | |
| if len(cropped_enhanced) == 0: | |
| cropped_enhanced = [empty_pil_tensor()] | |
| if len(cropped_enhanced_alpha) == 0: | |
| cropped_enhanced_alpha = [empty_pil_tensor()] | |
| if len(cnet_pil_list) == 0: | |
| cnet_pil_list = [empty_pil_tensor()] | |
| return enhanced_img, new_segs, basic_pipe, cropped, cropped_enhanced, cropped_enhanced_alpha, cnet_pil_list | |
| class SegsBitwiseAndMask: | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| return {"required": { | |
| "segs": ("SEGS",), | |
| "mask": ("MASK",), | |
| } | |
| } | |
| RETURN_TYPES = ("SEGS",) | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Operation" | |
| def doit(self, segs, mask): | |
| return (core.segs_bitwise_and_mask(segs, mask), ) | |
| class SegsBitwiseAndMaskForEach: | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| return {"required": { | |
| "segs": ("SEGS",), | |
| "masks": ("MASK",), | |
| } | |
| } | |
| RETURN_TYPES = ("SEGS",) | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Operation" | |
| def doit(self, segs, masks): | |
| return (core.apply_mask_to_each_seg(segs, masks), ) | |
| class BitwiseAndMaskForEach: | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| return {"required": | |
| { | |
| "base_segs": ("SEGS",), | |
| "mask_segs": ("SEGS",), | |
| } | |
| } | |
| RETURN_TYPES = ("SEGS",) | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Operation" | |
| def doit(self, base_segs, mask_segs): | |
| result = [] | |
| for bseg in base_segs[1]: | |
| cropped_mask1 = bseg.cropped_mask.copy() | |
| crop_region1 = bseg.crop_region | |
| for mseg in mask_segs[1]: | |
| cropped_mask2 = mseg.cropped_mask | |
| crop_region2 = mseg.crop_region | |
| # compute the intersection of the two crop regions | |
| intersect_region = (max(crop_region1[0], crop_region2[0]), | |
| max(crop_region1[1], crop_region2[1]), | |
| min(crop_region1[2], crop_region2[2]), | |
| min(crop_region1[3], crop_region2[3])) | |
| overlapped = False | |
| # set all pixels in cropped_mask1 to 0 except for those that overlap with cropped_mask2 | |
| for i in range(intersect_region[0], intersect_region[2]): | |
| for j in range(intersect_region[1], intersect_region[3]): | |
| if cropped_mask1[j - crop_region1[1], i - crop_region1[0]] == 1 and \ | |
| cropped_mask2[j - crop_region2[1], i - crop_region2[0]] == 1: | |
| # pixel overlaps with both masks, keep it as 1 | |
| overlapped = True | |
| pass | |
| else: | |
| # pixel does not overlap with both masks, set it to 0 | |
| cropped_mask1[j - crop_region1[1], i - crop_region1[0]] = 0 | |
| if overlapped: | |
| item = SEG(bseg.cropped_image, cropped_mask1, bseg.confidence, bseg.crop_region, bseg.bbox, bseg.label, None) | |
| result.append(item) | |
| return ((base_segs[0], result),) | |
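| # A minimal vectorized sketch of the intersection step above (hypothetical | |
| # helper, not used by the node). Assumption: cropped masks are 2D numpy | |
| # arrays of 0/1. Note the loops above only rewrite pixels inside the | |
| # intersected region; this sketch also zeroes pixels outside the overlap, | |
| # which matches the intent stated in the comment. | |
| def _bitwise_and_cropped_masks_sketch(mask1, region1, mask2, region2): | |
| x1, y1 = max(region1[0], region2[0]), max(region1[1], region2[1]) | |
| x2, y2 = min(region1[2], region2[2]), min(region1[3], region2[3]) | |
| result = np.zeros_like(mask1) | |
| if x1 < x2 and y1 < y2: # regions actually overlap | |
| a = mask1[y1 - region1[1]:y2 - region1[1], x1 - region1[0]:x2 - region1[0]] | |
| b = mask2[y1 - region2[1]:y2 - region2[1], x1 - region2[0]:x2 - region2[0]] | |
| result[y1 - region1[1]:y2 - region1[1], x1 - region1[0]:x2 - region1[0]] = a * b | |
| return result | |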
| class SubtractMaskForEach: | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| return {"required": { | |
| "base_segs": ("SEGS",), | |
| "mask_segs": ("SEGS",), | |
| } | |
| } | |
| RETURN_TYPES = ("SEGS",) | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Operation" | |
| def doit(self, base_segs, mask_segs): | |
| result = [] | |
| for bseg in base_segs[1]: | |
| cropped_mask1 = bseg.cropped_mask.copy() | |
| crop_region1 = bseg.crop_region | |
| for mseg in mask_segs[1]: | |
| cropped_mask2 = mseg.cropped_mask | |
| crop_region2 = mseg.crop_region | |
| # compute the intersection of the two crop regions | |
| intersect_region = (max(crop_region1[0], crop_region2[0]), | |
| max(crop_region1[1], crop_region2[1]), | |
| min(crop_region1[2], crop_region2[2]), | |
| min(crop_region1[3], crop_region2[3])) | |
| changed = False | |
| # subtract operation | |
| for i in range(intersect_region[0], intersect_region[2]): | |
| for j in range(intersect_region[1], intersect_region[3]): | |
| if cropped_mask1[j - crop_region1[1], i - crop_region1[0]] == 1 and \ | |
| cropped_mask2[j - crop_region2[1], i - crop_region2[0]] == 1: | |
| # pixel is set in both masks; subtract it by setting it to 0 | |
| changed = True | |
| cropped_mask1[j - crop_region1[1], i - crop_region1[0]] = 0 | |
| # pixels not set in both masks are left unchanged | |
| if changed: | |
| item = SEG(bseg.cropped_image, cropped_mask1, bseg.confidence, bseg.crop_region, bseg.bbox, bseg.label, None) | |
| result.append(item) | |
| else: | |
| result.append(bseg) # pass the unmodified seg through | |
| return ((base_segs[0], result),) | |
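| # A vectorized subtract would mirror _bitwise_and_cropped_masks_sketch above: | |
| # copy mask1 and write a * (1 - b) back into the overlapping slice. | |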
| class ToBinaryMask: | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| return {"required": { | |
| "mask": ("MASK",), | |
| "threshold": ("INT", {"default": 20, "min": 1, "max": 255}), | |
| } | |
| } | |
| RETURN_TYPES = ("MASK",) | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Operation" | |
| def doit(self, mask, threshold): | |
| mask = to_binary_mask(mask, threshold/255.0) | |
| return (mask,) | |
| class BitwiseAndMask: | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| return {"required": { | |
| "mask1": ("MASK",), | |
| "mask2": ("MASK",), | |
| } | |
| } | |
| RETURN_TYPES = ("MASK",) | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Operation" | |
| def doit(self, mask1, mask2): | |
| mask = bitwise_and_masks(mask1, mask2) | |
| return (mask,) | |
| class SubtractMask: | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| return {"required": { | |
| "mask1": ("MASK", ), | |
| "mask2": ("MASK", ), | |
| } | |
| } | |
| RETURN_TYPES = ("MASK",) | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Operation" | |
| def doit(self, mask1, mask2): | |
| mask = subtract_masks(mask1, mask2) | |
| return (mask,) | |
| class AddMask: | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| return {"required": { | |
| "mask1": ("MASK",), | |
| "mask2": ("MASK",), | |
| } | |
| } | |
| RETURN_TYPES = ("MASK",) | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Operation" | |
| def doit(self, mask1, mask2): | |
| mask = add_masks(mask1, mask2) | |
| return (mask,) | |
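| # BitwiseAndMask/SubtractMask/AddMask are thin wrappers around the pixel-wise | |
| # helpers (bitwise_and_masks, subtract_masks, add_masks) brought in from impact.utils. | |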
| import nodes | |
| def get_image_hash(arr): | |
| split_index1 = arr.shape[0] // 2 | |
| split_index2 = arr.shape[1] // 2 | |
| part1 = arr[:split_index1, :split_index2] | |
| part2 = arr[:split_index1, split_index2:] | |
| part3 = arr[split_index1:, :split_index2] | |
| part4 = arr[split_index1:, split_index2:] | |
| # sum each quadrant | |
| sum1 = np.sum(part1) | |
| sum2 = np.sum(part2) | |
| sum3 = np.sum(part3) | |
| sum4 = np.sum(part4) | |
| return hash((sum1, sum2, sum3, sum4)) | |
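| # Usage sketch: get_image_hash(np.asarray(img)). Hashing four quadrant sums is | |
| # cheap but collision-prone, which is acceptable for change detection, not for | |
| # content addressing. | |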
| def get_file_item(base_type, path): | |
| path_type = base_type | |
| if path == "[output]": | |
| path_type = "output" | |
| path = path[:-9] | |
| elif path == "[input]": | |
| path_type = "input" | |
| path = path[:-8] | |
| elif path == "[temp]": | |
| path_type = "temp" | |
| path = path[:-7] | |
| subfolder = os.path.dirname(path) | |
| filename = os.path.basename(path) | |
| return { | |
| "filename": filename, | |
| "subfolder": subfolder, | |
| "type": path_type | |
| } | |
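| # e.g. get_file_item("input", "subdir/image.png [output]") | |
| # -> {"filename": "image.png", "subfolder": "subdir", "type": "output"} | |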
| class ImageReceiver: | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| input_dir = folder_paths.get_input_directory() | |
| files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] | |
| return {"required": { | |
| "image": (sorted(files), ), | |
| "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), | |
| "save_to_workflow": ("BOOLEAN", {"default": False}), | |
| "image_data": ("STRING", {"multiline": False}), | |
| "trigger_always": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), | |
| }, | |
| } | |
| FUNCTION = "doit" | |
| RETURN_TYPES = ("IMAGE", "MASK") | |
| CATEGORY = "ImpactPack/Util" | |
| def doit(self, image, link_id, save_to_workflow, image_data, trigger_always): | |
| if save_to_workflow: | |
| try: | |
| image_data = base64.b64decode(image_data.split(",")[1]) | |
| i = Image.open(BytesIO(image_data)) | |
| i = ImageOps.exif_transpose(i) | |
| image = i.convert("RGB") | |
| image = np.array(image).astype(np.float32) / 255.0 | |
| image = torch.from_numpy(image)[None,] | |
| if 'A' in i.getbands(): | |
| mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 | |
| mask = 1. - torch.from_numpy(mask) | |
| else: | |
| mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") | |
| return (image, mask.unsqueeze(0)) | |
| except Exception as e: | |
| print(f"[WARN] ComfyUI-Impact-Pack: ImageReceiver - invalid 'image_data'") | |
| mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") | |
| return (empty_pil_tensor(64, 64), mask, ) | |
| else: | |
| return nodes.LoadImage().load_image(image) | |
| @classmethod | |
| def VALIDATE_INPUTS(s, image, link_id, save_to_workflow, image_data, trigger_always): | |
| if image != '#DATA' and (not folder_paths.exists_annotated_filepath(image) or image.startswith("/") or ".." in image): | |
| return "Invalid image file: {}".format(image) | |
| return True | |
| @classmethod | |
| def IS_CHANGED(s, image, link_id, save_to_workflow, image_data, trigger_always): | |
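| # float("NaN") never compares equal to itself, so returning it makes ComfyUI | |
| # treat this node as changed on every execution | |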
| if trigger_always: | |
| return float("NaN") | |
| else: | |
| if save_to_workflow: | |
| return hash(image_data) | |
| else: | |
| return hash(image) | |
| from server import PromptServer | |
| class ImageSender(nodes.PreviewImage): | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| return {"required": { | |
| "images": ("IMAGE", ), | |
| "filename_prefix": ("STRING", {"default": "ImgSender"}), | |
| "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), }, | |
| "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, | |
| } | |
| OUTPUT_NODE = True | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Util" | |
| def doit(self, images, filename_prefix="ImgSender", link_id=0, prompt=None, extra_pnginfo=None): | |
| result = nodes.PreviewImage().save_images(images, filename_prefix, prompt, extra_pnginfo) | |
| PromptServer.instance.send_sync("img-send", {"link_id": link_id, "images": result['ui']['images']}) | |
| return result | |
| class LatentReceiver: | |
| def __init__(self): | |
| self.input_dir = folder_paths.get_input_directory() | |
| self.type = "input" | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| def check_file_extension(x): | |
| return x.endswith(".latent") or x.endswith(".latent.png") | |
| input_dir = folder_paths.get_input_directory() | |
| files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and check_file_extension(f)] | |
| return {"required": { | |
| "latent": (sorted(files), ), | |
| "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), | |
| "trigger_always": ("BOOLEAN", {"default": False, "label_on": "enable", "label_off": "disable"}), | |
| }, | |
| } | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Util" | |
| RETURN_TYPES = ("LATENT",) | |
| @staticmethod | |
| def load_preview_latent(image_path): | |
| if not os.path.exists(image_path): | |
| return None | |
| image = Image.open(image_path) | |
| exif_data = piexif.load(image.info["exif"]) | |
| if piexif.ExifIFD.UserComment in exif_data["Exif"]: | |
| compressed_data = exif_data["Exif"][piexif.ExifIFD.UserComment] | |
| compressed_data_io = BytesIO(compressed_data) | |
| with zipfile.ZipFile(compressed_data_io, mode='r') as archive: | |
| tensor_bytes = archive.read("latent") | |
| tensor = safetensors.torch.load(tensor_bytes) | |
| return {"samples": tensor['latent_tensor']} | |
| return None | |
| def parse_filename(self, filename): | |
| pattern = r"^(.*)/(.*?)\[(.*)\]\s*$" | |
| match = re.match(pattern, filename) | |
| if match: | |
| subfolder = match.group(1) | |
| filename = match.group(2).rstrip() | |
| file_type = match.group(3) | |
| else: | |
| subfolder = '' | |
| file_type = self.type | |
| return {'filename': filename, 'subfolder': subfolder, 'type': file_type} | |
| def doit(self, **kwargs): | |
| if 'latent' not in kwargs: | |
| return (torch.zeros([1, 4, 8, 8]), ) | |
| latent = kwargs['latent'] | |
| latent_name = latent | |
| latent_path = folder_paths.get_annotated_filepath(latent_name) | |
| if latent.endswith(".latent"): | |
| latent = safetensors.torch.load_file(latent_path, device="cpu") | |
| multiplier = 1.0 | |
| if "latent_format_version_0" not in latent: | |
| multiplier = 1.0 / 0.18215 | |
| samples = {"samples": latent["latent_tensor"].float() * multiplier} | |
| else: | |
| samples = LatentReceiver.load_preview_latent(latent_path) | |
| if samples is None: | |
| samples = {'samples': torch.zeros([1, 4, 8, 8])} | |
| preview = self.parse_filename(latent_name) | |
| return { | |
| 'ui': {"images": [preview]}, | |
| 'result': (samples, ) | |
| } | |
| @classmethod | |
| def IS_CHANGED(s, latent, link_id, trigger_always): | |
| if trigger_always: | |
| return float("NaN") | |
| else: | |
| image_path = folder_paths.get_annotated_filepath(latent) | |
| m = hashlib.sha256() | |
| with open(image_path, 'rb') as f: | |
| m.update(f.read()) | |
| return m.digest().hex() | |
| @classmethod | |
| def VALIDATE_INPUTS(s, latent, link_id, trigger_always): | |
| if not folder_paths.exists_annotated_filepath(latent) or latent.startswith("/") or ".." in latent: | |
| return "Invalid latent file: {}".format(latent) | |
| return True | |
| class LatentSender(nodes.SaveLatent): | |
| def __init__(self): | |
| super().__init__() | |
| self.output_dir = folder_paths.get_temp_directory() | |
| self.type = "temp" | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| return {"required": { | |
| "samples": ("LATENT", ), | |
| "filename_prefix": ("STRING", {"default": "latents/LatentSender"}), | |
| "link_id": ("INT", {"default": 0, "min": 0, "max": sys.maxsize, "step": 1}), | |
| "preview_method": (["Latent2RGB-SDXL", "Latent2RGB-SD15", "TAESDXL", "TAESD15"],) | |
| }, | |
| "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, | |
| } | |
| OUTPUT_NODE = True | |
| RETURN_TYPES = () | |
| FUNCTION = "doit" | |
| CATEGORY = "ImpactPack/Util" | |
| @staticmethod | |
| def save_to_file(tensor_bytes, prompt, extra_pnginfo, image, image_path): | |
| compressed_data = BytesIO() | |
| with zipfile.ZipFile(compressed_data, mode='w') as archive: | |
| archive.writestr("latent", tensor_bytes) | |
| image = image.copy() | |
| exif_data = {"Exif": {piexif.ExifIFD.UserComment: compressed_data.getvalue()}} | |
| metadata = PngInfo() | |
| if prompt is not None: | |
| metadata.add_text("prompt", json.dumps(prompt)) | |
| if extra_pnginfo is not None: | |
| for x in extra_pnginfo: | |
| metadata.add_text(x, json.dumps(extra_pnginfo[x])) | |
| exif_bytes = piexif.dump(exif_data) | |
| image.save(image_path, format='png', exif=exif_bytes, pnginfo=metadata, optimize=True) | |
| @staticmethod | |
| def prepare_preview(latent_tensor, preview_method): | |
| from comfy.cli_args import LatentPreviewMethod | |
| import comfy.latent_formats as latent_formats | |
| lower_bound = 128 | |
| upper_bound = 256 | |
| if preview_method == "Latent2RGB-SD15": | |
| latent_format = latent_formats.SD15() | |
| method = LatentPreviewMethod.Latent2RGB | |
| elif preview_method == "TAESD15": | |
| latent_format = latent_formats.SD15() | |
| method = LatentPreviewMethod.TAESD | |
| elif preview_method == "TAESDXL": | |
| latent_format = latent_formats.SDXL() | |
| method = LatentPreviewMethod.TAESD | |
| else: # preview_method == "Latent2RGB-SDXL" | |
| latent_format = latent_formats.SDXL() | |
| method = LatentPreviewMethod.Latent2RGB | |
| previewer = core.get_previewer("cpu", latent_format=latent_format, force=True, method=method) | |
| image = previewer.decode_latent_to_preview(latent_tensor) | |
| min_size = min(image.size[0], image.size[1]) | |
| max_size = max(image.size[0], image.size[1]) | |
| scale_factor = 1 | |
| if max_size > upper_bound: | |
| scale_factor = upper_bound/max_size | |
| # prevent too small preview | |
| if min_size*scale_factor < lower_bound: | |
| scale_factor = lower_bound/min_size | |
| w = int(image.size[0] * scale_factor) | |
| h = int(image.size[1] * scale_factor) | |
| image = image.resize((w, h), resample=Image.NEAREST) | |
| return LatentSender.attach_format_text(image) | |
| @staticmethod | |
| def attach_format_text(image): | |
| width_a, height_a = image.size | |
| letter_image = Image.open(latent_letter_path) | |
| width_b, height_b = letter_image.size | |
| new_width = max(width_a, width_b) | |
| new_height = height_a + height_b | |
| new_image = Image.new('RGB', (new_width, new_height), (0, 0, 0)) | |
| offset_x = (new_width - width_b) // 2 | |
| offset_y = (height_a + (new_height - height_a - height_b) // 2) | |
| new_image.paste(letter_image, (offset_x, offset_y)) | |
| new_image.paste(image, (0, 0)) | |
| return new_image | |
| def doit(self, samples, filename_prefix="latents/LatentSender", link_id=0, preview_method="Latent2RGB-SDXL", prompt=None, extra_pnginfo=None): | |
| full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) | |
| # load preview | |
| preview = LatentSender.prepare_preview(samples['samples'], preview_method) | |
| # support save metadata for latent sharing | |
| file = f"{filename}_{counter:05}_.latent.png" | |
| fullpath = os.path.join(full_output_folder, file) | |
| output = {"latent_tensor": samples["samples"]} | |
| tensor_bytes = safetensors.torch.save(output) | |
| LatentSender.save_to_file(tensor_bytes, prompt, extra_pnginfo, preview, fullpath) | |
| latent_path = { | |
| 'filename': file, | |
| 'subfolder': subfolder, | |
| 'type': self.type | |
| } | |
| PromptServer.instance.send_sync("latent-send", {"link_id": link_id, "images": [latent_path]}) | |
| return {'ui': {'images': [latent_path]}} | |
| class ImpactWildcardProcessor: | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| return {"required": { | |
| "wildcard_text": ("STRING", {"multiline": True, "dynamicPrompts": False}), | |
| "populated_text": ("STRING", {"multiline": True, "dynamicPrompts": False}), | |
| "mode": ("BOOLEAN", {"default": True, "label_on": "Populate", "label_off": "Fixed"}), | |
| "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), | |
| "Select to add Wildcard": (["Select the Wildcard to add to the text"],), | |
| }, | |
| } | |
| CATEGORY = "ImpactPack/Prompt" | |
| RETURN_TYPES = ("STRING", ) | |
| FUNCTION = "doit" | |
| @staticmethod | |
| def process(**kwargs): | |
| return impact.wildcards.process(**kwargs) | |
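| # 'populated_text' is filled in by the frontend (which calls process with the | |
| # wildcard text and seed) when mode is "Populate"; doit simply returns it, so | |
| # "Fixed" mode replays the stored expansion | |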
| def doit(self, *args, **kwargs): | |
| populated_text = kwargs['populated_text'] | |
| return (populated_text, ) | |
| class ImpactWildcardEncode: | |
| @classmethod | |
| def INPUT_TYPES(s): | |
| return {"required": { | |
| "model": ("MODEL",), | |
| "clip": ("CLIP",), | |
| "wildcard_text": ("STRING", {"multiline": True, "dynamicPrompts": False}), | |
| "populated_text": ("STRING", {"multiline": True, "dynamicPrompts": False}), | |
| "mode": ("BOOLEAN", {"default": True, "label_on": "Populate", "label_off": "Fixed"}), | |
| "Select to add LoRA": (["Select the LoRA to add to the text"] + folder_paths.get_filename_list("loras"), ), | |
| "Select to add Wildcard": (["Select the Wildcard to add to the text"], ), | |
| "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), | |
| }, | |
| } | |
| CATEGORY = "ImpactPack/Prompt" | |
| RETURN_TYPES = ("MODEL", "CLIP", "CONDITIONING", "STRING") | |
| RETURN_NAMES = ("model", "clip", "conditioning", "populated_text") | |
| FUNCTION = "doit" | |
| @staticmethod | |
| def process_with_loras(**kwargs): | |
| return impact.wildcards.process_with_loras(**kwargs) | |
| @staticmethod | |
| def get_wildcard_list(): | |
| return impact.wildcards.get_wildcard_list() | |
| def doit(self, *args, **kwargs): | |
| populated = kwargs['populated_text'] | |
| model, clip, conditioning = impact.wildcards.process_with_loras(populated, kwargs['model'], kwargs['clip']) | |
| return (model, clip, conditioning, populated) | |
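| # Usage sketch (assuming standard Impact Pack wildcard syntax): a wildcard_text | |
| # such as "photo of a __animal__, <lora:detail_tweaker>" expands __animal__ from | |
| # the wildcard files and loads the named LoRA onto the model/clip pair before | |
| # encoding the remaining prompt | |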