from .imagefunc import *
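# imagefunc is assumed to provide torch, PIL's Image and ImageChops, and the helpers
# used below (tensor2pil, image2mask, image_channel_split, histogram_range, log).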

NODE_NAME = 'BlendIfMask'

def norm_value(value):
    # Clamp the value into the range [0.01, 0.99]
    if value < 0.01:
        value = 0.01
    if value > 0.99:
        value = 0.99
    return value

class BlendIfMask:

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):

        blend_if_list = ["gray", "red", "green", "blue"]
        return {
            "required": {
                "image": ("IMAGE", ),
                "invert_mask": ("BOOLEAN", {"default": True}),  # 反转mask
                "blend_if": (blend_if_list,),
                "black_point": ("INT", {"default": 0, "min": 0, "max": 254, "step": 1, "display": "slider"}),
                "black_range": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}),
                "white_point": ("INT", {"default": 255, "min": 1, "max": 255, "step": 1, "display": "slider"}),
                "white_range": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}),
            },
            "optional": {
                "mask": ("MASK",),  #
            }
        }

    RETURN_TYPES = ("MASK",)
    RETURN_NAMES = ("mask",)
    FUNCTION = 'blend_if_mask'
    CATEGORY = '😺dzNodes/LayerMask'

    def blend_if_mask(self, image, invert_mask, blend_if,
                      black_point, black_range,
                      white_point, white_range,
                      mask=None
                      ):

        ret_masks = []
        input_images = []
        input_masks = []

        # Collect per-frame image tensors and derive a default mask from the alpha channel
        # (full white when the image has no alpha).
        for i in image:
            input_images.append(torch.unsqueeze(i, 0))
            m = tensor2pil(i)
            if m.mode == 'RGBA':
                input_masks.append(m.split()[-1])
            else:
                input_masks.append(Image.new('L', size=m.size, color='white'))
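        # An explicit mask input overrides the alpha-derived masks (inverted first if requested)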
        if mask is not None:
            if mask.dim() == 2:
                mask = torch.unsqueeze(mask, 0)
            input_masks = []
            for m in mask:
                if invert_mask:
                    m = 1 - m
                input_masks.append(tensor2pil(torch.unsqueeze(m, 0)).convert('L'))
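        # Broadcast the shorter list across the batch by repeating its last element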
        max_batch = max(len(input_images), len(input_masks))

        for i in range(max_batch):
            _image = input_images[i] if i < len(input_images) else input_images[-1]
            _image = tensor2pil(_image).convert('RGB')

            if blend_if == "red":
                gray = image_channel_split(_image, 'RGB')[0]
            elif blend_if == "green":
                gray = image_channel_split(_image, 'RGB')[1]
            elif blend_if == "blue":
                gray = image_channel_split(_image, 'RGB')[2]
            else:
                gray = _image.convert('L')

            _mask = input_masks[i] if i < len(input_masks) else input_masks[-1]

            # Apply the black/white point curve, then blank out everything outside the mask
            gray = histogram_range(gray, black_point, black_range, white_point, white_range)
            black = Image.new('L', size=_image.size, color='black')
            _mask = ImageChops.invert(_mask)
            gray.paste(black, mask=_mask)

            ret_masks.append(image2mask(gray))

        log(f"{NODE_NAME} Processed {len(ret_masks)} image(s).", message_type='finish')
        return (torch.cat(ret_masks, dim=0),)

NODE_CLASS_MAPPINGS = {
    "LayerMask: BlendIf Mask": BlendIfMask
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "LayerMask: BlendIf Mask": "LayerMask: BlendIf Mask"
}