from ..utils import common_annotator_call, annotator_ckpts_path, HF_MODEL_NAME, create_node_input_types
import comfy.model_management as model_management


class OneFormer_COCO_SemSegPreprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return create_node_input_types()

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "semantic_segmentate"
    CATEGORY = "ControlNet Preprocessors/Semantic Segmentation"

    def semantic_segmentate(self, image, resolution=512):
        from controlnet_aux.oneformer import OneformerSegmentor

        # Load (or fetch into the local cache) the OneFormer checkpoint trained on COCO,
        # move it to ComfyUI's preferred torch device, run the annotator, then free the model.
        model = OneformerSegmentor.from_pretrained(HF_MODEL_NAME, "150_16_swin_l_oneformer_coco_100ep.pth", cache_dir=annotator_ckpts_path)
        model = model.to(model_management.get_torch_device())
        out = common_annotator_call(model, image, resolution=resolution)
        del model
        return (out,)

class OneFormer_ADE20K_SemSegPreprocessor:
    @classmethod
    def INPUT_TYPES(s):
        return create_node_input_types()

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "semantic_segmentate"
    CATEGORY = "ControlNet Preprocessors/Semantic Segmentation"

    def semantic_segmentate(self, image, resolution=512):
        from controlnet_aux.oneformer import OneformerSegmentor

        # Same pipeline as the COCO node, but with the ADE20K-trained checkpoint.
        model = OneformerSegmentor.from_pretrained(HF_MODEL_NAME, "250_16_swin_l_oneformer_ade20k_160k.pth", cache_dir=annotator_ckpts_path)
        model = model.to(model_management.get_torch_device())
        out = common_annotator_call(model, image, resolution=resolution)
        del model
        return (out,)

# Register the nodes with ComfyUI and give them human-readable display names.
NODE_CLASS_MAPPINGS = {
    "OneFormer-COCO-SemSegPreprocessor": OneFormer_COCO_SemSegPreprocessor,
    "OneFormer-ADE20K-SemSegPreprocessor": OneFormer_ADE20K_SemSegPreprocessor
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "OneFormer-COCO-SemSegPreprocessor": "OneFormer COCO Segmentor",
    "OneFormer-ADE20K-SemSegPreprocessor": "OneFormer ADE20K Segmentor"
}