import argparse
import json
import os
import random

import cv2
import numpy as np
import tqdm
from PIL import Image
from pycocotools import mask as mask_utils

EVALMODE = "test"


def blend_mask(input_img, binary_mask, alpha=0.3):
    """Overlay a binary mask on an image with alpha blending.

    Note: OpenCV images are BGR, so the constant color below is
    B=255, G=165, R=0 (a light blue); use (0, 165, 255) if orange is intended.
    """
    mask_image = np.zeros(input_img.shape, np.uint8)
    mask_image[:, :, 0] = 255
    mask_image[:, :, 1] = 165
    mask_image[:, :, 2] = 0
    # Keep the overlay color only where the (0/1) mask is set.
    mask_image = mask_image * np.repeat(binary_mask[:, :, np.newaxis], 3, axis=2)

    blend_image = input_img.copy()
    pos_idx = binary_mask > 0
    for ind in range(input_img.shape[2]):  # iterate over color channels
        ch_img1 = input_img[:, :, ind]
        ch_img2 = mask_image[:, :, ind]
        ch_img3 = blend_image[:, :, ind]
        ch_img3[pos_idx] = alpha * ch_img1[pos_idx] + (1 - alpha) * ch_img2[pos_idx]
        blend_image[:, :, ind] = ch_img3
    return blend_image


def upsample_mask(mask, frame):
    """Crop the letterbox padding from a model-resolution mask, then resize it to the frame size."""
    H, W = frame.shape[:2]
    mH, mW = mask.shape[:2]
    if W > H:
        # Landscape frame: the mask was padded vertically, so crop top/bottom.
        ratio = mW / W
        h = H * ratio
        diff = int((mH - h) // 2)
        if diff != 0:
            mask = mask[diff:-diff]
    else:
        # Portrait (or square) frame: the mask was padded horizontally, so crop left/right.
        ratio = mH / H
        w = W * ratio
        diff = int((mW - w) // 2)
        if diff != 0:
            mask = mask[:, diff:-diff]
    mask = cv2.resize(mask, (W, H))
    return mask


def downsample(mask, frame):
    """Resize a mask to the frame resolution."""
    H, W = frame.shape[:2]
    mask = cv2.resize(mask, (W, H))
    return mask


# datapath        /datasegswap
# inference_path  /inference_xmem_ego_last/coco
# output          /vis_piano
# TODO: a --show_gt flag still needs to be added

if __name__ == "__main__":
    out_path = "/home/yuqian_fu/Projects/sam2/predicted_mask"
    os.makedirs(out_path, exist_ok=True)

    frame = cv2.imread(
        "/data/work-gcp-europe-west4-a/yuqian_fu/Ego/multi_view_data_2/multi_vew_data_3/000001-color.jpg"
    )
    mask = Image.open(
        "/data/work-gcp-europe-west4-a/yuqian_fu/Ego/multi_view_data_2/mask/000001-label.png"
    )
    # Binarize the label image and resize it to the frame resolution
    # (nearest-neighbor keeps the mask values discrete).
    mask = (np.array(mask) > 0).astype(np.uint8)
    mask = cv2.resize(mask, (frame.shape[1], frame.shape[0]), interpolation=cv2.INTER_NEAREST)
    # Once the mask already matches the frame size, upsample_mask is effectively a no-op.
    mask = upsample_mask(mask, frame)

    out = blend_mask(frame, mask)
    cv2.imwrite(f"{out_path}/cor_0.jpg", out)
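

# The commented-out paths above (datapath, inference_path, output) and the note
# about a missing --show_gt flag suggest this script is meant to grow a CLI.
# Below is a minimal sketch of how that interface might look; the argument
# names and defaults are assumptions taken from those comments and are not
# wired into the main block yet.
def parse_args():
    parser = argparse.ArgumentParser(description="Visualize predicted masks blended over frames")
    parser.add_argument("--datapath", type=str, default="/datasegswap",
                        help="root directory of the input frames (assumed)")
    parser.add_argument("--inference_path", type=str, default="/inference_xmem_ego_last/coco",
                        help="directory containing predicted masks (assumed)")
    parser.add_argument("--output", type=str, default="/vis_piano",
                        help="directory to write the blended visualizations (assumed)")
    parser.add_argument("--show_gt", action="store_true",
                        help="also overlay the ground-truth mask (flag mentioned as TODO above)")
    return parser.parse_args()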