Spaces:
Sleeping
Sleeping
File size: 6,628 Bytes
440fd96 40b1711 440fd96 5540863 40b1711 440fd96 40b1711 0ec7070 440fd96 5540863 0ec7070 440fd96 e8e2aa0 40b1711 440fd96 0ec7070 440fd96 40b1711 440fd96 0ec7070 440fd96 40b1711 0f3fb3e 40b1711 0f3fb3e 40b1711 0ec7070 40b1711 0f3fb3e 0ec7070 40b1711 440fd96 40b1711 440fd96 0ec7070 440fd96 5540863 440fd96 0ec7070 440fd96 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 |
import gradio as gr
import mediapipe as mp
import numpy as np
from PIL import Image
from mediapipe.tasks import python
from mediapipe.tasks.python import vision
from scipy.ndimage import binary_dilation, label
# Solid colors used when rendering a mask preview (white-on-black).
BG_COLOR = np.array([0, 0, 0], dtype=np.uint8) # black
MASK_COLOR = np.array([255, 255, 255], dtype=np.uint8) # white
# MediaPipe multiclass selfie segmentation checkpoint.
# NOTE(review): path is relative to the working directory — confirm the
# checkpoint exists there before launch.
MODEL_PATH = "checkpoints/selfie_multiclass_256x256.tflite"
# Choices exposed in the UI dropdown (see the Gradio Blocks below).
category_options = ["hair", "clothes", "background"]
base_options = python.BaseOptions(model_asset_path=MODEL_PATH)
# output_category_mask=True makes segment() results carry a per-pixel
# category id map (read via .category_mask in segment()).
options = vision.ImageSegmenterOptions(base_options=base_options,output_category_mask=True)
# Created once at import time and shared by every request.
segmenter = vision.ImageSegmenter.create_from_options(options)
labels = segmenter.labels
# Padding (px) added around the detected region before cropping.
expand_size = 40
def segment(input_image, category):
    """Segment `input_image` and return a square crop of the chosen category.

    Args:
        input_image: PIL image uploaded through the UI.
        category: one of "hair", "clothes", "background".

    Returns:
        (crop_mask_image, crop_image_square): the binary mask of the selected
        category and the matching image crop, both padded onto a centered
        square canvas of the same side length.
    """
    # PIL's Image.size is (width, height) — the original unpacking had the
    # two swapped, which mis-clamped the crop box on non-square images.
    original_width, original_height = input_image.size
    image = mp.Image(image_format=mp.ImageFormat.SRGB, data=np.asarray(input_image))
    segmentation_result = segmenter.segment(image)
    category_mask_np = segmentation_result.category_mask.numpy_view()

    if category == "hair":
        target_mask = get_hair_mask(category_mask_np, should_dilate=True)
    elif category == "clothes":
        target_mask = get_clothes_mask(category_mask_np)
    else:
        # Category id 0 is background in the multiclass selfie model.
        target_mask = category_mask_np == 0

    # Bounding box of the mask, padded by expand_size and clamped to the image.
    target_indices = np.where(target_mask)
    if target_indices[0].size == 0:
        # Nothing detected for this category: fall back to the whole image
        # instead of crashing on np.min of an empty array.
        start_y, end_y, start_x, end_x = 0, original_height, 0, original_width
    else:
        start_y = max(np.min(target_indices[0]) - expand_size, 0)
        end_y = min(np.max(target_indices[0]) + expand_size, original_height)
        start_x = max(np.min(target_indices[1]) - expand_size, 0)
        end_x = min(np.max(target_indices[1]) + expand_size, original_width)

    # Pad the crop to a square so downstream consumers get a fixed aspect ratio.
    max_side_length = max(end_y - start_y, end_x - start_x)
    crop_mask = target_mask[start_y:end_y, start_x:end_x]
    crop_mask_height, crop_mask_width = crop_mask.shape
    # Center the crop on the square canvas.
    crop_mask_start_y = (max_side_length - crop_mask_height) // 2
    crop_mask_start_x = (max_side_length - crop_mask_width) // 2
    crop_mask_square = np.zeros((max_side_length, max_side_length), dtype=target_mask.dtype)
    crop_mask_square[
        crop_mask_start_y:crop_mask_start_y + crop_mask_height,
        crop_mask_start_x:crop_mask_start_x + crop_mask_width,
    ] = crop_mask

    crop_mask_image = Image.fromarray((crop_mask_square * 255).astype(np.uint8))
    crop_image = input_image.crop((start_x, start_y, end_x, end_y))
    crop_image_square = Image.new("RGB", (max_side_length, max_side_length))
    crop_image_square.paste(crop_image, (crop_mask_start_x, crop_mask_start_y))
    # (The original also built a white-on-black preview image here that was
    # never returned — dead code, removed.)
    return crop_mask_image, crop_image_square
def get_clothes_mask(category_mask_np):
    """Return a boolean mask covering body-skin and clothes pixels.

    Category ids from the multiclass selfie model: 2 = body-skin, 4 = clothes.
    The union is dilated a few pixels so the mask bleeds slightly past the
    exact segment boundary.
    """
    union = (category_mask_np == 2) | (category_mask_np == 4)
    return binary_dilation(union, iterations=4)
def get_hair_mask(category_mask_np, should_dilate=False):
    """Build a hair mask from the multiclass segmentation category map.

    Category ids: 0=background, 1=hair, 2=body-skin, 3=face-skin, 4=clothes.

    With should_dilate=False, returns the hair category dilated by 4 px.
    With should_dilate=True, additionally grows the top hair region downward
    until it touches the body/clothes area, so the mask also covers the gap
    between the hair and the shoulders.

    Returns a boolean array with the same shape as `category_mask_np`.
    """
    hair_mask = category_mask_np == 1
    # Slight dilation so the mask bleeds past the exact segment edge.
    hair_mask = binary_dilation(hair_mask, iterations=4)
    if not should_dilate:
        return hair_mask

    body_skin_mask = category_mask_np == 2
    face_skin_mask = category_mask_np == 3
    clothes_mask = category_mask_np == 4

    # Without face and clothes reference regions the expansion below is
    # undefined (np.min on empty indices crashed, and the growth loop could
    # spin forever) — fall back to the plain dilated hair mask.
    if not (np.any(face_skin_mask) and np.any(clothes_mask) and np.any(hair_mask)):
        return hair_mask

    face_rows = np.where(face_skin_mask)[0]
    min_face_y = np.min(face_rows)
    max_face_y = np.max(face_rows)

    # Keep only connected hair components that start at or above the top of
    # the face (hair on top of the head, not stray detections lower down).
    labeled_hair, hair_features = label(hair_mask)
    top_hair_mask = np.zeros_like(hair_mask)
    for i in range(1, hair_features + 1):
        component_mask = labeled_hair == i
        if np.min(np.where(component_mask)[0]) <= min_face_y:
            top_hair_mask[component_mask] = True

    # Reference area the hair should grow down to: body + clothes, ignoring
    # everything less than 40 px below the bottom of the face.
    reference_mask = np.logical_or(body_skin_mask, clothes_mask)
    reference_mask[:max_face_y + 40, :] = 0

    # Expand the top hair downward until it touches the reference area.
    # Guard against the two non-terminating cases of the original loop:
    # an empty seed mask, and a reference that was entirely cleared above.
    expanded_hair_mask = top_hair_mask
    if np.any(top_hair_mask) and np.any(reference_mask):
        while not np.any(np.logical_and(expanded_hair_mask, reference_mask)):
            grown = binary_dilation(expanded_hair_mask, iterations=10)
            if np.array_equal(grown, expanded_hair_mask):
                break  # saturated without reaching the reference — stop
            expanded_hair_mask = grown

    # Trim the expansion:
    # 1. nothing more than 10 px above the topmost hair pixel. The bound is
    #    clamped to 0: the original used `[:min_hair_y - 10]`, a NEGATIVE
    #    slice when hair touched the top of the frame, which wiped almost
    #    the entire mask.
    min_hair_y = np.min(np.where(hair_mask)[0])
    expanded_hair_mask[:max(min_hair_y - 10, 0), :] = 0
    # 2. nothing left/right of the clothes' horizontal extent.
    clothes_cols = np.where(clothes_mask)[1]
    expanded_hair_mask[:, :np.min(clothes_cols)] = 0
    expanded_hair_mask[:, np.max(clothes_cols) + 1:] = 0
    # 3. never claim face, body-skin or clothes pixels.
    expanded_hair_mask = np.logical_and(expanded_hair_mask, ~face_skin_mask)
    expanded_hair_mask = np.logical_and(expanded_hair_mask, ~body_skin_mask)
    expanded_hair_mask = np.logical_and(expanded_hair_mask, ~clothes_mask)

    # Final mask = original (dilated) hair plus the trimmed expansion.
    return np.logical_or(hair_mask, expanded_hair_mask)
# UI layout: inputs (image + category + submit) on the left,
# mask / crop outputs on the right.
with gr.Blocks() as app:
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type='pil', label='Upload image')
            category = gr.Dropdown(label='Category', choices=category_options, value=category_options[0])
            submit_btn = gr.Button(value='Submit', variant='primary')
        with gr.Column():
            mask_image = gr.Image(type='pil', label='Segmentation mask')
            output_image = gr.Image(type='pil', label='Segmented image')
    # segment() returns (mask, square crop) matching the two output images.
    submit_btn.click(
        fn=segment,
        inputs=[input_image, category],
        outputs=[mask_image, output_image],
    )

app.launch(debug=False, show_error=True)