zhiweili committed
Commit · ed9b64d
1 Parent(s): 6bdded7
test token
app.py
CHANGED
@@ -18,6 +18,10 @@ options = vision.ImageSegmenterOptions(base_options=base_options,output_category
 segmenter = vision.ImageSegmenter.create_from_options(options)
 labels = segmenter.labels
 
+def get_session_token(request: gr.Request):
+    x_ip_token = request.headers['x-ip-token']
+    return x_ip_token
+
 def segment(input_image, category):
     image = mp.Image(image_format=mp.ImageFormat.SRGB, data=np.asarray(input_image))
     segmentation_result = segmenter.segment(image)
@@ -32,7 +36,11 @@ def segment(input_image, category):
     target_mask = category_mask_np == 0
     croper = Croper(input_image, target_mask)
 
-
+    croper.corp_mask_image()
+    restore_image = croper.restore_result(croper.resized_square_image)
+    mask_image = croper.resized_square_mask_image
+
+    return mask_image, restore_image
 
 def get_clothes_mask(category_mask_np):
     body_skin_mask = category_mask_np == 2
@@ -101,6 +109,7 @@ with gr.Blocks() as app:
             input_image = gr.Image(type='pil', label='Upload image')
             category = gr.Dropdown(label='Category', choices=category_options, value=category_options[0])
             submit_btn = gr.Button(value='Submit', variant='primary')
+            session_token = gr.Textbox(label='Session token', value='')
         with gr.Column():
             mask_image = gr.Image(type='pil', label='Segmentation mask')
             output_image = gr.Image(type='pil', label='Segmented image')
@@ -114,4 +123,6 @@ with gr.Blocks() as app:
         outputs=[mask_image, output_image]
     )
 
+    app.load(get_session_token, None, session_token)
+
 app.launch(debug=False, show_error=True)
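For reference, a minimal standalone sketch of the session-token wiring this commit adds to app.py, with the segmentation code left out. The KeyError fallback for runs where no x-ip-token header is present (for example outside Spaces) is an assumption; the header lookup, the Textbox, and the app.load call mirror the diff.

# Minimal sketch of the token wiring added in this commit (segmentation code omitted).
import gradio as gr

def get_session_token(request: gr.Request):
    try:
        # Same header lookup as in the diff above.
        return request.headers['x-ip-token']
    except KeyError:
        # Assumed fallback for environments where the header is not set.
        return ''

with gr.Blocks() as app:
    session_token = gr.Textbox(label='Session token', value='')
    # app.load runs once per page load and writes the token into the textbox.
    app.load(get_session_token, None, session_token)

app.launch(debug=False, show_error=True)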
croper.py
CHANGED
@@ -8,27 +8,30 @@ class Croper:
         self,
         input_image: PIL.Image,
         target_mask: np.ndarray,
+        mask_size: int = 256,
+        mask_expansion: int = 20
     ):
         self.input_image = input_image
         self.target_mask = target_mask
+        self.mask_size = mask_size
+        self.mask_expansion = mask_expansion
 
     def corp_mask_image(self):
         target_mask = self.target_mask
         input_image = self.input_image
-
-        expand_size = 40
+        mask_expansion = self.mask_expansion
         original_width, original_height = input_image.size
         mask_indices = np.where(target_mask)
-        start_y = np.min(mask_indices[0]) - expand_size
+        start_y = np.min(mask_indices[0]) - mask_expansion
         if start_y < 0:
             start_y = 0
-        end_y = np.max(mask_indices[0]) + expand_size
+        end_y = np.max(mask_indices[0]) + mask_expansion
         if end_y > original_height:
             end_y = original_height
-        start_x = np.min(mask_indices[1]) - expand_size
+        start_x = np.min(mask_indices[1]) - mask_expansion
         if start_x < 0:
             start_x = 0
-        end_x = np.max(mask_indices[1]) + expand_size
+        end_x = np.max(mask_indices[1]) + mask_expansion
         if end_x > original_width:
             end_x = original_width
         mask_height = end_y - start_y
@@ -65,7 +68,21 @@ class Croper:
         self.square_mask_image = square_mask_image
         self.square_image = square_image
 
-
-        self.
+        mask_size = self.mask_size
+        self.resized_square_mask_image = square_mask_image.resize((mask_size, mask_size))
+        self.resized_square_image = square_image.resize((mask_size, mask_size))
 
-        return self.
+        return self.resized_square_mask_image
+
+    def restore_result(self, generated_image):
+        square_length = self.square_length
+        generated_image = generated_image.resize((square_length, square_length))
+        square_mask_image = self.square_mask_image
+        cropped_generated_image = generated_image.crop((self.square_start_x, self.square_start_y, self.square_end_x, self.square_end_y))
+        cropped_square_mask_image = square_mask_image.crop((self.square_start_x, self.square_start_y, self.square_end_x, self.square_end_y))
+
+        restored_image = self.input_image.copy()
+        restored_image.paste(cropped_generated_image, (self.origin_start_x, self.origin_start_y), cropped_square_mask_image)
+
+        return restored_image
+
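For context, a hypothetical driver for the updated Croper API, mirroring how segment() in app.py uses it after this commit. The image path and the rectangular dummy mask are placeholders; in the app the mask comes from the MediaPipe category mask instead.

# Hypothetical usage of the updated Croper class; file names and the
# rectangular mask below are placeholders, not part of the Space.
import numpy as np
from PIL import Image
from croper import Croper

input_image = Image.open('person.jpg').convert('RGB')    # placeholder input image
target_mask = np.zeros((input_image.height, input_image.width), dtype=bool)
target_mask[100:300, 120:280] = True                     # placeholder region of interest

croper = Croper(input_image, target_mask, mask_size=256, mask_expansion=20)
mask_image = croper.corp_mask_image()    # mask_size x mask_size crop of the mask
# As in segment(), the resized crop itself stands in for a generated image;
# restore_result() scales it back up and pastes it into the original photo.
restore_image = croper.restore_result(croper.resized_square_image)

mask_image.save('mask.png')
restore_image.save('restored.png')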