Commit a15a169
Parent(s): 53c5d04
Update app.py

app.py CHANGED
@@ -120,204 +120,6 @@ def load_img_1_(nparr, gray: bool = False):
     return np_img, alpha_channel

 model = None
-def model_process_pil(input):
-    global model
-
-    # input = request.files
-    # RGB
-    # origin_image_bytes = input["image"].read()
-    image_pil = input['image']
-    mask_pil = input['mask']
-
-    image = np.array(image_pil)
-    mask = np.array(mask_pil.convert("L"))
-    # print(f'image_pil_ = {type(image_pil)}')
-    # print(f'mask_pil_ = {type(mask_pil)}')
-    # mask_pil.save(f'./mask_pil.png')
-
-    #image, alpha_channel = load_img(image)
-    # Origin image shape: (512, 512, 3)
-
-    alpha_channel = (np.ones((image.shape[0],image.shape[1]))*255).astype(np.uint8)
-    original_shape = image.shape
-    interpolation = cv2.INTER_CUBIC
-
-    # form = request.form
-    print(f'liuyz_3_here_', original_shape, alpha_channel, image.dtype, mask.dtype)
-
-    size_limit = "Original" # image.shape[1] # : Union[int, str] = form.get("sizeLimit", "1080")
-    if size_limit == "Original":
-        size_limit = max(image.shape)
-    else:
-        size_limit = int(size_limit)
-
-    config = Config(
-        ldm_steps=25,
-        ldm_sampler='plms',
-        zits_wireframe=True,
-        hd_strategy='Original',
-        hd_strategy_crop_margin=196,
-        hd_strategy_crop_trigger_size=1280,
-        hd_strategy_resize_limit=2048,
-        prompt='',
-        use_croper=False,
-        croper_x=0,
-        croper_y=0,
-        croper_height=512,
-        croper_width=512,
-        sd_mask_blur=5,
-        sd_strength=0.75,
-        sd_steps=50,
-        sd_guidance_scale=7.5,
-        sd_sampler='ddim',
-        sd_seed=42,
-        cv2_flag='INPAINT_NS',
-        cv2_radius=5,
-    )
-
-    # print(f'config = {config}')
-
-    print(f'config/alpha_channel/size_limit = {config} / {alpha_channel} / {size_limit}')
-    if config.sd_seed == -1:
-        config.sd_seed = random.randint(1, 999999999)
-
-    # logger.info(f"Origin image shape: {original_shape}")
-    print(f"Origin image shape: {original_shape} / {image[250][250]}")
-    image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
-    # logger.info(f"Resized image shape: {image.shape}")
-    print(f"Resized image shape: {image.shape} / {image[250][250]} / {image.dtype}")
-
-    # mask, _ = load_img(mask, gray=True)
-    #mask = np.array(mask_pil)
-    mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
-    print(f"mask image shape: {mask.shape} / {type(mask)} / {mask[250][250]} / {mask.dtype}")
-
-    if model is None:
-        return None
-
-    start = time.time()
-    res_np_img = model(image, mask, config)
-    logger.info(f"process time: {(time.time() - start) * 1000}ms, {res_np_img.shape}")
-    print(f"process time_1_: {(time.time() - start) * 1000}ms, {alpha_channel.shape}, {res_np_img.shape} / {res_np_img[250][250]} / {res_np_img.dtype}")
-
-    torch.cuda.empty_cache()
-
-    if alpha_channel is not None:
-        print(f"liuyz_here_10_: {alpha_channel.shape} / {alpha_channel.dtype} / {res_np_img.dtype}")
-        if alpha_channel.shape[:2] != res_np_img.shape[:2]:
-            print(f"liuyz_here_20_: {alpha_channel.shape} / {res_np_img.shape}")
-            alpha_channel = cv2.resize(
-                alpha_channel, dsize=(res_np_img.shape[1], res_np_img.shape[0])
-            )
-            print(f"liuyz_here_30_: {alpha_channel.shape} / {res_np_img.shape} / {alpha_channel.dtype} / {res_np_img.dtype}")
-        res_np_img = np.concatenate(
-            (res_np_img, alpha_channel[:, :, np.newaxis]), axis=-1
-        )
-        print(f"liuyz_here_40_: {alpha_channel.shape} / {res_np_img.shape} / {alpha_channel.dtype} / {res_np_img.dtype}")
-    print(f"process time_2_: {(time.time() - start) * 1000}ms, {alpha_channel.shape}, {res_np_img.shape} / {res_np_img[250][250]} / {res_np_img.dtype}")
-    ext = 'png'
-    image = Image.open(io.BytesIO(numpy_to_bytes(res_np_img, ext)))
-    image.save(f'./result_image.png')
-    return image # res_np_img.astype(np.uint8) # image
-
-    '''
-    ext = get_image_ext(origin_image_bytes)
-    return ext
-    '''
-
-def model_process_filepath(input): #image, mask):
-    global model
-    # {'image': '/tmp/tmp8mn9xw93.png', 'mask': '/tmp/tmpn5ars4te.png'}
-    # input = request.files
-    # RGB
-    origin_image_bytes = read_content(input["image"])
-    print(f'origin_image_bytes = ', type(origin_image_bytes), len(origin_image_bytes))
-
-    image, alpha_channel = load_img(origin_image_bytes)
-
-    alpha_channel = (np.ones((image.shape[0],image.shape[1]))*255).astype(np.uint8)
-    original_shape = image.shape
-    interpolation = cv2.INTER_CUBIC
-
-    image_pil = Image.fromarray(image)
-    # mask_pil = Image.fromarray(mask).convert("L")
-
-    # form = request.form
-    # print(f'size_limit_1_ = ', form["sizeLimit"], type(input["image"]))
-    size_limit = "Original" #: Union[int, str] = form.get("sizeLimit", "1080")
-    print(f'size_limit_2_ = {size_limit}')
-    if size_limit == "Original":
-        size_limit = max(image.shape)
-    else:
-        size_limit = int(size_limit)
-    print(f'size_limit_3_ = {size_limit}')
-
-    config = Config(
-        ldm_steps=25,
-        ldm_sampler='plms',
-        zits_wireframe=True,
-        hd_strategy='Original',
-        hd_strategy_crop_margin=196,
-        hd_strategy_crop_trigger_size=1280,
-        hd_strategy_resize_limit=2048,
-        prompt='',
-        use_croper=False,
-        croper_x=0,
-        croper_y=0,
-        croper_height=512,
-        croper_width=512,
-        sd_mask_blur=5,
-        sd_strength=0.75,
-        sd_steps=50,
-        sd_guidance_scale=7.5,
-        sd_sampler='ddim',
-        sd_seed=42,
-        cv2_flag='INPAINT_NS',
-        cv2_radius=5,
-    )
-
-    print(f'config/alpha_channel/size_limit = {config} / {alpha_channel} / {size_limit}')
-    if config.sd_seed == -1:
-        config.sd_seed = random.randint(1, 999999999)
-
-    logger.info(f"Origin image shape: {original_shape}")
-    print(f"Origin image shape: {original_shape} / {image[250][250]}")
-    image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
-    logger.info(f"Resized image shape: {image.shape} / {type(image)}")
-    print(f"Resized image shape: {image.shape} / {image[250][250]}")
-
-    mask, _ = load_img(read_content(input["mask"]), gray=True)
-    mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
-    print(f"mask image shape: {mask.shape} / {type(mask)} / {mask[250][250]} / {alpha_channel}")
-
-    if model is None:
-        return None
-
-    start = time.time()
-    res_np_img = model(image, mask, config)
-    logger.info(f"process time: {(time.time() - start) * 1000}ms, {res_np_img.shape}")
-    print(f"process time_1_: {(time.time() - start) * 1000}ms, {alpha_channel.shape}, {res_np_img.shape} / {res_np_img[250][250]} / {res_np_img.dtype}")
-
-    torch.cuda.empty_cache()
-
-    if alpha_channel is not None:
-        print(f"liuyz_here_10_: {alpha_channel.shape} / {alpha_channel.dtype} / {res_np_img.dtype}")
-        if alpha_channel.shape[:2] != res_np_img.shape[:2]:
-            print(f"liuyz_here_20_: {alpha_channel.shape} / {res_np_img.shape}")
-            alpha_channel = cv2.resize(
-                alpha_channel, dsize=(res_np_img.shape[1], res_np_img.shape[0])
-            )
-            print(f"liuyz_here_30_: {alpha_channel.shape} / {res_np_img.shape} / {alpha_channel.dtype} / {res_np_img.dtype}")
-        res_np_img = np.concatenate(
-            (res_np_img, alpha_channel[:, :, np.newaxis]), axis=-1
-        )
-        print(f"liuyz_here_40_: {alpha_channel.shape} / {res_np_img.shape} / {alpha_channel.dtype} / {res_np_img.dtype}")
-    ext = get_image_ext(origin_image_bytes)
-    print(f"process time_2_: {(time.time() - start) * 1000}ms, {alpha_channel.shape}, {res_np_img.shape} / {res_np_img[250][250]} / {res_np_img.dtype} /{ext}")
-
-    image = Image.open(io.BytesIO(numpy_to_bytes(res_np_img, ext)))
-    image.save(f'./result_image.png')
-    return image # image

 def model_process(image, mask, alpha_channel, ext):
     global model

@@ -382,6 +184,7 @@ def model_process(image, mask, alpha_channel, ext):

     torch.cuda.empty_cache()

+    alpha_channel = None
     if alpha_channel is not None:
         print(f"liuyz_here_10_: {alpha_channel.shape} / {alpha_channel.dtype} / {res_np_img.dtype}")
         if alpha_channel.shape[:2] != res_np_img.shape[:2]: