Commit 383d7cb · Parent(s): e995f9f
Update app.py

app.py CHANGED
@@ -120,7 +120,7 @@ def load_img_1_(nparr, gray: bool = False):
     return np_img, alpha_channel
 
 model = None
-def
+def model_process_pil(input):
     global model
 
     # input = request.files
@@ -225,7 +225,7 @@ def model_process(input):
     return ext
     '''
 
-def
+def model_process_filepath(input): #image, mask):
     global model
     # {'image': '/tmp/tmp8mn9xw93.png', 'mask': '/tmp/tmpn5ars4te.png'}
     # input = request.files
@@ -237,6 +237,9 @@ def model_process_2(input): #image, mask):
     original_shape = image.shape
     interpolation = cv2.INTER_CUBIC
 
+    image_pil = Image.fromarray(image)
+    # mask_pil = Image.fromarray(mask).convert("L")
+
     # form = request.form
     # print(f'size_limit_1_ = ', form["sizeLimit"], type(input["image"]))
     size_limit = "Original" #: Union[int, str] = form.get("sizeLimit", "1080")
@@ -285,26 +288,33 @@ def model_process_2(input): #image, mask):
     mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
     print(f"mask image shape: {mask.shape} / {type(mask)} / {mask[250][250]} / {alpha_channel}")
 
+    if model is None:
+        return None
+
     start = time.time()
     res_np_img = model(image, mask, config)
-    logger.info(f"process time: {(time.time() - start) * 1000}ms")
-    print(f"process
+    logger.info(f"process time: {(time.time() - start) * 1000}ms, {res_np_img.shape}")
+    print(f"process time_1_: {(time.time() - start) * 1000}ms, {alpha_channel.shape}, {res_np_img.shape} / {res_np_img[250][250]} / {res_np_img.dtype}")
 
     torch.cuda.empty_cache()
 
     if alpha_channel is not None:
-        print(f"
+        print(f"liuyz_here_10_: {alpha_channel.shape} / {alpha_channel.dtype} / {res_np_img.dtype}")
         if alpha_channel.shape[:2] != res_np_img.shape[:2]:
+            print(f"liuyz_here_20_: {alpha_channel.shape} / {res_np_img.shape}")
             alpha_channel = cv2.resize(
                 alpha_channel, dsize=(res_np_img.shape[1], res_np_img.shape[0])
             )
+        print(f"liuyz_here_30_: {alpha_channel.shape} / {res_np_img.shape} / {alpha_channel.dtype} / {res_np_img.dtype}")
         res_np_img = np.concatenate(
             (res_np_img, alpha_channel[:, :, np.newaxis]), axis=-1
         )
-
+        print(f"liuyz_here_40_: {alpha_channel.shape} / {res_np_img.shape} / {alpha_channel.dtype} / {res_np_img.dtype}")
+        print(f"process time_2_: {(time.time() - start) * 1000}ms, {alpha_channel.shape}, {res_np_img.shape} / {res_np_img[250][250]} / {res_np_img.dtype}")
+
     image = Image.fromarray(res_np_img)
     image.save(f'./result_image.png')
-    return image
+    return image_pil # image
     '''
     ext = get_image_ext(origin_image_bytes)
 
@@ -346,6 +356,7 @@ def read_content(file_path):
 
     return content
 
+image_type = 'filepath' #'pil'
 def predict(input):
     print(f'liuyz_0_', input)
     '''
@@ -362,12 +373,15 @@ def predict(input):
     print(f'liuyz_3_', image.convert("RGB").resize((512, 512)).shape)
     # mask = dict["mask"] # .convert("RGB") #.resize((512, 512))
     '''
-
+    if image_type == 'filepath':
+        output = model_process_filepath(input) # dict["image"], dict["mask"])
+    elif image_type == 'pil':
+        output = model_process_pil(input)
 
     # output = mask #output.images[0]
     # output = pipe(prompt = prompt, image=init_image, mask_image=mask,guidance_scale=7.5)
     # output = input["mask"]
-    output = None
+    # output = None
     return output #, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
 
     print(f'liuyz_500_here_')
@@ -460,7 +474,7 @@ with image_blocks as demo:
     with gr.Box():
         with gr.Row():
             with gr.Column():
-                image = gr.Image(source='upload', tool='sketch',type='
+                image = gr.Image(source='upload', tool='sketch',type=f'{image_type}', label="Upload").style(height=512)
                 with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
                     # prompt = gr.Textbox(placeholder = 'Your prompt (what you want in place of what is erased)', show_label=False, elem_id="input-text")
                     btn = gr.Button("Done!").style(
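
The commit also switches the Gradio input to type=f'{image_type}' with image_type = 'filepath', so the sketch tool hands predict a dict of temp-file paths (see the # {'image': ..., 'mask': ...} comment in model_process_filepath). A hypothetical loader for that input shape, sketching the first thing model_process_filepath has to do; load_sketch_input is not part of app.py:

import cv2

def load_sketch_input(input: dict):
    # With gr.Image(source='upload', tool='sketch', type='filepath'),
    # input looks like {'image': '/tmp/xxx.png', 'mask': '/tmp/yyy.png'}.
    image = cv2.cvtColor(cv2.imread(input["image"]), cv2.COLOR_BGR2RGB)
    mask = cv2.imread(input["mask"], cv2.IMREAD_GRAYSCALE)
    return image, mask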