Commit 1519098
Parent(s): 32b0bc1
chore: Update default base model path in app.py
app.py CHANGED

@@ -22,7 +22,7 @@ def parse_args():
     parser.add_argument(
         "--base_model_path",
         type=str,
-        default="
+        default="booksforcharlie/stable-diffusion-inpainting",
         help=(
             "The path to the base model to use for evaluation. This can be a local path or a model identifier from the Model Hub."
         ),
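The new default is a Hugging Face Hub model identifier rather than a local path. As a rough illustration of how such a default is typically consumed (a minimal sketch, not code from this commit: the StableDiffusionInpaintPipeline class, the torch_dtype choice, and the `from app import parse_args` wiring are assumptions), loading the model could look like this:

# Minimal sketch: load the default base model named above with diffusers.
# Assumes app.py's parse_args() is importable and that an inpainting
# pipeline is the intended consumer; neither is confirmed by this diff.
import torch
from diffusers import StableDiffusionInpaintPipeline

from app import parse_args

args = parse_args()  # --base_model_path defaults to "booksforcharlie/stable-diffusion-inpainting"
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    args.base_model_path,
    torch_dtype=torch.float16,
).to("cuda")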
utils.py CHANGED

@@ -583,7 +583,6 @@ def is_xformers_available():
 )
 
 
-
 def resize_and_crop(image, size):
     # Crop to size ratio
     w, h = image.size
@@ -621,49 +620,4 @@ def resize_and_padding(image, size):
 
 
 if __name__ == "__main__":
-    import torch
-    import torch.nn.functional as F
-    from torchvision import transforms
-    from PIL import Image, ImageFilter
-    import numpy as np
-
-    def vis_sobel_weight(image_path, mask_path) -> PIL.Image.Image:
-
-        image = Image.open(image_path).convert("RGB")
-        w, h = image.size
-        l_w, l_h = w // 8, h // 8
-        image = image.resize((l_w, l_h))
-        mask = Image.open(mask_path).convert("L").resize((l_w, l_h))
-        image_pt = transforms.ToTensor()(image).unsqueeze(0).to("cuda")
-        mask_pt = transforms.ToTensor()(mask).unsqueeze(0).to("cuda")
-        sobel_pt = sobel(image_pt, mask_pt, scale=1.0)
-        sobel_image = sobel_pt.squeeze().cpu().numpy()
-        sobel_image = Image.fromarray((sobel_image * 255).astype(np.uint8))
-        sobel_image = sobel_image.resize((w, h), resample=Image.NEAREST)
-        # Smooth the image
-        sobel_image = sobel_image.filter(ImageFilter.SMOOTH)
-        from data.utils import grayscale_to_heatmap
-
-        sobel_image = grayscale_to_heatmap(sobel_image)
-        image = Image.open(image_path).convert("RGB").resize((w, h))
-        sobel_image = Image.blend(image, sobel_image, alpha=0.5)
-        return sobel_image
-
-    save_folder = "./sobel_vis-2.0"
-    if not os.path.exists(save_folder):
-        os.makedirs(save_folder)
-    from data.utils import scan_files_in_dir
-
-    for i in scan_files_in_dir(
-        "/home/chongzheng/Projects/try-on-project/Datasets/VITONHD-1024/test/Images"
-    ):
-        image_path = i.path
-
-        if i.path.endswith("-1.jpg"):
-            result_path = os.path.join(save_folder, os.path.basename(image_path))
-
-            mask_path = image_path.replace("Images", "AgnosticMask").replace(
-                "-1.jpg", "_mask-1.png"
-            )
-            vis_sobel_weight(image_path, mask_path).save(result_path)
     pass
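For context, the deleted `__main__` block was a throwaway visualization script: it downscaled each VITON-HD test image and its agnostic mask to latent resolution, ran the repository's `sobel` helper on them, converted the response to a heatmap, and blended it over the original image. The `sobel` helper itself is not part of this diff; the sketch below is only a guess at what a mask-gated Sobel edge-magnitude map looks like in PyTorch (kernel values, normalization, and the `sobel_sketch` name are illustrative assumptions, not the repository's implementation):

# Hedged sketch of a mask-gated Sobel edge-magnitude map; an assumption
# about what `sobel(image, mask, scale)` computes, not code from this repo.
import torch
import torch.nn.functional as F

def sobel_sketch(image: torch.Tensor, mask: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
    # image: (B, 3, H, W) in [0, 1]; mask: (B, 1, H, W) in [0, 1]
    gray = image.mean(dim=1, keepdim=True)  # crude luminance
    kx = torch.tensor([[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]],
                      device=image.device).view(1, 1, 3, 3)
    ky = kx.transpose(2, 3)  # Sobel y is the transpose of Sobel x
    gx = F.conv2d(gray, kx, padding=1)
    gy = F.conv2d(gray, ky, padding=1)
    mag = torch.sqrt(gx ** 2 + gy ** 2) * scale
    mag = mag / (mag.amax(dim=(2, 3), keepdim=True) + 1e-8)  # normalize per image
    return mag * mask  # keep edge response only inside the inpainting mask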