import argparse
import os
import tempfile

import cv2
import gradio as gr
import numpy as np
import spaces  # HF Spaces helper; conventionally imported in Space apps even when @spaces.GPU is unused
import torch
from gradio_imageslider import ImageSlider
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms.functional import normalize

from args import get_args_parser
from DAM_V2.depth_anything_v2.dpt import DepthAnythingV2
from models.PDFNet import build_model
css = """
#img-display-container {
max-height: 100vh;
}
#img-display-input {
max-height: 80vh;
}
#img-display-output {
max-height: 80vh;
}
#download {
height: 62px;
}
"""
# The public demo runs on CPU; switch to the commented line to use a GPU when available.
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
# Build PDFNet with its default arguments and load the released checkpoint.
parser = argparse.ArgumentParser('PDFNet Testing script', parents=[get_args_parser()])
args = parser.parse_args(args=[])  # parse an empty argv so the defaults are used
model, _ = build_model(args)
model_path = hf_hub_download(repo_id="Tennineee/PDFNet", filename="PDFNet_Best.pth", repo_type="model")
model.load_state_dict(torch.load(model_path, map_location='cpu'), strict=False)
model = model.to(device).eval()
DAMV2_configs = {
'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
}
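# 'features' and 'out_channels' set the DPT decoder widths for each ViT
# backbone; they must match the pretrained checkpoint loaded below.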
encoder = 'vitb'  # or 'vits', 'vitl'
encoder2name = {
'vits': 'Small',
'vitb': 'Base',
'vitl': 'Large',
'vitg': 'Giant', # we are undergoing company review procedures to release our giant model checkpoint
}
# Load the matching Depth-Anything-V2 checkpoint from the Hub.
model_name = encoder2name[encoder]
DAMV2 = DepthAnythingV2(**DAMV2_configs[encoder])
filepath = hf_hub_download(repo_id=f"depth-anything/Depth-Anything-V2-{model_name}", filename=f"depth_anything_v2_{encoder}.pth", repo_type="model")
state_dict = torch.load(filepath, map_location="cpu")
DAMV2.load_state_dict(state_dict)
DAMV2 = DAMV2.to(device).eval()
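# DAMV2.infer_image takes a uint8 numpy image and returns a float32 (H, W)
# relative-depth map at the input resolution; predict() below uses it as
# PDFNet's depth prior.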
title = "# PDFNet"
description = """Official demo for **PDFNet**. Depth-Anything-V2 (Base) is used here to generate the depth map.
Please refer to our [paper](https://arxiv.org/abs/2503.06100) and [GitHub repository](https://github.com/Tennine2077/PDFNet) for more details."""
class GOSNormalize(object):
    """Normalize a tensor image with per-channel mean and std (ImageNet statistics by default)."""
    def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
        self.mean = mean
        self.std = std

    def __call__(self, image):
        return normalize(image, self.mean, self.std)

transforms = GOSNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
def predict(image):
    """Run PDFNet on an RGB uint8 image of shape (H, W, 3); returns the DIS map at the input resolution."""
    H, W = image.shape[:2]
    # Monocular depth prior from Depth-Anything-V2.
    depth = DAMV2.infer_image(image)
    # Resize image and depth to the 1024x1024 resolution PDFNet expects
    # (cast to float first, since bilinear interpolation rejects uint8 tensors).
    image = torch.nn.functional.interpolate(torch.from_numpy(image).permute(2, 0, 1)[None, ...].float(), size=[1024, 1024], mode='bilinear', align_corners=True)[0]
    depth = torch.nn.functional.interpolate(torch.from_numpy(depth)[None, None, ...], size=[1024, 1024], mode='bilinear', align_corners=True)
    image = torch.divide(image, 255.0)
    depth = torch.divide(depth, 255.0)
    image = transforms(image).unsqueeze(0)
    DIS_map = model.inference(image.to(device), depth.to(device))[0][0][0].cpu()
    # Resize the prediction back to the original resolution.
    DIS_map = cv2.resize(np.array(DIS_map), (W, H))
    return DIS_map
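# Minimal standalone usage sketch (hypothetical path; cv2 reads BGR, so
# convert to RGB first to match what Gradio hands to predict()):
#   img = cv2.cvtColor(cv2.imread('assets/examples/demo.jpg'), cv2.COLOR_BGR2RGB)
#   mask = predict(img)
#   mask = (mask - mask.min()) / (mask.max() - mask.min())
#   cv2.imwrite('demo_mask.png', (mask * 255.0).astype(np.uint8))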
with gr.Blocks(css=css) as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    gr.Markdown("### Dichotomous Image Segmentation demo")
    with gr.Row():
        input_image = gr.Image(label="Input Image", type='numpy', elem_id='img-display-input')
        dis_image_slider = ImageSlider(label="Prediction View", elem_id='img-display-output', position=0.5)
    submit = gr.Button(value="Compute")
    raw_file = gr.File(label="16-bit raw output (grayscale segmentation map)", elem_id="download")
    def on_submit(image):
        original_image = image.copy()
        DIS_map = predict(image)
        # Normalize to [0, 1] before quantizing.
        DIS_map = (DIS_map - DIS_map.min()) / (DIS_map.max() - DIS_map.min())
        # Save the full-range result as a true 16-bit PNG for download.
        raw_DIS_map = Image.fromarray((DIS_map * 65535.0).astype(np.uint16))
        tmp_raw_DIS_map = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        raw_DIS_map.save(tmp_raw_DIS_map.name)
        # The slider compares the original image with an 8-bit preview of the mask.
        return [[original_image, (DIS_map * 255.0).astype(np.uint8)], tmp_raw_DIS_map.name]
    submit.click(on_submit, inputs=[input_image], outputs=[dis_image_slider, raw_file])

    example_files = sorted(os.listdir('assets/examples'))
    example_files = [os.path.join('assets/examples', filename) for filename in example_files]
    examples = gr.Examples(examples=example_files, inputs=[input_image], outputs=[dis_image_slider, raw_file], fn=on_submit)
if __name__ == '__main__':
    demo.queue().launch(share=True)
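# Note: share=True asks Gradio for a temporary public gradio.live link; when
# deployed as a Hugging Face Space the flag is not needed and the app is
# served at the Space URL instead.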