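"""Gradio Space demo for Depth Anything V2 depth estimation."""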
import gradio as gr
import cv2
import matplotlib
import numpy as np
import os
from PIL import Image
import mmap
import json
import spaces
import torch
import tempfile
from gradio_imageslider import ImageSlider
from huggingface_hub import hf_hub_download
import safetensors
from depth_anything_v2.dpt import DepthAnythingV2
css = """
#img-display-container {
    max-height: 100vh;
}
#img-display-input {
    max-height: 80vh;
}
#img-display-output {
    max-height: 80vh;
}
#download {
    height: 62px;
}
"""
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
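# Backbone configurations for each Depth Anything V2 encoder variant.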
model_configs = {
    'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
    'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
    'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
    'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
}
encoder2name = {
    'vits': 'Small',
    'vitb': 'Base',
    'vitl': 'Large',
    'vitg': 'Giant',  # we are undergoing company review procedures to release our giant model checkpoint
}
encoder = 'vitl'
model_name = encoder2name[encoder]
model = DepthAnythingV2(**model_configs[encoder])
filepath = hf_hub_download(repo_id="depth-anything/Depth-Anything-V2-Metric-Indoor-Large-hf", filename="model.safetensors", repo_type="model")
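# Minimal manual safetensors reader: the file begins with an 8-byte little-endian
# header length, followed by a JSON header describing each tensor's dtype, shape
# and byte offsets, followed by the raw tensor data.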
def create_tensor(storage, info, offset):
    # Map safetensors dtype strings to torch dtypes (the common float formats).
    DTYPES = {"F32": torch.float32, "F16": torch.float16, "BF16": torch.bfloat16}
    dtype = DTYPES[info["dtype"]]
    shape = info["shape"]
    start, stop = info["data_offsets"]
    return torch.asarray(storage[start + offset : stop + offset], dtype=torch.uint8).view(dtype=dtype).reshape(shape)
def load_file(filename):
    with open(filename, mode="r", encoding="utf8") as file_obj:
        with mmap.mmap(file_obj.fileno(), length=0, access=mmap.ACCESS_READ) as m:
            header = m.read(8)
            n = int.from_bytes(header, "little")
            metadata_bytes = m.read(n)
            metadata = json.loads(metadata_bytes)
    size = os.stat(filename).st_size
    storage = torch.ByteStorage.from_file(filename, shared=False, size=size).untyped()
    offset = n + 8
    return {name: create_tensor(storage, info, offset) for name, info in metadata.items() if name != "__metadata__"}
# The reader above returns a dict mapping parameter names to tensors, which is
# exactly what `load_state_dict` expects.
state_dict = load_file(filepath)
model.load_state_dict(state_dict)
model = model.to(DEVICE).eval()
title = "# Depth Anything V2"
description = """Official demo for **Depth Anything V2**.
Please refer to our [paper](https://arxiv.org/abs/2406.09414), [project page](https://depth-anything-v2.github.io), and [github](https://github.com/DepthAnything/Depth-Anything-V2) for more details."""
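# `spaces.GPU` requests GPU access for the decorated call on ZeroGPU Spaces (a no-op elsewhere).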
@spaces.GPU
def predict_depth(image):
    return model.infer_image(image)
with gr.Blocks(css=css) as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    gr.Markdown("### Depth Prediction demo")
    with gr.Row():
        input_image = gr.Image(label="Input Image", type='numpy', elem_id='img-display-input')
        depth_image_slider = ImageSlider(label="Depth Map with Slider View", elem_id='img-display-output', position=0.5)
    submit = gr.Button(value="Compute Depth")
    gray_depth_file = gr.File(label="Grayscale depth map", elem_id="download")
    raw_file = gr.File(label="16-bit raw output (can be considered as disparity)", elem_id="download")
    cmap = matplotlib.colormaps.get_cmap('Spectral_r')
    def on_submit(image):
        original_image = image.copy()
        h, w = image.shape[:2]
        # The model expects BGR input, so flip the RGB channels coming from Gradio.
        depth = predict_depth(image[:, :, ::-1])
        # Save the raw depth values as a 16-bit PNG.
        raw_depth = Image.fromarray(depth.astype('uint16'))
        tmp_raw_depth = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        raw_depth.save(tmp_raw_depth.name)
        # Normalize to [0, 255] for the grayscale and colorized previews.
        depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
        depth = depth.astype(np.uint8)
        colored_depth = (cmap(depth)[:, :, :3] * 255).astype(np.uint8)
        gray_depth = Image.fromarray(depth)
        tmp_gray_depth = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        gray_depth.save(tmp_gray_depth.name)
        return [(original_image, colored_depth), tmp_gray_depth.name, tmp_raw_depth.name]
    submit.click(on_submit, inputs=[input_image], outputs=[depth_image_slider, gray_depth_file, raw_file])

    example_files = os.listdir('assets/examples')
    example_files.sort()
    example_files = [os.path.join('assets/examples', filename) for filename in example_files]
    examples = gr.Examples(examples=example_files, inputs=[input_image], outputs=[depth_image_slider, gray_depth_file, raw_file], fn=on_submit)
if __name__ == '__main__':
    demo.queue().launch(share=True)