import argparse
import os
import warnings

import cv2
import numpy as np
import torch
from PIL import Image
import gradio as gr

# Build the custom ops and pin packaging before importing groundingdino
# (needed when running inside a Hugging Face Space).
os.system("python setup.py build develop --user")
os.system("pip install packaging==21.3")
warnings.filterwarnings("ignore")

from groundingdino.models import build_model
from groundingdino.util.slconfig import SLConfig
from groundingdino.util.utils import clean_state_dict
from groundingdino.util.inference import annotate, predict
import groundingdino.datasets.transforms as T
from huggingface_hub import hf_hub_download

# Config and checkpoint for the Grounding DINO SwinB model.
config_file = "groundingdino/config/GroundingDINO_SwinB_cfg.py"
ckpt_repo_id = "ShilongLiu/GroundingDINO"
ckpt_filename = "groundingdino_swinb_cogcoor.pth"


def load_model_hf(model_config_path, repo_id, filename, device='cpu'):
    # Build the model from its config, then load the checkpoint from the Hub.
    args = SLConfig.fromfile(model_config_path)
    args.device = device
    model = build_model(args)
    cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
    checkpoint = torch.load(cache_file, map_location='cpu')
    log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
    print("Model loaded from {} \n => {}".format(cache_file, log))
    _ = model.eval()
    return model


def image_transform_grounding(init_image):
    # Resize, convert to tensor, and normalize with ImageNet statistics.
    transform = T.Compose([
        T.RandomResize([800], max_size=1333),
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    image, _ = transform(init_image, None)  # tensor of shape (3, h, w)
    return init_image, image


def image_transform_grounding_for_vis(init_image):
    # Resize only, so the output stays a PIL image for visualization.
    transform = T.Compose([
        T.RandomResize([800], max_size=1333),
    ])
    image, _ = transform(init_image, None)
    return image


model = load_model_hf(config_file, ckpt_repo_id, ckpt_filename)


def run_grounding(input_image, grounding_caption, box_threshold, text_threshold):
    init_image = input_image.convert("RGB")
    _, image_tensor = image_transform_grounding(init_image)
    image_pil: Image.Image = image_transform_grounding_for_vis(init_image)

    # Run grounding: keep boxes and phrases that clear both thresholds.
    boxes, logits, phrases = predict(
        model, image_tensor, grounding_caption, box_threshold, text_threshold, device='cpu'
    )
    annotated_frame = annotate(
        image_source=np.asarray(image_pil), boxes=boxes, logits=logits, phrases=phrases
    )
    # annotate() returns a BGR array; convert to RGB for PIL.
    image_with_box = Image.fromarray(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))

    return image_with_box


if __name__ == "__main__":
    parser = argparse.ArgumentParser("Grounding DINO demo", add_help=True)
    parser.add_argument("--debug", action="store_true", help="using debug mode")
    parser.add_argument("--share", action="store_true", help="share the app")
    args = parser.parse_args()

    css = """
    #mkd {
        height: 500px;
        overflow: auto;
        border: 1px solid #ccc;
    }
    """

    # Build the UI with gr.Blocks, passing the CSS so it actually takes effect.
    block = gr.Blocks(css=css)
    with block:
        gr.Markdown("# Grounding DINO")
        gr.Markdown("## Open-World Detection with Grounding DINO")
        gr.Markdown("Note: the model runs on CPU, so inference may take a while.")

        with gr.Row():
            with gr.Column():
                input_image = gr.Image(type="pil", label="Input Image")
                grounding_caption = gr.Textbox(label="Detection Prompt")
                run_button = gr.Button("Run")
                # Confidence thresholds for box selection and phrase matching.
                box_threshold = gr.Slider(
                    minimum=0.0, maximum=1.0, value=0.25, step=0.001,
                    label="Box Threshold"
                )
                text_threshold = gr.Slider(
                    minimum=0.0, maximum=1.0, value=0.25, step=0.001,
                    label="Text Threshold"
                )
            with gr.Column():
                gallery = gr.Image(type="pil", label="Detection Result")

        run_button.click(
            fn=run_grounding,
            inputs=[input_image, grounding_caption, box_threshold, text_threshold],
            outputs=[gallery],
        )

    # Example setup removed - older Gradio versions may not support the Examples component.
    block.launch(share=args.share, debug=args.debug, show_api=False, show_error=True)