liuyizhang committed
Commit · 058f70d
1 Parent(s): 68b6cad

update app.py && app_cli.py

Files changed:
- app.py: +10 -8
- app_cli.py: +6 -4
app.py
CHANGED
@@ -15,7 +15,6 @@ if run_gradio:
     os.system("pip install gradio==3.50.2")
 
     import gradio as gr
-
     from loguru import logger
 
 os.environ["CUDA_VISIBLE_DEVICES"] = "0"
@@ -24,6 +23,8 @@ if os.environ.get('IS_MY_DEBUG') is None:
     result = subprocess.run(['pip', 'install', '-e', 'GroundingDINO'], check=True)
     print(f'pip install GroundingDINO = {result}')
 
+    logger.info(f"Start app...")
+
     # result = subprocess.run(['pip', 'list'], check=True)
     # print(f'pip list = {result}')
 
@@ -188,7 +189,6 @@ def plot_boxes_to_image(image_pil, tgt):
 
     mask_draw.rectangle([x0, y0, x1, y1], fill=255, width=6)
 
-
     return image_pil, mask
 
 def load_image(image_path):
@@ -290,19 +290,20 @@ def mix_masks(imgs):
     re_img = 1 - re_img
     return Image.fromarray(np.uint8(255*re_img))
 
-def set_device():
+def set_device(args):
+    global device
     if os.environ.get('IS_MY_DEBUG') is None:
-        device =
+        device = args.cuda if torch.cuda.is_available() else 'cpu'
     else:
         device = 'cpu'
     print(f'device={device}')
-    return device
 
 def load_groundingdino_model(device):
     # initialize groundingdino model
+    global groundingdino_model
     logger.info(f"initialize groundingdino model...")
     groundingdino_model = load_model_hf(config_file, ckpt_repo_id, ckpt_filenmae, device=device) #'cpu')
-
+    logger.info(f"initialize groundingdino model...{type(groundingdino_model)}")
 
 def get_sam_vit_h_4b8939():
     if not os.path.exists('./sam_vit_h_4b8939.pth'):
@@ -1104,13 +1105,14 @@ if __name__ == "__main__":
     parser.add_argument("--debug", action="store_true", help="using debug mode")
     parser.add_argument("--share", action="store_true", help="share the app")
     parser.add_argument("--port", "-p", type=int, default=7860, help="port")
+    parser.add_argument("--cuda", "-c", type=str, default='cuda:0', help="cuda")
     args, _ = parser.parse_known_args()
     print(f'args = {args}')
 
     if os.environ.get('IS_MY_DEBUG') is None:
         os.system("pip list")
 
-
+    set_device(args)
     if device == 'cpu':
         kosmos_enable = False
 
@@ -1118,7 +1120,7 @@ if __name__ == "__main__":
         kosmos_model, kosmos_processor = load_kosmos_model(device)
 
     if groundingdino_enable:
-
+        load_groundingdino_model('cpu')
 
     if sam_enable:
         load_sam_model(device)
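Taken together, the app.py changes route device selection through the new --cuda argument: set_device(args) uses args.cuda when torch.cuda.is_available() and falls back to 'cpu' otherwise, while GroundingDINO is still loaded on CPU. Below is a minimal, self-contained sketch of that selection logic; the script name is hypothetical and the argparse setup is trimmed down to the single --cuda flag.

    # device_select_sketch.py - hypothetical standalone sketch, not part of the Space
    import argparse

    import torch

    parser = argparse.ArgumentParser()
    # Same flag and default the commit adds to app.py
    parser.add_argument("--cuda", "-c", type=str, default='cuda:0', help="cuda")
    args, _ = parser.parse_known_args()

    # Mirrors set_device(args): honor the requested CUDA device only when CUDA
    # is actually available, otherwise fall back to CPU.
    device = args.cuda if torch.cuda.is_available() else 'cpu'
    print(f'device={device}')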
app_cli.py
CHANGED
@@ -53,7 +53,7 @@ from io import BytesIO
 from diffusers import StableDiffusionInpaintPipeline
 from huggingface_hub import hf_hub_download
 
-from
+from util_computer import computer_info
 # relate anything
 from ram_utils import iou, sort_and_deduplicate, relation_classes, MLP, show_anns, ram_show_mask
 from ram_train_eval import RamModel,RamPredictor
@@ -85,6 +85,7 @@ def get_args():
     argparser.add_argument("--input_image", "-i", type=str, default="", help="")
     argparser.add_argument("--text", "-t", type=str, default="", help="")
     argparser.add_argument("--output_image", "-o", type=str, default="", help="")
+    argparser.add_argument("--cuda", "-c", type=str, default='cpu', help="cuda")
     args = argparser.parse_args()
     return args
 
@@ -96,8 +97,8 @@ if __name__ == '__main__':
     logger.info(f'\nargs={args}\n')
 
     logger.info(f'loading models ... ')
-    # set_device() # If you have enough GPUs, you can open this comment
-
+    # set_device(args) # If you have enough GPUs, you can open this comment
+    load_groundingdino_model('cpu')
     load_sam_model(device)
     # load_sd_model(device)
     load_lama_cleaner_model(device)
@@ -105,7 +106,7 @@ if __name__ == '__main__':
 
     input_image = Image.open(args.input_image)
 
-
+    run_rets = run_anything_task(input_image = input_image,
         text_prompt = args.text,
         task_type = 'remove',
         inpaint_prompt = '',
@@ -120,6 +121,7 @@ if __name__ == '__main__':
         kosmos_input = None,
         cleaner_size_limit = -1,
         )
+    output_images = run_rets[0]
     if len(output_images) > 0:
         logger.info(f'save result to {args.output_image} ... ')
         output_images[-1].save(args.output_image)
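With these changes, app_cli.py gains a --cuda option (defaulting to 'cpu') alongside the existing --input_image, --text, and --output_image flags, and the saved result now comes from the first element of run_anything_task's return value. A hypothetical invocation, with placeholder file names and prompt:

    python app_cli.py --input_image ./dog.jpg --text "dog" --output_image ./dog_removed.jpg --cuda cpu

Since task_type is fixed to 'remove' in the call above, the text prompt only controls which objects get detected and inpainted away.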