import os
import cv2
import numpy as np
import torch
import gradio as gr
import spaces
from typing import Tuple
from PIL import Image
from torchvision import transforms
import requests
from io import BytesIO
import zipfile

# Fix the HF Space permission error by redirecting the modules cache to a writable path.
os.environ["HF_MODULES_CACHE"] = os.path.join("/tmp/hf_cache", "modules")

import transformers
transformers.utils.move_cache()

torch.set_float32_matmul_precision('high')
# Monkey-patch torch.jit.script to a no-op so decorated functions in the remote model code are not scripted.
torch.jit.script = lambda f: f

device = "cuda" if torch.cuda.is_available() else "cpu"
def refine_foreground(image, mask, r=90):
    if mask.size != image.size:
        mask = mask.resize(image.size)
    image = np.array(image) / 255.0
    mask = np.array(mask) / 255.0
    estimated_foreground = FB_blur_fusion_foreground_estimator_2(image, mask, r=r)
    image_masked = Image.fromarray((estimated_foreground * 255.0).astype(np.uint8))
    return image_masked
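
# Two-pass blur-fusion estimator: a coarse pass with a large blur radius, then a
# refinement pass with r=6, returning only the estimated foreground.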
def FB_blur_fusion_foreground_estimator_2(image, alpha, r=90):
    alpha = alpha[:, :, None]
    F, blur_B = FB_blur_fusion_foreground_estimator(image, image, image, alpha, r)
    return FB_blur_fusion_foreground_estimator(image, F, blur_B, alpha, r=6)[0]


def FB_blur_fusion_foreground_estimator(image, F, B, alpha, r=90):
    if isinstance(image, Image.Image):
        image = np.array(image) / 255.0
    blurred_alpha = cv2.blur(alpha, (r, r))[:, :, None]
    blurred_FA = cv2.blur(F * alpha, (r, r))
    blurred_F = blurred_FA / (blurred_alpha + 1e-5)
    blurred_B1A = cv2.blur(B * (1 - alpha), (r, r))
    blurred_B = blurred_B1A / ((1 - blurred_alpha) + 1e-5)
    F = blurred_F + alpha * (image - alpha * blurred_F - (1 - alpha) * blurred_B)
    F = np.clip(F, 0, 1)
    return F, blurred_B
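
# Preprocessing: resize to the working resolution, convert to a tensor, and normalize
# with the standard ImageNet mean/std.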
class ImagePreprocessor:
    def __init__(self, resolution: Tuple[int, int] = (1024, 1024)) -> None:
        self.transform_image = transforms.Compose([
            transforms.Resize(resolution[::-1]),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

    def proc(self, image: Image.Image) -> torch.Tensor:
        return self.transform_image(image)
# Fixed weights: the zhengpeng7/BiRefNet checkpoint on the Hugging Face Hub.
weights_file = 'BiRefNet'
birefnet = transformers.AutoModelForImageSegmentation.from_pretrained(
    '/'.join(('zhengpeng7', weights_file)), trust_remote_code=True
)
birefnet.to(device)
birefnet.eval()
birefnet.half()
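
# ZeroGPU Spaces allocate a GPU per call; the unused `spaces` import and the
# "Running on Zero" badge suggest the handler is meant to carry this decorator
# (added here as an assumption, not present in the original listing).
@spaces.GPU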
def predict(images, resolution):
    assert images is not None, 'images cannot be None.'
    _weights_file = '/'.join(('zhengpeng7', weights_file))
    print('Using weights: {}.'.format(_weights_file))

    # Parse the "WxH" resolution string and round each side down to a multiple of 32.
    try:
        resolution = [int(int(reso) // 32 * 32) for reso in resolution.strip().split('x')]
    except (AttributeError, ValueError):
        resolution = (1024, 1024)
        print('Invalid resolution input. Automatically changed to 1024x1024.')
    # A list of file paths comes from the batch tab; a single image comes from the image/URL tabs.
    if isinstance(images, list):
        save_paths = []
        save_dir = 'preds-BiRefNet'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        tab_is_batch = True
    else:
        images = [images]
        tab_is_batch = False

    for idx_image, image_src in enumerate(images):
        if isinstance(image_src, str):
            if os.path.isfile(image_src):
                image_ori = Image.open(image_src)
            else:
                # Treat non-file strings as URLs.
                response = requests.get(image_src)
                image_data = BytesIO(response.content)
                image_ori = Image.open(image_data)
        else:
            image_ori = Image.fromarray(image_src)

        image = image_ori.convert('RGB')
        if resolution is None:
            resolution_div_by_32 = [int(int(reso) // 32 * 32) for reso in image.size]
            resolution = resolution_div_by_32
        image_preprocessor = ImagePreprocessor(resolution=tuple(resolution))
        image_proc = image_preprocessor.proc(image).unsqueeze(0)
        with torch.no_grad():
            preds = birefnet(image_proc.to(device).half())[-1].sigmoid().cpu()
        pred = preds[0].squeeze()

        # Convert the predicted matte to PIL, refine the foreground, and attach the matte as alpha.
        pred_pil = transforms.ToPILImage()(pred)
        image_masked = refine_foreground(image, pred_pil)
        image_masked.putalpha(pred_pil.resize(image.size))

        torch.cuda.empty_cache()

        if tab_is_batch:
            save_file_path = os.path.join(save_dir, "{}.png".format(os.path.splitext(os.path.basename(image_src))[0]))
            image_masked.save(save_file_path)
            save_paths.append(save_file_path)

    if tab_is_batch:
        # Zip all saved predictions so they can be downloaded as a single file.
        zip_file_path = os.path.join(save_dir, "{}.zip".format(save_dir))
        with zipfile.ZipFile(zip_file_path, 'w') as zipf:
            for file in save_paths:
                zipf.write(file, os.path.basename(file))
        return save_paths, zip_file_path
    else:
        return image_masked, image_ori

descriptions = (
    "Upload a picture, and we'll remove the background!\n"
    "The default resolution is `1024x1024`.\n"
)
tab_image = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(label='Upload an image'),
        gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
    ],
    outputs=gr.ImageSlider(label="Lot Lingo's prediction", type="pil", format='png'),
    api_name="image",
    description=descriptions,
)
tab_text = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(label="Paste an image URL"),
        gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
    ],
    outputs=gr.ImageSlider(label="Lot Lingo's prediction", type="pil", format='png'),
    api_name="URL",
)
tab_batch = gr.Interface(
    fn=predict,
    inputs=[
        gr.File(label="Upload multiple images", type="filepath", file_count="multiple"),
        gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
    ],
    outputs=[gr.Gallery(label="Lot Lingo's predictions"), gr.File(label="Download masked images.")],
    api_name="batch",
)
demo = gr.TabbedInterface(
    [tab_image, tab_text, tab_batch],
    ['image', 'URL', 'batch'],
    title="Lot Lingo Background Removal Demo",
)

if __name__ == "__main__":
    demo.launch(debug=True)
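
# A minimal client-side sketch (not part of the app): once the demo is running, the
# "image" endpoint can be called with gradio_client. The URL and file path below are
# placeholders/assumptions, not values defined in this Space.
#
#   from gradio_client import Client, handle_file
#
#   client = Client("http://127.0.0.1:7860")  # or the public Space URL
#   result = client.predict(
#       handle_file("input.jpg"),  # hypothetical local image to process
#       "1024x1024",               # resolution string in WxH form
#       api_name="/image",
#   )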