import os
import sys
# Newer torchvision releases removed torchvision.transforms.functional_tensor, which basicsr
# still imports; alias it to torchvision.transforms.functional so that import keeps working.
from torchvision.transforms import functional
sys.modules["torchvision.transforms.functional_tensor"] = functional
import spaces
from basicsr.archs.srvgg_arch import SRVGGNetCompact
from gfpgan.utils import GFPGANer
from realesrgan.utils import RealESRGANer
import torch
import cv2
import gradio as gr
from gradio_imageslider import ImageSlider  # before/after image slider component
from PIL import Image  # used to convert numpy images to PIL images
# Download the required model weights
if not os.path.exists('realesr-general-x4v3.pth'):
    os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P .")
if not os.path.exists('GFPGANv1.2.pth'):
    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.2.pth -P .")
if not os.path.exists('GFPGANv1.3.pth'):
    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth -P .")
if not os.path.exists('GFPGANv1.4.pth'):
    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P .")
if not os.path.exists('RestoreFormer.pth'):
    os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth -P .")
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
model_path = 'realesr-general-x4v3.pth'
half = torch.cuda.is_available()  # use fp16 only when a GPU is available
upsampler = RealESRGANer(scale=4, model_path=model_path, model=model, tile=0, tile_pad=10, pre_pad=0, half=half)
# Create an output directory for saved images (uncomment if needed)
# os.makedirs('output', exist_ok=True)
@spaces.GPU(duration=120)
def upscaler(img, version, scale):
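    """Restore faces with GFPGAN (background via Real-ESRGAN) and return an
    (original, enhanced) PIL image pair for the ImageSlider output."""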
    try:
        # `img` is a file path (gr.Image is configured with type="filepath")
        image_array = cv2.imread(img, cv2.IMREAD_UNCHANGED)
        if image_array is None:
            print("Failed to load image")
            return None, None
        # Detect the color mode: RGBA, grayscale (promoted to BGR), or plain BGR
        if len(image_array.shape) == 3 and image_array.shape[2] == 4:
            img_mode = 'RGBA'
        elif len(image_array.shape) == 2:
            img_mode = None
            image_array = cv2.cvtColor(image_array, cv2.COLOR_GRAY2BGR)
        else:
            img_mode = None
        # Upsample small inputs 2x with Lanczos before face enhancement
        h, w = image_array.shape[0:2]
        if h < 300:
            image_array = cv2.resize(image_array, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4)
        # Convert the (possibly resized) original image to RGB or RGBA for the "before" view
        if img_mode == 'RGBA':
            original_rgb = cv2.cvtColor(image_array, cv2.COLOR_BGRA2RGBA)
        else:
            original_rgb = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)
        face_enhancer = GFPGANer(
            model_path=f'{version}.pth',
            upscale=2,
            arch='RestoreFormer' if version == 'RestoreFormer' else 'clean',
            channel_multiplier=2,
            bg_upsampler=upsampler
        )
        try:
            _, _, output = face_enhancer.enhance(image_array, has_aligned=False, only_center_face=False, paste_back=True)
        except RuntimeError as error:
            print('Error', error)
            return None, None
        # Optionally rescale the enhanced output relative to the default 2x upscale
        try:
            if scale != 2:
                interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4
                h, w = image_array.shape[0:2]
                output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation)
        except Exception as error:
            print('Invalid rescaling input.', error)
        output_rgb = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
        # Convert numpy images to PIL images (the slider output uses type="pil")
        original_pil = Image.fromarray(original_rgb)
        output_pil = Image.fromarray(output_rgb)
        # Return the before/after pair so the slider shows both images
        return original_pil, output_pil
    except Exception as error:
        print('Global exception', error)
        return None, None
if __name__ == "__main__":
    title = "Image Upscaling and Restoration [GFPGAN algorithm]"
    demo = gr.Interface(
        upscaler, [
            gr.Image(type="filepath", label="Input"),
            gr.Radio(['GFPGANv1.2', 'GFPGANv1.3', 'GFPGANv1.4', 'RestoreFormer'], type="value", label="Version", value="GFPGANv1.4", visible=False),
            gr.Number(label="Rescaling factor", value=0, visible=False),
        ], [
            ImageSlider(label="Output", type="pil")
        ],
        title=title,
        examples=[["์˜ˆ์ œ.png", "GFPGANv1.4", 0]],
        allow_flagging="never"
    )
    demo.queue()
    demo.launch()