import gradio as gr
import spaces
from gradio_imageslider import ImageSlider
from image_gen_aux import UpscaleWithModel
from image_gen_aux.utils import load_image
import torch
# This app uses the upscalers from the Image Generation Auxiliary Tools library: https://github.com/asomoza/image_gen_aux/blob/main/src/image_gen_aux/upscalers/README.md
# It was duplicated from the library authors' official Hugging Face space, https://huggingface.co/spaces/OzzyGT/basic_upscaler
# They did great work, and I was happy to see them also using my models :) so I duplicated their space and extended it.
# It got me to buy a Pro account so I could make a Zero GPU space, and to upload a lot of my models to Hugging Face so I can use them here ;)
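# A minimal standalone sketch of the library calls this app relies on (it mirrors the
# code further down; the file paths are just placeholders). Kept commented out so it
# does not execute at import time on the Space:
#
#   import torch
#   from image_gen_aux import UpscaleWithModel
#   from image_gen_aux.utils import load_image
#
#   image = load_image("input.png")  # placeholder path
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   upscaler = UpscaleWithModel.from_pretrained("Phips/4xNomosWebPhoto_RealPLKSR").to(device)
#   upscaled = upscaler(image, tiling=True, tile_width=1024, tile_height=1024)
#   upscaled.save("upscaled.png")  # assuming a PIL image is returned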
# My models, alphabetically sorted (display name -> Hugging Face repo id)
MODELS = {
"1xDeH264_realplksr": "Phips/1xDeH264_realplksr",
"1xDeJPG_HAT": "Phips/1xDeJPG_HAT",
"1xDeJPG_OmniSR": "Phips/1xDeJPG_OmniSR",
"1xDeJPG_realplksr_otf": "Phips/1xDeJPG_realplksr_otf",
"1xDeJPG_SRFormer_light": "Phips/1xDeJPG_SRFormer_light",
"1xDeNoise_realplksr_otf": "Phips/1xDeNoise_realplksr_otf",
"1xExposureCorrection_compact": "Phips/1xExposureCorrection_compact",
"1xOverExposureCorrection_compact": "Phips/1xOverExposureCorrection_compact",
"1xUnderExposureCorrection_compact": "Phips/1xUnderExposureCorrection_compact",
"2xAoMR_mosr": "Phips/2xAoMR_mosr",
"2xEvangelion_compact": "Phips/2xEvangelion_compact",
"2xEvangelion_dat2": "Phips/2xEvangelion_dat2",
"2xEvangelion_omnisr": "Phips/2xEvangelion_omnisr",
"2xHFA2k_compact_multijpg": "Phips/2xHFA2k_compact_multijpg",
"2xHFA2k_LUDVAE_compact": "Phips/2xHFA2k_LUDVAE_compact",
"2xHFA2k_LUDVAE_SPAN": "Phips/2xHFA2k_LUDVAE_SPAN",
"2xHFA2kAVCCompact": "Phips/2xHFA2kAVCCompact",
"2xHFA2kAVCOmniSR": "Phips/2xHFA2kAVCOmniSR",
"2xHFA2kAVCSRFormer_light": "Phips/2xHFA2kAVCSRFormer_light",
"2xHFA2kCompact": "Phips/2xHFA2kCompact",
"2xHFA2kOmniSR": "Phips/2xHFA2kOmniSR",
"2xHFA2kReal-CUGAN": "Phips/2xHFA2kReal-CUGAN",
"2xHFA2kShallowESRGAN": "Phips/2xHFA2kShallowESRGAN",
"2xHFA2kSPAN": "Phips/2xHFA2kSPAN",
"2xHFA2kSwinIR-S": "Phips/2xHFA2kSwinIR-S",
"2xLexicaRRDBNet": "Phips/2xLexicaRRDBNet",
"2xLexicaRRDBNet_Sharp": "Phips/2xLexicaRRDBNet_Sharp",
"2xNomosUni_compact_multijpg": "Phips/2xNomosUni_compact_multijpg",
"2xNomosUni_compact_multijpg_ldl": "Phips/2xNomosUni_compact_multijpg_ldl",
"2xNomosUni_compact_otf_medium": "Phips/2xNomosUni_compact_otf_medium",
"2xNomosUni_esrgan_multijpg": "Phips/2xNomosUni_esrgan_multijpg",
"2xNomosUni_span_multijpg": "Phips/2xNomosUni_span_multijpg",
"2xNomosUni_span_multijpg_ldl": "Phips/2xNomosUni_span_multijpg_ldl",
"2xParimgCompact": "Phips/2xParimgCompact",
"4x4xTextures_GTAV_rgt-s": "Phips/4xTextures_GTAV_rgt-s",
"4xArtFaces_realplksr_dysample": "Phips/4xArtFaces_realplksr_dysample",
"4xBHI_dat2_multiblur": "Phips/4xBHI_dat2_multiblur",
"4xBHI_dat2_multiblurjpg": "Phips/4xBHI_dat2_multiblurjpg",
"4xBHI_dat2_otf": "Phips/4xBHI_dat2_otf",
"4xBHI_dat2_real": "Phips/4xBHI_dat2_real",
"4xBHI_realplksr_dysample_multi": "Phips/4xBHI_realplksr_dysample_multi",
"4xBHI_realplksr_dysample_multiblur": "Phips/4xBHI_realplksr_dysample_multiblur",
"4xBHI_realplksr_dysample_otf": "Phips/4xBHI_realplksr_dysample_otf",
"4xBHI_realplksr_dysample_otf_nn": "Phips/4xBHI_realplksr_dysample_otf_nn",
"4xBHI_realplksr_dysample_real": "Phips/4xBHI_realplksr_dysample_real",
"4xFaceUpDAT": "Phips/4xFaceUpDAT",
"4xFaceUpLDAT": "Phips/4xFaceUpLDAT",
"4xFaceUpSharpDAT": "Phips/4xFaceUpSharpDAT",
"4xFaceUpSharpLDAT": "Phips/4xFaceUpSharpLDAT",
"4xFFHQDAT": "Phips/4xFFHQDAT",
"4xFFHQLDAT": "Phips/4xFFHQLDAT",
"4xHFA2k": "Phips/4xHFA2k",
"4xHFA2k_ludvae_realplksr_dysample": "Phips/4xHFA2k_ludvae_realplksr_dysample",
"4xHFA2kLUDVAEGRL_small": "Phips/4xHFA2kLUDVAEGRL_small",
"4xHFA2kLUDVAESRFormer_light": "Phips/4xHFA2kLUDVAESRFormer_light",
"4xHFA2kLUDVAESwinIR_light": "Phips/4xHFA2kLUDVAESwinIR_light",
"4xLexicaDAT2_otf": "Phips/4xLexicaDAT2_otf",
"4xLSDIRCompact2": "Phips/4xLSDIRCompact2",
"4xLSDIRCompact": "Phips/4xLSDIRCompact",
"4xLSDIRCompactC3": "Phips/4xLSDIRCompactC3",
"4xLSDIRCompactC": "Phips/4xLSDIRCompactC",
"4xLSDIRCompactCR3": "Phips/4xLSDIRCompactCR3",
"4xLSDIRCompactN3": "Phips/4xLSDIRCompactN3",
"4xLSDIRCompactR3": "Phips/4xLSDIRCompactR3",
"4xLSDIRCompactR": "Phips/4xLSDIRCompactR",
"4xLSDIRDAT": "Phips/4xLSDIRDAT",
"4xNature_realplksr_dysample": "Phips/4xNature_realplksr_dysample",
"4xNomos2_hq_atd": "Phips/4xNomos2_hq_atd",
"4xNomos2_hq_dat2": "Phips/4xNomos2_hq_dat2",
"4xNomos2_hq_drct-l": "Phips/4xNomos2_hq_drct-l",
"4xNomos2_hq_mosr": "Phips/4xNomos2_hq_mosr",
"4xNomos2_otf_esrgan": "Phips/4xNomos2_otf_esrgan",
"4xNomos2_realplksr_dysample": "Phips/4xNomos2_realplksr_dysample",
"4xNomos8k_atd_jpg": "Phips/4xNomos8k_atd_jpg",
"4xNomos8kDAT": "Phips/4xNomos8kDAT",
"4xNomos8kHAT-L_bokeh_jpg": "Phips/4xNomos8kHAT-L_bokeh_jpg",
"4xNomos8kHAT-L_otf": "Phips/4xNomos8kHAT-L_otf",
"4xNomos8kSC": "Phips/4xNomos8kSC",
"4xNomos8kSCHAT-L": "Phips/4xNomos8kSCHAT-L",
"4xNomos8kSCHAT-S": "Phips/4xNomos8kSCHAT-S",
"4xNomos8kSCSRFormer": "Phips/4xNomos8kSCSRFormer",
"4xNomosUni_rgt_multijpg": "Phips/4xNomosUni_rgt_multijpg",
"4xNomosUni_rgt_s_multijpg": "Phips/4xNomosUni_rgt_s_multijpg",
"4xNomosUni_span_multijpg": "Phips/4xNomosUni_span_multijpg",
"4xNomosUniDAT2_box": "Phips/4xNomosUniDAT2_box",
"4xNomosUniDAT2_multijpg_ldl": "Phips/4xNomosUniDAT2_multijpg_ldl",
"4xNomosUniDAT2_multijpg_ldl_sharp": "Phips/4xNomosUniDAT2_multijpg_ldl_sharp",
"4xNomosUniDAT_bokeh_jpg": "Phips/4xNomosUniDAT_bokeh_jpg",
"4xNomosUniDAT_otf": "Phips/4xNomosUniDAT_otf",
"4xNomosWebPhoto_atd": "Phips/4xNomosWebPhoto_atd",
"4xNomosWebPhoto_esrgan": "Phips/4xNomosWebPhoto_esrgan",
"4xNomosWebPhoto_RealPLKSR": "Phips/4xNomosWebPhoto_RealPLKSR",
"4xReal_SSDIR_DAT_GAN": "Phips/4xReal_SSDIR_DAT_GAN",
"4xRealWebPhoto_v3_atd": "Phips/4xRealWebPhoto_v3_atd",
"4xRealWebPhoto_v4_dat2": "Phips/4xRealWebPhoto_v4_dat2",
"4xRealWebPhoto_v4_drct-l": "Phips/4xRealWebPhoto_v4_drct-l",
"4xSSDIRDAT": "Phips/4xSSDIRDAT",
"4xTextureDAT2_otf": "Phips/4xTextureDAT2_otf",
"4xTextures_GTAV_rgt-s": "Phips/4xTextures_GTAV_rgt-s",
"4xTextures_GTAV_rgt-s_dither": "Phips/4xTextures_GTAV_rgt-s_dither",
}
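# A note on the naming convention above: the leading "1x"/"2x"/"4x" in a model name
# is its scale factor, so the 1x models clean up degradations (JPEG/h264 compression,
# noise, exposure) at the original resolution, while the 2x and 4x models upscale by
# that factor.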
@spaces.GPU
def upscale_image(image, model_selection):
    original = load_image(image)
    # Prefer the GPU when one is available (ZeroGPU attaches one for this decorated call)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    upscaler = UpscaleWithModel.from_pretrained(MODELS[model_selection]).to(device)
    # Tiled inference (1024x1024 tiles) keeps memory usage bounded on large inputs
    image = upscaler(original, tiling=True, tile_width=1024, tile_height=1024)
    # Return both images so the ImageSlider can show a before/after comparison
    return original, image
def clear_result():
    return gr.update(value=None)
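# clear_result runs first in the .click() chain below, so the slider empties
# immediately instead of showing the previous result while the (potentially slow)
# GPU upscale is running.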
title = """<h1 align="center">Image Upscaler</h1>
<div align="center">Use this Space to upscale your images. It makes use of the
<a href="https://github.com/asomoza/image_gen_aux">Image Generation Auxiliary Tools</a> library. <br>
This space makes use of <a href="https://github.com/Phhofm/models">my self-trained models</a> and tiles at 1024x1024.<br>
Here is an <a href="https://huggingface.co/spaces/Phips/Upscaler/resolve/main/input_example1.png">example input image</a> you can use to try it out.</div>
"""
with gr.Blocks() as demo:
    gr.HTML(title)
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil", label="Input Image")
            model_selection = gr.Dropdown(
                choices=list(MODELS.keys()),
                value="4xNomosWebPhoto_RealPLKSR",
                label="Model (alphabetically sorted)",
            )
            run_button = gr.Button("Upscale")
        with gr.Column():
            result = ImageSlider(
                interactive=False,
                label="Generated Image",
            )
    run_button.click(
        fn=clear_result,
        inputs=None,
        outputs=result,
    ).then(
        fn=upscale_image,
        inputs=[input_image, model_selection],
        outputs=result,
    )
demo.launch(share=True)
# Comment for the sake of a commit