from diffusers import (
    StableDiffusionPipeline,
    StableDiffusionImg2ImgPipeline,
    DPMSolverMultistepScheduler,
)
import gradio as gr
import torch
from PIL import Image
import time
import psutil
import random
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker

start_time = time.time()
current_steps = 25

# One shared safety checker instance, reused by every pipeline to save memory.
SAFETY_CHECKER = StableDiffusionSafetyChecker.from_pretrained(
    "CompVis/stable-diffusion-safety-checker", torch_dtype=torch.float16
)


class Model:
    def __init__(self, name, path=""):
        self.name = name
        self.path = path
        if path != "":
            self.pipe_t2i = StableDiffusionPipeline.from_pretrained(
                path, torch_dtype=torch.float16, safety_checker=SAFETY_CHECKER
            )
            self.pipe_t2i.scheduler = DPMSolverMultistepScheduler.from_config(
                self.pipe_t2i.scheduler.config
            )
            # Reuse the text-to-image components so both pipelines share the same weights.
            self.pipe_i2i = StableDiffusionImg2ImgPipeline(**self.pipe_t2i.components)
        else:
            self.pipe_t2i = None
            self.pipe_i2i = None


models = [
    Model("2.2", "darkstorm2150/Protogen_v2.2_Official_Release"),
    Model("3.4", "darkstorm2150/Protogen_x3.4_Official_Release"),
    Model("5.3", "darkstorm2150/Protogen_v5.3_Official_Release"),
    Model("5.8", "darkstorm2150/Protogen_x5.8_Official_Release"),
    Model("Dragon", "darkstorm2150/Protogen_Dragon_Official_Release"),
]

MODELS = {m.name: m for m in models}

device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"


def error_str(error, title="Error"):
    return (
        f"""#### {title}
            {error}"""
        if error
        else ""
    )


def inference(
    model_name,
    prompt,
    guidance,
    steps,
    n_images=1,
    width=512,
    height=512,
    seed=0,
    img=None,
    strength=0.5,
    neg_prompt="",
):
    print(psutil.virtual_memory())  # print memory usage

    if seed == 0:
        seed = random.randint(0, 2147483647)

    # Seed a generator on the GPU when available, otherwise fall back to CPU.
    generator = torch.Generator(
        "cuda" if torch.cuda.is_available() else "cpu"
    ).manual_seed(seed)

    try:
        if img is not None:
            return (
                img_to_img(
                    model_name,
                    prompt,
                    n_images,
                    neg_prompt,
                    img,
                    strength,
                    guidance,
                    steps,
                    width,
                    height,
                    generator,
                    seed,
                ),
                f"Done. Seed: {seed}",
            )
        else:
            return (
                txt_to_img(
                    model_name,
                    prompt,
                    n_images,
                    neg_prompt,
                    guidance,
                    steps,
                    width,
                    height,
                    generator,
                    seed,
                ),
                f"Done. Seed: {seed}",
            )
    except Exception as e:
        return None, error_str(e)


def txt_to_img(
    model_name,
    prompt,
    n_images,
    neg_prompt,
    guidance,
    steps,
    width,
    height,
    generator,
    seed,
):
    pipe = MODELS[model_name].pipe_t2i

    if torch.cuda.is_available():
        pipe = pipe.to("cuda")
        pipe.enable_xformers_memory_efficient_attention()

    result = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_images_per_prompt=n_images,
        num_inference_steps=int(steps),
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator,
    )

    # Move the pipeline back to CPU so the next model can use the GPU.
    pipe.to("cpu")
    return replace_nsfw_images(result)


def img_to_img(
    model_name,
    prompt,
    n_images,
    neg_prompt,
    img,
    strength,
    guidance,
    steps,
    width,
    height,
    generator,
    seed,
):
    pipe = MODELS[model_name].pipe_i2i

    if torch.cuda.is_available():
        pipe = pipe.to("cuda")
        pipe.enable_xformers_memory_efficient_attention()

    # Resize the input image to fit the requested dimensions while keeping its aspect ratio.
    ratio = min(height / img.height, width / img.width)
    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)

    result = pipe(
        prompt,
        negative_prompt=neg_prompt,
        num_images_per_prompt=n_images,
        image=img,
        num_inference_steps=int(steps),
        strength=strength,
        guidance_scale=guidance,
        generator=generator,
    )

    pipe.to("cpu")
    return replace_nsfw_images(result)


def replace_nsfw_images(results):
    # Swap any flagged output for a placeholder image.
    for i in range(len(results.images)):
        if results.nsfw_content_detected[i]:
            results.images[i] = Image.open("nsfw.png")
    return results.images


with gr.Blocks(css="style.css") as demo:
    gr.HTML(
        """

        <div class="main-div">
          <div>
            <h1>Protogen Diffusion</h1>
          </div>
          <p>Demo for multiple fine-tuned Protogen Stable Diffusion models.</p>
          <p>
            You can also duplicate this space and upgrade to GPU by going to settings:
            Duplicate Space
          </p>
        </div>

""" ) with gr.Row(): with gr.Column(scale=55): with gr.Group(): model_name = gr.Dropdown( label="Model", choices=[m.name for m in models], value=models[0].name, ) with gr.Box(visible=False) as custom_model_group: custom_model_path = gr.Textbox( label="Custom model path", placeholder="Path to model, e.g. darkstorm2150/Protogen_x3.4_Official_Release", interactive=True, ) gr.HTML( "
Custom models have to be downloaded first, so give it some time.
" ) with gr.Row(): prompt = gr.Textbox( label="Prompt", show_label=False, max_lines=2, placeholder="Enter prompt.", ).style(container=False) generate = gr.Button(value="Generate").style( rounded=(False, True, True, False) ) # image_out = gr.Image(height=512) gallery = gr.Gallery( label="Generated images", show_label=False, elem_id="gallery" ).style(grid=[2], height="auto") state_info = gr.Textbox(label="State", show_label=False, max_lines=2).style( container=False ) error_output = gr.Markdown() with gr.Column(scale=45): with gr.Tab("Options"): with gr.Group(): neg_prompt = gr.Textbox( label="Negative prompt", placeholder="What to exclude from the image", ) n_images = gr.Slider( label="Images", value=1, minimum=1, maximum=4, step=1 ) with gr.Row(): guidance = gr.Slider( label="Guidance scale", value=7.5, maximum=15 ) steps = gr.Slider( label="Steps", value=current_steps, minimum=2, maximum=75, step=1, ) with gr.Row(): width = gr.Slider( label="Width", value=512, minimum=64, maximum=1024, step=8 ) height = gr.Slider( label="Height", value=512, minimum=64, maximum=1024, step=8 ) seed = gr.Slider( 0, 2147483647, label="Seed (0 = random)", value=0, step=1 ) with gr.Tab("Image to image"): with gr.Group(): image = gr.Image( label="Image", height=256, tool="editor", type="pil" ) strength = gr.Slider( label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5, ) inputs = [ model_name, prompt, guidance, steps, n_images, width, height, seed, image, strength, neg_prompt, ] outputs = [gallery, error_output] prompt.submit(inference, inputs=inputs, outputs=outputs) generate.click(inference, inputs=inputs, outputs=outputs) ex = gr.Examples( [ [models[0].name, "portrait of a beautiful alyx vance half life", 10, 25], [models[1].name, "Brad Pitt with sunglasses, highly realistic", 7.5, 25], [models[2].name, "(extremely detailed CG unity 8k wallpaper), the most beautiful artwork in the world", 7.5, 25], [models[3.name, "(extremely detailed CG unity 8k wallpaper), full shot body photo star lord chris pratt posing in an outdoor spaceship, holding a gun, extremely detailed, trending on ArtStation, trending on CGSociety, Intricate, High Detail, dramatic, realism, beautiful and detailed lighting, shadows", 7.5, 25], [models[4].name, "(extremely detailed CG unity 8k wallpaper), full body portrait of (david:1.1), staring at us with a mysterious gaze, realistic, masterpiece, highest quality, ((scifi)), lens flare, ((light sparkles)), unreal engine, digital painting, trending on ArtStation, trending on CGSociety, Intricate, High Detail, dramatic, realism, beautiful and detailed lighting, shadows", 7.5, 25], ], inputs=[model_name, prompt, guidance, steps], outputs=outputs, fn=inference, cache_examples=False, ) gr.HTML( """

        <div class="footer">
          <p>Models by @darkstorm2150 and others. ❤️</p>
          <p>This space uses the DPM-Solver++ sampler by Cheng Lu, et al.</p>
          <p>Space by: Darkstorm (Victor Espinoza)<br>Instagram</p>
        </div>

""" ) print(f"Space built in {time.time() - start_time:.2f} seconds") demo.queue(concurrency_count=1) demo.launch()