K00B404 committed
Commit 96ef3e3 · verified · 1 parent: db81f8c

Create app.py

Files changed (1): app.py +109 -0
app.py ADDED
@@ -0,0 +1,109 @@
+ import os
+
+ # Fetch the IP-Adapter code; the checkout is renamed because the hyphenated
+ # directory name "IP-Adapter" is not a valid Python package name.
+ os.system('git clone https://github.com/tencent-ailab/IP-Adapter.git')
+ os.system('mv IP-Adapter IP_Adapter')
+
+ import gradio as gr
+ import torch
+ from PIL import Image
+ from diffusers import (
+     StableDiffusionPipeline, StableDiffusionImg2ImgPipeline,
+     StableDiffusionInpaintPipelineLegacy, DDIMScheduler, AutoencoderKL
+ )
+ from IP_Adapter.ip_adapter import IPAdapter
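+ # NOTE (assumption): the weights referenced below (the CLIP image encoder
+ # under models/image_encoder/ and the checkpoint models/ip-adapter_sd15.bin)
+ # must already be on disk; app.py clones the IP-Adapter code but does not
+ # download the weights.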
+
+ # Paths and device
+ base_model_path = "runwayml/stable-diffusion-v1-5"
+ vae_model_path = "stabilityai/sd-vae-ft-mse"
+ image_encoder_path = "models/image_encoder/"
+ ip_ckpt = "models/ip-adapter_sd15.bin"
+ # float16 is poorly supported on CPU, so select the device automatically
+ # and tie the dtype to it; in practice a GPU is required for this demo.
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ torch_dtype = torch.float16 if device == "cuda" else torch.float32
+
+ # VAE and scheduler
+ noise_scheduler = DDIMScheduler(
+     num_train_timesteps=1000,
+     beta_start=0.00085,
+     beta_end=0.012,
+     beta_schedule="scaled_linear",
+     clip_sample=False,
+     set_alpha_to_one=False,
+     steps_offset=1,
+ )
+ vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch_dtype)
+
+ def image_grid(imgs, rows, cols):
+     # Paste the generated samples into a single rows x cols contact sheet.
+     assert len(imgs) == rows * cols
+     w, h = imgs[0].size
+     grid = Image.new('RGB', size=(cols * w, rows * h))
+     for i, img in enumerate(imgs):
+         grid.paste(img, box=(i % cols * w, i // cols * h))
+     return grid
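+ # The three generators below share one pattern: build a Stable Diffusion
+ # pipeline, wrap it in IPAdapter for image-prompt conditioning, sample four
+ # images with a fixed seed, and return them as a 1x4 grid.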
+
+ def generate_variations(upload_img):
+     pipe = StableDiffusionPipeline.from_pretrained(
+         base_model_path,
+         scheduler=noise_scheduler,
+         vae=vae,
+         feature_extractor=None,
+         safety_checker=None,
+         torch_dtype=torch_dtype
+     )
+     ip_model = IPAdapter(pipe, image_encoder_path, ip_ckpt, device)
+     images = ip_model.generate(pil_image=upload_img, num_samples=4, num_inference_steps=50, seed=42)
+     return image_grid(images, 1, 4)
+
+ def generate_img2img(base_img, guide_img):
+     pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
+         base_model_path,
+         torch_dtype=torch_dtype,
+         scheduler=noise_scheduler,
+         vae=vae,
+         feature_extractor=None,
+         safety_checker=None
+     )
+     ip_model = IPAdapter(pipe, image_encoder_path, ip_ckpt, device)
+     # pil_image is the IP-Adapter image prompt; image is the img2img init
+     # image, and strength controls how far it is allowed to change.
+     images = ip_model.generate(pil_image=base_img, image=guide_img, strength=0.6, num_samples=4, num_inference_steps=50, seed=42)
+     return image_grid(images, 1, 4)
+
+ def generate_inpaint(input_img, masked_img, mask_img):
+     # Note: this legacy inpainting pipeline is deprecated in newer
+     # diffusers releases.
+     pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained(
+         base_model_path,
+         torch_dtype=torch_dtype,
+         scheduler=noise_scheduler,
+         vae=vae,
+         feature_extractor=None,
+         safety_checker=None
+     )
+     ip_model = IPAdapter(pipe, image_encoder_path, ip_ckpt, device)
+     images = ip_model.generate(pil_image=input_img, image=masked_img, mask_image=mask_img,
+                                strength=0.7, num_samples=4, num_inference_steps=50, seed=42)
+     return image_grid(images, 1, 4)
+
+ # Gradio Interface
+ with gr.Blocks() as demo:
+     gr.Markdown("# IP-Adapter Image Manipulation Demo")
+
+     with gr.Tab("Image Variations"):
+         with gr.Row():
+             img_input = gr.Image(type="pil", label="Upload Image")
+             img_output = gr.Image(label="Generated Variations")
+         img_btn = gr.Button("Generate Variations")
+         img_btn.click(fn=generate_variations, inputs=img_input, outputs=img_output)
+
+     with gr.Tab("Image-to-Image"):
+         with gr.Row():
+             img1 = gr.Image(type="pil", label="Base Image")
+             img2 = gr.Image(type="pil", label="Guide Image")
+             img2_out = gr.Image(label="Output")
+         btn2 = gr.Button("Generate Img2Img")
+         btn2.click(fn=generate_img2img, inputs=[img1, img2], outputs=img2_out)
+
+     with gr.Tab("Inpainting"):
+         with gr.Row():
+             inpaint_img = gr.Image(type="pil", label="Input Image")
+             masked = gr.Image(type="pil", label="Masked Image")
+             mask = gr.Image(type="pil", label="Mask")
+             inpaint_out = gr.Image(label="Inpainted")
+         btn3 = gr.Button("Generate Inpainting")
+         btn3.click(fn=generate_inpaint, inputs=[inpaint_img, masked, mask], outputs=inpaint_out)
+
+ demo.launch()
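Design note: each button click rebuilds its pipeline from scratch, which keeps the three tabs independent but adds model-loading latency to every request; constructing the pipelines once at import time would trade memory for responsiveness.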