Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -8,6 +8,8 @@ from PIL import Image
|
|
8 |
|
9 |
MAX_SEED = np.iinfo(np.int32).max
|
10 |
MAX_IMAGE_SIZE = 1024
|
|
|
|
|
11 |
|
12 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
13 |
model_default = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
@@ -100,19 +102,26 @@ def infer(
|
|
100 |
):
|
101 |
generator = torch.Generator(device).manual_seed(seed)
|
102 |
|
103 |
-
# Генерация с
|
104 |
if use_ip_adapter and ip_source_image is not None and ip_adapter_image is not None:
|
105 |
pipe_ip_adapter = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
|
106 |
model_default,
|
107 |
controlnet=controlnet,
|
108 |
torch_dtype=torch_dtype
|
109 |
).to(device)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
110 |
|
111 |
# Преобразуем изображения
|
112 |
ip_source_image = preprocess_image(ip_source_image, width, height)
|
113 |
ip_adapter_image = preprocess_image(ip_adapter_image, width, height)
|
114 |
|
115 |
-
# Создаём пайплайн IP_adapter с LoRA, если он ещё не создан
|
116 |
if not hasattr(pipe_ip_adapter, 'lora_loaded') or not pipe_ip_adapter.lora_loaded:
|
117 |
# Загружаем LoRA для UNet
|
118 |
pipe_ip_adapter.unet = PeftModel.from_pretrained(
|
@@ -140,8 +149,7 @@ def infer(
|
|
140 |
ip_adapter_strength = float(ip_adapter_strength)
|
141 |
#strength_ip = float(strength_ip)
|
142 |
|
143 |
-
# Используем IP_adapter с LoRA
|
144 |
-
#pipe = pipe_ip_adapter
|
145 |
prompt_embeds = long_prompt_encoder(prompt, pipe_ip_adapter.tokenizer, pipe_ip_adapter.text_encoder)
|
146 |
negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe_ip_adapter.tokenizer, pipe_ip_adapter.text_encoder)
|
147 |
prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
|
@@ -155,7 +163,7 @@ def infer(
|
|
155 |
height=height,
|
156 |
num_inference_steps=num_inference_steps,
|
157 |
guidance_scale=guidance_scale,
|
158 |
-
controlnet_conditioning_scale=ip_adapter_strength,
|
159 |
generator=generator
|
160 |
).images[0]
|
161 |
else:
|
@@ -200,7 +208,6 @@ def infer(
|
|
200 |
#strength_sn = float(strength_sn)
|
201 |
|
202 |
# Используем ControlNet с LoRA
|
203 |
-
#pipe = pipe_controlnet
|
204 |
prompt_embeds = long_prompt_encoder(prompt, pipe_controlnet.tokenizer, pipe_controlnet.text_encoder)
|
205 |
negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe_controlnet.tokenizer, pipe_controlnet.text_encoder)
|
206 |
prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
|
|
|
8 |
|
9 |
MAX_SEED = np.iinfo(np.int32).max
|
10 |
MAX_IMAGE_SIZE = 1024
|
11 |
+
IP_ADAPTER = 'h94/IP-Adapter'
|
12 |
+
IP_ADAPTER_WEIGHT_NAME = "ip-adapter-plus_sd15.bin"
|
13 |
|
14 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
15 |
model_default = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
|
|
102 |
):
|
103 |
generator = torch.Generator(device).manual_seed(seed)
|
104 |
|
105 |
+
# Генерация с Ip_Adapter
|
106 |
if use_ip_adapter and ip_source_image is not None and ip_adapter_image is not None:
|
107 |
pipe_ip_adapter = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
|
108 |
model_default,
|
109 |
controlnet=controlnet,
|
110 |
torch_dtype=torch_dtype
|
111 |
).to(device)
|
112 |
+
|
113 |
+
# Добавим Ip_Adapter
|
114 |
+
pipe_ip_adapter.load_ip_adapter(IP_ADAPTER, subfolder="models", weight_name=IP_ADAPTER_WEIGHT_NAME)
|
115 |
+
#params['ip_adapter_image'] = load_image(ip_image).convert('RGB')
|
116 |
+
pipe_ip_adapter.set_ip_adapter_scale(ip_adapter_strength) # Коэфф учёта влияния Ip_Adapter на итоговое изображение
|
117 |
+
|
118 |
+
|
119 |
|
120 |
# Преобразуем изображения
|
121 |
ip_source_image = preprocess_image(ip_source_image, width, height)
|
122 |
ip_adapter_image = preprocess_image(ip_adapter_image, width, height)
|
123 |
|
124 |
+
# Создаём пайплайн IP_adapter с LoRA, если он ещё не создан
|
125 |
if not hasattr(pipe_ip_adapter, 'lora_loaded') or not pipe_ip_adapter.lora_loaded:
|
126 |
# Загружаем LoRA для UNet
|
127 |
pipe_ip_adapter.unet = PeftModel.from_pretrained(
|
|
|
149 |
ip_adapter_strength = float(ip_adapter_strength)
|
150 |
#strength_ip = float(strength_ip)
|
151 |
|
152 |
+
# Используем IP_adapter с LoRA
|
|
|
153 |
prompt_embeds = long_prompt_encoder(prompt, pipe_ip_adapter.tokenizer, pipe_ip_adapter.text_encoder)
|
154 |
negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe_ip_adapter.tokenizer, pipe_ip_adapter.text_encoder)
|
155 |
prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
|
|
|
163 |
height=height,
|
164 |
num_inference_steps=num_inference_steps,
|
165 |
guidance_scale=guidance_scale,
|
166 |
+
controlnet_conditioning_scale=1.0,  #ip_adapter_strength
|
167 |
generator=generator
|
168 |
).images[0]
|
169 |
else:
|
|
|
208 |
#strength_sn = float(strength_sn)
|
209 |
|
210 |
# Используем ControlNet с LoRA
|
|
|
211 |
prompt_embeds = long_prompt_encoder(prompt, pipe_controlnet.tokenizer, pipe_controlnet.text_encoder)
|
212 |
negative_prompt_embeds = long_prompt_encoder(negative_prompt, pipe_controlnet.tokenizer, pipe_controlnet.text_encoder)
|
213 |
prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
|