Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -22,8 +22,8 @@ WEIGHT_NAME = "ip-adapter_sd15.bin"
|
|
22 |
WEIGHT_NAME_plus = "ip-adapter-plus_sd15.bin"
|
23 |
WEIGHT_NAME_face = "ip-adapter-full-face_sd15.bin"
|
24 |
|
25 |
-
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
26 |
model_default = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
|
|
27 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
28 |
|
29 |
def get_lora_sd_pipeline(
|
@@ -105,10 +105,10 @@ pipe_default = get_lora_sd_pipeline(lora_dir='lora_man_animestyle', base_model_n
|
|
105 |
def infer(
|
106 |
prompt,
|
107 |
negative_prompt,
|
|
|
108 |
width=512,
|
109 |
height=512,
|
110 |
num_inference_steps=50,
|
111 |
-
model=model_default, #'stable-diffusion-v1-5/stable-diffusion-v1-5',
|
112 |
seed=4,
|
113 |
guidance_scale=7.5,
|
114 |
lora_scale=0.7,
|
@@ -188,9 +188,9 @@ def infer(
|
|
188 |
image = pipe_ip_adapter(
|
189 |
prompt_embeds=prompt_embeds,
|
190 |
negative_prompt_embeds=negative_prompt_embeds,
|
191 |
-
image=ip_adapter_image,
|
192 |
-
ip_adapter_image=ip_source_image,
|
193 |
-
strength=strength_ip,
|
194 |
width=width,
|
195 |
height=height,
|
196 |
num_inference_steps=num_inference_steps,
|
@@ -276,9 +276,9 @@ def infer(
|
|
276 |
image = pipe_ip_adapter(
|
277 |
prompt_embeds=prompt_embeds,
|
278 |
negative_prompt_embeds=negative_prompt_embeds,
|
279 |
-
image=ip_adapter_image,
|
280 |
-
ip_adapter_image=ip_source_image,
|
281 |
-
strength=strength_ip,
|
282 |
width=width,
|
283 |
height=height,
|
284 |
num_inference_steps=num_inference_steps,
|
@@ -318,7 +318,7 @@ def infer(
|
|
318 |
height=height,
|
319 |
ip_adapter_image=ip_adapter_image,
|
320 |
num_inference_steps=num_inference_steps,
|
321 |
-
strength=strength_ip,
|
322 |
guidance_scale=guidance_scale,
|
323 |
controlnet_conditioning_scale=0.99, #controlnet_conditioning_scale,
|
324 |
generator=generator,
|
@@ -385,7 +385,7 @@ def infer(
|
|
385 |
negative_prompt_embeds=negative_prompt_embeds,
|
386 |
image=cn_source_image,
|
387 |
control_image=control_image,
|
388 |
-
strength=strength_cn,
|
389 |
width=width,
|
390 |
height=height,
|
391 |
num_inference_steps=num_inference_steps,
|
@@ -405,7 +405,7 @@ def infer(
|
|
405 |
generator = torch.Generator(device).manual_seed(seed)
|
406 |
|
407 |
pipe_controlnet = StableDiffusionControlNetPipeline.from_pretrained(
|
408 |
-
model_default,
|
409 |
controlnet=controlnet,
|
410 |
torch_dtype=torch_dtype,
|
411 |
use_safetensors=True
|
@@ -422,7 +422,7 @@ def infer(
|
|
422 |
negative_prompt=negative_prompt,
|
423 |
image=cn_source_image,
|
424 |
control_image=control_image,
|
425 |
-
strength=strength_cn,
|
426 |
width=width,
|
427 |
height=height,
|
428 |
num_inference_steps=num_inference_steps,
|
@@ -445,7 +445,7 @@ def infer(
|
|
445 |
generator = torch.Generator(device).manual_seed(seed)
|
446 |
|
447 |
pipe_controlnet = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
|
448 |
-
model_default,
|
449 |
controlnet=controlnet,
|
450 |
torch_dtype=torch_dtype,
|
451 |
use_safetensors=True
|
@@ -458,12 +458,10 @@ def infer(
|
|
458 |
negative_prompt=negative_prompt,
|
459 |
image=control_image,
|
460 |
control_image=depth_map,
|
461 |
-
#strength=strength_cn, # Коэфф. зашумления, чем больше, тем больше меняется результирующее изображение относительно исходного
|
462 |
width=width,
|
463 |
height=height,
|
464 |
num_inference_steps=num_inference_steps,
|
465 |
guidance_scale=guidance_scale,
|
466 |
-
#controlnet_conditioning_scale=control_strength,
|
467 |
generator=generator
|
468 |
).images[0]
|
469 |
else:
|
@@ -712,10 +710,10 @@ with gr.Blocks(css=css) as demo:
|
|
712 |
inputs=[
|
713 |
prompt,
|
714 |
negative_prompt,
|
|
|
715 |
width,
|
716 |
height,
|
717 |
num_inference_steps,
|
718 |
-
model,
|
719 |
seed,
|
720 |
guidance_scale,
|
721 |
lora_scale,
|
|
|
22 |
WEIGHT_NAME_plus = "ip-adapter-plus_sd15.bin"
|
23 |
WEIGHT_NAME_face = "ip-adapter-full-face_sd15.bin"
|
24 |
|
|
|
25 |
model_default = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
26 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
27 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
28 |
|
29 |
def get_lora_sd_pipeline(
|
|
|
105 |
def infer(
|
106 |
prompt,
|
107 |
negative_prompt,
|
108 |
+
model=model_default,
|
109 |
width=512,
|
110 |
height=512,
|
111 |
num_inference_steps=50,
|
|
|
112 |
seed=4,
|
113 |
guidance_scale=7.5,
|
114 |
lora_scale=0.7,
|
|
|
188 |
image = pipe_ip_adapter(
|
189 |
prompt_embeds=prompt_embeds,
|
190 |
negative_prompt_embeds=negative_prompt_embeds,
|
191 |
+
image=ip_adapter_image,
|
192 |
+
ip_adapter_image=ip_source_image,
|
193 |
+
strength=strength_ip,
|
194 |
width=width,
|
195 |
height=height,
|
196 |
num_inference_steps=num_inference_steps,
|
|
|
276 |
image = pipe_ip_adapter(
|
277 |
prompt_embeds=prompt_embeds,
|
278 |
negative_prompt_embeds=negative_prompt_embeds,
|
279 |
+
image=ip_adapter_image,
|
280 |
+
ip_adapter_image=ip_source_image,
|
281 |
+
strength=strength_ip,
|
282 |
width=width,
|
283 |
height=height,
|
284 |
num_inference_steps=num_inference_steps,
|
|
|
318 |
height=height,
|
319 |
ip_adapter_image=ip_adapter_image,
|
320 |
num_inference_steps=num_inference_steps,
|
321 |
+
strength=strength_ip,
|
322 |
guidance_scale=guidance_scale,
|
323 |
controlnet_conditioning_scale=0.99, #controlnet_conditioning_scale,
|
324 |
generator=generator,
|
|
|
385 |
negative_prompt_embeds=negative_prompt_embeds,
|
386 |
image=cn_source_image,
|
387 |
control_image=control_image,
|
388 |
+
strength=strength_cn,
|
389 |
width=width,
|
390 |
height=height,
|
391 |
num_inference_steps=num_inference_steps,
|
|
|
405 |
generator = torch.Generator(device).manual_seed(seed)
|
406 |
|
407 |
pipe_controlnet = StableDiffusionControlNetPipeline.from_pretrained(
|
408 |
+
model_default,
|
409 |
controlnet=controlnet,
|
410 |
torch_dtype=torch_dtype,
|
411 |
use_safetensors=True
|
|
|
422 |
negative_prompt=negative_prompt,
|
423 |
image=cn_source_image,
|
424 |
control_image=control_image,
|
425 |
+
strength=strength_cn,
|
426 |
width=width,
|
427 |
height=height,
|
428 |
num_inference_steps=num_inference_steps,
|
|
|
445 |
generator = torch.Generator(device).manual_seed(seed)
|
446 |
|
447 |
pipe_controlnet = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
|
448 |
+
model_default,
|
449 |
controlnet=controlnet,
|
450 |
torch_dtype=torch_dtype,
|
451 |
use_safetensors=True
|
|
|
458 |
negative_prompt=negative_prompt,
|
459 |
image=control_image,
|
460 |
control_image=depth_map,
|
|
|
461 |
width=width,
|
462 |
height=height,
|
463 |
num_inference_steps=num_inference_steps,
|
464 |
guidance_scale=guidance_scale,
|
|
|
465 |
generator=generator
|
466 |
).images[0]
|
467 |
else:
|
|
|
710 |
inputs=[
|
711 |
prompt,
|
712 |
negative_prompt,
|
713 |
+
model,
|
714 |
width,
|
715 |
height,
|
716 |
num_inference_steps,
|
|
|
717 |
seed,
|
718 |
guidance_scale,
|
719 |
lora_scale,
|