Spaces: Running on Zero
Upload 4 files
- ledits/__init__.py +55 -0
- ledits/pipeline_leditspp_stable_diffusion.py +1499 -0
- ledits/pipeline_leditspp_stable_diffusion_xl.py +1854 -0
- ledits/pipeline_output.py +43 -0
ledits/__init__.py
ADDED
@@ -0,0 +1,55 @@
+from typing import TYPE_CHECKING
+
+from diffusers.utils import (
+    DIFFUSERS_SLOW_IMPORT,
+    OptionalDependencyNotAvailable,
+    _LazyModule,
+    get_objects_from_module,
+    is_torch_available,
+    is_transformers_available,
+)
+
+
+_dummy_objects = {}
+_import_structure = {}
+
+try:
+    if not (is_transformers_available() and is_torch_available()):
+        raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+    from diffusers.utils import dummy_torch_and_transformers_objects  # noqa F403
+
+    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
+else:
+    _import_structure["pipeline_leditspp_stable_diffusion"] = ["LEditsPPPipelineStableDiffusion"]
+    _import_structure["pipeline_leditspp_stable_diffusion_xl"] = ["LEditsPPPipelineStableDiffusionXL"]
+
+    _import_structure["pipeline_output"] = ["LEditsPPDiffusionPipelineOutput", "LEditsPPInversionPipelineOutput"]
+
+if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
+    try:
+        if not (is_transformers_available() and is_torch_available()):
+            raise OptionalDependencyNotAvailable()
+
+    except OptionalDependencyNotAvailable:
+        from diffusers.utils.dummy_torch_and_transformers_objects import *
+    else:
+        from .pipeline_leditspp_stable_diffusion import (
+            LEditsPPDiffusionPipelineOutput,
+            LEditsPPInversionPipelineOutput,
+            LEditsPPPipelineStableDiffusion,
+        )
+        from .pipeline_leditspp_stable_diffusion_xl import LEditsPPPipelineStableDiffusionXL
+
+else:
+    import sys
+
+    sys.modules[__name__] = _LazyModule(
+        __name__,
+        globals()["__file__"],
+        _import_structure,
+        module_spec=__spec__,
+    )
+
+    for name, value in _dummy_objects.items():
+        setattr(sys.modules[__name__], name, value)
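With this init in place, the package behaves like the `diffusers` sub-packages: the heavy pipeline classes are only resolved on first attribute access via `_LazyModule`. A minimal usage sketch (model id and device are illustrative assumptions, not part of this commit):

```py
>>> import torch
>>> from ledits import LEditsPPPipelineStableDiffusion  # resolved lazily on first access
>>> pipe = LEditsPPPipelineStableDiffusion.from_pretrained(
...     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
... ).to("cuda")
```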
ledits/pipeline_leditspp_stable_diffusion.py
ADDED
@@ -0,0 +1,1499 @@
+import inspect
+import math
+from itertools import repeat
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+from packaging import version
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+
+from diffusers.configuration_utils import FrozenDict
+from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
+from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from diffusers.models import AutoencoderKL, UNet2DConditionModel
+from diffusers.models.attention_processor import Attention, AttnProcessor
+from diffusers.models.lora import adjust_lora_scale_text_encoder
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+from diffusers.schedulers import DDIMScheduler, DPMSolverMultistepScheduler
+from diffusers.utils import (
+    USE_PEFT_BACKEND,
+    deprecate,
+    logging,
+    replace_example_docstring,
+    scale_lora_layers,
+    unscale_lora_layers,
+)
+from diffusers.utils.torch_utils import randn_tensor
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+from .pipeline_output import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+EXAMPLE_DOC_STRING = """
+    Examples:
+        ```py
+        >>> import PIL
+        >>> import requests
+        >>> import torch
+        >>> from io import BytesIO
+
+        >>> from diffusers import LEditsPPPipelineStableDiffusion
+        >>> from diffusers.utils import load_image
+
+        >>> pipe = LEditsPPPipelineStableDiffusion.from_pretrained(
+        ...     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
+        ... )
+        >>> pipe = pipe.to("cuda")
+
+        >>> img_url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/cherry_blossom.png"
+        >>> image = load_image(img_url).convert("RGB")
+
+        >>> _ = pipe.invert(image=image, num_inversion_steps=50, skip=0.1)
+
+        >>> edited_image = pipe(
+        ...     editing_prompt=["cherry blossom"], edit_guidance_scale=10.0, edit_threshold=0.75
+        ... ).images[0]
+        ```
+"""
+
+
+# Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionAttendAndExcitePipeline.AttentionStore
+class LeditsAttentionStore:
+    @staticmethod
+    def get_empty_store():
+        return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []}
+
+    def __call__(self, attn, is_cross: bool, place_in_unet: str, editing_prompts, PnP=False):
+        # attn.shape = batch_size * head_size, seq_len query, seq_len_key
+        if attn.shape[1] <= self.max_size:
+            bs = 1 + int(PnP) + editing_prompts
+            skip = 2 if PnP else 1  # skip PnP & unconditional
+            attn = torch.stack(attn.split(self.batch_size)).permute(1, 0, 2, 3)
+            source_batch_size = int(attn.shape[1] // bs)
+            self.forward(attn[:, skip * source_batch_size :], is_cross, place_in_unet)
+
+    def forward(self, attn, is_cross: bool, place_in_unet: str):
+        key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"
+
+        self.step_store[key].append(attn)
+
+    def between_steps(self, store_step=True):
+        if store_step:
+            if self.average:
+                if len(self.attention_store) == 0:
+                    self.attention_store = self.step_store
+                else:
+                    for key in self.attention_store:
+                        for i in range(len(self.attention_store[key])):
+                            self.attention_store[key][i] += self.step_store[key][i]
+            else:
+                if len(self.attention_store) == 0:
+                    self.attention_store = [self.step_store]
+                else:
+                    self.attention_store.append(self.step_store)
+
+        self.cur_step += 1
+        self.step_store = self.get_empty_store()
+
+    def get_attention(self, step: int):
+        if self.average:
+            attention = {
+                key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store
+            }
+        else:
+            assert step is not None
+            attention = self.attention_store[step]
+        return attention
+
+    def aggregate_attention(
+        self, attention_maps, prompts, res: Union[int, Tuple[int]], from_where: List[str], is_cross: bool, select: int
+    ):
+        out = [[] for x in range(self.batch_size)]
+        if isinstance(res, int):
+            num_pixels = res**2
+            resolution = (res, res)
+        else:
+            num_pixels = res[0] * res[1]
+            resolution = res[:2]
+
+        for location in from_where:
+            for bs_item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]:
+                for batch, item in enumerate(bs_item):
+                    if item.shape[1] == num_pixels:
+                        cross_maps = item.reshape(len(prompts), -1, *resolution, item.shape[-1])[select]
+                        out[batch].append(cross_maps)
+
+        out = torch.stack([torch.cat(x, dim=0) for x in out])
+        # average over heads
+        out = out.sum(1) / out.shape[1]
+        return out
+
+    def __init__(self, average: bool, batch_size=1, max_resolution=16, max_size: int = None):
+        self.step_store = self.get_empty_store()
+        self.attention_store = []
+        self.cur_step = 0
+        self.average = average
+        self.batch_size = batch_size
+        if max_size is None:
+            self.max_size = max_resolution**2
+        elif max_size is not None and max_resolution is None:
+            self.max_size = max_size
+        else:
+            raise ValueError("Only allowed to set one of max_resolution or max_size")
+
+
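To make the bookkeeping above concrete, here is a rough sketch of one store/aggregate round trip. The shapes are illustrative assumptions (batch of 1, 4 attention heads, one enabled editing prompt, 16x16 attention resolution); inside the pipeline the store is fed by the attention processor defined below:

```py
>>> import torch
>>> store = LeditsAttentionStore(average=True, batch_size=1)
>>> attn = torch.rand(8, 256, 77)  # (2 cfg-batches * 4 heads, 16*16 queries, 77 text tokens)
>>> store(attn, is_cross=True, place_in_unet="up", editing_prompts=1)  # keeps only the edit-prompt slice
>>> store.between_steps()
>>> maps = store.aggregate_attention(
...     attention_maps=store.get_attention(step=None),
...     prompts=[""], res=16, from_where=["up"], is_cross=True, select=0,
... )
>>> maps.shape  # (batch, 16, 16, 77), averaged over heads
torch.Size([1, 16, 16, 77])
```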
+# Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionAttendAndExcitePipeline.GaussianSmoothing
+class LeditsGaussianSmoothing:
+    def __init__(self, device):
+        kernel_size = [3, 3]
+        sigma = [0.5, 0.5]
+
+        # The gaussian kernel is the product of the gaussian function of each dimension.
+        kernel = 1
+        meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
+        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
+            mean = (size - 1) / 2
+            kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2))
+
+        # Make sure sum of values in gaussian kernel equals 1.
+        kernel = kernel / torch.sum(kernel)
+
+        # Reshape to depthwise convolutional weight
+        kernel = kernel.view(1, 1, *kernel.size())
+        kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1))
+
+        self.weight = kernel.to(device)
+
+    def __call__(self, input):
+        """
+        Apply gaussian filter to input.
+
+        Arguments:
+            input (torch.Tensor): Input to apply gaussian filter on.
+        Returns:
+            filtered (torch.Tensor): Filtered output.
+        """
+        return F.conv2d(input, weight=self.weight.to(input.dtype))
+
+
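The pipeline uses this 3x3 kernel to smooth cross-attention maps before thresholding them into masks. A quick sketch of a standalone call (the input shape is an illustrative assumption; reflect padding keeps the spatial size intact across the valid convolution):

```py
>>> import torch
>>> import torch.nn.functional as F
>>> smoothing = LeditsGaussianSmoothing("cpu")
>>> attn_map = torch.rand(1, 1, 16, 16)                     # one attention map as (N, C, H, W)
>>> padded = F.pad(attn_map, (1, 1, 1, 1), mode="reflect")  # pad so the 3x3 conv preserves 16x16
>>> smoothing(padded).shape
torch.Size([1, 1, 16, 16])
```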
+class LEDITSCrossAttnProcessor:
+    def __init__(self, attention_store, place_in_unet, pnp, editing_prompts):
+        self.attnstore = attention_store
+        self.place_in_unet = place_in_unet
+        self.editing_prompts = editing_prompts
+        self.pnp = pnp
+
+    def __call__(
+        self,
+        attn: Attention,
+        hidden_states,
+        encoder_hidden_states,
+        attention_mask=None,
+        temb=None,
+    ):
+        batch_size, sequence_length, _ = (
+            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+        )
+        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+
+        query = attn.to_q(hidden_states)
+
+        if encoder_hidden_states is None:
+            encoder_hidden_states = hidden_states
+        elif attn.norm_cross:
+            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+        key = attn.to_k(encoder_hidden_states)
+        value = attn.to_v(encoder_hidden_states)
+
+        query = attn.head_to_batch_dim(query)
+        key = attn.head_to_batch_dim(key)
+        value = attn.head_to_batch_dim(value)
+
+        attention_probs = attn.get_attention_scores(query, key, attention_mask)
+        self.attnstore(
+            attention_probs,
+            is_cross=True,
+            place_in_unet=self.place_in_unet,
+            editing_prompts=self.editing_prompts,
+            PnP=self.pnp,
+        )
+
+        hidden_states = torch.bmm(attention_probs, value)
+        hidden_states = attn.batch_to_head_dim(hidden_states)
+
+        # linear proj
+        hidden_states = attn.to_out[0](hidden_states)
+        # dropout
+        hidden_states = attn.to_out[1](hidden_states)
+
+        hidden_states = hidden_states / attn.rescale_output_factor
+        return hidden_states
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
+def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
+    """
+    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
+    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
+    """
+    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
+    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
+    # rescale the results from guidance (fixes overexposure)
+    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
+    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
+    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
+    return noise_cfg
+
+
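With `guidance_rescale=1.0` the rescaled prediction recovers exactly the per-sample standard deviation of the text-conditional prediction, which is the overexposure fix from the cited paper; intermediate values interpolate between the raw and the rescaled CFG output. A small sanity check (shapes are illustrative):

```py
>>> import torch
>>> noise_pred_text = torch.randn(2, 4, 64, 64)
>>> noise_cfg = noise_pred_text * 3.0  # CFG output with inflated magnitude
>>> out = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0)
>>> torch.allclose(out, noise_pred_text)
True
```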
+class LEditsPPPipelineStableDiffusion(
+    DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
+):
+    """
+    Pipeline for textual image editing using LEDits++ with Stable Diffusion.
+
+    This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionPipeline`]. Check the superclass
+    documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular
+    device, etc.).
+
+    Args:
+        vae ([`AutoencoderKL`]):
+            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
+        text_encoder ([`~transformers.CLIPTextModel`]):
+            Frozen text-encoder. Stable Diffusion uses the text portion of
+            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
+            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
+        tokenizer ([`~transformers.CLIPTokenizer`]):
+            Tokenizer of class
+            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
+        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
+        scheduler ([`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]):
+            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
+            [`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]. If any other scheduler is passed it will
+            automatically be set to [`DPMSolverMultistepScheduler`].
+        safety_checker ([`StableDiffusionSafetyChecker`]):
+            Classification module that estimates whether generated images could be considered offensive or harmful.
+            Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
+        feature_extractor ([`~transformers.CLIPImageProcessor`]):
+            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
+    """
+
+    model_cpu_offload_seq = "text_encoder->unet->vae"
+    _exclude_from_cpu_offload = ["safety_checker"]
+    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
+    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
+
+    def __init__(
+        self,
+        vae: AutoencoderKL,
+        text_encoder: CLIPTextModel,
+        tokenizer: CLIPTokenizer,
+        unet: UNet2DConditionModel,
+        scheduler: Union[DDIMScheduler, DPMSolverMultistepScheduler],
+        safety_checker: StableDiffusionSafetyChecker,
+        feature_extractor: CLIPImageProcessor,
+        requires_safety_checker: bool = True,
+    ):
+        super().__init__()
+
+        if not isinstance(scheduler, DDIMScheduler) and not isinstance(scheduler, DPMSolverMultistepScheduler):
+            scheduler = DPMSolverMultistepScheduler.from_config(
+                scheduler.config, algorithm_type="sde-dpmsolver++", solver_order=2
+            )
+            logger.warning(
+                "This pipeline only supports DDIMScheduler and DPMSolverMultistepScheduler. "
+                "The scheduler has been changed to DPMSolverMultistepScheduler."
+            )
+
+        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
+            deprecation_message = (
+                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
+                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
+                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
+                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
+                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
+                " file"
+            )
+            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
+            new_config = dict(scheduler.config)
+            new_config["steps_offset"] = 1
+            scheduler._internal_dict = FrozenDict(new_config)
+
+        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
+            deprecation_message = (
+                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
+                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
+                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
+                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
+                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
+            )
+            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
+            new_config = dict(scheduler.config)
+            new_config["clip_sample"] = False
+            scheduler._internal_dict = FrozenDict(new_config)
+
+        if safety_checker is None and requires_safety_checker:
+            logger.warning(
+                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
+                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
+                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
+                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
+                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
+                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
+            )
+
+        if safety_checker is not None and feature_extractor is None:
+            raise ValueError(
+                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
+                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
+            )
+
+        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
+            version.parse(unet.config._diffusers_version).base_version
+        ) < version.parse("0.9.0.dev0")
+        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
+        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
+            deprecation_message = (
+                "The configuration file of the unet has set the default `sample_size` to smaller than"
+                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
+                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
+                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
+                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
+                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
+                " in the config might lead to incorrect results in future versions. If you have downloaded this"
+                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
+                " the `unet/config.json` file"
+            )
+            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
+            new_config = dict(unet.config)
+            new_config["sample_size"] = 64
+            unet._internal_dict = FrozenDict(new_config)
+
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            tokenizer=tokenizer,
+            unet=unet,
+            scheduler=scheduler,
+            safety_checker=safety_checker,
+            feature_extractor=feature_extractor,
+        )
+        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+        self.register_to_config(requires_safety_checker=requires_safety_checker)
+
+        self.inversion_steps = None
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
+    def run_safety_checker(self, image, device, dtype):
+        if self.safety_checker is None:
+            has_nsfw_concept = None
+        else:
+            if torch.is_tensor(image):
+                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
+            else:
+                feature_extractor_input = self.image_processor.numpy_to_pil(image)
+            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
+            image, has_nsfw_concept = self.safety_checker(
+                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
+            )
+        return image, has_nsfw_concept
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
+    def decode_latents(self, latents):
+        deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
+        deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
+
+        latents = 1 / self.vae.config.scaling_factor * latents
+        image = self.vae.decode(latents, return_dict=False)[0]
+        image = (image / 2 + 0.5).clamp(0, 1)
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+        return image
+
+    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+    def prepare_extra_step_kwargs(self, eta, generator=None):
+        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+        # and should be between [0, 1]
+
+        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+        extra_step_kwargs = {}
+        if accepts_eta:
+            extra_step_kwargs["eta"] = eta
+
+        # check if the scheduler accepts generator
+        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
+        if accepts_generator:
+            extra_step_kwargs["generator"] = generator
+        return extra_step_kwargs
+
+    # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
+    def check_inputs(
+        self,
+        negative_prompt=None,
+        editing_prompt_embeddings=None,
+        negative_prompt_embeds=None,
+        callback_on_step_end_tensor_inputs=None,
+    ):
+        if callback_on_step_end_tensor_inputs is not None and not all(
+            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+        ):
+            raise ValueError(
+                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+            )
+        if negative_prompt is not None and negative_prompt_embeds is not None:
+            raise ValueError(
+                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+            )
+
+        if editing_prompt_embeddings is not None and negative_prompt_embeds is not None:
+            if editing_prompt_embeddings.shape != negative_prompt_embeds.shape:
+                raise ValueError(
+                    "`editing_prompt_embeddings` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+                    f" got: `editing_prompt_embeddings` {editing_prompt_embeddings.shape} != `negative_prompt_embeds`"
+                    f" {negative_prompt_embeds.shape}."
+                )
+
+    # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
+    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents):
+        # shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
+
+        # if latents.shape != shape:
+        #     raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
+
+        latents = latents.to(device)
+
+        # scale the initial noise by the standard deviation required by the scheduler
+        latents = latents * self.scheduler.init_noise_sigma
+        return latents
+
+    def prepare_unet(self, attention_store, PnP: bool = False):
+        attn_procs = {}
+        for name in self.unet.attn_processors.keys():
+            if name.startswith("mid_block"):
+                place_in_unet = "mid"
+            elif name.startswith("up_blocks"):
+                place_in_unet = "up"
+            elif name.startswith("down_blocks"):
+                place_in_unet = "down"
+            else:
+                continue
+
+            if "attn2" in name and place_in_unet != "mid":
+                attn_procs[name] = LEDITSCrossAttnProcessor(
+                    attention_store=attention_store,
+                    place_in_unet=place_in_unet,
+                    pnp=PnP,
+                    editing_prompts=self.enabled_editing_prompts,
+                )
+            else:
+                attn_procs[name] = AttnProcessor()
+
+        self.unet.set_attn_processor(attn_procs)
+
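Only cross-attention (`attn2`) layers outside the mid block receive the recording processor; everything else falls back to the default `AttnProcessor`. A rough sketch of the wiring (`enabled_editing_prompts` is normally set inside `__call__`, so it is assigned by hand here purely for illustration):

```py
>>> pipe.enabled_editing_prompts = 1  # illustrative; __call__ sets this from `editing_prompt`
>>> store = LeditsAttentionStore(average=True, batch_size=1)
>>> pipe.prepare_unet(store, PnP=False)
>>> sum(
...     isinstance(proc, LEDITSCrossAttnProcessor) for proc in pipe.unet.attn_processors.values()
... )  # number of cross-attention layers that now feed the store
```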
+    def encode_prompt(
+        self,
+        device,
+        num_images_per_prompt,
+        enable_edit_guidance,
+        negative_prompt=None,
+        editing_prompt=None,
+        negative_prompt_embeds: Optional[torch.Tensor] = None,
+        editing_prompt_embeds: Optional[torch.Tensor] = None,
+        lora_scale: Optional[float] = None,
+        clip_skip: Optional[int] = None,
+    ):
+        r"""
+        Encodes the prompt into text encoder hidden states.
+
+        Args:
+            device: (`torch.device`):
+                torch device
+            num_images_per_prompt (`int`):
+                number of images that should be generated per prompt
+            enable_edit_guidance (`bool`):
+                whether to perform any editing or reconstruct the input image instead
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the image generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            editing_prompt (`str` or `List[str]`, *optional*):
+                Editing prompt(s) to be encoded. If not defined, one has to pass `editing_prompt_embeds` instead.
+            editing_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+                provided, text embeddings will be generated from `prompt` input argument.
+            negative_prompt_embeds (`torch.Tensor`, *optional*):
+                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+                argument.
+            lora_scale (`float`, *optional*):
+                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
+            clip_skip (`int`, *optional*):
+                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                the output of the pre-final layer will be used for computing the prompt embeddings.
+        """
+        # set lora scale so that monkey patched LoRA
+        # function of text encoder can correctly access it
+        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
+            self._lora_scale = lora_scale
+
+            # dynamically adjust the LoRA scale
+            if not USE_PEFT_BACKEND:
+                adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
+            else:
+                scale_lora_layers(self.text_encoder, lora_scale)
+
+        batch_size = self.batch_size
+        num_edit_tokens = None
+
+        if negative_prompt_embeds is None:
+            uncond_tokens: List[str]
+            if negative_prompt is None:
+                uncond_tokens = [""] * batch_size
+            elif isinstance(negative_prompt, str):
+                uncond_tokens = [negative_prompt]
+            elif batch_size != len(negative_prompt):
+                raise ValueError(
+                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but expected"
+                    f" {batch_size} based on the input images. Please make sure that passed `negative_prompt` matches"
+                    " the batch size of `prompt`."
+                )
+            else:
+                uncond_tokens = negative_prompt
+
+            # textual inversion: process multi-vector tokens if necessary
+            if isinstance(self, TextualInversionLoaderMixin):
+                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
+
+            uncond_input = self.tokenizer(
+                uncond_tokens,
+                padding="max_length",
+                max_length=self.tokenizer.model_max_length,
+                truncation=True,
+                return_tensors="pt",
+            )
+
+            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
+                attention_mask = uncond_input.attention_mask.to(device)
+            else:
+                attention_mask = None
+
+            negative_prompt_embeds = self.text_encoder(
+                uncond_input.input_ids.to(device),
+                attention_mask=attention_mask,
+            )
+            negative_prompt_embeds = negative_prompt_embeds[0]
+
+        if self.text_encoder is not None:
+            prompt_embeds_dtype = self.text_encoder.dtype
+        elif self.unet is not None:
+            prompt_embeds_dtype = self.unet.dtype
+        else:
+            prompt_embeds_dtype = negative_prompt_embeds.dtype
+
+        negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
+
+        if enable_edit_guidance:
+            if editing_prompt_embeds is None:
+                # textual inversion: process multi-vector tokens if necessary
+                # if isinstance(self, TextualInversionLoaderMixin):
+                #     prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
+                if isinstance(editing_prompt, str):
+                    editing_prompt = [editing_prompt]
+
+                max_length = negative_prompt_embeds.shape[1]
+                text_inputs = self.tokenizer(
+                    [x for item in editing_prompt for x in repeat(item, batch_size)],
+                    padding="max_length",
+                    max_length=max_length,
+                    truncation=True,
+                    return_tensors="pt",
+                    return_length=True,
+                )
+
+                num_edit_tokens = text_inputs.length - 2  # not counting startoftext and endoftext
+                text_input_ids = text_inputs.input_ids
+                untruncated_ids = self.tokenizer(
+                    [x for item in editing_prompt for x in repeat(item, batch_size)],
+                    padding="longest",
+                    return_tensors="pt",
+                ).input_ids
+
+                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
+                    text_input_ids, untruncated_ids
+                ):
+                    removed_text = self.tokenizer.batch_decode(
+                        untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
+                    )
+                    logger.warning(
+                        "The following part of your input was truncated because CLIP can only handle sequences up to"
+                        f" {self.tokenizer.model_max_length} tokens: {removed_text}"
+                    )
+
+                if (
+                    hasattr(self.text_encoder.config, "use_attention_mask")
+                    and self.text_encoder.config.use_attention_mask
+                ):
+                    attention_mask = text_inputs.attention_mask.to(device)
+                else:
+                    attention_mask = None
+
+                if clip_skip is None:
+                    editing_prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
+                    editing_prompt_embeds = editing_prompt_embeds[0]
+                else:
+                    editing_prompt_embeds = self.text_encoder(
+                        text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
+                    )
+                    # Access the `hidden_states` first, that contains a tuple of
+                    # all the hidden states from the encoder layers. Then index into
+                    # the tuple to access the hidden states from the desired layer.
+                    editing_prompt_embeds = editing_prompt_embeds[-1][-(clip_skip + 1)]
+                    # We also need to apply the final LayerNorm here to not mess with the
+                    # representations. The `last_hidden_states` that we typically use for
+                    # obtaining the final prompt representations passes through the LayerNorm
+                    # layer.
+                    editing_prompt_embeds = self.text_encoder.text_model.final_layer_norm(editing_prompt_embeds)
+
+            editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device)
+
+            bs_embed_edit, seq_len, _ = editing_prompt_embeds.shape
+            editing_prompt_embeds = editing_prompt_embeds.to(dtype=negative_prompt_embeds.dtype, device=device)
+            editing_prompt_embeds = editing_prompt_embeds.repeat(1, num_images_per_prompt, 1)
+            editing_prompt_embeds = editing_prompt_embeds.view(bs_embed_edit * num_images_per_prompt, seq_len, -1)
+
+        # get unconditional embeddings for classifier free guidance
+
+        # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
+        seq_len = negative_prompt_embeds.shape[1]
+
+        negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
+
+        negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
+        negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+
+        if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
+            # Retrieve the original scale by scaling back the LoRA layers
+            unscale_lora_layers(self.text_encoder, lora_scale)
+
+        return editing_prompt_embeds, negative_prompt_embeds, num_edit_tokens
+
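A sketch of calling `encode_prompt` directly (`self.batch_size` is normally set by `invert`, so it is assigned manually here; the prompt, device, and expected shapes are illustrative assumptions):

```py
>>> pipe.batch_size = 1  # normally set during `invert`
>>> edit_embeds, uncond_embeds, num_edit_tokens = pipe.encode_prompt(
...     device="cuda",
...     num_images_per_prompt=1,
...     enable_edit_guidance=True,
...     editing_prompt=["cherry blossom"],
... )
>>> edit_embeds.shape, uncond_embeds.shape  # e.g. (1, 77, 768) each for SD v1.5
```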
| 685 |
+
@property
|
| 686 |
+
def guidance_rescale(self):
|
| 687 |
+
return self._guidance_rescale
|
| 688 |
+
|
| 689 |
+
@property
|
| 690 |
+
def clip_skip(self):
|
| 691 |
+
return self._clip_skip
|
| 692 |
+
|
| 693 |
+
@property
|
| 694 |
+
def cross_attention_kwargs(self):
|
| 695 |
+
return self._cross_attention_kwargs
|
| 696 |
+
|
| 697 |
+
@torch.no_grad()
|
| 698 |
+
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
| 699 |
+
def __call__(
|
| 700 |
+
self,
|
| 701 |
+
negative_prompt: Optional[Union[str, List[str]]] = None,
|
| 702 |
+
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
| 703 |
+
output_type: Optional[str] = "pil",
|
| 704 |
+
return_dict: bool = True,
|
| 705 |
+
editing_prompt: Optional[Union[str, List[str]]] = None,
|
| 706 |
+
editing_prompt_embeds: Optional[torch.Tensor] = None,
|
| 707 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
| 708 |
+
reverse_editing_direction: Optional[Union[bool, List[bool]]] = False,
|
| 709 |
+
edit_guidance_scale: Optional[Union[float, List[float]]] = 5,
|
| 710 |
+
edit_warmup_steps: Optional[Union[int, List[int]]] = 0,
|
| 711 |
+
edit_cooldown_steps: Optional[Union[int, List[int]]] = None,
|
| 712 |
+
edit_threshold: Optional[Union[float, List[float]]] = 0.9,
|
| 713 |
+
user_mask: Optional[torch.Tensor] = None,
|
| 714 |
+
sem_guidance: Optional[List[torch.Tensor]] = None,
|
| 715 |
+
use_cross_attn_mask: bool = False,
|
| 716 |
+
use_intersect_mask: bool = True,
|
| 717 |
+
attn_store_steps: Optional[List[int]] = [],
|
| 718 |
+
store_averaged_over_steps: bool = True,
|
| 719 |
+
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
| 720 |
+
guidance_rescale: float = 0.0,
|
| 721 |
+
clip_skip: Optional[int] = None,
|
| 722 |
+
callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
|
| 723 |
+
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
|
| 724 |
+
**kwargs,
|
| 725 |
+
):
|
| 726 |
+
r"""
|
| 727 |
+
The call function to the pipeline for editing. The
|
| 728 |
+
[`~pipelines.ledits_pp.LEditsPPPipelineStableDiffusion.invert`] method has to be called beforehand. Edits will
|
| 729 |
+
always be performed for the last inverted image(s).
|
| 730 |
+
|
| 731 |
+
Args:
|
| 732 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 733 |
+
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
|
| 734 |
+
if `guidance_scale` is less than `1`).
|
| 735 |
+
generator (`torch.Generator`, *optional*):
|
| 736 |
+
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
| 737 |
+
to make generation deterministic.
|
| 738 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 739 |
+
The output format of the generate image. Choose between
|
| 740 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 741 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 742 |
+
Whether or not to return a [`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] instead of a plain
|
| 743 |
+
tuple.
|
| 744 |
+
editing_prompt (`str` or `List[str]`, *optional*):
|
| 745 |
+
The prompt or prompts to guide the image generation. The image is reconstructed by setting
|
| 746 |
+
`editing_prompt = None`. Guidance direction of prompt should be specified via
|
| 747 |
+
`reverse_editing_direction`.
|
| 748 |
+
editing_prompt_embeds (`torch.Tensor>`, *optional*):
|
| 749 |
+
Pre-computed embeddings to use for guiding the image generation. Guidance direction of embedding should
|
| 750 |
+
be specified via `reverse_editing_direction`.
|
| 751 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 752 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
|
| 753 |
+
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
|
| 754 |
+
reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`):
|
| 755 |
+
Whether the corresponding prompt in `editing_prompt` should be increased or decreased.
|
| 756 |
+
edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5):
|
| 757 |
+
Guidance scale for guiding the image generation. If provided as list values should correspond to
|
| 758 |
+
`editing_prompt`. `edit_guidance_scale` is defined as `s_e` of equation 12 of [LEDITS++
|
| 759 |
+
Paper](https://arxiv.org/abs/2301.12247).
|
| 760 |
+
edit_warmup_steps (`float` or `List[float]`, *optional*, defaults to 10):
|
| 761 |
+
Number of diffusion steps (for each prompt) for which guidance will not be applied.
|
| 762 |
+
edit_cooldown_steps (`float` or `List[float]`, *optional*, defaults to `None`):
|
| 763 |
+
Number of diffusion steps (for each prompt) after which guidance will no longer be applied.
|
| 764 |
+
edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9):
|
| 765 |
+
Masking threshold of guidance. Threshold should be proportional to the image region that is modified.
|
| 766 |
+
'edit_threshold' is defined as 'λ' of equation 12 of [LEDITS++
|
| 767 |
+
Paper](https://arxiv.org/abs/2301.12247).
|
| 768 |
+
user_mask (`torch.Tensor`, *optional*):
|
| 769 |
+
User-provided mask for even better control over the editing process. This is helpful when LEDITS++'s
|
| 770 |
+
implicit masks do not meet user preferences.
|
| 771 |
+
sem_guidance (`List[torch.Tensor]`, *optional*):
|
| 772 |
+
List of pre-generated guidance vectors to be applied at generation. Length of the list has to
|
| 773 |
+
correspond to `num_inference_steps`.
|
| 774 |
+
use_cross_attn_mask (`bool`, defaults to `False`):
|
| 775 |
+
Whether cross-attention masks are used. Cross-attention masks are always used when use_intersect_mask
|
| 776 |
+
is set to true. Cross-attention masks are defined as 'M^1' of equation 12 of [LEDITS++
|
| 777 |
+
paper](https://arxiv.org/pdf/2311.16711.pdf).
|
| 778 |
+
use_intersect_mask (`bool`, defaults to `True`):
|
| 779 |
+
Whether the masking term is calculated as intersection of cross-attention masks and masks derived from
|
| 780 |
+
the noise estimate. Cross-attention mask are defined as 'M^1' and masks derived from the noise estimate
|
| 781 |
+
are defined as 'M^2' of equation 12 of [LEDITS++ paper](https://arxiv.org/pdf/2311.16711.pdf).
|
| 782 |
+
attn_store_steps (`List[int]`, *optional*):
|
| 783 |
+
Steps for which the attention maps are stored in the AttentionStore. Just for visualization purposes.
|
| 784 |
+
store_averaged_over_steps (`bool`, defaults to `True`):
|
| 785 |
+
Whether the attention maps for the 'attn_store_steps' are stored averaged over the diffusion steps. If
|
| 786 |
+
False, attention maps for each step are stores separately. Just for visualization purposes.
|
| 787 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 788 |
+
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
|
| 789 |
+
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 790 |
+
guidance_rescale (`float`, *optional*, defaults to 0.0):
|
| 791 |
+
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
|
| 792 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
|
| 793 |
+
using zero terminal SNR.
|
| 794 |
+
clip_skip (`int`, *optional*):
|
| 795 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 796 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 797 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 798 |
+
A function that calls at the end of each denoising steps during the inference. The function is called
|
| 799 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 800 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 801 |
+
`callback_on_step_end_tensor_inputs`.
|
| 802 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 803 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 804 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 805 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 806 |
+
|
| 807 |
+
Examples:
|
| 808 |
+
|
| 809 |
+
Returns:
|
| 810 |
+
[`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] or `tuple`:
|
| 811 |
+
[`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When
|
| 812 |
+
returning a tuple, the first element is a list with the generated images, and the second element is a list
|
| 813 |
+
of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw)
|
| 814 |
+
content, according to the `safety_checker`.
|
| 815 |
+
"""

        if self.inversion_steps is None:
            raise ValueError(
                "You need to invert an input image first before calling the pipeline. The `invert` method has to be"
                " called beforehand. Edits will always be performed for the last inverted image(s)."
            )

        eta = self.eta
        num_images_per_prompt = 1
        latents = self.init_latents

        zs = self.zs
        self.scheduler.set_timesteps(len(self.scheduler.timesteps))

        if use_intersect_mask:
            use_cross_attn_mask = True

        if use_cross_attn_mask:
            self.smoothing = LeditsGaussianSmoothing(self.device)

        if user_mask is not None:
            user_mask = user_mask.to(self.device)

        org_prompt = ""

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            negative_prompt,
            editing_prompt_embeds,
            negative_prompt_embeds,
            callback_on_step_end_tensor_inputs,
        )

        self._guidance_rescale = guidance_rescale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs

        # 2. Define call parameters
        batch_size = self.batch_size

        if editing_prompt:
            enable_edit_guidance = True
            if isinstance(editing_prompt, str):
                editing_prompt = [editing_prompt]
            self.enabled_editing_prompts = len(editing_prompt)
        elif editing_prompt_embeds is not None:
            enable_edit_guidance = True
            self.enabled_editing_prompts = editing_prompt_embeds.shape[0]
        else:
            self.enabled_editing_prompts = 0
            enable_edit_guidance = False

        # 3. Encode input prompt
        lora_scale = (
            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
        )

        edit_concepts, uncond_embeddings, num_edit_tokens = self.encode_prompt(
            editing_prompt=editing_prompt,
            device=self.device,
            num_images_per_prompt=num_images_per_prompt,
            enable_edit_guidance=enable_edit_guidance,
            negative_prompt=negative_prompt,
            editing_prompt_embeds=editing_prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=lora_scale,
            clip_skip=self.clip_skip,
        )

        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        if enable_edit_guidance:
            text_embeddings = torch.cat([uncond_embeddings, edit_concepts])
            self.text_cross_attention_maps = [editing_prompt] if isinstance(editing_prompt, str) else editing_prompt
        else:
            text_embeddings = torch.cat([uncond_embeddings])

        # 4. Prepare timesteps
        timesteps = self.inversion_steps
        t_to_idx = {int(v): k for k, v in enumerate(timesteps[-zs.shape[0] :])}

        if use_cross_attn_mask:
            self.attention_store = LeditsAttentionStore(
                average=store_averaged_over_steps,
                batch_size=batch_size,
                max_size=(latents.shape[-2] / 4.0) * (latents.shape[-1] / 4.0),
                max_resolution=None,
            )
            self.prepare_unet(self.attention_store, PnP=False)
            resolution = latents.shape[-2:]
            att_res = (int(resolution[0] / 4), int(resolution[1] / 4))

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            None,
            None,
            text_embeddings.dtype,
            self.device,
            latents,
        )

        # 6. Prepare extra step kwargs.
        extra_step_kwargs = self.prepare_extra_step_kwargs(eta)

        self.sem_guidance = None
        self.activation_mask = None

        # 7. Denoising loop
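        # Each step below composes a semantic guidance term per editing concept from the difference between that
        # concept's noise estimate and the unconditional one, masks it (cf. equation 12 of the LEDITS++ paper),
        # and then takes an edit-friendly scheduler step that re-injects the stored noise maps `zs`.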
        num_warmup_steps = 0
        with self.progress_bar(total=len(timesteps)) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                if enable_edit_guidance:
                    latent_model_input = torch.cat([latents] * (1 + self.enabled_editing_prompts))
                else:
                    latent_model_input = latents

                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                text_embed_input = text_embeddings

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embed_input).sample

                noise_pred_out = noise_pred.chunk(1 + self.enabled_editing_prompts)  # [b, 4, 64, 64]
                noise_pred_uncond = noise_pred_out[0]
                noise_pred_edit_concepts = noise_pred_out[1:]

                noise_guidance_edit = torch.zeros(
                    noise_pred_uncond.shape,
                    device=self.device,
                    dtype=noise_pred_uncond.dtype,
                )

                if sem_guidance is not None and len(sem_guidance) > i:
                    noise_guidance_edit += sem_guidance[i].to(self.device)

                elif enable_edit_guidance:
                    if self.activation_mask is None:
                        self.activation_mask = torch.zeros(
                            (len(timesteps), len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape)
                        )

                    if self.sem_guidance is None:
                        self.sem_guidance = torch.zeros((len(timesteps), *noise_pred_uncond.shape))

                    for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts):
                        if isinstance(edit_warmup_steps, list):
                            edit_warmup_steps_c = edit_warmup_steps[c]
                        else:
                            edit_warmup_steps_c = edit_warmup_steps
                        if i < edit_warmup_steps_c:
                            continue

                        if isinstance(edit_guidance_scale, list):
                            edit_guidance_scale_c = edit_guidance_scale[c]
                        else:
                            edit_guidance_scale_c = edit_guidance_scale

                        if isinstance(edit_threshold, list):
                            edit_threshold_c = edit_threshold[c]
                        else:
                            edit_threshold_c = edit_threshold

                        if isinstance(reverse_editing_direction, list):
                            reverse_editing_direction_c = reverse_editing_direction[c]
                        else:
                            reverse_editing_direction_c = reverse_editing_direction

                        if isinstance(edit_cooldown_steps, list):
                            edit_cooldown_steps_c = edit_cooldown_steps[c]
                        elif edit_cooldown_steps is None:
                            edit_cooldown_steps_c = i + 1
                        else:
                            edit_cooldown_steps_c = edit_cooldown_steps

                        if i >= edit_cooldown_steps_c:
                            continue

                        noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond

                        if reverse_editing_direction_c:
                            noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1

                        noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c

                        if user_mask is not None:
                            noise_guidance_edit_tmp = noise_guidance_edit_tmp * user_mask

                        if use_cross_attn_mask:
                            out = self.attention_store.aggregate_attention(
                                attention_maps=self.attention_store.step_store,
                                prompts=self.text_cross_attention_maps,
                                res=att_res,
                                from_where=["up", "down"],
                                is_cross=True,
                                select=self.text_cross_attention_maps.index(editing_prompt[c]),
                            )
                            attn_map = out[:, :, :, 1 : 1 + num_edit_tokens[c]]  # 0 -> startoftext

                            if attn_map.shape[3] != num_edit_tokens[c]:
                                raise ValueError(
                                    f"Incorrect shape of attention_map. Expected size {num_edit_tokens[c]}, "
                                    f"but found {attn_map.shape[3]}!"
                                )

                            # sum over all edit tokens
                            attn_map = torch.sum(attn_map, dim=3)

                            # gaussian smoothing
                            attn_map = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode="reflect")
                            attn_map = self.smoothing(attn_map).squeeze(1)

                            # torch.quantile expects float32
                            if attn_map.dtype == torch.float32:
                                tmp = torch.quantile(attn_map.flatten(start_dim=1), edit_threshold_c, dim=1)
                            else:
                                tmp = torch.quantile(
                                    attn_map.flatten(start_dim=1).to(torch.float32), edit_threshold_c, dim=1
                                ).to(attn_map.dtype)
                            attn_mask = torch.where(
                                attn_map >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0
                            )

                            # resolution must match latent space dimension
                            attn_mask = F.interpolate(
                                attn_mask.unsqueeze(1),
                                noise_guidance_edit_tmp.shape[-2:],  # e.g. (64, 64)
                            ).repeat(1, 4, 1, 1)
                            self.activation_mask[i, c] = attn_mask.detach().cpu()
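                            # Keep the cross-attention mask M^1 on its own only when intersection with the
                            # noise-estimate mask M^2 (computed in the branch below) is disabled.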
                            if not use_intersect_mask:
                                noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask

                        if use_intersect_mask:
                            if t <= 800:
                                noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp)
                                noise_guidance_edit_tmp_quantile = torch.sum(
                                    noise_guidance_edit_tmp_quantile, dim=1, keepdim=True
                                )
                                noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(
                                    1, self.unet.config.in_channels, 1, 1
                                )

                                # torch.quantile expects float32
                                if noise_guidance_edit_tmp_quantile.dtype == torch.float32:
                                    tmp = torch.quantile(
                                        noise_guidance_edit_tmp_quantile.flatten(start_dim=2),
                                        edit_threshold_c,
                                        dim=2,
                                        keepdim=False,
                                    )
                                else:
                                    tmp = torch.quantile(
                                        noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32),
                                        edit_threshold_c,
                                        dim=2,
                                        keepdim=False,
                                    ).to(noise_guidance_edit_tmp_quantile.dtype)

                                intersect_mask = (
                                    torch.where(
                                        noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None],
                                        torch.ones_like(noise_guidance_edit_tmp),
                                        torch.zeros_like(noise_guidance_edit_tmp),
                                    )
                                    * attn_mask
                                )

                                self.activation_mask[i, c] = intersect_mask.detach().cpu()

                                noise_guidance_edit_tmp = noise_guidance_edit_tmp * intersect_mask

                            else:
                                # at high timesteps (t > 800) only the attention mask is applied
                                noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask

                        elif not use_cross_attn_mask:
                            # calculate quantile
                            noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp)
                            noise_guidance_edit_tmp_quantile = torch.sum(
                                noise_guidance_edit_tmp_quantile, dim=1, keepdim=True
                            )
                            noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, 4, 1, 1)

                            # torch.quantile expects float32
                            if noise_guidance_edit_tmp_quantile.dtype == torch.float32:
                                tmp = torch.quantile(
                                    noise_guidance_edit_tmp_quantile.flatten(start_dim=2),
                                    edit_threshold_c,
                                    dim=2,
                                    keepdim=False,
                                )
                            else:
                                tmp = torch.quantile(
                                    noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32),
                                    edit_threshold_c,
                                    dim=2,
                                    keepdim=False,
                                ).to(noise_guidance_edit_tmp_quantile.dtype)

                            self.activation_mask[i, c] = (
                                torch.where(
                                    noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None],
                                    torch.ones_like(noise_guidance_edit_tmp),
                                    torch.zeros_like(noise_guidance_edit_tmp),
                                )
                                .detach()
                                .cpu()
                            )

                            noise_guidance_edit_tmp = torch.where(
                                noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None],
                                noise_guidance_edit_tmp,
                                torch.zeros_like(noise_guidance_edit_tmp),
                            )

                        noise_guidance_edit += noise_guidance_edit_tmp

                    self.sem_guidance[i] = noise_guidance_edit.detach().cpu()

                noise_pred = noise_pred_uncond + noise_guidance_edit

                if enable_edit_guidance and self.guidance_rescale > 0.0:
                    # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
                    # `noise_pred_edit_concepts` is a tuple of tensors, so stack before averaging.
                    noise_pred = rescale_noise_cfg(
                        noise_pred,
                        torch.stack(noise_pred_edit_concepts).mean(dim=0, keepdim=False),
                        guidance_rescale=self.guidance_rescale,
                    )

                idx = t_to_idx[int(t)]
                latents = self.scheduler.step(
                    noise_pred, t, latents, variance_noise=zs[idx], **extra_step_kwargs
                ).prev_sample

                # step callback
                if use_cross_attn_mask:
                    store_step = i in attn_store_steps
                    self.attention_store.between_steps(store_step)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # update the progress bar
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

        # 8. Post-processing
        if not output_type == "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
                0
            ]
            image, has_nsfw_concept = self.run_safety_checker(image, self.device, text_embeddings.dtype)
        else:
            image = latents
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image, has_nsfw_concept)

        return LEditsPPDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)

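    # Inversion proceeds in two phases: (1) noise the input latents to a full sequence of intermediate
    # latents `xts`, then (2) walk the timesteps and recover, per step, the noise map `z` that makes the
    # scheduler step reproduce the next latent exactly (see `compute_noise` at the bottom of this module).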
    @torch.no_grad()
    def invert(
        self,
        image: PipelineImageInput,
        source_prompt: str = "",
        source_guidance_scale: float = 3.5,
        num_inversion_steps: int = 30,
        skip: float = 0.15,
        generator: Optional[torch.Generator] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        clip_skip: Optional[int] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        resize_mode: Optional[str] = "default",
        crops_coords: Optional[Tuple[int, int, int, int]] = None,
    ):
        r"""
        The function to invert an input image with the pipeline, as described by the [LEDITS++
        paper](https://arxiv.org/pdf/2311.16711.pdf). If the scheduler is set to [`~schedulers.DDIMScheduler`], the
        inversion proposed by [edit-friendly DDPM](https://arxiv.org/abs/2304.06140) will be performed instead.

        Args:
            image (`PipelineImageInput`):
                Input for the image(s) that are to be edited. Multiple input images have to default to the same
                aspect ratio.
            source_prompt (`str`, defaults to `""`):
                Prompt describing the input image that will be used for guidance during inversion. Guidance is
                disabled if the `source_prompt` is `""`.
            source_guidance_scale (`float`, defaults to `3.5`):
                Strength of guidance during inversion.
            num_inversion_steps (`int`, defaults to `30`):
                Number of total performed inversion steps after discarding the initial `skip` steps.
            skip (`float`, defaults to `0.15`):
                Portion of initial steps that will be ignored for inversion and subsequent generation. Lower values
                will lead to stronger changes to the input image. `skip` has to be between `0` and `1`.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                inversion deterministic.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that, if specified, is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means
                that the output of the pre-final layer will be used for computing the prompt embeddings.
            height (`int`, *optional*, defaults to `None`):
                The height of the preprocessed image. If `None`, will use `get_default_height_width()` to get the
                default height.
            width (`int`, *optional*, defaults to `None`):
                The width of the preprocessed image. If `None`, will use `get_default_height_width()` to get the
                default width.
            resize_mode (`str`, *optional*, defaults to `default`):
                The resize mode, can be one of `default` or `fill`. If `default`, will resize the image to fit within
                the specified width and height, and it may not maintain the original aspect ratio. If `fill`, will
                resize the image to fit within the specified width and height, maintaining the aspect ratio, and then
                center the image within the dimensions, filling empty space with data from the image. If `crop`, will
                resize the image to fit within the specified width and height, maintaining the aspect ratio, and then
                center the image within the dimensions, cropping the excess. Note that resize modes `fill` and `crop`
                are only supported for PIL image input.
            crops_coords (`Tuple[int, int, int, int]`, *optional*, defaults to `None`):
                The crop coordinates for each image in the batch. If `None`, will not crop the image.

        Returns:
            [`~pipelines.ledits_pp.LEditsPPInversionPipelineOutput`]: Output will contain the resized input image(s)
            and respective VAE reconstruction(s).
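
        Examples:
            A minimal sketch (illustrative; `image` is any PIL image, and the prompt and step count are assumptions
            rather than recommended settings):

            ```py
            >>> out = pipe.invert(image=image, source_prompt="a photo of a tennis ball", num_inversion_steps=50)
            >>> out.vae_reconstruction_images[0]  # sanity-check the VAE round-trip
            ```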
"""
|
| 1262 |
+
# Reset attn processor, we do not want to store attn maps during inversion
|
| 1263 |
+
self.unet.set_attn_processor(AttnProcessor())
|
| 1264 |
+
|
| 1265 |
+
self.eta = 1.0
|
| 1266 |
+
|
| 1267 |
+
self.scheduler.config.timestep_spacing = "leading"
|
| 1268 |
+
self.scheduler.set_timesteps(int(num_inversion_steps * (1 + skip)))
|
| 1269 |
+
self.inversion_steps = self.scheduler.timesteps[-num_inversion_steps:]
|
| 1270 |
+
timesteps = self.inversion_steps
|
| 1271 |
+
|
| 1272 |
+
# 1. encode image
|
| 1273 |
+
x0, resized = self.encode_image(
|
| 1274 |
+
image,
|
| 1275 |
+
dtype=self.text_encoder.dtype,
|
| 1276 |
+
height=height,
|
| 1277 |
+
width=width,
|
| 1278 |
+
resize_mode=resize_mode,
|
| 1279 |
+
crops_coords=crops_coords,
|
| 1280 |
+
)
|
| 1281 |
+
self.batch_size = x0.shape[0]
|
| 1282 |
+
|
| 1283 |
+
# autoencoder reconstruction
|
| 1284 |
+
image_rec = self.vae.decode(x0 / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0]
|
| 1285 |
+
image_rec = self.image_processor.postprocess(image_rec, output_type="pil")
|
| 1286 |
+
|
| 1287 |
+
# 2. get embeddings
|
| 1288 |
+
do_classifier_free_guidance = source_guidance_scale > 1.0
|
| 1289 |
+
|
| 1290 |
+
lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
|
| 1291 |
+
|
| 1292 |
+
uncond_embedding, text_embeddings, _ = self.encode_prompt(
|
| 1293 |
+
num_images_per_prompt=1,
|
| 1294 |
+
device=self.device,
|
| 1295 |
+
negative_prompt=None,
|
| 1296 |
+
enable_edit_guidance=do_classifier_free_guidance,
|
| 1297 |
+
editing_prompt=source_prompt,
|
| 1298 |
+
lora_scale=lora_scale,
|
| 1299 |
+
clip_skip=clip_skip,
|
| 1300 |
+
)
|
| 1301 |
+
|
| 1302 |
+
# 3. find zs and xts
|
| 1303 |
+
variance_noise_shape = (num_inversion_steps, *x0.shape)
|
| 1304 |
+
|
| 1305 |
+
# intermediate latents
|
| 1306 |
+
t_to_idx = {int(v): k for k, v in enumerate(timesteps)}
|
| 1307 |
+
xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype)
|
| 1308 |
+
|
| 1309 |
+
for t in reversed(timesteps):
|
| 1310 |
+
idx = num_inversion_steps - t_to_idx[int(t)] - 1
|
| 1311 |
+
noise = randn_tensor(shape=x0.shape, generator=generator, device=self.device, dtype=x0.dtype)
|
| 1312 |
+
xts[idx] = self.scheduler.add_noise(x0, noise, torch.Tensor([t]))
|
| 1313 |
+
xts = torch.cat([x0.unsqueeze(0), xts], dim=0)
|
| 1314 |
+
|
| 1315 |
+
self.scheduler.set_timesteps(len(self.scheduler.timesteps))
|
| 1316 |
+
# noise maps
|
| 1317 |
+
zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=uncond_embedding.dtype)
|
| 1318 |
+
|
| 1319 |
+
with self.progress_bar(total=len(timesteps)) as progress_bar:
|
| 1320 |
+
for t in timesteps:
|
| 1321 |
+
idx = num_inversion_steps - t_to_idx[int(t)] - 1
|
| 1322 |
+
# 1. predict noise residual
|
| 1323 |
+
xt = xts[idx + 1]
|
| 1324 |
+
|
| 1325 |
+
noise_pred = self.unet(xt, timestep=t, encoder_hidden_states=uncond_embedding).sample
|
| 1326 |
+
|
| 1327 |
+
if not source_prompt == "":
|
| 1328 |
+
noise_pred_cond = self.unet(xt, timestep=t, encoder_hidden_states=text_embeddings).sample
|
| 1329 |
+
noise_pred = noise_pred + source_guidance_scale * (noise_pred_cond - noise_pred)
|
| 1330 |
+
|
| 1331 |
+
xtm1 = xts[idx]
|
| 1332 |
+
z, xtm1_corrected = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, self.eta)
|
| 1333 |
+
zs[idx] = z
|
| 1334 |
+
|
| 1335 |
+
# correction to avoid error accumulation
|
| 1336 |
+
xts[idx] = xtm1_corrected
|
| 1337 |
+
|
| 1338 |
+
progress_bar.update()
|
| 1339 |
+
|
| 1340 |
+
self.init_latents = xts[-1].expand(self.batch_size, -1, -1, -1)
|
| 1341 |
+
zs = zs.flip(0)
|
| 1342 |
+
self.zs = zs
|
| 1343 |
+
|
| 1344 |
+
return LEditsPPInversionPipelineOutput(images=resized, vae_reconstruction_images=image_rec)
|
| 1345 |
+
|
| 1346 |
+
@torch.no_grad()
|
| 1347 |
+
def encode_image(self, image, dtype=None, height=None, width=None, resize_mode="default", crops_coords=None):
|
| 1348 |
+
image = self.image_processor.preprocess(
|
| 1349 |
+
image=image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
|
| 1350 |
+
)
|
| 1351 |
+
resized = self.image_processor.postprocess(image=image, output_type="pil")
|
| 1352 |
+
|
| 1353 |
+
if max(image.shape[-2:]) > self.vae.config["sample_size"] * 1.5:
|
| 1354 |
+
logger.warning(
|
| 1355 |
+
"Your input images far exceed the default resolution of the underlying diffusion model. "
|
| 1356 |
+
"The output images may contain severe artifacts! "
|
| 1357 |
+
"Consider down-sampling the input using the `height` and `width` parameters"
|
| 1358 |
+
)
|
| 1359 |
+
image = image.to(dtype)
|
| 1360 |
+
|
| 1361 |
+
x0 = self.vae.encode(image.to(self.device)).latent_dist.mode()
|
| 1362 |
+
x0 = x0.to(dtype)
|
| 1363 |
+
x0 = self.vae.config.scaling_factor * x0
|
| 1364 |
+
return x0, resized
|
| 1365 |
+
|
| 1366 |
+
|
| 1367 |
+
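
# The helpers below recover, for a single scheduler step, the stochastic noise `z` such that stepping from
# `x_t` with the predicted noise residual reproduces the known `x_{t-1}`, i.e. z = (x_{t-1} - mu_t(x_t)) / sigma_t.
# They also return the re-assembled x_{t-1} so the inversion loop can overwrite `xts[idx]` and avoid
# accumulating numerical error.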
def compute_noise_ddim(scheduler, prev_latents, latents, timestep, noise_pred, eta):
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = scheduler.alphas_cumprod[timestep]
    alpha_prod_t_prev = (
        scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod
    )

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)

    # 4. Clip "predicted x_0"
    if scheduler.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -1, 1)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = scheduler._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** (0.5)

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred

    # modified so that the updated xtm1 is returned as well (to avoid error accumulation)
    mu_xt = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
    if variance > 0.0:
        noise = (prev_latents - mu_xt) / (variance ** (0.5) * eta)
    else:
        noise = torch.tensor([0.0]).to(latents.device)

    return noise, mu_xt + (eta * variance**0.5) * noise


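# SDE-DPM-Solver++ (second order) analogue of the DDIM helper above: `mu_xt` is the deterministic part of the
# update and sigma = sigma_t * sqrt(1 - exp(-2h)) its noise scale, so z = (x_{t-1} - mu_xt) / sigma.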
def compute_noise_sde_dpm_pp_2nd(scheduler, prev_latents, latents, timestep, noise_pred, eta):
    def first_order_update(model_output, sample):
        sigma_t, sigma_s = scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index]
        alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t)
        alpha_s, sigma_s = scheduler._sigma_to_alpha_sigma_t(sigma_s)
        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
        lambda_s = torch.log(alpha_s) - torch.log(sigma_s)

        h = lambda_t - lambda_s

        # closed-form mean of the first-order update; recomputed via the scheduler helper below,
        # which supersedes this expression
        mu_xt = (sigma_t / sigma_s * torch.exp(-h)) * sample + (alpha_t * (1 - torch.exp(-2.0 * h))) * model_output

        mu_xt = scheduler.dpm_solver_first_order_update(
            model_output=model_output, sample=sample, noise=torch.zeros_like(sample)
        )

        sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h))
        if sigma > 0.0:
            noise = (prev_latents - mu_xt) / sigma
        else:
            noise = torch.tensor([0.0]).to(sample.device)

        prev_sample = mu_xt + sigma * noise
        return noise, prev_sample

    def second_order_update(model_output_list, sample):
        sigma_t, sigma_s0, sigma_s1 = (
            scheduler.sigmas[scheduler.step_index + 1],
            scheduler.sigmas[scheduler.step_index],
            scheduler.sigmas[scheduler.step_index - 1],
        )

        alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t)
        alpha_s0, sigma_s0 = scheduler._sigma_to_alpha_sigma_t(sigma_s0)
        alpha_s1, sigma_s1 = scheduler._sigma_to_alpha_sigma_t(sigma_s1)

        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
        lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0)
        lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1)

        m0, m1 = model_output_list[-1], model_output_list[-2]

        h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1
        r0 = h_0 / h
        D0, D1 = m0, (1.0 / r0) * (m0 - m1)

        mu_xt = (
            (sigma_t / sigma_s0 * torch.exp(-h)) * sample
            + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0
            + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1
        )

        sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h))
        if sigma > 0.0:
            noise = (prev_latents - mu_xt) / sigma
        else:
            noise = torch.tensor([0.0]).to(sample.device)

        prev_sample = mu_xt + sigma * noise

        return noise, prev_sample

    if scheduler.step_index is None:
        scheduler._init_step_index(timestep)

    model_output = scheduler.convert_model_output(model_output=noise_pred, sample=latents)
    for i in range(scheduler.config.solver_order - 1):
        scheduler.model_outputs[i] = scheduler.model_outputs[i + 1]
    scheduler.model_outputs[-1] = model_output

    if scheduler.lower_order_nums < 1:
        noise, prev_sample = first_order_update(model_output, latents)
    else:
        noise, prev_sample = second_order_update(scheduler.model_outputs, latents)

    if scheduler.lower_order_nums < scheduler.config.solver_order:
        scheduler.lower_order_nums += 1

    # upon completion increase step index by one
    scheduler._step_index += 1

    return noise, prev_sample


def compute_noise(scheduler, *args):
    if isinstance(scheduler, DDIMScheduler):
        return compute_noise_ddim(scheduler, *args)
    elif (
        isinstance(scheduler, DPMSolverMultistepScheduler)
        and scheduler.config.algorithm_type == "sde-dpmsolver++"
        and scheduler.config.solver_order == 2
    ):
        return compute_noise_sde_dpm_pp_2nd(scheduler, *args)
    else:
        raise NotImplementedError(
            "Noise computation is only implemented for DDIMScheduler and DPMSolverMultistepScheduler with "
            "algorithm_type='sde-dpmsolver++' and solver_order=2."
        )

ledits/pipeline_leditspp_stable_diffusion_xl.py
ADDED
@@ -0,0 +1,1854 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import math
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionModelWithProjection,
)

from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import (
    FromSingleFileMixin,
    IPAdapterMixin,
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import (
    Attention,
    AttnProcessor,
    AttnProcessor2_0,
    XFormersAttnProcessor,
)
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.schedulers import DDIMScheduler, DPMSolverMultistepScheduler
from diffusers.utils import (
    USE_PEFT_BACKEND,
    is_invisible_watermark_available,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
    scale_lora_layers,
    unscale_lora_layers,
)
from diffusers.utils.torch_utils import randn_tensor
from diffusers.pipelines.pipeline_utils import DiffusionPipeline

from .pipeline_output import LEditsPPDiffusionPipelineOutput, LEditsPPInversionPipelineOutput


if is_invisible_watermark_available():
    from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import PIL
        >>> import requests
        >>> from io import BytesIO

        >>> from diffusers import LEditsPPPipelineStableDiffusionXL

        >>> pipe = LEditsPPPipelineStableDiffusionXL.from_pretrained(
        ...     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")


        >>> def download_image(url):
        ...     response = requests.get(url)
        ...     return PIL.Image.open(BytesIO(response.content)).convert("RGB")


        >>> img_url = "https://www.aiml.informatik.tu-darmstadt.de/people/mbrack/tennis.jpg"
        >>> image = download_image(img_url)

        >>> _ = pipe.invert(image=image, num_inversion_steps=50, skip=0.2)

        >>> edited_image = pipe(
        ...     editing_prompt=["tennis ball", "tomato"],
        ...     reverse_editing_direction=[True, False],
        ...     edit_guidance_scale=[5.0, 10.0],
        ...     edit_threshold=[0.9, 0.85],
        ... ).images[0]
        ```
"""

# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LeditsAttentionStore
class LeditsAttentionStore:
    @staticmethod
    def get_empty_store():
        return {"down_cross": [], "mid_cross": [], "up_cross": [], "down_self": [], "mid_self": [], "up_self": []}

    def __call__(self, attn, is_cross: bool, place_in_unet: str, editing_prompts, PnP=False):
        # attn.shape = batch_size * head_size, seq_len query, seq_len_key
        if attn.shape[1] <= self.max_size:
            bs = 1 + int(PnP) + editing_prompts
            skip = 2 if PnP else 1  # skip PnP & unconditional
            attn = torch.stack(attn.split(self.batch_size)).permute(1, 0, 2, 3)
            source_batch_size = int(attn.shape[1] // bs)
            self.forward(attn[:, skip * source_batch_size :], is_cross, place_in_unet)

    def forward(self, attn, is_cross: bool, place_in_unet: str):
        key = f"{place_in_unet}_{'cross' if is_cross else 'self'}"

        self.step_store[key].append(attn)

    def between_steps(self, store_step=True):
        if store_step:
            if self.average:
                if len(self.attention_store) == 0:
                    self.attention_store = self.step_store
                else:
                    for key in self.attention_store:
                        for i in range(len(self.attention_store[key])):
                            self.attention_store[key][i] += self.step_store[key][i]
            else:
                if len(self.attention_store) == 0:
                    self.attention_store = [self.step_store]
                else:
                    self.attention_store.append(self.step_store)

            self.cur_step += 1
        self.step_store = self.get_empty_store()

    def get_attention(self, step: int):
        if self.average:
            attention = {
                key: [item / self.cur_step for item in self.attention_store[key]] for key in self.attention_store
            }
        else:
            assert step is not None
            attention = self.attention_store[step]
        return attention

    def aggregate_attention(
        self, attention_maps, prompts, res: Union[int, Tuple[int]], from_where: List[str], is_cross: bool, select: int
    ):
        out = [[] for x in range(self.batch_size)]
        if isinstance(res, int):
            num_pixels = res**2
            resolution = (res, res)
        else:
            num_pixels = res[0] * res[1]
            resolution = res[:2]

        for location in from_where:
            for bs_item in attention_maps[f"{location}_{'cross' if is_cross else 'self'}"]:
                for batch, item in enumerate(bs_item):
                    if item.shape[1] == num_pixels:
                        cross_maps = item.reshape(len(prompts), -1, *resolution, item.shape[-1])[select]
                        out[batch].append(cross_maps)

        out = torch.stack([torch.cat(x, dim=0) for x in out])
        # average over heads
        out = out.sum(1) / out.shape[1]
        return out

    def __init__(self, average: bool, batch_size=1, max_resolution=16, max_size: int = None):
        self.step_store = self.get_empty_store()
        self.attention_store = []
        self.cur_step = 0
        self.average = average
        self.batch_size = batch_size
        if max_size is None:
            self.max_size = max_resolution**2
        elif max_size is not None and max_resolution is None:
            self.max_size = max_size
        else:
            raise ValueError("Only allowed to set one of max_resolution or max_size")


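# Usage sketch for the store (illustrative; this mirrors how the pipeline wires it up, with made-up sizes):
#   store = LeditsAttentionStore(average=True, batch_size=1, max_resolution=16)
#   ... the LEDITSCrossAttnProcessor below feeds it every cross-attention map during denoising ...
#   maps = store.aggregate_attention(
#       store.step_store, prompts, res=(16, 16), from_where=["up", "down"], is_cross=True, select=0
#   )

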
# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LeditsGaussianSmoothing
class LeditsGaussianSmoothing:
    def __init__(self, device):
        kernel_size = [3, 3]
        sigma = [0.5, 0.5]

        # The gaussian kernel is the product of the gaussian function of each dimension.
        kernel = 1
        meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2
            kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-(((mgrid - mean) / (2 * std)) ** 2))

        # Make sure sum of values in gaussian kernel equals 1.
        kernel = kernel / torch.sum(kernel)

        # Reshape to depthwise convolutional weight
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(1, *[1] * (kernel.dim() - 1))

        self.weight = kernel.to(device)

    def __call__(self, input):
        """
        Apply the gaussian filter to the input.

        Arguments:
            input (torch.Tensor): Input to apply gaussian filter on.
        Returns:
            filtered (torch.Tensor): Filtered output.
        """
        return F.conv2d(input, weight=self.weight.to(input.dtype))


# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEDITSCrossAttnProcessor
class LEDITSCrossAttnProcessor:
    def __init__(self, attention_store, place_in_unet, pnp, editing_prompts):
        self.attnstore = attention_store
        self.place_in_unet = place_in_unet
        self.editing_prompts = editing_prompts
        self.pnp = pnp

    def __call__(
        self,
        attn: Attention,
        hidden_states,
        encoder_hidden_states,
        attention_mask=None,
        temb=None,
    ):
        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        attention_probs = attn.get_attention_scores(query, key, attention_mask)
        self.attnstore(
            attention_probs,
            is_cross=True,
            place_in_unet=self.place_in_unet,
            editing_prompts=self.editing_prompts,
            PnP=self.pnp,
        )

        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        hidden_states = hidden_states / attn.rescale_output_factor
        return hidden_states


class LEditsPPPipelineStableDiffusionXL(
    DiffusionPipeline,
    FromSingleFileMixin,
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
    IPAdapterMixin,
):
    """
    Pipeline for textual image editing using LEDITS++ with Stable Diffusion XL.

    This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionXLPipeline`]. Check the
    superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a
    particular device, etc.).

    In addition, the pipeline inherits the following loading methods:
        - *LoRA*: [`LEditsPPPipelineStableDiffusionXL.load_lora_weights`]
        - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]

    as well as the following saving methods:
        - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion XL uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        text_encoder_2 ([`~transformers.CLIPTextModelWithProjection`]):
            Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
            specifically the
            [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
            variant.
        tokenizer ([`~transformers.CLIPTokenizer`]):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        tokenizer_2 ([`~transformers.CLIPTokenizer`]):
            Second Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DPMSolverMultistepScheduler`] or [`DDIMScheduler`]. If any other scheduler is passed, it will
            automatically be set to [`DPMSolverMultistepScheduler`].
        force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `True`):
            Whether the negative prompt embeddings shall always be forced to be set to 0. Also see the config of
            `stabilityai/stable-diffusion-xl-base-1.0`.
        add_watermarker (`bool`, *optional*):
            Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
            watermark output images. If not defined, it will default to `True` if the package is installed; otherwise
            no watermarker will be used.
    """

    model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae"
    _optional_components = [
        "tokenizer",
        "tokenizer_2",
        "text_encoder",
        "text_encoder_2",
        "image_encoder",
        "feature_extractor",
    ]
    _callback_tensor_inputs = [
        "latents",
        "prompt_embeds",
        "negative_prompt_embeds",
        "add_text_embeds",
        "add_time_ids",
        "negative_pooled_prompt_embeds",
        "negative_add_time_ids",
    ]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        text_encoder_2: CLIPTextModelWithProjection,
        tokenizer: CLIPTokenizer,
        tokenizer_2: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DPMSolverMultistepScheduler, DDIMScheduler],
        image_encoder: CLIPVisionModelWithProjection = None,
        feature_extractor: CLIPImageProcessor = None,
        force_zeros_for_empty_prompt: bool = True,
        add_watermarker: Optional[bool] = None,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            text_encoder_2=text_encoder_2,
            tokenizer=tokenizer,
            tokenizer_2=tokenizer_2,
            unet=unet,
            scheduler=scheduler,
            image_encoder=image_encoder,
            feature_extractor=feature_extractor,
        )
        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)

        if not isinstance(scheduler, DDIMScheduler) and not isinstance(scheduler, DPMSolverMultistepScheduler):
            self.scheduler = DPMSolverMultistepScheduler.from_config(
                scheduler.config, algorithm_type="sde-dpmsolver++", solver_order=2
            )
            logger.warning(
                "This pipeline only supports DDIMScheduler and DPMSolverMultistepScheduler. "
                "The scheduler has been changed to DPMSolverMultistepScheduler."
            )

        self.default_sample_size = self.unet.config.sample_size

        add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()

        if add_watermarker:
            self.watermark = StableDiffusionXLWatermarker()
        else:
            self.watermark = None
        self.inversion_steps = None

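    # A hedged usage sketch (not part of the class): constructing the pipeline from a
    # pretrained SDXL checkpoint. The checkpoint id and dtype below are illustrative
    # assumptions, not requirements of this file.
    #
    #     import torch
    #     pipe = LEditsPPPipelineStableDiffusionXL.from_pretrained(
    #         "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
    #     ).to("cuda")
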
    def encode_prompt(
        self,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
        enable_edit_guidance: bool = True,
        editing_prompt: Optional[str] = None,
        editing_prompt_embeds: Optional[torch.Tensor] = None,
        editing_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        avg_diff=None,
        avg_diff_2=None,
        correlation_weight_factor=0.7,
        scale=2,
    ) -> object:
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead.
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, `negative_prompt_embeds` will be generated from the `negative_prompt`
                input argument.
            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled `negative_prompt_embeds` will be generated from the
                `negative_prompt` input argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means
                that the output of the pre-final layer will be used for computing the prompt embeddings.
            enable_edit_guidance (`bool`):
                Whether to guide towards an editing prompt or not.
            editing_prompt (`str` or `List[str]`, *optional*):
                Editing prompt(s) to be encoded. If not defined and `enable_edit_guidance` is True, one has to pass
                `editing_prompt_embeds` instead.
            editing_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated edit text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided and `enable_edit_guidance` is True, `editing_prompt_embeds` will be generated from the
                `editing_prompt` input argument.
            editing_pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated edit pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled `editing_pooled_prompt_embeds` will be generated from the
                `editing_prompt` input argument.
        """
        device = device or self._execution_device

        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if self.text_encoder is not None:
                if not USE_PEFT_BACKEND:
                    adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
                else:
                    scale_lora_layers(self.text_encoder, lora_scale)

            if self.text_encoder_2 is not None:
                if not USE_PEFT_BACKEND:
                    adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
                else:
                    scale_lora_layers(self.text_encoder_2, lora_scale)

        batch_size = self.batch_size

        # Define tokenizers and text encoders
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )
        num_edit_tokens = 0

        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt

        if negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt_2 = negative_prompt_2 or negative_prompt

            # normalize str to list
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
            negative_prompt_2 = (
                batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
            )

            uncond_tokens: List[str]

            if batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but image inversion "
                    f" has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of the input images."
                )
            else:
                uncond_tokens = [negative_prompt, negative_prompt_2]

            j = 0
            negative_prompt_embeds_list = []
            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)

                uncond_input = tokenizer(
                    negative_prompt,
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                toks = uncond_input.input_ids

                negative_prompt_embeds = text_encoder(
                    uncond_input.input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]

                if avg_diff is not None and avg_diff_2 is not None:
                    # Re-weight the concept direction by how strongly each token correlates
                    # with the end-of-text token before shifting the embeddings.
                    normed_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)
                    sims = normed_prompt_embeds[0] @ normed_prompt_embeds[0].T
                    if j == 0:
                        weights = sims[toks.argmax(), :][None, :, None].repeat(1, 1, 768)

                        standard_weights = torch.ones_like(weights)

                        weights = standard_weights + (weights - standard_weights) * correlation_weight_factor
                        # NOTE: this shifted result is not appended below; the unmodified
                        # `negative_prompt_embeds` are used instead.
                        edit_concepts_embeds = negative_prompt_embeds + (
                            weights * avg_diff[None, :].repeat(1, tokenizer.model_max_length, 1) * scale
                        )
                    else:
                        weights = sims[toks.argmax(), :][None, :, None].repeat(1, 1, 1280)

                        standard_weights = torch.ones_like(weights)

                        weights = standard_weights + (weights - standard_weights) * correlation_weight_factor
                        edit_concepts_embeds = negative_prompt_embeds + (
                            weights * avg_diff_2[None, :].repeat(1, tokenizer.model_max_length, 1) * scale
                        )

                negative_prompt_embeds_list.append(negative_prompt_embeds)
                j += 1

            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)

        if zero_out_negative_prompt:
            negative_prompt_embeds = torch.zeros_like(negative_prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(negative_pooled_prompt_embeds)

        if enable_edit_guidance and editing_prompt_embeds is None:
            editing_prompt_2 = editing_prompt

            editing_prompts = [editing_prompt, editing_prompt_2]
            edit_prompt_embeds_list = []

            i = 0
            for editing_prompt, tokenizer, text_encoder in zip(editing_prompts, tokenizers, text_encoders):
                if isinstance(self, TextualInversionLoaderMixin):
                    editing_prompt = self.maybe_convert_prompt(editing_prompt, tokenizer)

                max_length = negative_prompt_embeds.shape[1]
                edit_concepts_input = tokenizer(
                    # [x for item in editing_prompt for x in repeat(item, batch_size)],
                    editing_prompt,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                    return_length=True,
                )
                num_edit_tokens = edit_concepts_input.length - 2
                toks = edit_concepts_input.input_ids
                edit_concepts_embeds = text_encoder(
                    edit_concepts_input.input_ids.to(device),
                    output_hidden_states=True,
                )
                # We are only ALWAYS interested in the pooled output of the final text encoder
                editing_pooled_prompt_embeds = edit_concepts_embeds[0]
                if clip_skip is None:
                    edit_concepts_embeds = edit_concepts_embeds.hidden_states[-2]
                else:
                    # "2" because SDXL always indexes from the penultimate layer.
                    edit_concepts_embeds = edit_concepts_embeds.hidden_states[-(clip_skip + 2)]

                if avg_diff is not None and avg_diff_2 is not None:
                    normed_prompt_embeds = edit_concepts_embeds / edit_concepts_embeds.norm(dim=-1, keepdim=True)
                    sims = normed_prompt_embeds[0] @ normed_prompt_embeds[0].T
                    if i == 0:
                        weights = sims[toks.argmax(), :][None, :, None].repeat(1, 1, 768)

                        standard_weights = torch.ones_like(weights)

                        weights = standard_weights + (weights - standard_weights) * correlation_weight_factor
                        edit_concepts_embeds = edit_concepts_embeds + (
                            weights * avg_diff[None, :].repeat(1, tokenizer.model_max_length, 1) * scale
                        )
                    else:
                        weights = sims[toks.argmax(), :][None, :, None].repeat(1, 1, 1280)

                        standard_weights = torch.ones_like(weights)

                        weights = standard_weights + (weights - standard_weights) * correlation_weight_factor
                        edit_concepts_embeds = edit_concepts_embeds + (
                            weights * avg_diff_2[None, :].repeat(1, tokenizer.model_max_length, 1) * scale
                        )

                edit_prompt_embeds_list.append(edit_concepts_embeds)
                i += 1

            edit_concepts_embeds = torch.concat(edit_prompt_embeds_list, dim=-1)
        elif not enable_edit_guidance:
            edit_concepts_embeds = None
            editing_pooled_prompt_embeds = None

        negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        bs_embed, seq_len, _ = negative_prompt_embeds.shape
        # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
        seq_len = negative_prompt_embeds.shape[1]
        negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
        negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        if enable_edit_guidance:
            bs_embed_edit, seq_len, _ = edit_concepts_embeds.shape
            edit_concepts_embeds = edit_concepts_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
            edit_concepts_embeds = edit_concepts_embeds.repeat(1, num_images_per_prompt, 1)
            edit_concepts_embeds = edit_concepts_embeds.view(bs_embed_edit * num_images_per_prompt, seq_len, -1)

        negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )

        if enable_edit_guidance:
            editing_pooled_prompt_embeds = editing_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed_edit * num_images_per_prompt, -1
            )

        if self.text_encoder is not None:
            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder, lora_scale)

        if self.text_encoder_2 is not None:
            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder_2, lora_scale)

        return (
            negative_prompt_embeds,
            edit_concepts_embeds,
            negative_pooled_prompt_embeds,
            editing_pooled_prompt_embeds,
            num_edit_tokens,
        )

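    # Illustrative sketch (added, with assumed toy names) of the token re-weighting used
    # in `encode_prompt` above: each token's shift along the concept direction `avg_diff`
    # is interpolated between uniform (1.0) and its cosine similarity to the end-of-text
    # token, controlled by `correlation_weight_factor`:
    #
    #     w = 1.0 + (sims_to_eot - 1.0) * correlation_weight_factor  # per-token weight
    #     shifted = embeds + w * avg_diff * scale
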
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, eta, generator=None):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        negative_prompt=None,
        negative_prompt_2=None,
        negative_prompt_embeds=None,
        negative_pooled_prompt_embeds=None,
    ):
        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )

    # Modified from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
    def prepare_latents(self, device, latents):
        latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def _get_add_time_ids(
        self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
    ):
        add_time_ids = list(original_size + crops_coords_top_left + target_size)

        passed_add_embed_dim = (
            self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
        )
        expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features

        if expected_add_embed_dim != passed_add_embed_dim:
            raise ValueError(
                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
            )

        add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
        return add_time_ids

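    # Worked example (values for the standard SDXL base config, stated as an assumption):
    # with `addition_time_embed_dim = 256`, six micro-conditioning ids
    # (original_size + crops_coords_top_left + target_size), and a pooled-text projection
    # dim of 1280, the check above expects 256 * 6 + 1280 = 2816 input features.
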
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae
    def upcast_vae(self):
        dtype = self.vae.dtype
        self.vae.to(dtype=torch.float32)
        use_torch_2_0_or_xformers = isinstance(
            self.vae.decoder.mid_block.attentions[0].processor,
            (
                AttnProcessor2_0,
                XFormersAttnProcessor,
            ),
        )
        # if xformers or torch_2_0 is used attention block does not need
        # to be in float32 which can save lots of memory
        if use_torch_2_0_or_xformers:
            self.vae.post_quant_conv.to(dtype)
            self.vae.decoder.conv_in.to(dtype)
            self.vae.decoder.mid_block.to(dtype)

    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
    def get_guidance_scale_embedding(
        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
    ) -> torch.Tensor:
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            w (`torch.Tensor`):
                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
            embedding_dim (`int`, *optional*, defaults to 512):
                Dimension of the embeddings to generate.
            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
                Data type of the generated embeddings.

        Returns:
            `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

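    # A self-contained sketch of the sinusoidal embedding above (names are illustrative):
    # scale w by 1000, build log-spaced frequencies, then concatenate sin/cos features.
    #
    #     w = torch.tensor([7.5])  # guidance scale
    #     freqs = torch.exp(torch.arange(256) * -(torch.log(torch.tensor(10000.0)) / 255))
    #     emb = torch.cat([torch.sin(w * 1000.0 * freqs), torch.cos(w * 1000.0 * freqs)])
    #     assert emb.shape == (512,)
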
    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def guidance_rescale(self):
        return self._guidance_rescale

    @property
    def clip_skip(self):
        return self._clip_skip

    # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def denoising_end(self):
        return self._denoising_end

    @property
    def num_timesteps(self):
        return self._num_timesteps

    # Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEditsPPPipelineStableDiffusion.prepare_unet
    def prepare_unet(self, attention_store, PnP: bool = False):
        attn_procs = {}
        for name in self.unet.attn_processors.keys():
            if name.startswith("mid_block"):
                place_in_unet = "mid"
            elif name.startswith("up_blocks"):
                place_in_unet = "up"
            elif name.startswith("down_blocks"):
                place_in_unet = "down"
            else:
                continue

            if "attn2" in name and place_in_unet != "mid":
                attn_procs[name] = LEDITSCrossAttnProcessor(
                    attention_store=attention_store,
                    place_in_unet=place_in_unet,
                    pnp=PnP,
                    editing_prompts=self.enabled_editing_prompts,
                )
            else:
                attn_procs[name] = AttnProcessor()

        self.unet.set_attn_processor(attn_procs)

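    # Sketch of what `prepare_unet` swaps in: only cross-attention layers ("attn2")
    # outside the mid block get the recording processor; everything else keeps the
    # default processor. One illustrative way to inspect the result (`pipe` is a
    # hypothetical instance):
    #
    #     n_recording = sum(
    #         isinstance(p, LEDITSCrossAttnProcessor) for p in pipe.unet.attn_processors.values()
    #     )
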
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        denoising_end: Optional[float] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt_2: Optional[Union[str, List[str]]] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guidance_rescale: float = 0.0,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        target_size: Optional[Tuple[int, int]] = None,
        editing_prompt: Optional[Union[str, List[str]]] = None,
        editing_prompt_embeddings: Optional[torch.Tensor] = None,
        editing_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        reverse_editing_direction: Optional[Union[bool, List[bool]]] = False,
        edit_guidance_scale: Optional[Union[float, List[float]]] = 5,
        edit_warmup_steps: Optional[Union[int, List[int]]] = 0,
        edit_cooldown_steps: Optional[Union[int, List[int]]] = None,
        edit_threshold: Optional[Union[float, List[float]]] = 0.9,
        sem_guidance: Optional[List[torch.Tensor]] = None,
        use_cross_attn_mask: bool = False,
        use_intersect_mask: bool = False,
        user_mask: Optional[torch.Tensor] = None,
        attn_store_steps: Optional[List[int]] = [],
        store_averaged_over_steps: bool = True,
        clip_skip: Optional[int] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        avg_diff=None,
        avg_diff_2=None,
        correlation_weight_factor=0.7,
        scale=2,
        **kwargs,
    ):
r"""
|
| 888 |
+
The call function to the pipeline for editing. The
|
| 889 |
+
[`~pipelines.ledits_pp.LEditsPPPipelineStableDiffusionXL.invert`] method has to be called beforehand. Edits
|
| 890 |
+
will always be performed for the last inverted image(s).
|
| 891 |
+
|
| 892 |
+
Args:
|
| 893 |
+
denoising_end (`float`, *optional*):
|
| 894 |
+
When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
|
| 895 |
+
completed before it is intentionally prematurely terminated. As a result, the returned sample will
|
| 896 |
+
still retain a substantial amount of noise as determined by the discrete timesteps selected by the
|
| 897 |
+
scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
|
| 898 |
+
"Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
|
| 899 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
| 900 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
| 901 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
| 902 |
+
less than `1`).
|
| 903 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
| 904 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
| 905 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
| 906 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
| 907 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 908 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
| 909 |
+
argument.
|
| 910 |
+
negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
|
| 911 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 912 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
| 913 |
+
input argument.
|
| 914 |
+
ip_adapter_image: (`PipelineImageInput`, *optional*):
|
| 915 |
+
Optional image input to work with IP Adapters.
|
| 916 |
+
output_type (`str`, *optional*, defaults to `"pil"`):
|
| 917 |
+
The output format of the generate image. Choose between
|
| 918 |
+
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
| 919 |
+
return_dict (`bool`, *optional*, defaults to `True`):
|
| 920 |
+
Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
|
| 921 |
+
of a plain tuple.
|
| 922 |
+
callback (`Callable`, *optional*):
|
| 923 |
+
A function that will be called every `callback_steps` steps during inference. The function will be
|
| 924 |
+
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
|
| 925 |
+
callback_steps (`int`, *optional*, defaults to 1):
|
| 926 |
+
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
| 927 |
+
called at every step.
|
| 928 |
+
cross_attention_kwargs (`dict`, *optional*):
|
| 929 |
+
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
| 930 |
+
`self.processor` in
|
| 931 |
+
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
|
| 932 |
+
guidance_rescale (`float`, *optional*, defaults to 0.7):
|
| 933 |
+
Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
|
| 934 |
+
Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of
|
| 935 |
+
[Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
|
| 936 |
+
Guidance rescale factor should fix overexposure when using zero terminal SNR.
|
| 937 |
+
crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
|
| 938 |
+
`crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
|
| 939 |
+
`crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
|
| 940 |
+
`crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
|
| 941 |
+
[https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 942 |
+
target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
|
| 943 |
+
For most cases, `target_size` should be set to the desired height and width of the generated image. If
|
| 944 |
+
not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in
|
| 945 |
+
section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
|
| 946 |
+
editing_prompt (`str` or `List[str]`, *optional*):
|
| 947 |
+
The prompt or prompts to guide the image generation. The image is reconstructed by setting
|
| 948 |
+
`editing_prompt = None`. Guidance direction of prompt should be specified via
|
| 949 |
+
`reverse_editing_direction`.
|
| 950 |
+
editing_prompt_embeddings (`torch.Tensor`, *optional*):
|
| 951 |
+
Pre-generated edit text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
| 952 |
+
If not provided, editing_prompt_embeddings will be generated from `editing_prompt` input argument.
|
| 953 |
+
editing_pooled_prompt_embeddings (`torch.Tensor`, *optional*):
|
| 954 |
+
Pre-generated pooled edit text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
| 955 |
+
weighting. If not provided, editing_prompt_embeddings will be generated from `editing_prompt` input
|
| 956 |
+
argument.
|
| 957 |
+
reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`):
|
| 958 |
+
Whether the corresponding prompt in `editing_prompt` should be increased or decreased.
|
| 959 |
+
edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5):
|
| 960 |
+
Guidance scale for guiding the image generation. If provided as list values should correspond to
|
| 961 |
+
`editing_prompt`. `edit_guidance_scale` is defined as `s_e` of equation 12 of [LEDITS++
|
| 962 |
+
Paper](https://arxiv.org/abs/2301.12247).
|
| 963 |
+
edit_warmup_steps (`float` or `List[float]`, *optional*, defaults to 10):
|
| 964 |
+
Number of diffusion steps (for each prompt) for which guidance is not applied.
|
| 965 |
+
edit_cooldown_steps (`float` or `List[float]`, *optional*, defaults to `None`):
|
| 966 |
+
Number of diffusion steps (for each prompt) after which guidance is no longer applied.
|
| 967 |
+
edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9):
|
| 968 |
+
Masking threshold of guidance. Threshold should be proportional to the image region that is modified.
|
| 969 |
+
'edit_threshold' is defined as 'λ' of equation 12 of [LEDITS++
|
| 970 |
+
Paper](https://arxiv.org/abs/2301.12247).
|
| 971 |
+
sem_guidance (`List[torch.Tensor]`, *optional*):
|
| 972 |
+
List of pre-generated guidance vectors to be applied at generation. Length of the list has to
|
| 973 |
+
correspond to `num_inference_steps`.
|
| 974 |
+
use_cross_attn_mask:
|
| 975 |
+
Whether cross-attention masks are used. Cross-attention masks are always used when use_intersect_mask
|
| 976 |
+
is set to true. Cross-attention masks are defined as 'M^1' of equation 12 of [LEDITS++
|
| 977 |
+
paper](https://arxiv.org/pdf/2311.16711.pdf).
|
| 978 |
+
use_intersect_mask:
|
| 979 |
+
Whether the masking term is calculated as intersection of cross-attention masks and masks derived from
|
| 980 |
+
the noise estimate. Cross-attention mask are defined as 'M^1' and masks derived from the noise estimate
|
| 981 |
+
are defined as 'M^2' of equation 12 of [LEDITS++ paper](https://arxiv.org/pdf/2311.16711.pdf).
|
| 982 |
+
user_mask:
|
| 983 |
+
User-provided mask for even better control over the editing process. This is helpful when LEDITS++'s
|
| 984 |
+
implicit masks do not meet user preferences.
|
| 985 |
+
attn_store_steps:
|
| 986 |
+
Steps for which the attention maps are stored in the AttentionStore. Just for visualization purposes.
|
| 987 |
+
store_averaged_over_steps:
|
| 988 |
+
Whether the attention maps for the 'attn_store_steps' are stored averaged over the diffusion steps. If
|
| 989 |
+
False, attention maps for each step are stores separately. Just for visualization purposes.
|
| 990 |
+
clip_skip (`int`, *optional*):
|
| 991 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
| 992 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
| 993 |
+
callback_on_step_end (`Callable`, *optional*):
|
| 994 |
+
A function that calls at the end of each denoising steps during the inference. The function is called
|
| 995 |
+
with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
|
| 996 |
+
callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
|
| 997 |
+
`callback_on_step_end_tensor_inputs`.
|
| 998 |
+
callback_on_step_end_tensor_inputs (`List`, *optional*):
|
| 999 |
+
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
|
| 1000 |
+
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
|
| 1001 |
+
`._callback_tensor_inputs` attribute of your pipeline class.
|
| 1002 |
+
|
| 1003 |
+
Examples:
|
| 1004 |
+
|
| 1005 |
+
Returns:
|
| 1006 |
+
[`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] or `tuple`:
|
| 1007 |
+
[`~pipelines.ledits_pp.LEditsPPDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When
|
| 1008 |
+
returning a tuple, the first element is a list with the generated images.
|
| 1009 |
+
"""
|
| 1010 |
+
        if self.inversion_steps is None:
            raise ValueError(
                "You need to invert an input image first before calling the pipeline. The `invert` method has to be called beforehand. Edits will always be performed for the last inverted image(s)."
            )

        eta = self.eta
        num_images_per_prompt = 1
        latents = self.init_latents

        zs = self.zs
        self.scheduler.set_timesteps(len(self.scheduler.timesteps))

        if use_intersect_mask:
            use_cross_attn_mask = True

        if use_cross_attn_mask:
            self.smoothing = LeditsGaussianSmoothing(self.device)

        if user_mask is not None:
            user_mask = user_mask.to(self.device)

        # TODO: Check inputs
        # 1. Check inputs. Raise error if not correct
        # self.check_inputs(
        #     callback_steps,
        #     negative_prompt,
        #     negative_prompt_2,
        #     prompt_embeds,
        #     negative_prompt_embeds,
        #     pooled_prompt_embeds,
        #     negative_pooled_prompt_embeds,
        # )
        self._guidance_rescale = guidance_rescale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs
        self._denoising_end = denoising_end

        # 2. Define call parameters
        batch_size = self.batch_size

        device = self._execution_device

        if editing_prompt:
            enable_edit_guidance = True
            if isinstance(editing_prompt, str):
                editing_prompt = [editing_prompt]
            self.enabled_editing_prompts = len(editing_prompt)
        elif editing_prompt_embeddings is not None:
            enable_edit_guidance = True
            self.enabled_editing_prompts = editing_prompt_embeddings.shape[0]
        else:
            self.enabled_editing_prompts = 0
            enable_edit_guidance = False

        # 3. Encode input prompt
        text_encoder_lora_scale = (
            cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
        )
        (
            prompt_embeds,
            edit_prompt_embeds,
            negative_pooled_prompt_embeds,
            pooled_edit_embeds,
            num_edit_tokens,
        ) = self.encode_prompt(
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            negative_prompt=negative_prompt,
            negative_prompt_2=negative_prompt_2,
            negative_prompt_embeds=negative_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
            lora_scale=text_encoder_lora_scale,
            clip_skip=self.clip_skip,
            enable_edit_guidance=enable_edit_guidance,
            editing_prompt=editing_prompt,
            editing_prompt_embeds=editing_prompt_embeddings,
            editing_pooled_prompt_embeds=editing_pooled_prompt_embeds,
            avg_diff=avg_diff,
            avg_diff_2=avg_diff_2,
            correlation_weight_factor=correlation_weight_factor,
            scale=scale,
        )

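        # Layout note (descriptive comment, added): `prompt_embeds` holds the
        # unconditional embeddings and, once concatenated below, one row per enabled
        # editing concept, so the UNet sees a batch of
        # (1 + self.enabled_editing_prompts) * batch_size latents per step.
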
        # 4. Prepare timesteps
        # self.scheduler.set_timesteps(num_inference_steps, device=device)

        timesteps = self.inversion_steps
        t_to_idx = {int(v): k for k, v in enumerate(timesteps)}

        if use_cross_attn_mask:
            self.attention_store = LeditsAttentionStore(
                average=store_averaged_over_steps,
                batch_size=batch_size,
                max_size=(latents.shape[-2] / 4.0) * (latents.shape[-1] / 4.0),
                max_resolution=None,
            )
            self.prepare_unet(self.attention_store)
            resolution = latents.shape[-2:]
            att_res = (int(resolution[0] / 4), int(resolution[1] / 4))

        # 5. Prepare latent variables
        latents = self.prepare_latents(device=device, latents=latents)

        # 6. Prepare extra step kwargs.
        extra_step_kwargs = self.prepare_extra_step_kwargs(eta)

        if self.text_encoder_2 is None:
            text_encoder_projection_dim = int(negative_pooled_prompt_embeds.shape[-1])
        else:
            text_encoder_projection_dim = self.text_encoder_2.config.projection_dim

        # 7. Prepare added time ids & embeddings
        add_text_embeds = negative_pooled_prompt_embeds
        add_time_ids = self._get_add_time_ids(
            self.size,
            crops_coords_top_left,
            self.size,
            dtype=negative_pooled_prompt_embeds.dtype,
            text_encoder_projection_dim=text_encoder_projection_dim,
        )

        if enable_edit_guidance:
            prompt_embeds = torch.cat([prompt_embeds, edit_prompt_embeds], dim=0)
            add_text_embeds = torch.cat([add_text_embeds, pooled_edit_embeds], dim=0)
            edit_concepts_time_ids = add_time_ids.repeat(edit_prompt_embeds.shape[0], 1)
            add_time_ids = torch.cat([add_time_ids, edit_concepts_time_ids], dim=0)
            self.text_cross_attention_maps = [editing_prompt] if isinstance(editing_prompt, str) else editing_prompt

        prompt_embeds = prompt_embeds.to(device)
        add_text_embeds = add_text_embeds.to(device)
        add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)

        if ip_adapter_image is not None:
            # TODO: fix image encoding
            image_embeds, negative_image_embeds = self.encode_image(ip_adapter_image, device, num_images_per_prompt)
            if self.do_classifier_free_guidance:
                image_embeds = torch.cat([negative_image_embeds, image_embeds])
                image_embeds = image_embeds.to(device)

        # 8. Denoising loop
        self.sem_guidance = None
        self.activation_mask = None

        if (
            self.denoising_end is not None
            and isinstance(self.denoising_end, float)
            and self.denoising_end > 0
            and self.denoising_end < 1
        ):
            discrete_timestep_cutoff = int(
                round(
                    self.scheduler.config.num_train_timesteps
                    - (self.denoising_end * self.scheduler.config.num_train_timesteps)
                )
            )
            num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
            timesteps = timesteps[:num_inference_steps]

        # 9. Optionally get Guidance Scale Embedding
        timestep_cond = None
        if self.unet.config.time_cond_proj_dim is not None:
            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
            timestep_cond = self.get_guidance_scale_embedding(
                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
            ).to(device=device, dtype=latents.dtype)

        self._num_timesteps = len(timesteps)
        with self.progress_bar(total=self._num_timesteps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * (1 + self.enabled_editing_prompts))
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
                # predict the noise residual
                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
                if ip_adapter_image is not None:
                    added_cond_kwargs["image_embeds"] = image_embeds
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                )[0]

                noise_pred_out = noise_pred.chunk(1 + self.enabled_editing_prompts)  # [b, 4, 64, 64]
                noise_pred_uncond = noise_pred_out[0]
                noise_pred_edit_concepts = noise_pred_out[1:]

                noise_guidance_edit = torch.zeros(
                    noise_pred_uncond.shape,
                    device=self.device,
                    dtype=noise_pred_uncond.dtype,
                )

                if sem_guidance is not None and len(sem_guidance) > i:
                    noise_guidance_edit += sem_guidance[i].to(self.device)

                elif enable_edit_guidance:
                    if self.activation_mask is None:
                        self.activation_mask = torch.zeros(
                            (len(timesteps), self.enabled_editing_prompts, *noise_pred_edit_concepts[0].shape)
                        )
                    if self.sem_guidance is None:
                        self.sem_guidance = torch.zeros((len(timesteps), *noise_pred_uncond.shape))

                    # noise_guidance_edit = torch.zeros_like(noise_guidance)
                    for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts):
                        if isinstance(edit_warmup_steps, list):
                            edit_warmup_steps_c = edit_warmup_steps[c]
                        else:
                            edit_warmup_steps_c = edit_warmup_steps
                        if i < edit_warmup_steps_c:
                            continue

                        if isinstance(edit_guidance_scale, list):
                            edit_guidance_scale_c = edit_guidance_scale[c]
                        else:
                            edit_guidance_scale_c = edit_guidance_scale

                        if isinstance(edit_threshold, list):
                            edit_threshold_c = edit_threshold[c]
                        else:
                            edit_threshold_c = edit_threshold
                        if isinstance(reverse_editing_direction, list):
                            reverse_editing_direction_c = reverse_editing_direction[c]
                        else:
                            reverse_editing_direction_c = reverse_editing_direction

                        if isinstance(edit_cooldown_steps, list):
                            edit_cooldown_steps_c = edit_cooldown_steps[c]
                        elif edit_cooldown_steps is None:
                            edit_cooldown_steps_c = i + 1
                        else:
                            edit_cooldown_steps_c = edit_cooldown_steps

                        if i >= edit_cooldown_steps_c:
                            continue

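                        # Per-concept guidance (descriptive comment, added): the edit
                        # direction is the difference between the concept-conditioned and
                        # unconditional noise estimates, optionally flipped and scaled,
                        # then restricted by the masks computed below.
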
                        noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond

                        if reverse_editing_direction_c:
                            noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1

                        noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c

                        if user_mask is not None:
                            noise_guidance_edit_tmp = noise_guidance_edit_tmp * user_mask

                        if use_cross_attn_mask:
                            out = self.attention_store.aggregate_attention(
                                attention_maps=self.attention_store.step_store,
                                prompts=self.text_cross_attention_maps,
                                res=att_res,
                                from_where=["up", "down"],
                                is_cross=True,
                                select=self.text_cross_attention_maps.index(editing_prompt[c]),
                            )
                            attn_map = out[:, :, :, 1 : 1 + num_edit_tokens[c]]  # 0 -> startoftext

                            # average over all tokens
                            if attn_map.shape[3] != num_edit_tokens[c]:
                                raise ValueError(
                                    f"Incorrect shape of attention_map. Expected size {num_edit_tokens[c]}, but found {attn_map.shape[3]}!"
                                )
                            attn_map = torch.sum(attn_map, dim=3)

                            # gaussian_smoothing
                            attn_map = F.pad(attn_map.unsqueeze(1), (1, 1, 1, 1), mode="reflect")
                            attn_map = self.smoothing(attn_map).squeeze(1)

                            # torch.quantile function expects float32
                            if attn_map.dtype == torch.float32:
                                tmp = torch.quantile(attn_map.flatten(start_dim=1), edit_threshold_c, dim=1)
                            else:
                                tmp = torch.quantile(
                                    attn_map.flatten(start_dim=1).to(torch.float32), edit_threshold_c, dim=1
                                ).to(attn_map.dtype)
                            attn_mask = torch.where(
                                attn_map >= tmp.unsqueeze(1).unsqueeze(1).repeat(1, *att_res), 1.0, 0.0
                            )

                            # resolution must match latent space dimension
                            attn_mask = F.interpolate(
                                attn_mask.unsqueeze(1),
                                noise_guidance_edit_tmp.shape[-2:],  # 64, 64
                            ).repeat(1, 4, 1, 1)
                            self.activation_mask[i, c] = attn_mask.detach().cpu()
                            if not use_intersect_mask:
                                noise_guidance_edit_tmp = noise_guidance_edit_tmp * attn_mask

                        if use_intersect_mask:
                            noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp)
                            noise_guidance_edit_tmp_quantile = torch.sum(
                                noise_guidance_edit_tmp_quantile, dim=1, keepdim=True
                            )
                            noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(
                                1, self.unet.config.in_channels, 1, 1
                            )

                            # torch.quantile function expects float32
                            if noise_guidance_edit_tmp_quantile.dtype == torch.float32:
                                tmp = torch.quantile(
                                    noise_guidance_edit_tmp_quantile.flatten(start_dim=2),
                                    edit_threshold_c,
                                    dim=2,
                                    keepdim=False,
                                )
                            else:
                                tmp = torch.quantile(
                                    noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32),
                                    edit_threshold_c,
                                    dim=2,
                                    keepdim=False,
                                ).to(noise_guidance_edit_tmp_quantile.dtype)

                            intersect_mask = (
                                torch.where(
                                    noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None],
                                    torch.ones_like(noise_guidance_edit_tmp),
                                    torch.zeros_like(noise_guidance_edit_tmp),
                                )
                                * attn_mask
                            )

                            self.activation_mask[i, c] = intersect_mask.detach().cpu()

                            noise_guidance_edit_tmp = noise_guidance_edit_tmp * intersect_mask

                        elif not use_cross_attn_mask:
                            # calculate quantile
                            noise_guidance_edit_tmp_quantile = torch.abs(noise_guidance_edit_tmp)
                            noise_guidance_edit_tmp_quantile = torch.sum(
                                noise_guidance_edit_tmp_quantile, dim=1, keepdim=True
                            )
                            noise_guidance_edit_tmp_quantile = noise_guidance_edit_tmp_quantile.repeat(1, 4, 1, 1)

                            # torch.quantile function expects float32
                            if noise_guidance_edit_tmp_quantile.dtype == torch.float32:
                                tmp = torch.quantile(
                                    noise_guidance_edit_tmp_quantile.flatten(start_dim=2),
                                    edit_threshold_c,
                                    dim=2,
                                    keepdim=False,
                                )
                            else:
                                tmp = torch.quantile(
                                    noise_guidance_edit_tmp_quantile.flatten(start_dim=2).to(torch.float32),
                                    edit_threshold_c,
                                    dim=2,
                                    keepdim=False,
                                ).to(noise_guidance_edit_tmp_quantile.dtype)

                            self.activation_mask[i, c] = (
                                torch.where(
                                    noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None],
                                    torch.ones_like(noise_guidance_edit_tmp),
                                    torch.zeros_like(noise_guidance_edit_tmp),
                                )
                                .detach()
                                .cpu()
                            )

                            noise_guidance_edit_tmp = torch.where(
                                noise_guidance_edit_tmp_quantile >= tmp[:, :, None, None],
                                noise_guidance_edit_tmp,
                                torch.zeros_like(noise_guidance_edit_tmp),
                            )

                        noise_guidance_edit += noise_guidance_edit_tmp

                    self.sem_guidance[i] = noise_guidance_edit.detach().cpu()

                noise_pred = noise_pred_uncond + noise_guidance_edit

                # compute the previous noisy sample x_t -> x_t-1
                if enable_edit_guidance and self.guidance_rescale > 0.0:
                    # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
                    noise_pred = rescale_noise_cfg(
                        noise_pred,
                        noise_pred_edit_concepts.mean(dim=0, keepdim=False),
                        guidance_rescale=self.guidance_rescale,
                    )

                idx = t_to_idx[int(t)]
                latents = self.scheduler.step(
                    noise_pred, t, latents, variance_noise=zs[idx], **extra_step_kwargs, return_dict=False
                )[0]

                # step callback
                if use_cross_attn_mask:
                    store_step = i in attn_store_steps
                    self.attention_store.between_steps(store_step)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
                    add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
                    negative_pooled_prompt_embeds = callback_outputs.pop(
                        "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
                    )
                    add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
                    # negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > 0 and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

if not output_type == "latent":
|
| 1428 |
+
# make sure the VAE is in float32 mode, as it overflows in float16
|
| 1429 |
+
needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
|
| 1430 |
+
|
| 1431 |
+
if needs_upcasting:
|
| 1432 |
+
self.upcast_vae()
|
| 1433 |
+
latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
|
| 1434 |
+
|
| 1435 |
+
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
|
| 1436 |
+
|
| 1437 |
+
# cast back to fp16 if needed
|
| 1438 |
+
if needs_upcasting:
|
| 1439 |
+
self.vae.to(dtype=torch.float16)
|
| 1440 |
+
else:
|
| 1441 |
+
image = latents
|
| 1442 |
+
|
| 1443 |
+
if not output_type == "latent":
|
| 1444 |
+
# apply watermark if available
|
| 1445 |
+
if self.watermark is not None:
|
| 1446 |
+
image = self.watermark.apply_watermark(image)
|
| 1447 |
+
|
| 1448 |
+
image = self.image_processor.postprocess(image, output_type=output_type)
|
| 1449 |
+
|
| 1450 |
+
# Offload all models
|
| 1451 |
+
self.maybe_free_model_hooks()
|
| 1452 |
+
|
| 1453 |
+
if not return_dict:
|
| 1454 |
+
return (image,)
|
| 1455 |
+
|
| 1456 |
+
return LEditsPPDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
|
| 1457 |
+
|
| 1458 |
+
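    # Illustrative sketch (not part of the original file): the quantile-based masking used inline
    # in `__call__` above, factored into a standalone helper for readability. `noise_guidance`
    # and `threshold` are hypothetical names; the pipeline itself keeps this logic inline.
    @staticmethod
    def _sketch_quantile_mask(noise_guidance: torch.Tensor, threshold: float) -> torch.Tensor:
        """Hypothetical helper: binary mask of guidance values at or above the per-image quantile."""
        # sum magnitudes over channels, then broadcast back to all 4 latent channels
        magnitude = torch.sum(torch.abs(noise_guidance), dim=1, keepdim=True).repeat(1, 4, 1, 1)
        # torch.quantile expects float32
        tmp = torch.quantile(magnitude.flatten(start_dim=2).to(torch.float32), threshold, dim=2)
        tmp = tmp.to(magnitude.dtype)
        # keep only values at or above the per-image, per-channel quantile
        return (magnitude >= tmp[:, :, None, None]).to(noise_guidance.dtype)
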
    @torch.no_grad()
    # Modified from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.LEditsPPPipelineStableDiffusion.encode_image
    def encode_image(self, image, dtype=None, height=None, width=None, resize_mode="default", crops_coords=None):
        image = self.image_processor.preprocess(
            image=image, height=height, width=width, resize_mode=resize_mode, crops_coords=crops_coords
        )
        resized = self.image_processor.postprocess(image=image, output_type="pil")

        if max(image.shape[-2:]) > self.vae.config["sample_size"] * 1.5:
            logger.warning(
                "Your input images far exceed the default resolution of the underlying diffusion model. "
                "The output images may contain severe artifacts! "
                "Consider down-sampling the input using the `height` and `width` parameters."
            )
        image = image.to(self.device, dtype=dtype)
        needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast

        if needs_upcasting:
            image = image.float()
            self.upcast_vae()

        x0 = self.vae.encode(image).latent_dist.mode()
        x0 = x0.to(dtype)
        # cast back to fp16 if needed
        if needs_upcasting:
            self.vae.to(dtype=torch.float16)

        x0 = self.vae.config.scaling_factor * x0
        return x0, resized

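    # Illustrative sketch (not part of the original file): latents returned by `encode_image` are
    # multiplied by `vae.config.scaling_factor`, so decoding must divide that factor back out.
    def _sketch_decode_latents(self, x0: torch.Tensor):
        """Hypothetical inverse of `encode_image`'s scaling, returning PIL images."""
        image = self.vae.decode(x0 / self.vae.config.scaling_factor, return_dict=False)[0]
        return self.image_processor.postprocess(image, output_type="pil")
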
    @torch.no_grad()
    def invert(
        self,
        image: PipelineImageInput,
        source_prompt: str = "",
        source_guidance_scale=3.5,
        negative_prompt: str = None,
        negative_prompt_2: str = None,
        num_inversion_steps: int = 50,
        skip: float = 0.15,
        generator: Optional[torch.Generator] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        num_zero_noise_steps: int = 3,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
    ):
        r"""
        The function of the pipeline for image inversion as described by the [LEDITS++
        Paper](https://arxiv.org/abs/2301.12247). If the scheduler is set to [`~schedulers.DDIMScheduler`], the
        inversion proposed by [edit-friendly DDPM](https://arxiv.org/abs/2304.06140) is performed instead.

        Args:
            image (`PipelineImageInput`):
                Input for the image(s) that are to be edited. Multiple input images have to share the same aspect
                ratio.
            source_prompt (`str`, defaults to `""`):
                Prompt describing the input image that will be used for guidance during inversion. Guidance is
                disabled if the `source_prompt` is `""`.
            source_guidance_scale (`float`, defaults to `3.5`):
                Strength of guidance during inversion.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders.
            num_inversion_steps (`int`, defaults to `50`):
                Number of total performed inversion steps after discarding the initial `skip` steps.
            skip (`float`, defaults to `0.15`):
                Portion of initial steps that will be ignored for inversion and subsequent generation. Lower values
                will lead to stronger changes to the input image. `skip` has to be between `0` and `1`.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                inversion deterministic.
            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to `(0, 0)`):
                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the
                position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by
                setting `crops_coords_top_left` to `(0, 0)`. Part of SDXL's micro-conditioning as explained in
                section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            num_zero_noise_steps (`int`, defaults to `3`):
                Number of final diffusion steps that will not renoise the current image. If no steps are set to zero,
                SD-XL in combination with [`DPMSolverMultistepScheduler`] will produce noise artifacts.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).

        Returns:
            [`~pipelines.ledits_pp.LEditsPPInversionPipelineOutput`]: Output will contain the resized input image(s)
            and respective VAE reconstruction(s).
        """

        # Reset attn processor, we do not want to store attn maps during inversion
        self.unet.set_attn_processor(AttnProcessor())

        self.eta = 1.0

        self.scheduler.config.timestep_spacing = "leading"
        self.scheduler.set_timesteps(int(num_inversion_steps * (1 + skip)))
        self.inversion_steps = self.scheduler.timesteps[-num_inversion_steps:]
        timesteps = self.inversion_steps

        num_images_per_prompt = 1

        device = self._execution_device

        # 0. Ensure that only the uncond embedding is used if prompt = ""
        if source_prompt == "":
            # noise pred should only be noise_pred_uncond
            source_guidance_scale = 0.0
            do_classifier_free_guidance = False
        else:
            do_classifier_free_guidance = source_guidance_scale > 1.0

        # 1. prepare image; latents are laid out as (batch, channels, height, width)
        x0, resized = self.encode_image(image, dtype=self.text_encoder_2.dtype)
        height = x0.shape[2] * self.vae_scale_factor
        width = x0.shape[3] * self.vae_scale_factor
        self.size = (height, width)

        self.batch_size = x0.shape[0]

        # 2. get embeddings
        text_encoder_lora_scale = (
            cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
        )

        if isinstance(source_prompt, str):
            source_prompt = [source_prompt] * self.batch_size

        (
            negative_prompt_embeds,
            prompt_embeds,
            negative_pooled_prompt_embeds,
            edit_pooled_prompt_embeds,
            _,
        ) = self.encode_prompt(
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            negative_prompt=negative_prompt,
            negative_prompt_2=negative_prompt_2,
            editing_prompt=source_prompt,
            lora_scale=text_encoder_lora_scale,
            enable_edit_guidance=do_classifier_free_guidance,
        )
        if self.text_encoder_2 is None:
            text_encoder_projection_dim = int(negative_pooled_prompt_embeds.shape[-1])
        else:
            text_encoder_projection_dim = self.text_encoder_2.config.projection_dim

        # 3. Prepare added time ids & embeddings
        add_text_embeds = negative_pooled_prompt_embeds
        add_time_ids = self._get_add_time_ids(
            self.size,
            crops_coords_top_left,
            self.size,
            dtype=negative_prompt_embeds.dtype,
            text_encoder_projection_dim=text_encoder_projection_dim,
        )

        if do_classifier_free_guidance:
            negative_prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            add_text_embeds = torch.cat([add_text_embeds, edit_pooled_prompt_embeds], dim=0)
            add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)

        negative_prompt_embeds = negative_prompt_embeds.to(device)

        add_text_embeds = add_text_embeds.to(device)
        add_time_ids = add_time_ids.to(device).repeat(self.batch_size * num_images_per_prompt, 1)

        # 4. autoencoder reconstruction
        if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:
            self.upcast_vae()
            x0_tmp = x0.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
            image_rec = self.vae.decode(
                x0_tmp / self.vae.config.scaling_factor, return_dict=False, generator=generator
            )[0]
        elif self.vae.config.force_upcast:
            x0_tmp = x0.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
            image_rec = self.vae.decode(
                x0_tmp / self.vae.config.scaling_factor, return_dict=False, generator=generator
            )[0]
        else:
            image_rec = self.vae.decode(x0 / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0]

        image_rec = self.image_processor.postprocess(image_rec, output_type="pil")

        # 5. find zs and xts
        variance_noise_shape = (num_inversion_steps, *x0.shape)

        # intermediate latents
        t_to_idx = {int(v): k for k, v in enumerate(timesteps)}
        xts = torch.zeros(size=variance_noise_shape, device=self.device, dtype=negative_prompt_embeds.dtype)

        for t in reversed(timesteps):
            idx = num_inversion_steps - t_to_idx[int(t)] - 1
            noise = randn_tensor(shape=x0.shape, generator=generator, device=self.device, dtype=x0.dtype)
            xts[idx] = self.scheduler.add_noise(x0, noise, t.unsqueeze(0))
        xts = torch.cat([x0.unsqueeze(0), xts], dim=0)

        # noise maps
        zs = torch.zeros(size=variance_noise_shape, device=self.device, dtype=negative_prompt_embeds.dtype)

        self.scheduler.set_timesteps(len(self.scheduler.timesteps))

        for t in self.progress_bar(timesteps):
            idx = num_inversion_steps - t_to_idx[int(t)] - 1
            # 1. predict noise residual
            xt = xts[idx + 1]

            latent_model_input = torch.cat([xt] * 2) if do_classifier_free_guidance else xt
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}

            noise_pred = self.unet(
                latent_model_input,
                t,
                encoder_hidden_states=negative_prompt_embeds,
                cross_attention_kwargs=cross_attention_kwargs,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            # 2. perform guidance
            if do_classifier_free_guidance:
                noise_pred_out = noise_pred.chunk(2)
                noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1]
                noise_pred = noise_pred_uncond + source_guidance_scale * (noise_pred_text - noise_pred_uncond)

            xtm1 = xts[idx]
            z, xtm1_corrected = compute_noise(self.scheduler, xtm1, xt, t, noise_pred, self.eta)
            zs[idx] = z

            # correction to avoid error accumulation
            xts[idx] = xtm1_corrected

        self.init_latents = xts[-1]
        zs = zs.flip(0)

        if num_zero_noise_steps > 0:
            zs[-num_zero_noise_steps:] = torch.zeros_like(zs[-num_zero_noise_steps:])
        self.zs = zs
        return LEditsPPInversionPipelineOutput(images=resized, vae_reconstruction_images=image_rec)

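# Illustrative usage sketch (not part of the original file): LEDITS++ is a two-stage process,
# `invert` followed by a guided re-generation via `__call__`. The editing prompt and parameter
# values below are placeholders; `pipe` is assumed to be an instance of the pipeline above.
def _sketch_invert_then_edit(pipe, image):
    """Hypothetical end-to-end example: invert an image, then apply a semantic edit."""
    # 1. invert the input image; this caches init latents and noise maps on the pipeline
    inversion = pipe.invert(image=image, source_prompt="", num_inversion_steps=50, skip=0.15)
    # 2. re-generate with edit guidance; these arguments follow the LEDITS++ editing interface
    edited = pipe(
        editing_prompt=["sunglasses"],
        reverse_editing_direction=[False],
        edit_guidance_scale=[10.0],
        edit_threshold=[0.9],
    )
    return inversion.vae_reconstruction_images, edited.images

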
# Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4.
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg


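# Quick illustrative check (not in the original file): with guidance_rescale=1.0 the rescaled
# prediction's per-sample std matches the text prediction's std exactly.
def _sketch_check_rescale():
    """Hypothetical sanity check for rescale_noise_cfg."""
    torch.manual_seed(0)
    noise_pred_text = torch.randn(2, 4, 8, 8)
    noise_cfg = 7.5 * torch.randn(2, 4, 8, 8)  # stand-in for an over-amplified CFG result
    rescaled = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0)
    assert torch.allclose(rescaled.std(dim=(1, 2, 3)), noise_pred_text.std(dim=(1, 2, 3)), atol=1e-4)

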
# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.compute_noise_ddim
def compute_noise_ddim(scheduler, prev_latents, latents, timestep, noise_pred, eta):
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - scheduler.config.num_train_timesteps // scheduler.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = scheduler.alphas_cumprod[timestep]
    alpha_prod_t_prev = (
        scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else scheduler.final_alpha_cumprod
    )

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)

    # 4. Clip "predicted x_0"
    if scheduler.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -1, 1)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = scheduler._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** (0.5)

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * noise_pred

    # modified so that the updated x_{t-1} is returned as well (to avoid error accumulation)
    mu_xt = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
    if variance > 0.0:
        noise = (prev_latents - mu_xt) / (variance ** (0.5) * eta)
    else:
        noise = torch.tensor([0.0]).to(latents.device)

    return noise, mu_xt + (eta * variance**0.5) * noise


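# Illustrative note (not in the original file): compute_noise_ddim solves the edit-friendly
# relation x_{t-1} = mu_t(x_t) + eta * sigma_t * z_t for z_t, so plugging the returned noise back
# in reproduces `prev_latents` exactly whenever variance > 0. A minimal round-trip check, with
# all argument names as hypothetical stand-ins:
def _sketch_check_ddim_roundtrip(mu_xt, prev_latents, variance, eta=1.0):
    """Hypothetical round-trip check for the noise extraction above."""
    noise = (prev_latents - mu_xt) / (variance**0.5 * eta)
    reconstructed = mu_xt + eta * variance**0.5 * noise
    return torch.allclose(reconstructed, prev_latents, atol=1e-6)

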
# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.compute_noise_sde_dpm_pp_2nd
def compute_noise_sde_dpm_pp_2nd(scheduler, prev_latents, latents, timestep, noise_pred, eta):
    def first_order_update(model_output, sample):
        sigma_t, sigma_s = scheduler.sigmas[scheduler.step_index + 1], scheduler.sigmas[scheduler.step_index]
        alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t)
        alpha_s, sigma_s = scheduler._sigma_to_alpha_sigma_t(sigma_s)
        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
        lambda_s = torch.log(alpha_s) - torch.log(sigma_s)

        h = lambda_t - lambda_s

        # deterministic mean of the transition; equivalent to the closed form
        # (sigma_t / sigma_s) * exp(-h) * sample + alpha_t * (1 - exp(-2h)) * model_output
        mu_xt = scheduler.dpm_solver_first_order_update(
            model_output=model_output, sample=sample, noise=torch.zeros_like(sample)
        )

        sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h))
        if sigma > 0.0:
            noise = (prev_latents - mu_xt) / sigma
        else:
            noise = torch.tensor([0.0]).to(sample.device)

        prev_sample = mu_xt + sigma * noise
        return noise, prev_sample

    def second_order_update(model_output_list, sample):
        sigma_t, sigma_s0, sigma_s1 = (
            scheduler.sigmas[scheduler.step_index + 1],
            scheduler.sigmas[scheduler.step_index],
            scheduler.sigmas[scheduler.step_index - 1],
        )

        alpha_t, sigma_t = scheduler._sigma_to_alpha_sigma_t(sigma_t)
        alpha_s0, sigma_s0 = scheduler._sigma_to_alpha_sigma_t(sigma_s0)
        alpha_s1, sigma_s1 = scheduler._sigma_to_alpha_sigma_t(sigma_s1)

        lambda_t = torch.log(alpha_t) - torch.log(sigma_t)
        lambda_s0 = torch.log(alpha_s0) - torch.log(sigma_s0)
        lambda_s1 = torch.log(alpha_s1) - torch.log(sigma_s1)

        m0, m1 = model_output_list[-1], model_output_list[-2]

        h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1
        r0 = h_0 / h
        D0, D1 = m0, (1.0 / r0) * (m0 - m1)

        mu_xt = (
            (sigma_t / sigma_s0 * torch.exp(-h)) * sample
            + (alpha_t * (1 - torch.exp(-2.0 * h))) * D0
            + 0.5 * (alpha_t * (1 - torch.exp(-2.0 * h))) * D1
        )

        sigma = sigma_t * torch.sqrt(1.0 - torch.exp(-2 * h))
        if sigma > 0.0:
            noise = (prev_latents - mu_xt) / sigma
        else:
            noise = torch.tensor([0.0]).to(sample.device)

        prev_sample = mu_xt + sigma * noise

        return noise, prev_sample

    if scheduler.step_index is None:
        scheduler._init_step_index(timestep)

    model_output = scheduler.convert_model_output(model_output=noise_pred, sample=latents)
    for i in range(scheduler.config.solver_order - 1):
        scheduler.model_outputs[i] = scheduler.model_outputs[i + 1]
    scheduler.model_outputs[-1] = model_output

    if scheduler.lower_order_nums < 1:
        noise, prev_sample = first_order_update(model_output, latents)
    else:
        noise, prev_sample = second_order_update(scheduler.model_outputs, latents)

    if scheduler.lower_order_nums < scheduler.config.solver_order:
        scheduler.lower_order_nums += 1

    # upon completion increase step index by one
    scheduler._step_index += 1

    return noise, prev_sample


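# Illustrative sketch (not part of the original file): the first-order SDE-DPM-Solver++ transition
# kernel used above, written out in isolation. Given the deterministic mean `mu`, the injected
# noise has std sigma_t * sqrt(1 - exp(-2h)); all argument names are hypothetical stand-ins.
def _sketch_sde_dpmpp_first_order(sample, x0_pred, sigma_t, sigma_s, alpha_t, h):
    """Hypothetical scalar form of the first-order SDE-DPM-Solver++ mean and std."""
    mu = (sigma_t / sigma_s) * torch.exp(-h) * sample + alpha_t * (1 - torch.exp(-2.0 * h)) * x0_pred
    std = sigma_t * torch.sqrt(1.0 - torch.exp(-2.0 * h))
    return mu, std

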
# Copied from diffusers.pipelines.ledits_pp.pipeline_leditspp_stable_diffusion.compute_noise
def compute_noise(scheduler, *args):
    if isinstance(scheduler, DDIMScheduler):
        return compute_noise_ddim(scheduler, *args)
    elif (
        isinstance(scheduler, DPMSolverMultistepScheduler)
        and scheduler.config.algorithm_type == "sde-dpmsolver++"
        and scheduler.config.solver_order == 2
    ):
        return compute_noise_sde_dpm_pp_2nd(scheduler, *args)
    else:
        raise NotImplementedError
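# Illustrative sketch (not part of the original file): compute_noise only supports DDIMScheduler
# or a second-order sde-dpmsolver++ DPMSolverMultistepScheduler. One way to build a compatible
# scheduler (the checkpoint id is a placeholder):
def _sketch_compatible_scheduler(checkpoint_id="stabilityai/stable-diffusion-xl-base-1.0"):
    """Hypothetical helper constructing a scheduler accepted by compute_noise."""
    return DPMSolverMultistepScheduler.from_pretrained(
        checkpoint_id, subfolder="scheduler", algorithm_type="sde-dpmsolver++", solver_order=2
    )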
ledits/pipeline_output.py
ADDED
@@ -0,0 +1,43 @@
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from diffusers.utils import BaseOutput


@dataclass
class LEditsPPDiffusionPipelineOutput(BaseOutput):
    """
    Output class for LEdits++ Diffusion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
            num_channels)`.
        nsfw_content_detected (`List[bool]`):
            List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or
            `None` if safety checking could not be performed.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


@dataclass
class LEditsPPInversionPipelineOutput(BaseOutput):
    """
    Output class for LEdits++ inversion pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of the cropped and resized input images as PIL images of length `batch_size` or NumPy array of
            shape `(batch_size, height, width, num_channels)`.
        vae_reconstruction_images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of VAE reconstructions of all input images as PIL images of length `batch_size` or NumPy array of
            shape `(batch_size, height, width, num_channels)`.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    vae_reconstruction_images: Union[List[PIL.Image.Image], np.ndarray]