# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Splits related API."""

import abc
import collections
import copy
import dataclasses
import re
from dataclasses import dataclass
from typing import Dict, List, Optional, Union

from .arrow_reader import FileInstructions, make_file_instructions
from .naming import _split_re
from .utils.py_utils import NonMutableDict, asdict


@dataclass
class SplitInfo:
    name: str = dataclasses.field(default="", metadata={"include_in_asdict_even_if_is_default": True})
    num_bytes: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
    num_examples: int = dataclasses.field(default=0, metadata={"include_in_asdict_even_if_is_default": True})
    shard_lengths: Optional[List[int]] = None

    # Deprecated
    # For backward compatibility, this field needs to always be included in files like
    # dataset_infos.json and dataset_info.json files
    # To do so, we always include it in the output of datasets.utils.py_utils.asdict(split_info)
    dataset_name: Optional[str] = dataclasses.field(
        default=None, metadata={"include_in_asdict_even_if_is_default": True}
    )

    @property
    def file_instructions(self):
        """Returns the list of dict(filename, take, skip)."""
        # `self.dataset_name` is assigned in `SplitDict.add()`.
        instructions = make_file_instructions(
            name=self.dataset_name,
            split_infos=[self],
            instruction=str(self.name),
        )
        return instructions.file_instructions


@dataclass
class SubSplitInfo:
    """Wrapper around a sub split info.

    This class exposes info on the subsplit:

    ```
    ds, info = datasets.load_dataset(..., split='train[75%:]', with_info=True)
    info.splits['train[75%:]'].num_examples
    ```
    """

    instructions: FileInstructions

    @property
    def num_examples(self):
        """Returns the number of examples in the subsplit."""
        return self.instructions.num_examples

    @property
    def file_instructions(self):
        """Returns the list of dict(filename, take, skip)."""
        return self.instructions.file_instructions


class SplitBase(metaclass=abc.ABCMeta):
    # pylint: disable=line-too-long
    """Abstract base class for Split compositionality.

    See the [guide on splits](../loading#slice-splits) for more information.

    There are three parts to the composition:
        1) The splits are composed (defined, merged, split,...) together before
           calling the `.as_dataset()` function. This is done with the `__add__`,
           `__getitem__`, which return a tree of `SplitBase` (whose leaves
           are the `NamedSplit` objects)

        ```
        split = datasets.Split.TRAIN + datasets.Split.TEST.subsplit(datasets.percent[:50])
        ```

        2) The `SplitBase` is forwarded to the `.as_dataset()` function
           to be resolved into actual read instruction. This is done by the
           `.get_read_instruction()` method which takes the real dataset splits
           (name, number of shards,...) and parses the tree to return a
           `SplitReadInstruction()` object

        ```
        read_instruction = split.get_read_instruction(self.info.splits)
        ```

        3) The `SplitReadInstruction` is then used in the `tf.data.Dataset` pipeline
           to define which files to read and how to skip examples within file.

    """
    # pylint: enable=line-too-long

    @abc.abstractmethod
    def get_read_instruction(self, split_dict):
        """Parse the descriptor tree and compile all read instructions together.

        Args:
            split_dict: `dict`, The `dict[split_name, SplitInfo]` of the dataset

        Returns:
            split_read_instruction: `SplitReadInstruction`
        """
        raise NotImplementedError("Abstract method")

    def __eq__(self, other):
        """Equality: datasets.Split.TRAIN == 'train'."""
        if isinstance(other, (NamedSplit, str)):
            return False
        raise NotImplementedError("Equality is not implemented between merged/sub splits.")

    def __ne__(self, other):
        """InEquality: datasets.Split.TRAIN != 'test'."""
        return not self.__eq__(other)

    def __add__(self, other):
        """Merging: datasets.Split.TRAIN + datasets.Split.TEST."""
        return _SplitMerged(self, other)

    def subsplit(self, arg=None, k=None, percent=None, weighted=None):  # pylint: disable=redefined-outer-name
        """Divides this split into subsplits.

        There are 3 ways to define subsplits, which correspond to the 3
        arguments `k` (get `k` even subsplits), `percent` (get a slice of the
        dataset with `datasets.percent`), and `weighted` (get subsplits with
        proportions specified by `weighted`).

        Example::

        ```
        # 50% train, 50% test
        train, test = split.subsplit(k=2)
        # 50% train, 25% test, 25% validation
        train, test, validation = split.subsplit(weighted=[2, 1, 1])
        # Extract last 20%
        subsplit = split.subsplit(datasets.percent[-20:])
        ```

        Warning: k and weighted will be converted into percent which means that
        values below the percent will be rounded up or down. The final split may be
        bigger to deal with remainders. For instance:

        ```
        train, test, valid = split.subsplit(k=3)  # 33%, 33%, 34%
        s1, s2, s3, s4 = split.subsplit(weighted=[2, 2, 1, 1])  # 33%, 33%, 16%, 18%
        ```

        Args:
            arg: If no kwargs are given, `arg` will be interpreted as one of
                `k`, `percent`, or `weighted` depending on the type.
                For example:
                ```
                split.subsplit(10)  # Equivalent to split.subsplit(k=10)
                split.subsplit(datasets.percent[:-20])  # percent=datasets.percent[:-20]
                split.subsplit([1, 1, 2])  # weighted=[1, 1, 2]
                ```
            k: `int` If set, subdivide the split into `k` equal parts.
            percent: `datasets.percent slice`, return a single subsplit corresponding to
                a slice of the original split. For example:
                `split.subsplit(datasets.percent[-20:])  # Last 20% of the dataset`.
            weighted: `list[int]`, return a list of subsplits whose proportions match
                the normalized sum of the list. For example:
                `split.subsplit(weighted=[1, 1, 2])  # 25%, 25%, 50%`.

        Returns:
            A subsplit or list of subsplits extracted from this split object.
        """
        # Note that the percent kwargs redefine the outer name datasets.percent. This
        # is done for consistency (.subsplit(percent=datasets.percent[:40]))
        if sum(bool(x) for x in (arg, k, percent, weighted)) != 1:
            raise ValueError("Only one argument of subsplit should be set.")

        # Auto deduce k
        if isinstance(arg, int):
            k = arg
        elif isinstance(arg, slice):
            percent = arg
        elif isinstance(arg, list):
            weighted = arg

        if not (k or percent or weighted):
            raise ValueError(
                f"Invalid split argument {arg}. Only list, slice and int supported. "
                "One of k, weighted or percent should be set to a non empty value."
            )

        def assert_slices_coverage(slices):
            # Ensure that the expanded slices cover all percents.
            assert sum((list(range(*s.indices(100))) for s in slices), []) == list(range(100))

        if k:
            if not 0 < k <= 100:
                raise ValueError(f"Subsplit k should be between 0 and 100, got {k}")
            shift = 100 // k
            slices = [slice(i * shift, (i + 1) * shift) for i in range(k)]
            # Round up last element to ensure all elements are taken
            slices[-1] = slice(slices[-1].start, 100)
            # Internal check to ensure full coverage
            assert_slices_coverage(slices)
            return tuple(_SubSplit(self, s) for s in slices)
        elif percent:
            return _SubSplit(self, percent)
        elif weighted:
            # Normalize the weighted sum
            total = sum(weighted)
            weighted = [100 * x // total for x in weighted]
            # Create the slice for each of the elements
            start = 0
            stop = 0
            slices = []
            for v in weighted:
                stop += v
                slices.append(slice(start, stop))
                start = stop
            # Round up last element to ensure all elements are taken
            slices[-1] = slice(slices[-1].start, 100)
            # Internal check to ensure full coverage
            assert_slices_coverage(slices)
            return tuple(_SubSplit(self, s) for s in slices)
        else:
            # Should not be possible
            raise ValueError("Could not determine the split")


# 2 requirements:
# 1. datasets.percent be sliceable
# 2. datasets.percent be documented
#
# Instances are not documented, so we want datasets.percent to be a class, but to
# have it be sliceable, we need this metaclass.
class PercentSliceMeta(type):
    def __getitem__(cls, slice_value):
        if not isinstance(slice_value, slice):
            raise ValueError(f"datasets.percent should only be called with slice, not {slice_value}")
        return slice_value


class PercentSlice(metaclass=PercentSliceMeta):
    # pylint: disable=line-too-long
    """Syntactic sugar for defining slice subsplits: `datasets.percent[75:-5]`.

    See the [guide on splits](../loading#slice-splits) for more information.
    """
    # pylint: enable=line-too-long

    pass


percent = PercentSlice  # pylint: disable=invalid-name


class _SplitMerged(SplitBase):
    """Represent two split descriptors merged together."""

    def __init__(self, split1, split2):
        self._split1 = split1
        self._split2 = split2

    def get_read_instruction(self, split_dict):
        read_instruction1 = self._split1.get_read_instruction(split_dict)
        read_instruction2 = self._split2.get_read_instruction(split_dict)
        return read_instruction1 + read_instruction2

    def __repr__(self):
        return f"({repr(self._split1)} + {repr(self._split2)})"


class _SubSplit(SplitBase):
    """Represent a sub split of a split descriptor."""

    def __init__(self, split, slice_value):
        self._split = split
        self._slice_value = slice_value

    def get_read_instruction(self, split_dict):
        return self._split.get_read_instruction(split_dict)[self._slice_value]

    def __repr__(self):
        slice_str = "{start}:{stop}"
        if self._slice_value.step is not None:
            slice_str += ":{step}"
        slice_str = slice_str.format(
            start="" if self._slice_value.start is None else self._slice_value.start,
            stop="" if self._slice_value.stop is None else self._slice_value.stop,
            step=self._slice_value.step,
        )
        return f"{repr(self._split)}(datasets.percent[{slice_str}])"


class NamedSplit(SplitBase):
    """Descriptor corresponding to a named split (train, test, ...).

    Example:
        Each descriptor can be composed with others using addition or slice:

        ```py
        split = datasets.Split.TRAIN.subsplit(datasets.percent[0:25]) + datasets.Split.TEST
        ```

        The resulting split will correspond to 25% of the train split merged with
        100% of the test split.

        A split cannot be added twice, so the following will fail:

        ```py
        split = (
            datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
            datasets.Split.TRAIN.subsplit(datasets.percent[75:])
        )  # Error
        split = datasets.Split.TEST + datasets.Split.ALL  # Error
        ```

        The slices can be applied only one time. So the following are valid:

        ```py
        split = (
            datasets.Split.TRAIN.subsplit(datasets.percent[:25]) +
            datasets.Split.TEST.subsplit(datasets.percent[:50])
        )
        split = (datasets.Split.TRAIN + datasets.Split.TEST).subsplit(datasets.percent[:50])
        ```

        But this is not valid:

        ```py
        train = datasets.Split.TRAIN
        test = datasets.Split.TEST
        split = train.subsplit(datasets.percent[:25]).subsplit(datasets.percent[:25])
        split = (train.subsplit(datasets.percent[:25]) + test).subsplit(datasets.percent[:50])
        ```
    """

    def __init__(self, name):
        self._name = name
        split_names_from_instruction = [split_instruction.split("[")[0] for split_instruction in name.split("+")]
        for split_name in split_names_from_instruction:
            if not re.match(_split_re, split_name):
                raise ValueError(f"Split name should match '{_split_re}' but got '{split_name}'.")

    def __str__(self):
        return self._name

    def __repr__(self):
        return f"NamedSplit({self._name!r})"

    def __eq__(self, other):
        """Equality: datasets.Split.TRAIN == 'train'."""
        if isinstance(other, NamedSplit):
            return self._name == other._name  # pylint: disable=protected-access
        elif isinstance(other, SplitBase):
            return False
        elif isinstance(other, str):  # Other should be string
            return self._name == other
        else:
            return False

    def __lt__(self, other):
        return self._name < other._name  # pylint: disable=protected-access

    def __hash__(self):
        return hash(self._name)

    def get_read_instruction(self, split_dict):
        return SplitReadInstruction(split_dict[self._name])


class NamedSplitAll(NamedSplit):
    """Split corresponding to the union of all defined dataset splits."""

    def __init__(self):
        super().__init__("all")

    def __repr__(self):
        return "NamedSplitAll()"

    def get_read_instruction(self, split_dict):
        # Merge all dataset split together
        read_instructions = [SplitReadInstruction(s) for s in split_dict.values()]
        return sum(read_instructions, SplitReadInstruction())


class Split:
    # pylint: disable=line-too-long
    """`Enum` for dataset splits.

    Datasets are typically split into different subsets to be used at various
    stages of training and evaluation.

    - `TRAIN`: the training data.
    - `VALIDATION`: the validation data. If present, this is typically used as
      evaluation data while iterating on a model (e.g. changing hyperparameters,
      model architecture, etc.).
    - `TEST`: the testing data. This is the data to report metrics on. Typically
      you do not want to use this during model iteration as you may overfit to it.
    - `ALL`: the union of all defined dataset splits.

    All splits, including compositions, inherit from `datasets.SplitBase`.

    See the [guide](../load_hub#splits) on splits for more information.

    Example:

    ```py
    >>> datasets.SplitGenerator(
    ...     name=datasets.Split.TRAIN,
    ...     gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)},
    ... ),
    ... datasets.SplitGenerator(
    ...     name=datasets.Split.VALIDATION,
    ...     gen_kwargs={"split_key": "validation", "files": dl_manager.download_and_extract(url)},
    ... ),
    ... datasets.SplitGenerator(
    ...     name=datasets.Split.TEST,
    ...     gen_kwargs={"split_key": "test", "files": dl_manager.download_and_extract(url)},
    ... )
    ```
    """
    # pylint: enable=line-too-long

    TRAIN = NamedSplit("train")
    TEST = NamedSplit("test")
    VALIDATION = NamedSplit("validation")
    ALL = NamedSplitAll()

    def __new__(cls, name):
        """Create a custom split with datasets.Split('custom_name')."""
        return NamedSplitAll() if name == "all" else NamedSplit(name)


# Similar to SplitInfo, but contains an additional slice info
SlicedSplitInfo = collections.namedtuple(
    "SlicedSplitInfo",
    [
        "split_info",
        "slice_value",
    ],
)  # noqa: E231


class SplitReadInstruction:
    """Object containing the reading instruction for the dataset.

    Similarly to `SplitDescriptor` nodes, this object can be composed with itself,
    but the resolution happens instantaneously, instead of keeping track of the
    tree, so that all instructions are compiled and flattened in a single
    SplitReadInstruction object containing the list of files and slice to use.

    Once resolved, the instructions can be accessed with:

    ```
    read_instructions.get_list_sliced_split_info()  # List of splits to use
    ```
    """

    def __init__(self, split_info=None):
        self._splits = NonMutableDict(error_msg="Overlap between splits. Split {key} has been added with itself.")

        if split_info:
            self.add(SlicedSplitInfo(split_info=split_info, slice_value=None))

    def add(self, sliced_split):
        """Add a SlicedSplitInfo to the read instructions."""
        # TODO(epot): Check that the number of examples per shard % 100 == 0
        # Otherwise the slices value may be unbalanced and not exactly reflect the
        # requested slice.
        self._splits[sliced_split.split_info.name] = sliced_split

    def __add__(self, other):
        """Merging splits together."""
        # Will raise an error if a split has already been added (NonMutableDict)
        # TODO(epot): If a split is already added but there is no overlap between
        # the slices, should merge the slices (ex: [:10] + [80:])
        split_instruction = SplitReadInstruction()
        split_instruction._splits.update(self._splits)  # pylint: disable=protected-access
        split_instruction._splits.update(other._splits)  # pylint: disable=protected-access
        return split_instruction

    def __getitem__(self, slice_value):
        """Sub-splits."""
        # Will raise an error if a split has already been sliced
        split_instruction = SplitReadInstruction()
        for v in self._splits.values():
            if v.slice_value is not None:
                raise ValueError(f"Trying to slice Split {v.split_info.name} which has already been sliced")
            v = v._asdict()
            v["slice_value"] = slice_value
            split_instruction.add(SlicedSplitInfo(**v))
        return split_instruction

    def get_list_sliced_split_info(self):
        return list(self._splits.values())


class SplitDict(dict):
    """Split info object."""

    def __init__(self, *args, dataset_name=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.dataset_name = dataset_name

    def __getitem__(self, key: Union[SplitBase, str]):
        # 1st case: The key exists: `info.splits['train']`
        if str(key) in self:
            return super().__getitem__(str(key))
        # 2nd case: Uses instructions: `info.splits['train[50%]']`
        else:
            instructions = make_file_instructions(
                name=self.dataset_name,
                split_infos=self.values(),
                instruction=key,
            )
            return SubSplitInfo(instructions)

    def __setitem__(self, key: Union[SplitBase, str], value: SplitInfo):
        if key != value.name:
            raise ValueError(f"Cannot add elem. (key mismatch: '{key}' != '{value.name}')")
        super().__setitem__(key, value)

    def add(self, split_info: SplitInfo):
        """Add the split info."""
        if split_info.name in self:
            raise ValueError(f"Split {split_info.name} already present")
        split_info.dataset_name = self.dataset_name
        super().__setitem__(split_info.name, split_info)

    @property
    def total_num_examples(self):
        """Return the total number of examples."""
        return sum(s.num_examples for s in self.values())

    @classmethod
    def from_split_dict(cls, split_infos: Union[List, Dict], dataset_name: Optional[str] = None):
        """Returns a new SplitDict initialized from a Dict or List of `split_infos`."""
        if isinstance(split_infos, dict):
            split_infos = list(split_infos.values())

        if dataset_name is None:
            dataset_name = split_infos[0].get("dataset_name") if split_infos else None

        split_dict = cls(dataset_name=dataset_name)

        for split_info in split_infos:
            if isinstance(split_info, dict):
                split_info = SplitInfo(**split_info)
            split_dict.add(split_info)

        return split_dict

    def to_split_dict(self):
        """Returns a list of SplitInfo protos that we have."""
        out = []
        for split_name, split_info in self.items():
            split_info = copy.deepcopy(split_info)
            split_info.name = split_name
            out.append(split_info)
        return out

    def copy(self):
        return SplitDict.from_split_dict(self.to_split_dict(), self.dataset_name)

    def _to_yaml_list(self) -> list:
        out = [asdict(s) for s in self.to_split_dict()]
        # we don't need the shard lengths in YAML, since it depends on max_shard_size and num_proc
        for split_info_dict in out:
            split_info_dict.pop("shard_lengths", None)
        # we don't need the dataset_name attribute that is deprecated
        for split_info_dict in out:
            split_info_dict.pop("dataset_name", None)
        return out

    @classmethod
    def _from_yaml_list(cls, yaml_data: list) -> "SplitDict":
        return cls.from_split_dict(yaml_data)


@dataclass
class SplitGenerator:
    """Defines the split information for the generator.

    This should be used as returned value of
    `GeneratorBasedBuilder._split_generators`.
    See `GeneratorBasedBuilder._split_generators` for more info and example
    of usage.

    Args:
        name (`str`):
            Name of the `Split` for which the generator will
            create the examples.
        **gen_kwargs (additional keyword arguments):
            Keyword arguments to forward to the `DatasetBuilder._generate_examples` method
            of the builder.

    Example:

    ```py
    >>> datasets.SplitGenerator(
    ...     name=datasets.Split.TRAIN,
    ...     gen_kwargs={"split_key": "train", "files": dl_manager.download_and_extract(url)},
    ... )
    ```
    """

    name: str
    gen_kwargs: Dict = dataclasses.field(default_factory=dict)
    split_info: SplitInfo = dataclasses.field(init=False)

    def __post_init__(self):
        self.name = str(self.name)  # Make sure we convert NamedSplits in strings
        NamedSplit(self.name)  # check that it's a valid split name
        self.split_info = SplitInfo(name=self.name)
datasets/src/datasets/splits.py/0
{ "file_path": "datasets/src/datasets/splits.py", "repo_id": "datasets", "token_count": 9577 }
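To make the interplay of the classes in `splits.py` concrete, here is a minimal sketch that builds a `SplitDict` from `SplitInfo` entries and exercises the named-split equality described in the docstrings. It assumes a local installation of `datasets`; the dataset name and split sizes are made up for illustration.

```python
from datasets.splits import NamedSplit, Split, SplitDict, SplitInfo

# Build a SplitDict the way a builder would: one SplitInfo per split.
# "my_dataset" is a hypothetical dataset name.
splits = SplitDict(dataset_name="my_dataset")
splits.add(SplitInfo(name="train", num_bytes=1_000, num_examples=100))
splits.add(SplitInfo(name="test", num_bytes=200, num_examples=20))

# A NamedSplit compares equal to its plain string name.
assert Split.TRAIN == "train"
assert NamedSplit("train") == Split.TRAIN

# Aggregate info and the YAML view (shard_lengths / dataset_name stripped).
print(splits.total_num_examples)  # 120
print(splits._to_yaml_list())     # [{'name': 'train', ...}, {'name': 'test', ...}]
```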
import re import textwrap from collections import Counter from itertools import groupby from operator import itemgetter from typing import Any, ClassVar, Dict, List, Optional, Tuple import yaml from huggingface_hub import DatasetCardData from ..config import METADATA_CONFIGS_FIELD from ..features import Features from ..info import DatasetInfo, DatasetInfosDict from ..naming import _split_re from ..utils.logging import get_logger logger = get_logger(__name__) class _NoDuplicateSafeLoader(yaml.SafeLoader): def _check_no_duplicates_on_constructed_node(self, node): keys = [self.constructed_objects[key_node] for key_node, _ in node.value] keys = [tuple(key) if isinstance(key, list) else key for key in keys] counter = Counter(keys) duplicate_keys = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}") def construct_mapping(self, node, deep=False): mapping = super().construct_mapping(node, deep=deep) self._check_no_duplicates_on_constructed_node(node) return mapping def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]: full_content = list(readme_content.splitlines()) if full_content and full_content[0] == "---" and "---" in full_content[1:]: sep_idx = full_content[1:].index("---") + 1 yamlblock = "\n".join(full_content[1:sep_idx]) return yamlblock, "\n".join(full_content[sep_idx + 1 :]) return None, "\n".join(full_content) class MetadataConfigs(Dict[str, Dict[str, Any]]): """Should be in format {config_name: {**config_params}}.""" FIELD_NAME: ClassVar[str] = METADATA_CONFIGS_FIELD @staticmethod def _raise_if_data_files_field_not_valid(metadata_config: dict): yaml_data_files = metadata_config.get("data_files") if yaml_data_files is not None: yaml_error_message = textwrap.dedent( f""" Expected data_files in YAML to be either a string or a list of strings or a list of dicts with two keys: 'split' and 'path', but got {yaml_data_files} Examples of data_files in YAML: data_files: data.csv data_files: data/*.png data_files: - part0/* - part1/* data_files: - split: train path: train/* - split: test path: test/* data_files: - split: train path: - train/part1/* - train/part2/* - split: test path: test/* PS: some symbols like dashes '-' are not allowed in split names """ ) if not isinstance(yaml_data_files, (list, str)): raise ValueError(yaml_error_message) if isinstance(yaml_data_files, list): for yaml_data_files_item in yaml_data_files: if ( not isinstance(yaml_data_files_item, (str, dict)) or isinstance(yaml_data_files_item, dict) and not ( len(yaml_data_files_item) == 2 and "split" in yaml_data_files_item and re.match(_split_re, yaml_data_files_item["split"]) and isinstance(yaml_data_files_item.get("path"), (str, list)) ) ): raise ValueError(yaml_error_message) @classmethod def _from_exported_parquet_files_and_dataset_infos( cls, parquet_commit_hash: str, exported_parquet_files: List[Dict[str, Any]], dataset_infos: DatasetInfosDict, ) -> "MetadataConfigs": metadata_configs = { config_name: { "data_files": [ { "split": split_name, "path": [ parquet_file["url"].replace("refs%2Fconvert%2Fparquet", parquet_commit_hash) for parquet_file in parquet_files_for_split ], } for split_name, parquet_files_for_split in groupby(parquet_files_for_config, itemgetter("split")) ], "version": str(dataset_infos.get(config_name, DatasetInfo()).version or "0.0.0"), } for config_name, parquet_files_for_config in groupby(exported_parquet_files, itemgetter("config")) } if dataset_infos: # Preserve order of configs and splits 
metadata_configs = { config_name: { "data_files": [ data_file for split_name in dataset_info.splits for data_file in metadata_configs[config_name]["data_files"] if data_file["split"] == split_name ], "version": metadata_configs[config_name]["version"], } for config_name, dataset_info in dataset_infos.items() } return cls(metadata_configs) @classmethod def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> "MetadataConfigs": if dataset_card_data.get(cls.FIELD_NAME): metadata_configs = dataset_card_data[cls.FIELD_NAME] if not isinstance(metadata_configs, list): raise ValueError(f"Expected {cls.FIELD_NAME} to be a list, but got '{metadata_configs}'") for metadata_config in metadata_configs: if "config_name" not in metadata_config: raise ValueError( f"Each config must include `config_name` field with a string name of a config, " f"but got {metadata_config}. " ) cls._raise_if_data_files_field_not_valid(metadata_config) return cls( { config.pop("config_name"): { param: value if param != "features" else Features._from_yaml_list(value) for param, value in config.items() } for metadata_config in metadata_configs if (config := metadata_config.copy()) } ) return cls() def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None: if self: for metadata_config in self.values(): self._raise_if_data_files_field_not_valid(metadata_config) current_metadata_configs = self.from_dataset_card_data(dataset_card_data) total_metadata_configs = dict(sorted({**current_metadata_configs, **self}.items())) for config_name, config_metadata in total_metadata_configs.items(): config_metadata.pop("config_name", None) dataset_card_data[self.FIELD_NAME] = [ {"config_name": config_name, **config_metadata} for config_name, config_metadata in total_metadata_configs.items() ] def get_default_config_name(self) -> Optional[str]: default_config_name = None for config_name, metadata_config in self.items(): if len(self) == 1 or config_name == "default" or metadata_config.get("default"): if default_config_name is None: default_config_name = config_name else: raise ValueError( f"Dataset has several default configs: '{default_config_name}' and '{config_name}'." ) return default_config_name # DEPRECATED - just here to support old versions of evaluate like 0.2.2 # To support new tasks on the Hugging Face Hub, please open a PR for this file: # https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/pipelines.ts known_task_ids = { "image-classification": [], "translation": [], "image-segmentation": [], "fill-mask": [], "automatic-speech-recognition": [], "token-classification": [], "sentence-similarity": [], "audio-classification": [], "question-answering": [], "summarization": [], "zero-shot-classification": [], "table-to-text": [], "feature-extraction": [], "other": [], "multiple-choice": [], "text-classification": [], "text-to-image": [], "text2text-generation": [], "zero-shot-image-classification": [], "tabular-classification": [], "tabular-regression": [], "image-to-image": [], "tabular-to-text": [], "unconditional-image-generation": [], "text-retrieval": [], "text-to-speech": [], "object-detection": [], "audio-to-audio": [], "text-generation": [], "conversational": [], "table-question-answering": [], "visual-question-answering": [], "image-to-text": [], "reinforcement-learning": [], "voice-activity-detection": [], "time-series-forecasting": [], "document-question-answering": [], }
datasets/src/datasets/utils/metadata.py/0
{ "file_path": "datasets/src/datasets/utils/metadata.py", "repo_id": "datasets", "token_count": 4646 }
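As a quick illustration of how `MetadataConfigs` consumes the `configs` field of a dataset card, the sketch below parses a hand-built `DatasetCardData` object. It is a minimal example assuming `datasets` and `huggingface_hub` are installed; the config names and glob patterns are invented.

```python
from huggingface_hub import DatasetCardData

from datasets.utils.metadata import MetadataConfigs

# Equivalent of a YAML front matter `configs:` block with per-config data_files.
card_data = DatasetCardData(
    configs=[
        {"config_name": "en", "data_files": [{"split": "train", "path": "en/train/*"}]},
        {"config_name": "fr", "data_files": [{"split": "train", "path": "fr/train/*"}], "default": True},
    ]
)

metadata_configs = MetadataConfigs.from_dataset_card_data(card_data)
print(sorted(metadata_configs))                     # ['en', 'fr']
print(metadata_configs.get_default_config_name())   # 'fr' (flagged as default above)
```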
---
TODO: "Add YAML tags here. Delete these instructions and copy-paste the YAML tags obtained with the online tagging app: https://huggingface.co/spaces/huggingface/datasets-tagging"
---

# Dataset Card for [Dataset Name]

## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:**
- **Repository:**
- **Paper:**
- **Leaderboard:**
- **Point of Contact:**

### Dataset Summary

[More Information Needed]

### Supported Tasks and Leaderboards

[More Information Needed]

### Languages

[More Information Needed]

## Dataset Structure

### Data Instances

[More Information Needed]

### Data Fields

[More Information Needed]

### Data Splits

[More Information Needed]

## Dataset Creation

### Curation Rationale

[More Information Needed]

### Source Data

#### Initial Data Collection and Normalization

[More Information Needed]

#### Who are the source language producers?

[More Information Needed]

### Annotations

#### Annotation process

[More Information Needed]

#### Who are the annotators?

[More Information Needed]

### Personal and Sensitive Information

[More Information Needed]

## Considerations for Using the Data

### Social Impact of Dataset

[More Information Needed]

### Discussion of Biases

[More Information Needed]

### Other Known Limitations

[More Information Needed]

## Additional Information

### Dataset Curators

[More Information Needed]

### Licensing Information

[More Information Needed]

### Citation Information

[More Information Needed]

### Contributions

Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
datasets/templates/README.md/0
{ "file_path": "datasets/templates/README.md", "repo_id": "datasets", "token_count": 819 }
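For completeness, a card that follows this template can also be read back programmatically. This is a small sketch assuming `huggingface_hub` is installed; the repo id is a placeholder, not a real repository.

```python
from huggingface_hub import DatasetCard

# Placeholder repo id; replace with an existing dataset repository.
card = DatasetCard.load("username/my_dataset")

print(card.data.to_yaml())  # the YAML tags block from the front matter
print(card.text[:200])      # the markdown body ("# Dataset Card for ...")
```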
from unittest.mock import patch

import pyspark
import pytest

from datasets.builder import InvalidConfigName
from datasets.data_files import DataFilesList
from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkConfig,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


def test_config_raises_when_invalid_name() -> None:
    with pytest.raises(InvalidConfigName, match="Bad characters"):
        _ = SparkConfig(name="name-with-*-invalid-character")


@pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])])
def test_config_raises_when_invalid_data_files(data_files) -> None:
    with pytest.raises(ValueError, match="Expected a DataFilesDict"):
        _ = SparkConfig(name="name", data_files=data_files)


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    iterator = _generate_iterable_examples(df, partition_order)
    # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(iterator):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.num_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.num_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(index=0, num_shards=2, contiguous=False)
    assert shard_it_1.num_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(index=1, num_shards=2, contiguous=False)
    assert shard_it_2.num_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
datasets/tests/packaged_modules/test_spark.py/0
{ "file_path": "datasets/tests/packaged_modules/test_spark.py", "repo_id": "datasets", "token_count": 2267 }
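These tests exercise the internals of the Spark packaged module (`Spark` builder, `SparkExamplesIterable`); the public entry point behind them is `Dataset.from_spark`. A minimal sketch, assuming a local PySpark session and a version of `datasets` recent enough to ship the Spark integration:

```python
import pyspark

from datasets import Dataset

spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.range(100)  # a single-column DataFrame with ids 0..99

# Materialize the Spark DataFrame as a Hugging Face Dataset.
ds = Dataset.from_spark(df)
print(ds)     # Dataset({features: ['id'], num_rows: 100})
print(ds[0])  # {'id': 0}
```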
import os import re from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from fsspec.registry import _registry as _fsspec_registry from fsspec.spec import AbstractBufferedFile, AbstractFileSystem from huggingface_hub.errors import OfflineModeIsEnabled from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( _get_extraction_protocol, _prepare_single_hop_path_and_storage_options, cached_path, fsspec_get, fsspec_head, get_from_cache, xdirname, xexists, xgetsize, xglob, xisdir, xisfile, xjoin, xlistdir, xnumpy_load, xopen, xPath, xrelpath, xsplit, xsplitext, xwalk, ) from datasets.utils.hub import hf_dataset_url from .utils import slow FILE_CONTENT = """\ Text data. Second line of data.""" FILE_PATH = "file" TEST_URL = "https://huggingface.co/datasets/hf-internal-testing/dataset_with_script/resolve/main/some_text.txt" TEST_URL_CONTENT = "foo\nbar\nfoobar" TEST_GG_DRIVE_FILENAME = "train.tsv" TEST_GG_DRIVE_URL = "https://drive.google.com/uc?export=download&id=17bOgBDc3hRCoPZ89EYtKDzK-yXAWat94" TEST_GG_DRIVE_GZIPPED_URL = "https://drive.google.com/uc?export=download&id=1Bt4Garpf0QLiwkJhHJzXaVa0I0H5Qhwz" TEST_GG_DRIVE_ZIPPED_URL = "https://drive.google.com/uc?export=download&id=1k92sUfpHxKq8PXWRr7Y5aNHXwOCNUmqh" TEST_GG_DRIVE_CONTENT = """\ pokemon_name, type Charmander, fire Squirtle, water Bulbasaur, grass""" @pytest.fixture(scope="session") def zstd_path(tmp_path_factory): path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd") data = bytes(FILE_CONTENT, "utf-8") with zstd.open(path, "wb") as f: f.write(data) return path @pytest.fixture def tmpfs_file(tmpfs): with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f: f.write(FILE_CONTENT) return FILE_PATH @pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"]) def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file): input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} input_path = input_paths[compression_format] cache_dir = tmp_path / "cache" download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True) extracted_path = cached_path(input_path, download_config=download_config) with open(extracted_path) as f: extracted_file_content = f.read() with open(text_file) as f: expected_file_content = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted", [True, False]) @pytest.mark.parametrize("default_cache_dir", [True, False]) def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch): custom_cache_dir = "custom_cache" custom_extracted_dir = "custom_extracted_dir" custom_extracted_path = tmp_path / "custom_extracted_path" if default_extracted: expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path)) expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) filename = xz_file download_config = ( DownloadConfig(extract_compressed_file=True) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True) ) extracted_file_path = cached_path(filename, download_config=download_config) assert Path(extracted_file_path).parent.parts[-2:] == expected def 
test_cached_path_local(text_file): # input absolute path -> output absolute path text_file_abs = str(Path(text_file).resolve()) assert os.path.samefile(cached_path(text_file_abs), text_file_abs) # input relative path -> output absolute path text_file = __file__ text_file_abs = str(Path(text_file).resolve()) text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd()))) assert os.path.samefile(cached_path(text_file_rel), text_file_abs) def test_cached_path_missing_local(tmp_path): # absolute path missing_file = str(tmp_path.resolve() / "__missing_file__.txt") with pytest.raises(FileNotFoundError): cached_path(missing_file) # relative path missing_file = "./__missing_file__.txt" with pytest.raises(FileNotFoundError): cached_path(missing_file) def test_get_from_cache_fsspec(tmpfs_file): output_path = get_from_cache(f"tmp://{tmpfs_file}") with open(output_path) as f: output_file_content = f.read() assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_HUB_OFFLINE", True) def test_cached_path_offline(): with pytest.raises(OfflineModeIsEnabled): cached_path("https://huggingface.co") @patch("datasets.config.HF_HUB_OFFLINE", True) def test_fsspec_offline(tmp_path_factory): filename = tmp_path_factory.mktemp("data") / "file.html" with pytest.raises(OfflineModeIsEnabled): fsspec_get("s3://huggingface.co", temp_file=filename) with pytest.raises(OfflineModeIsEnabled): fsspec_head("s3://huggingface.co") @pytest.mark.parametrize( "urlpath, download_config, expected_urlpath, expected_storage_options", [ ( "https://huggingface.co/datasets/hf-internal-testing/dataset_with_script/resolve/main/some_text.txt", DownloadConfig(), "hf://datasets/hf-internal-testing/dataset_with_script@main/some_text.txt", {"hf": {"endpoint": "https://huggingface.co", "token": None}}, ), ( "https://huggingface.co/datasets/hf-internal-testing/dataset_with_script/resolve/main/some_text.txt", DownloadConfig(token="MY-TOKEN"), "hf://datasets/hf-internal-testing/dataset_with_script@main/some_text.txt", {"hf": {"endpoint": "https://huggingface.co", "token": "MY-TOKEN"}}, ), ( "https://huggingface.co/datasets/hf-internal-testing/dataset_with_script/resolve/main/some_text.txt", DownloadConfig(token="MY-TOKEN", storage_options={"hf": {"on_error": "omit"}}), "hf://datasets/hf-internal-testing/dataset_with_script@main/some_text.txt", {"hf": {"endpoint": "https://huggingface.co", "token": "MY-TOKEN", "on_error": "omit"}}, ), ( "https://domain.org/data.txt", DownloadConfig(), "https://domain.org/data.txt", {"https": {"client_kwargs": {"trust_env": True}}}, ), ( "https://domain.org/data.txt", DownloadConfig(storage_options={"https": {"block_size": "omit"}}), "https://domain.org/data.txt", {"https": {"client_kwargs": {"trust_env": True}, "block_size": "omit"}}, ), ( "https://domain.org/data.txt", DownloadConfig(storage_options={"https": {"client_kwargs": {"raise_for_status": True}}}), "https://domain.org/data.txt", {"https": {"client_kwargs": {"trust_env": True, "raise_for_status": True}}}, ), ( "https://domain.org/data.txt", DownloadConfig(storage_options={"https": {"client_kwargs": {"trust_env": False}}}), "https://domain.org/data.txt", {"https": {"client_kwargs": {"trust_env": False}}}, ), ( "https://raw.githubusercontent.com/data.txt", DownloadConfig(storage_options={"https": {"headers": {"x-test": "true"}}}), "https://raw.githubusercontent.com/data.txt", { "https": { "client_kwargs": {"trust_env": True}, "headers": {"x-test": "true", "Accept-Encoding": "identity"}, } }, ), ], ) def 
test_prepare_single_hop_path_and_storage_options( urlpath, download_config, expected_urlpath, expected_storage_options ): original_download_config_storage_options = str(download_config.storage_options) prepared_urlpath, storage_options = _prepare_single_hop_path_and_storage_options(urlpath, download_config) assert prepared_urlpath == expected_urlpath assert storage_options == expected_storage_options # Check that DownloadConfig.storage_options are not modified: assert str(download_config.storage_options) == original_download_config_storage_options class DummyTestFS(AbstractFileSystem): protocol = "mock" _file_class = AbstractBufferedFile _fs_contents = ( {"name": "top_level", "type": "directory"}, {"name": "top_level/second_level", "type": "directory"}, {"name": "top_level/second_level/date=2019-10-01", "type": "directory"}, { "name": "top_level/second_level/date=2019-10-01/a.parquet", "type": "file", "size": 100, }, { "name": "top_level/second_level/date=2019-10-01/b.parquet", "type": "file", "size": 100, }, {"name": "top_level/second_level/date=2019-10-02", "type": "directory"}, { "name": "top_level/second_level/date=2019-10-02/a.parquet", "type": "file", "size": 100, }, {"name": "top_level/second_level/date=2019-10-04", "type": "directory"}, { "name": "top_level/second_level/date=2019-10-04/a.parquet", "type": "file", "size": 100, }, {"name": "misc", "type": "directory"}, {"name": "misc/foo.txt", "type": "file", "size": 100}, {"name": "glob_test", "type": "directory", "size": 0}, {"name": "glob_test/hat", "type": "directory", "size": 0}, {"name": "glob_test/hat/^foo.txt", "type": "file", "size": 100}, {"name": "glob_test/dollar", "type": "directory", "size": 0}, {"name": "glob_test/dollar/$foo.txt", "type": "file", "size": 100}, {"name": "glob_test/lbrace", "type": "directory", "size": 0}, {"name": "glob_test/lbrace/{foo.txt", "type": "file", "size": 100}, {"name": "glob_test/rbrace", "type": "directory", "size": 0}, {"name": "glob_test/rbrace/}foo.txt", "type": "file", "size": 100}, ) def __getitem__(self, name): for item in self._fs_contents: if item["name"] == name: return item raise IndexError(f"{name} not found!") def ls(self, path, detail=True, refresh=True, **kwargs): if kwargs.pop("strip_proto", True): path = self._strip_protocol(path) files = not refresh and self._ls_from_cache(path) if not files: files = [file for file in self._fs_contents if path == self._parent(file["name"])] files.sort(key=lambda file: file["name"]) self.dircache[path.rstrip("/")] = files if detail: return files return [file["name"] for file in files] def _open( self, path, mode="rb", block_size=None, autocommit=True, cache_options=None, **kwargs, ): return self._file_class( self, path, mode, block_size, autocommit, cache_options=cache_options, **kwargs, ) @pytest.fixture def mock_fsspec2(): # to avoid the name collision with `mock_fsspec` from fixtures/fsspec.py _fsspec_registry["mock"] = DummyTestFS yield del _fsspec_registry["mock"] def _readd_double_slash_removed_by_path(path_as_posix: str) -> str: """Path(...) 
on an url path like zip://file.txt::http://host.com/data.zip converts the :// to :/ This function readds the :// It handles cases like: - https://host.com/data.zip - C://data.zip - zip://file.txt::https://host.com/data.zip - zip://file.txt::/Users/username/data.zip - zip://file.txt::C://data.zip Args: path_as_posix (str): output of Path(...).as_posix() Returns: str: the url path with :// instead of :/ """ return re.sub("([A-z]:/)([A-z:])", r"\g<1>/\g<2>", path_as_posix) @pytest.mark.parametrize( "input_path, paths_to_join, expected_path", [ ( "https://host.com/archive.zip", ("file.txt",), "https://host.com/archive.zip/file.txt", ), ( "zip://::https://host.com/archive.zip", ("file.txt",), "zip://file.txt::https://host.com/archive.zip", ), ( "zip://folder::https://host.com/archive.zip", ("file.txt",), "zip://folder/file.txt::https://host.com/archive.zip", ), ( ".", ("file.txt",), os.path.join(".", "file.txt"), ), ( str(Path().resolve()), ("file.txt",), str((Path().resolve() / "file.txt")), ), ], ) def test_xjoin(input_path, paths_to_join, expected_path): output_path = xjoin(input_path, *paths_to_join) assert output_path == expected_path output_path = xPath(input_path).joinpath(*paths_to_join) assert output_path == xPath(expected_path) @pytest.mark.parametrize( "input_path, expected_path", [ (str(Path(__file__).resolve()), str(Path(__file__).resolve().parent)), ("https://host.com/archive.zip", "https://host.com"), ( "zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip", ), ( "zip://folder/file.txt::https://host.com/archive.zip", "zip://folder::https://host.com/archive.zip", ), ], ) def test_xdirname(input_path, expected_path): output_path = xdirname(input_path) output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix()) assert output_path == _readd_double_slash_removed_by_path(Path(expected_path).as_posix()) @pytest.mark.parametrize( "input_path, exists", [ ("tmp_path/file.txt", True), ("tmp_path/file_that_doesnt_exist.txt", False), ("mock://top_level/second_level/date=2019-10-01/a.parquet", True), ("mock://top_level/second_level/date=2019-10-01/file_that_doesnt_exist.parquet", False), ], ) def test_xexists(input_path, exists, tmp_path, mock_fsspec2): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() assert xexists(input_path) is exists @pytest.mark.integration def test_xexists_private(hf_private_dataset_repo_txt_data, hf_token): root_url = hf_dataset_url(hf_private_dataset_repo_txt_data, "") download_config = DownloadConfig(token=hf_token) assert xexists(root_url + "data/text_data.txt", download_config=download_config) assert not xexists(root_url + "file_that_doesnt_exist.txt", download_config=download_config) @pytest.mark.parametrize( "input_path, expected_head_and_tail", [ ( str(Path(__file__).resolve()), (str(Path(__file__).resolve().parent), str(Path(__file__).resolve().name)), ), ("https://host.com/archive.zip", ("https://host.com", "archive.zip")), ("zip://file.txt::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "file.txt")), ("zip://folder::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "folder")), ("zip://::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "")), ], ) def test_xsplit(input_path, expected_head_and_tail): output_path, tail = xsplit(input_path) expected_path, expected_tail = expected_head_and_tail output_path = 
_readd_double_slash_removed_by_path(Path(output_path).as_posix()) expected_path = _readd_double_slash_removed_by_path(Path(expected_path).as_posix()) assert output_path == expected_path assert tail == expected_tail @pytest.mark.parametrize( "input_path, expected_path_and_ext", [ ( str(Path(__file__).resolve()), (str(Path(__file__).resolve().with_suffix("")), str(Path(__file__).resolve().suffix)), ), ("https://host.com/archive.zip", ("https://host.com/archive", ".zip")), ("zip://file.txt::https://host.com/archive.zip", ("zip://file::https://host.com/archive.zip", ".txt")), ("zip://folder::https://host.com/archive.zip", ("zip://folder::https://host.com/archive.zip", "")), ("zip://::https://host.com/archive.zip", ("zip://::https://host.com/archive.zip", "")), ], ) def test_xsplitext(input_path, expected_path_and_ext): output_path, ext = xsplitext(input_path) expected_path, expected_ext = expected_path_and_ext output_path = _readd_double_slash_removed_by_path(Path(output_path).as_posix()) expected_path = _readd_double_slash_removed_by_path(Path(expected_path).as_posix()) assert output_path == expected_path assert ext == expected_ext def test_xopen_local(text_path): with xopen(text_path, "r", encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file: assert list(f) == list(expected_file) with xPath(text_path).open("r", encoding="utf-8") as f, open(text_path, encoding="utf-8") as expected_file: assert list(f) == list(expected_file) @pytest.mark.integration def test_xopen_remote(): with xopen(TEST_URL, "r", encoding="utf-8") as f: assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True) with xPath(TEST_URL).open("r", encoding="utf-8") as f: assert list(f) == TEST_URL_CONTENT.splitlines(keepends=True) @pytest.mark.parametrize( "input_path, expected_paths", [ ("tmp_path", ["file1.txt", "file2.txt"]), ("mock://", ["glob_test", "misc", "top_level"]), ("mock://top_level", ["second_level"]), ("mock://top_level/second_level/date=2019-10-01", ["a.parquet", "b.parquet"]), ], ) def test_xlistdir(input_path, expected_paths, tmp_path, mock_fsspec2): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) for file in ["file1.txt", "file2.txt"]: (tmp_path / file).touch() output_paths = sorted(xlistdir(input_path)) assert output_paths == expected_paths @pytest.mark.integration def test_xlistdir_private(hf_private_dataset_repo_zipped_txt_data, hf_token): root_url = hf_dataset_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") download_config = DownloadConfig(token=hf_token) assert len(xlistdir("zip://::" + root_url, download_config=download_config)) == 1 assert len(xlistdir("zip://main_dir::" + root_url, download_config=download_config)) == 2 with pytest.raises(FileNotFoundError): xlistdir("zip://qwertyuiop::" + root_url, download_config=download_config) with pytest.raises(FileNotFoundError): xlistdir(root_url, download_config=download_config) @pytest.mark.parametrize( "input_path, isdir", [ ("tmp_path", True), ("tmp_path/file.txt", False), ("mock://", True), ("mock://top_level", True), ("mock://dir_that_doesnt_exist", False), ], ) def test_xisdir(input_path, isdir, tmp_path, mock_fsspec2): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() assert xisdir(input_path) == isdir @pytest.mark.integration def test_xisdir_private(hf_private_dataset_repo_zipped_txt_data, hf_token): root_url = 
hf_dataset_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") download_config = DownloadConfig(token=hf_token) assert xisdir("zip://::" + root_url, download_config=download_config) is True assert xisdir("zip://main_dir::" + root_url, download_config=download_config) is True assert xisdir("zip://qwertyuiop::" + root_url, download_config=download_config) is False assert xisdir(root_url, download_config=download_config) is False @pytest.mark.parametrize( "input_path, isfile", [ ("tmp_path/file.txt", True), ("tmp_path/file_that_doesnt_exist.txt", False), ("mock://", False), ("mock://top_level/second_level/date=2019-10-01/a.parquet", True), ], ) def test_xisfile(input_path, isfile, tmp_path, mock_fsspec2): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() assert xisfile(input_path) == isfile @pytest.mark.integration def test_xisfile_private(hf_private_dataset_repo_txt_data, hf_token): root_url = hf_dataset_url(hf_private_dataset_repo_txt_data, "") download_config = DownloadConfig(token=hf_token) assert xisfile(root_url + "data/text_data.txt", download_config=download_config) is True assert xisfile(root_url + "qwertyuiop", download_config=download_config) is False @pytest.mark.parametrize( "input_path, size", [ ("tmp_path/file.txt", 100), ("mock://", 0), ("mock://top_level/second_level/date=2019-10-01/a.parquet", 100), ], ) def test_xgetsize(input_path, size, tmp_path, mock_fsspec2): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() (tmp_path / "file.txt").write_bytes(b"x" * 100) assert xgetsize(input_path) == size @pytest.mark.integration def test_xgetsize_private(hf_private_dataset_repo_txt_data, hf_token): root_url = hf_dataset_url(hf_private_dataset_repo_txt_data, "") download_config = DownloadConfig(token=hf_token) assert xgetsize(root_url + "data/text_data.txt", download_config=download_config) == 39 with pytest.raises(FileNotFoundError): xgetsize(root_url + "qwertyuiop", download_config=download_config) @pytest.mark.parametrize( "input_path, expected_paths", [ ("tmp_path/*.txt", ["file1.txt", "file2.txt"]), ("mock://*", ["mock://glob_test", "mock://misc", "mock://top_level"]), ("mock://top_*", ["mock://top_level"]), ( "mock://top_level/second_level/date=2019-10-0[1-4]", [ "mock://top_level/second_level/date=2019-10-01", "mock://top_level/second_level/date=2019-10-02", "mock://top_level/second_level/date=2019-10-04", ], ), ( "mock://top_level/second_level/date=2019-10-0[1-4]/*", [ "mock://top_level/second_level/date=2019-10-01/a.parquet", "mock://top_level/second_level/date=2019-10-01/b.parquet", "mock://top_level/second_level/date=2019-10-02/a.parquet", "mock://top_level/second_level/date=2019-10-04/a.parquet", ], ), ], ) def test_xglob(input_path, expected_paths, tmp_path, mock_fsspec2): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) expected_paths = [str(tmp_path / file) for file in expected_paths] for file in ["file1.txt", "file2.txt", "README.md"]: (tmp_path / file).touch() output_paths = sorted(xglob(input_path)) assert output_paths == expected_paths @pytest.mark.integration def test_xglob_private(hf_private_dataset_repo_zipped_txt_data, hf_token): root_url = hf_dataset_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") download_config = DownloadConfig(token=hf_token) assert len(xglob("zip://**::" + 
root_url, download_config=download_config)) == 3 assert len(xglob("zip://qwertyuiop/*::" + root_url, download_config=download_config)) == 0 @pytest.mark.parametrize( "input_path, expected_outputs", [ ("tmp_path", [("", [], ["file1.txt", "file2.txt", "README.md"])]), ( "mock://top_level/second_level", [ ("mock://top_level/second_level", ["date=2019-10-01", "date=2019-10-02", "date=2019-10-04"], []), ("mock://top_level/second_level/date=2019-10-01", [], ["a.parquet", "b.parquet"]), ("mock://top_level/second_level/date=2019-10-02", [], ["a.parquet"]), ("mock://top_level/second_level/date=2019-10-04", [], ["a.parquet"]), ], ), ], ) def test_xwalk(input_path, expected_outputs, tmp_path, mock_fsspec2): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) expected_outputs = sorted( [ (str(tmp_path / dirpath).rstrip("/"), sorted(dirnames), sorted(filenames)) for dirpath, dirnames, filenames in expected_outputs ] ) for file in ["file1.txt", "file2.txt", "README.md"]: (tmp_path / file).touch() outputs = sorted(xwalk(input_path)) outputs = [(dirpath, sorted(dirnames), sorted(filenames)) for dirpath, dirnames, filenames in outputs] assert outputs == expected_outputs @pytest.mark.integration def test_xwalk_private(hf_private_dataset_repo_zipped_txt_data, hf_token): root_url = hf_dataset_url(hf_private_dataset_repo_zipped_txt_data, "data.zip") download_config = DownloadConfig(token=hf_token) assert len(list(xwalk("zip://::" + root_url, download_config=download_config))) == 2 assert len(list(xwalk("zip://main_dir::" + root_url, download_config=download_config))) == 1 assert len(list(xwalk("zip://qwertyuiop::" + root_url, download_config=download_config))) == 0 @pytest.mark.parametrize( "input_path, start_path, expected_path", [ ("dir1/dir2/file.txt".replace("/", os.path.sep), "dir1", "dir2/file.txt".replace("/", os.path.sep)), ("dir1/dir2/file.txt".replace("/", os.path.sep), "dir1/dir2".replace("/", os.path.sep), "file.txt"), ("zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip", "file.txt"), ( "zip://folder/file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip", "folder/file.txt", ), ( "zip://folder/file.txt::https://host.com/archive.zip", "zip://folder::https://host.com/archive.zip", "file.txt", ), ], ) def test_xrelpath(input_path, start_path, expected_path): output_path = xrelpath(input_path, start=start_path) assert output_path == expected_path class TestxPath: @pytest.mark.parametrize( "input_path", [ "https://host.com/archive.zip", "zip://file.txt::https://host.com/archive.zip", "zip://dir/file.txt::https://host.com/archive.zip", "file.txt", str(Path().resolve() / "file.txt"), ], ) def test_xpath_str(self, input_path): assert str(xPath(input_path)) == input_path @pytest.mark.parametrize( "input_path, expected_path", [ ("https://host.com/archive.zip", "https://host.com/archive.zip"), ("zip://file.txt::https://host.com/archive.zip", "zip://file.txt::https://host.com/archive.zip"), ("zip://dir/file.txt::https://host.com/archive.zip", "zip://dir/file.txt::https://host.com/archive.zip"), ("file.txt", "file.txt"), (str(Path().resolve() / "file.txt"), (Path().resolve() / "file.txt").as_posix()), ], ) def test_xpath_as_posix(self, input_path, expected_path): assert xPath(input_path).as_posix() == expected_path @pytest.mark.parametrize( "input_path, exists", [ ("tmp_path/file.txt", True), ("tmp_path/file_that_doesnt_exist.txt", False), 
("mock://top_level/second_level/date=2019-10-01/a.parquet", True), ("mock://top_level/second_level/date=2019-10-01/file_that_doesnt_exist.parquet", False), ], ) def test_xpath_exists(self, input_path, exists, tmp_path, mock_fsspec2): if input_path.startswith("tmp_path"): input_path = input_path.replace("/", os.sep).replace("tmp_path", str(tmp_path)) (tmp_path / "file.txt").touch() assert xexists(input_path) is exists @pytest.mark.parametrize( "input_path, pattern, expected_paths", [ ("tmp_path", "*.txt", ["file1.txt", "file2.txt"]), ("mock://", "*", ["mock://glob_test", "mock://misc", "mock://top_level"]), ("mock://", "top_*", ["mock://top_level"]), ( "mock://top_level/second_level", "date=2019-10-0[1-4]", [ "mock://top_level/second_level/date=2019-10-01", "mock://top_level/second_level/date=2019-10-02", "mock://top_level/second_level/date=2019-10-04", ], ), ( "mock://top_level/second_level", "date=2019-10-0[1-4]/*", [ "mock://top_level/second_level/date=2019-10-01/a.parquet", "mock://top_level/second_level/date=2019-10-01/b.parquet", "mock://top_level/second_level/date=2019-10-02/a.parquet", "mock://top_level/second_level/date=2019-10-04/a.parquet", ], ), ], ) def test_xpath_glob(self, input_path, pattern, expected_paths, tmp_path, mock_fsspec2): if input_path == "tmp_path": input_path = tmp_path expected_paths = [tmp_path / file for file in expected_paths] for file in ["file1.txt", "file2.txt", "README.md"]: (tmp_path / file).touch() else: expected_paths = [Path(file) for file in expected_paths] output_paths = sorted(xPath(input_path).glob(pattern)) assert output_paths == expected_paths @pytest.mark.parametrize( "input_path, pattern, expected_paths", [ ("tmp_path", "*.txt", ["file1.txt", "file2.txt"]), ( "mock://", "date=2019-10-0[1-4]", [ "mock://top_level/second_level/date=2019-10-01", "mock://top_level/second_level/date=2019-10-02", "mock://top_level/second_level/date=2019-10-04", ], ), ( "mock://top_level", "date=2019-10-0[1-4]", [ "mock://top_level/second_level/date=2019-10-01", "mock://top_level/second_level/date=2019-10-02", "mock://top_level/second_level/date=2019-10-04", ], ), ( "mock://", "date=2019-10-0[1-4]/*", [ "mock://top_level/second_level/date=2019-10-01/a.parquet", "mock://top_level/second_level/date=2019-10-01/b.parquet", "mock://top_level/second_level/date=2019-10-02/a.parquet", "mock://top_level/second_level/date=2019-10-04/a.parquet", ], ), ( "mock://top_level", "date=2019-10-0[1-4]/*", [ "mock://top_level/second_level/date=2019-10-01/a.parquet", "mock://top_level/second_level/date=2019-10-01/b.parquet", "mock://top_level/second_level/date=2019-10-02/a.parquet", "mock://top_level/second_level/date=2019-10-04/a.parquet", ], ), ], ) def test_xpath_rglob(self, input_path, pattern, expected_paths, tmp_path, mock_fsspec2): if input_path == "tmp_path": input_path = tmp_path dir_path = tmp_path / "dir" dir_path.mkdir() expected_paths = [dir_path / file for file in expected_paths] for file in ["file1.txt", "file2.txt", "README.md"]: (dir_path / file).touch() else: expected_paths = [Path(file) for file in expected_paths] output_paths = sorted(xPath(input_path).rglob(pattern)) assert output_paths == expected_paths @pytest.mark.parametrize( "input_path, expected_path", [ ("https://host.com/archive.zip", "https://host.com"), ("zip://file.txt::https://host.com/archive.zip", "zip://::https://host.com/archive.zip"), ("zip://dir/file.txt::https://host.com/archive.zip", "zip://dir::https://host.com/archive.zip"), ("file.txt", ""), (str(Path().resolve() / "file.txt"), 
str(Path().resolve())), ], ) def test_xpath_parent(self, input_path, expected_path): assert xPath(input_path).parent == xPath(expected_path) @pytest.mark.parametrize( "input_path, expected", [ ("https://host.com/archive.zip", "archive.zip"), ("zip://file.txt::https://host.com/archive.zip", "file.txt"), ("zip://dir/file.txt::https://host.com/archive.zip", "file.txt"), ("file.txt", "file.txt"), (str(Path().resolve() / "file.txt"), "file.txt"), ], ) def test_xpath_name(self, input_path, expected): assert xPath(input_path).name == expected @pytest.mark.parametrize( "input_path, expected", [ ("https://host.com/archive.zip", "archive"), ("zip://file.txt::https://host.com/archive.zip", "file"), ("zip://dir/file.txt::https://host.com/archive.zip", "file"), ("file.txt", "file"), (str(Path().resolve() / "file.txt"), "file"), ], ) def test_xpath_stem(self, input_path, expected): assert xPath(input_path).stem == expected @pytest.mark.parametrize( "input_path, expected", [ ("https://host.com/archive.zip", ".zip"), ("zip://file.txt::https://host.com/archive.zip", ".txt"), ("zip://dir/file.txt::https://host.com/archive.zip", ".txt"), ("file.txt", ".txt"), (str(Path().resolve() / "file.txt"), ".txt"), ], ) def test_xpath_suffix(self, input_path, expected): assert xPath(input_path).suffix == expected @pytest.mark.parametrize( "input_path, suffix, expected", [ ("https://host.com/archive.zip", ".ann", "https://host.com/archive.ann"), ("zip://file.txt::https://host.com/archive.zip", ".ann", "zip://file.ann::https://host.com/archive.zip"), ( "zip://dir/file.txt::https://host.com/archive.zip", ".ann", "zip://dir/file.ann::https://host.com/archive.zip", ), ("file.txt", ".ann", "file.ann"), (str(Path().resolve() / "file.txt"), ".ann", str(Path().resolve() / "file.ann")), ], ) def test_xpath_with_suffix(self, input_path, suffix, expected): assert xPath(input_path).with_suffix(suffix) == xPath(expected) @pytest.mark.parametrize( "urlpath, expected_protocol", [ ("zip://train-00000.json.gz::https://foo.bar/data.zip", "gzip"), ("https://foo.bar/train.json.gz?dl=1", "gzip"), ("http://opus.nlpl.eu/download.php?f=Bianet/v1/moses/en-ku.txt.zip", "zip"), ("https://github.com/user/what-time-is-it/blob/master/gutenberg_time_phrases.zip?raw=true", "zip"), ("https://github.com/user/repo/blob/master/data/morph_train.tsv?raw=true", None), ("https://repo.org/bitstream/handle/20.500.12185/346/annotated_corpus.zip?sequence=3&isAllowed=y", "zip"), ("https://zenodo.org/record/2787612/files/SICK.zip?download=1", "zip"), ], ) def test_get_extraction_protocol(urlpath, expected_protocol): assert _get_extraction_protocol(urlpath) == expected_protocol @pytest.mark.parametrize( "urlpath, expected_protocol", [ (TEST_GG_DRIVE_GZIPPED_URL, "gzip"), (TEST_GG_DRIVE_ZIPPED_URL, "zip"), ], ) @slow # otherwise it spams Google Drive and the CI gets banned def test_get_extraction_protocol_gg_drive(urlpath, expected_protocol): assert _get_extraction_protocol(urlpath) == expected_protocol @slow # otherwise it spams Google Drive and the CI gets banned @pytest.mark.integration def test_streaming_gg_drive(): with xopen(TEST_GG_DRIVE_URL) as f: assert f.read() == TEST_GG_DRIVE_CONTENT def test_xnumpy_load(tmp_path): import numpy as np expected_x = np.arange(10) npy_path = tmp_path / "data-x.npy" np.save(npy_path, expected_x) x = xnumpy_load(npy_path) assert np.array_equal(x, expected_x) npz_path = tmp_path / "data.npz" np.savez(npz_path, x=expected_x) with xnumpy_load(npz_path) as f: x = f["x"] assert np.array_equal(x, expected_x)
datasets/tests/test_file_utils.py/0
{ "file_path": "datasets/tests/test_file_utils.py", "repo_id": "datasets", "token_count": 17517 }
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss pytestmark = pytest.mark.integration @require_faiss class IndexableDatasetTest(TestCase): def _create_dummy_dataset(self): dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]}) return dset def test_add_faiss_index(self): import faiss dset: Dataset = self._create_dummy_dataset() dset = dset.map( lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True ) dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT) scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32)) self.assertEqual(examples["filename"][0], "my_name-train_29") dset.drop_index("vecs") def test_add_faiss_index_errors(self): import faiss dset: Dataset = self._create_dummy_dataset() with pytest.raises(ValueError, match="Wrong feature type for column 'filename'"): _ = dset.add_faiss_index("filename", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT) def test_add_faiss_index_from_external_arrays(self): import faiss dset: Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT, ) scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32)) self.assertEqual(examples["filename"][0], "my_name-train_29") def test_serialization(self): import faiss dset: Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", metric_type=faiss.METRIC_INNER_PRODUCT, ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=False) as tmp_file: dset.save_faiss_index("vecs", tmp_file.name) dset.load_faiss_index("vecs2", tmp_file.name) os.unlink(tmp_file.name) scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32)) self.assertEqual(examples["filename"][0], "my_name-train_29") def test_drop_index(self): dset: Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs" ) dset.drop_index("vecs") self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32))) def test_add_elasticsearch_index(self): from elasticsearch import Elasticsearch dset: Dataset = self._create_dummy_dataset() with ( patch("elasticsearch.Elasticsearch.search") as mocked_search, patch("elasticsearch.client.IndicesClient.create") as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk, ): mocked_index_create.return_value = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30) mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} es_client = Elasticsearch() dset.add_elasticsearch_index("filename", es_client=es_client) scores, examples = dset.get_nearest_examples("filename", "my_name-train_29") self.assertEqual(examples["filename"][0], "my_name-train_29") @require_faiss class FaissIndexTest(TestCase): def test_flat_ip(self): import faiss index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) # add vectors index.add_vectors(np.eye(5, dtype=np.float32)) self.assertIsNotNone(index.faiss_index) self.assertEqual(index.faiss_index.ntotal, 5) index.add_vectors(np.zeros((5, 5), dtype=np.float32)) self.assertEqual(index.faiss_index.ntotal, 10) # single query query = np.zeros(5, dtype=np.float32) query[1] = 1 scores, indices = index.search(query) self.assertRaises(ValueError, index.search, query.reshape(-1, 1)) self.assertGreater(scores[0], 0) self.assertEqual(indices[0], 1) # batched queries queries = np.eye(5, dtype=np.float32)[::-1] total_scores, total_indices = index.search_batch(queries) self.assertRaises(ValueError, index.search_batch, queries[0]) best_scores = [scores[0] for scores in total_scores] best_indices = [indices[0] for indices in total_indices] self.assertGreater(np.min(best_scores), 0) self.assertListEqual([4, 3, 2, 1, 0], best_indices) def test_factory(self): import faiss index = FaissIndex(string_factory="Flat") index.add_vectors(np.eye(5, dtype=np.float32)) self.assertIsInstance(index.faiss_index, faiss.IndexFlat) index = FaissIndex(string_factory="LSH") index.add_vectors(np.eye(5, dtype=np.float32)) self.assertIsInstance(index.faiss_index, faiss.IndexLSH) with self.assertRaises(ValueError): _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5)) def test_custom(self): import faiss custom_index = faiss.IndexFlat(5) index = FaissIndex(custom_index=custom_index) index.add_vectors(np.eye(5, dtype=np.float32)) self.assertIsInstance(index.faiss_index, faiss.IndexFlat) def test_serialization(self): import faiss index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) index.add_vectors(np.eye(5, dtype=np.float32)) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. 
This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=False) as tmp_file: index.save(tmp_file.name) index = FaissIndex.load(tmp_file.name) os.unlink(tmp_file.name) query = np.zeros(5, dtype=np.float32) query[1] = 1 scores, indices = index.search(query) self.assertGreater(scores[0], 0) self.assertEqual(indices[0], 1) @require_faiss def test_serialization_fs(mockfs): import faiss index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) index.add_vectors(np.eye(5, dtype=np.float32)) index_name = "index.faiss" path = f"mock://{index_name}" index.save(path, storage_options=mockfs.storage_options) index = FaissIndex.load(path, storage_options=mockfs.storage_options) query = np.zeros(5, dtype=np.float32) query[1] = 1 scores, indices = index.search(query) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class ElasticSearchIndexTest(TestCase): def test_elasticsearch(self): from elasticsearch import Elasticsearch with ( patch("elasticsearch.Elasticsearch.search") as mocked_search, patch("elasticsearch.client.IndicesClient.create") as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk, ): es_client = Elasticsearch() mocked_index_create.return_value = {"acknowledged": True} index = ElasticSearchIndex(es_client=es_client) mocked_bulk.return_value([(True, None)] * 3) index.add_documents(["foo", "bar", "foobar"]) # single query query = "foo" mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} scores, indices = index.search(query) self.assertEqual(scores[0], 1) self.assertEqual(indices[0], 0) # single query with timeout query = "foo" mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} scores, indices = index.search(query, request_timeout=30) self.assertEqual(scores[0], 1) self.assertEqual(indices[0], 0) # batched queries queries = ["foo", "bar", "foobar"] mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} total_scores, total_indices = index.search_batch(queries) best_scores = [scores[0] for scores in total_scores] best_indices = [indices[0] for indices in total_indices] self.assertGreater(np.min(best_scores), 0) self.assertListEqual([1, 1, 1], best_indices) # batched queries with timeout queries = ["foo", "bar", "foobar"] mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} total_scores, total_indices = index.search_batch(queries, request_timeout=30) best_scores = [scores[0] for scores in total_scores] best_indices = [indices[0] for indices in total_indices] self.assertGreater(np.min(best_scores), 0) self.assertListEqual([1, 1, 1], best_indices)
datasets/tests/test_search.py/0
{ "file_path": "datasets/tests/test_search.py", "repo_id": "datasets", "token_count": 4553 }
import os import sys import torch from diffusers import ( AutoPipelineForImage2Image, AutoPipelineForInpainting, AutoPipelineForText2Image, ControlNetModel, LCMScheduler, StableDiffusionAdapterPipeline, StableDiffusionControlNetPipeline, StableDiffusionXLAdapterPipeline, StableDiffusionXLControlNetPipeline, T2IAdapter, WuerstchenCombinedPipeline, ) from diffusers.utils import load_image sys.path.append(".") from utils import ( # noqa: E402 BASE_PATH, PROMPT, BenchmarkInfo, benchmark_fn, bytes_to_giga_bytes, flush, generate_csv_dict, write_to_csv, ) RESOLUTION_MAPPING = { "Lykon/DreamShaper": (512, 512), "lllyasviel/sd-controlnet-canny": (512, 512), "diffusers/controlnet-canny-sdxl-1.0": (1024, 1024), "TencentARC/t2iadapter_canny_sd14v1": (512, 512), "TencentARC/t2i-adapter-canny-sdxl-1.0": (1024, 1024), "stabilityai/stable-diffusion-2-1": (768, 768), "stabilityai/stable-diffusion-xl-base-1.0": (1024, 1024), "stabilityai/stable-diffusion-xl-refiner-1.0": (1024, 1024), "stabilityai/sdxl-turbo": (512, 512), } class BaseBenchmak: pipeline_class = None def __init__(self, args): super().__init__() def run_inference(self, args): raise NotImplementedError def benchmark(self, args): raise NotImplementedError def get_result_filepath(self, args): pipeline_class_name = str(self.pipe.__class__.__name__) name = ( args.ckpt.replace("/", "_") + "_" + pipeline_class_name + f"-bs@{args.batch_size}-steps@{args.num_inference_steps}-mco@{args.model_cpu_offload}-compile@{args.run_compile}.csv" ) filepath = os.path.join(BASE_PATH, name) return filepath class TextToImageBenchmark(BaseBenchmak): pipeline_class = AutoPipelineForText2Image def __init__(self, args): pipe = self.pipeline_class.from_pretrained(args.ckpt, torch_dtype=torch.float16) pipe = pipe.to("cuda") if args.run_compile: if not isinstance(pipe, WuerstchenCombinedPipeline): pipe.unet.to(memory_format=torch.channels_last) print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) if hasattr(pipe, "movq") and getattr(pipe, "movq", None) is not None: pipe.movq.to(memory_format=torch.channels_last) pipe.movq = torch.compile(pipe.movq, mode="reduce-overhead", fullgraph=True) else: print("Run torch compile") pipe.decoder = torch.compile(pipe.decoder, mode="reduce-overhead", fullgraph=True) pipe.vqgan = torch.compile(pipe.vqgan, mode="reduce-overhead", fullgraph=True) pipe.set_progress_bar_config(disable=True) self.pipe = pipe def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, ) def benchmark(self, args): flush() print(f"[INFO] {self.pipe.__class__.__name__}: Running benchmark with: {vars(args)}\n") time = benchmark_fn(self.run_inference, self.pipe, args) # in seconds. memory = bytes_to_giga_bytes(torch.cuda.max_memory_allocated()) # in GBs. 
benchmark_info = BenchmarkInfo(time=time, memory=memory) pipeline_class_name = str(self.pipe.__class__.__name__) flush() csv_dict = generate_csv_dict( pipeline_cls=pipeline_class_name, ckpt=args.ckpt, args=args, benchmark_info=benchmark_info ) filepath = self.get_result_filepath(args) write_to_csv(filepath, csv_dict) print(f"Logs written to: {filepath}") flush() class TurboTextToImageBenchmark(TextToImageBenchmark): def __init__(self, args): super().__init__(args) def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, guidance_scale=0.0, ) class LCMLoRATextToImageBenchmark(TextToImageBenchmark): lora_id = "latent-consistency/lcm-lora-sdxl" def __init__(self, args): super().__init__(args) self.pipe.load_lora_weights(self.lora_id) self.pipe.fuse_lora() self.pipe.unload_lora_weights() self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config) def get_result_filepath(self, args): pipeline_class_name = str(self.pipe.__class__.__name__) name = ( self.lora_id.replace("/", "_") + "_" + pipeline_class_name + f"-bs@{args.batch_size}-steps@{args.num_inference_steps}-mco@{args.model_cpu_offload}-compile@{args.run_compile}.csv" ) filepath = os.path.join(BASE_PATH, name) return filepath def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, guidance_scale=1.0, ) def benchmark(self, args): flush() print(f"[INFO] {self.pipe.__class__.__name__}: Running benchmark with: {vars(args)}\n") time = benchmark_fn(self.run_inference, self.pipe, args) # in seconds. memory = bytes_to_giga_bytes(torch.cuda.max_memory_allocated()) # in GBs. benchmark_info = BenchmarkInfo(time=time, memory=memory) pipeline_class_name = str(self.pipe.__class__.__name__) flush() csv_dict = generate_csv_dict( pipeline_cls=pipeline_class_name, ckpt=self.lora_id, args=args, benchmark_info=benchmark_info ) filepath = self.get_result_filepath(args) write_to_csv(filepath, csv_dict) print(f"Logs written to: {filepath}") flush() class ImageToImageBenchmark(TextToImageBenchmark): pipeline_class = AutoPipelineForImage2Image url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/1665_Girl_with_a_Pearl_Earring.jpg" image = load_image(url).convert("RGB") def __init__(self, args): super().__init__(args) self.image = self.image.resize(RESOLUTION_MAPPING[args.ckpt]) def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, image=self.image, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, ) class TurboImageToImageBenchmark(ImageToImageBenchmark): def __init__(self, args): super().__init__(args) def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, image=self.image, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, guidance_scale=0.0, strength=0.5, ) class InpaintingBenchmark(ImageToImageBenchmark): pipeline_class = AutoPipelineForInpainting mask_url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/overture-creations-5sI6fQgYIuo_mask.png" mask = load_image(mask_url).convert("RGB") def __init__(self, args): super().__init__(args) self.image = self.image.resize(RESOLUTION_MAPPING[args.ckpt]) self.mask = self.mask.resize(RESOLUTION_MAPPING[args.ckpt]) def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, image=self.image, mask_image=self.mask, num_inference_steps=args.num_inference_steps, 
num_images_per_prompt=args.batch_size, ) class IPAdapterTextToImageBenchmark(TextToImageBenchmark): url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png" image = load_image(url) def __init__(self, args): pipe = self.pipeline_class.from_pretrained(args.ckpt, torch_dtype=torch.float16).to("cuda") pipe.load_ip_adapter( args.ip_adapter_id[0], subfolder="models" if "sdxl" not in args.ip_adapter_id[1] else "sdxl_models", weight_name=args.ip_adapter_id[1], ) if args.run_compile: pipe.unet.to(memory_format=torch.channels_last) print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.set_progress_bar_config(disable=True) self.pipe = pipe def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, ip_adapter_image=self.image, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, ) class ControlNetBenchmark(TextToImageBenchmark): pipeline_class = StableDiffusionControlNetPipeline aux_network_class = ControlNetModel root_ckpt = "Lykon/DreamShaper" url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/canny_image_condition.png" image = load_image(url).convert("RGB") def __init__(self, args): aux_network = self.aux_network_class.from_pretrained(args.ckpt, torch_dtype=torch.float16) pipe = self.pipeline_class.from_pretrained(self.root_ckpt, controlnet=aux_network, torch_dtype=torch.float16) pipe = pipe.to("cuda") pipe.set_progress_bar_config(disable=True) self.pipe = pipe if args.run_compile: pipe.unet.to(memory_format=torch.channels_last) pipe.controlnet.to(memory_format=torch.channels_last) print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True) self.image = self.image.resize(RESOLUTION_MAPPING[args.ckpt]) def run_inference(self, pipe, args): _ = pipe( prompt=PROMPT, image=self.image, num_inference_steps=args.num_inference_steps, num_images_per_prompt=args.batch_size, ) class ControlNetSDXLBenchmark(ControlNetBenchmark): pipeline_class = StableDiffusionXLControlNetPipeline root_ckpt = "stabilityai/stable-diffusion-xl-base-1.0" def __init__(self, args): super().__init__(args) class T2IAdapterBenchmark(ControlNetBenchmark): pipeline_class = StableDiffusionAdapterPipeline aux_network_class = T2IAdapter root_ckpt = "Lykon/DreamShaper" url = "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/canny_for_adapter.png" image = load_image(url).convert("L") def __init__(self, args): aux_network = self.aux_network_class.from_pretrained(args.ckpt, torch_dtype=torch.float16) pipe = self.pipeline_class.from_pretrained(self.root_ckpt, adapter=aux_network, torch_dtype=torch.float16) pipe = pipe.to("cuda") pipe.set_progress_bar_config(disable=True) self.pipe = pipe if args.run_compile: pipe.unet.to(memory_format=torch.channels_last) pipe.adapter.to(memory_format=torch.channels_last) print("Run torch compile") pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.adapter = torch.compile(pipe.adapter, mode="reduce-overhead", fullgraph=True) self.image = self.image.resize(RESOLUTION_MAPPING[args.ckpt]) class T2IAdapterSDXLBenchmark(T2IAdapterBenchmark): pipeline_class = StableDiffusionXLAdapterPipeline root_ckpt = "stabilityai/stable-diffusion-xl-base-1.0" url = 
"https://huggingface.co/datasets/diffusers/docs-images/resolve/main/benchmarking/canny_for_adapter_sdxl.png" image = load_image(url) def __init__(self, args): super().__init__(args)
diffusers/benchmarks/base_classes.py/0
{ "file_path": "diffusers/benchmarks/base_classes.py", "repo_id": "diffusers", "token_count": 5583 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # IP-Adapter [IP-Adapter](https://hf.co/papers/2308.06721) is a lightweight adapter that enables prompting a diffusion model with an image. This method decouples the cross-attention layers of the image and text features. The image features are generated from an image encoder. <Tip> Learn how to load an IP-Adapter checkpoint and image in the IP-Adapter [loading](../../using-diffusers/loading_adapters#ip-adapter) guide, and you can see how to use it in the [usage](../../using-diffusers/ip_adapter) guide. </Tip> ## IPAdapterMixin [[autodoc]] loaders.ip_adapter.IPAdapterMixin ## SD3IPAdapterMixin [[autodoc]] loaders.ip_adapter.SD3IPAdapterMixin - all - is_ip_adapter_active ## IPAdapterMaskProcessor [[autodoc]] image_processor.IPAdapterMaskProcessor
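## Example

The mixin methods above are exposed directly on the pipelines. As a quick, non-authoritative orientation, the snippet below sketches a typical IP-Adapter workflow with a Stable Diffusion pipeline; the `h94/IP-Adapter` repository, weight name, scale value, and image URL are illustrative choices rather than the only supported ones.

```py
import torch
from diffusers import AutoPipelineForText2Image
from diffusers.utils import load_image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Attach an IP-Adapter checkpoint to the pipeline (IPAdapterMixin.load_ip_adapter).
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")

# Balance the image prompt against the text prompt: lower values favor the text prompt.
pipeline.set_ip_adapter_scale(0.6)

ip_image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/load_neg_embed.png"
)
image = pipeline(
    prompt="a polar bear sitting in a chair drinking a milkshake",
    ip_adapter_image=ip_image,
).images[0]
```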
diffusers/docs/source/en/api/loaders/ip_adapter.md/0
{ "file_path": "diffusers/docs/source/en/api/loaders/ip_adapter.md", "repo_id": "diffusers", "token_count": 388 }
<!-- Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # AutoencoderKLAllegro The 3D variational autoencoder (VAE) model with KL loss used in [Allegro](https://github.com/rhymes-ai/Allegro) was introduced in [Allegro: Open the Black Box of Commercial-Level Video Generation Model](https://huggingface.co/papers/2410.15458) by RhymesAI. The model can be loaded with the following code snippet. ```python import torch from diffusers import AutoencoderKLAllegro vae = AutoencoderKLAllegro.from_pretrained("rhymes-ai/Allegro", subfolder="vae", torch_dtype=torch.float32).to("cuda") ``` ## AutoencoderKLAllegro [[autodoc]] AutoencoderKLAllegro - decode - encode - all ## AutoencoderKLOutput [[autodoc]] models.autoencoders.autoencoder_kl.AutoencoderKLOutput ## DecoderOutput [[autodoc]] models.autoencoders.vae.DecoderOutput
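## Example

A minimal encode/decode sketch is shown below. The dummy tensor shape, frame count, and the `enable_tiling` call are assumptions based on the generic diffusers video-VAE API rather than Allegro-specific recommendations.

```python
import torch
from diffusers import AutoencoderKLAllegro

vae = AutoencoderKLAllegro.from_pretrained(
    "rhymes-ai/Allegro", subfolder="vae", torch_dtype=torch.float32
).to("cuda")
vae.enable_tiling()  # assumed helper to keep memory usage manageable for large videos

# Dummy video batch: (batch, channels, frames, height, width) -- purely illustrative.
video = torch.randn(1, 3, 8, 256, 256, device="cuda", dtype=torch.float32)

with torch.no_grad():
    latents = vae.encode(video).latent_dist.sample()  # AutoencoderKLOutput.latent_dist
    reconstruction = vae.decode(latents).sample       # DecoderOutput.sample
```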
diffusers/docs/source/en/api/models/autoencoderkl_allegro.md/0
{ "file_path": "diffusers/docs/source/en/api/models/autoencoderkl_allegro.md", "repo_id": "diffusers", "token_count": 427 }
<!-- Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. --> # LTX Video [LTX Video](https://huggingface.co/Lightricks/LTX-Video) is the first DiT-based video generation model capable of generating high-quality videos in real-time. It produces 24 FPS videos at a 768x512 resolution faster than they can be watched. Trained on a large-scale dataset of diverse videos, the model generates high-resolution videos with realistic and varied content. We provide a model for both text-to-video as well as image + text-to-video usecases. <Tip> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. </Tip> Available models: | Model name | Recommended dtype | |:-------------:|:-----------------:| | [`LTX Video 0.9.0`](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltx-video-2b-v0.9.safetensors) | `torch.bfloat16` | | [`LTX Video 0.9.1`](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltx-video-2b-v0.9.1.safetensors) | `torch.bfloat16` | Note: The recommended dtype is for the transformer component. The VAE and text encoders can be either `torch.float32`, `torch.bfloat16` or `torch.float16` but the recommended dtype is `torch.bfloat16` as used in the original repository. ## Loading Single Files Loading the original LTX Video checkpoints is also possible with [`~ModelMixin.from_single_file`]. We recommend using `from_single_file` for the Lightricks series of models, as they plan to release multiple models in the future in the single file format. ```python import torch from diffusers import AutoencoderKLLTXVideo, LTXImageToVideoPipeline, LTXVideoTransformer3DModel # `single_file_url` could also be https://huggingface.co/Lightricks/LTX-Video/ltx-video-2b-v0.9.1.safetensors single_file_url = "https://huggingface.co/Lightricks/LTX-Video/ltx-video-2b-v0.9.safetensors" transformer = LTXVideoTransformer3DModel.from_single_file( single_file_url, torch_dtype=torch.bfloat16 ) vae = AutoencoderKLLTXVideo.from_single_file(single_file_url, torch_dtype=torch.bfloat16) pipe = LTXImageToVideoPipeline.from_pretrained( "Lightricks/LTX-Video", transformer=transformer, vae=vae, torch_dtype=torch.bfloat16 ) # ... inference code ... ``` Alternatively, the pipeline can be used to load the weights with [`~FromSingleFileMixin.from_single_file`]. 
```python import torch from diffusers import LTXImageToVideoPipeline from transformers import T5EncoderModel, T5Tokenizer single_file_url = "https://huggingface.co/Lightricks/LTX-Video/ltx-video-2b-v0.9.safetensors" text_encoder = T5EncoderModel.from_pretrained( "Lightricks/LTX-Video", subfolder="text_encoder", torch_dtype=torch.bfloat16 ) tokenizer = T5Tokenizer.from_pretrained( "Lightricks/LTX-Video", subfolder="tokenizer", torch_dtype=torch.bfloat16 ) pipe = LTXImageToVideoPipeline.from_single_file( single_file_url, text_encoder=text_encoder, tokenizer=tokenizer, torch_dtype=torch.bfloat16 ) ``` Loading [LTX GGUF checkpoints](https://huggingface.co/city96/LTX-Video-gguf) are also supported: ```py import torch from diffusers.utils import export_to_video from diffusers import LTXPipeline, LTXVideoTransformer3DModel, GGUFQuantizationConfig ckpt_path = ( "https://huggingface.co/city96/LTX-Video-gguf/blob/main/ltx-video-2b-v0.9-Q3_K_S.gguf" ) transformer = LTXVideoTransformer3DModel.from_single_file( ckpt_path, quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16), torch_dtype=torch.bfloat16, ) pipe = LTXPipeline.from_pretrained( "Lightricks/LTX-Video", transformer=transformer, torch_dtype=torch.bfloat16, ) pipe.enable_model_cpu_offload() prompt = "A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage" negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted" video = pipe( prompt=prompt, negative_prompt=negative_prompt, width=704, height=480, num_frames=161, num_inference_steps=50, ).frames[0] export_to_video(video, "output_gguf_ltx.mp4", fps=24) ``` Make sure to read the [documentation on GGUF](../../quantization/gguf) to learn more about our GGUF support. <!-- TODO(aryan): Update this when official weights are supported --> Loading and running inference with [LTX Video 0.9.1](https://huggingface.co/Lightricks/LTX-Video/blob/main/ltx-video-2b-v0.9.1.safetensors) weights. ```python import torch from diffusers import LTXPipeline from diffusers.utils import export_to_video pipe = LTXPipeline.from_pretrained("a-r-r-o-w/LTX-Video-0.9.1-diffusers", torch_dtype=torch.bfloat16) pipe.to("cuda") prompt = "A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage" negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted" video = pipe( prompt=prompt, negative_prompt=negative_prompt, width=768, height=512, num_frames=161, decode_timestep=0.03, decode_noise_scale=0.025, num_inference_steps=50, ).frames[0] export_to_video(video, "output.mp4", fps=24) ``` Refer to [this section](https://huggingface.co/docs/diffusers/main/en/api/pipelines/cogvideox#memory-optimization) to learn more about optimizing memory consumption. 
## Quantization Quantization helps reduce the memory requirements of very large models by storing model weights in a lower precision data type. However, quantization may have varying impact on video quality depending on the video model. Refer to the [Quantization](../../quantization/overview) overview to learn more about supported quantization backends and selecting a quantization backend that supports your use case. The example below demonstrates how to load a quantized [`LTXPipeline`] for inference with bitsandbytes. ```py import torch from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, LTXVideoTransformer3DModel, LTXPipeline from diffusers.utils import export_to_video from transformers import BitsAndBytesConfig as BitsAndBytesConfig, T5EncoderModel quant_config = BitsAndBytesConfig(load_in_8bit=True) text_encoder_8bit = T5EncoderModel.from_pretrained( "Lightricks/LTX-Video", subfolder="text_encoder", quantization_config=quant_config, torch_dtype=torch.float16, ) quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True) transformer_8bit = LTXVideoTransformer3DModel.from_pretrained( "Lightricks/LTX-Video", subfolder="transformer", quantization_config=quant_config, torch_dtype=torch.float16, ) pipeline = LTXPipeline.from_pretrained( "Lightricks/LTX-Video", text_encoder=text_encoder_8bit, transformer=transformer_8bit, torch_dtype=torch.float16, device_map="balanced", ) prompt = "A detailed wooden toy ship with intricately carved masts and sails is seen gliding smoothly over a plush, blue carpet that mimics the waves of the sea. The ship's hull is painted a rich brown, with tiny windows. The carpet, soft and textured, provides a perfect backdrop, resembling an oceanic expanse. Surrounding the ship are various other toys and children's items, hinting at a playful environment. The scene captures the innocence and imagination of childhood, with the toy ship's journey symbolizing endless adventures in a whimsical, indoor setting." video = pipeline(prompt=prompt, num_frames=161, num_inference_steps=50).frames[0] export_to_video(video, "ship.mp4", fps=24) ``` ## LTXPipeline [[autodoc]] LTXPipeline - all - __call__ ## LTXImageToVideoPipeline [[autodoc]] LTXImageToVideoPipeline - all - __call__ ## LTXPipelineOutput [[autodoc]] pipelines.ltx.pipeline_output.LTXPipelineOutput
diffusers/docs/source/en/api/pipelines/ltx_video.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/ltx_video.md", "repo_id": "diffusers", "token_count": 2911 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Shap-E The Shap-E model was proposed in [Shap-E: Generating Conditional 3D Implicit Functions](https://huggingface.co/papers/2305.02463) by Alex Nichol and Heewoo Jun from [OpenAI](https://github.com/openai). The abstract from the paper is: *We present Shap-E, a conditional generative model for 3D assets. Unlike recent work on 3D generative models which produce a single output representation, Shap-E directly generates the parameters of implicit functions that can be rendered as both textured meshes and neural radiance fields. We train Shap-E in two stages: first, we train an encoder that deterministically maps 3D assets into the parameters of an implicit function; second, we train a conditional diffusion model on outputs of the encoder. When trained on a large dataset of paired 3D and text data, our resulting models are capable of generating complex and diverse 3D assets in a matter of seconds. When compared to Point-E, an explicit generative model over point clouds, Shap-E converges faster and reaches comparable or better sample quality despite modeling a higher-dimensional, multi-representation output space.* The original codebase can be found at [openai/shap-e](https://github.com/openai/shap-e). <Tip> See the [reuse components across pipelines](../../using-diffusers/loading#reuse-a-pipeline) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## ShapEPipeline [[autodoc]] ShapEPipeline - all - __call__ ## ShapEImg2ImgPipeline [[autodoc]] ShapEImg2ImgPipeline - all - __call__ ## ShapEPipelineOutput [[autodoc]] pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput
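## Example

As a quick orientation for the pipelines documented above, here is a minimal text-to-3D sketch. The `openai/shap-e` checkpoint, `frame_size`, and guidance value are illustrative settings rather than tuned recommendations.

```py
import torch
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif

pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")

# Generate a 3D asset from text and render it as a sequence of frames.
frames = pipe(
    "a firecracker",
    guidance_scale=15.0,
    num_inference_steps=64,
    frame_size=256,
).images[0]

export_to_gif(frames, "firecracker_3d.gif")
```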
diffusers/docs/source/en/api/pipelines/shap_e.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/shap_e.md", "repo_id": "diffusers", "token_count": 590 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DDIMScheduler [Denoising Diffusion Implicit Models](https://huggingface.co/papers/2010.02502) (DDIM) by Jiaming Song, Chenlin Meng and Stefano Ermon. The abstract from the paper is: *Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training, yet they require simulating a Markov chain for many steps to produce a sample. To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process. We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from. We empirically demonstrate that DDIMs can produce high quality samples 10× to 50× faster in terms of wall-clock time compared to DDPMs, allow us to trade off computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space.* The original codebase of this paper can be found at [ermongroup/ddim](https://github.com/ermongroup/ddim), and you can contact the author on [tsong.me](https://tsong.me/). ## Tips The paper [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) claims that a mismatch between the training and inference settings leads to suboptimal inference generation results for Stable Diffusion. To fix this, the authors propose: <Tip warning={true}> 🧪 This is an experimental feature! </Tip> 1. rescale the noise schedule to enforce zero terminal signal-to-noise ratio (SNR) ```py pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, rescale_betas_zero_snr=True) ``` 2. train a model with `v_prediction` (add the following argument to the [train_text_to_image.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) or [train_text_to_image_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) scripts) ```bash --prediction_type="v_prediction" ``` 3. change the sampler to always start from the last timestep ```py pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing") ``` 4. 
rescale classifier-free guidance to prevent over-exposure ```py image = pipe(prompt, guidance_rescale=0.7).images[0] ``` For example: ```py from diffusers import DiffusionPipeline, DDIMScheduler import torch pipe = DiffusionPipeline.from_pretrained("ptx0/pseudo-journey-v2", torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config( pipe.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing" ) pipe.to("cuda") prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" image = pipe(prompt, guidance_rescale=0.7).images[0] image ``` ## DDIMScheduler [[autodoc]] DDIMScheduler ## DDIMSchedulerOutput [[autodoc]] schedulers.scheduling_ddim.DDIMSchedulerOutput
diffusers/docs/source/en/api/schedulers/ddim.md/0
{ "file_path": "diffusers/docs/source/en/api/schedulers/ddim.md", "repo_id": "diffusers", "token_count": 1122 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # LMSDiscreteScheduler `LMSDiscreteScheduler` is a linear multistep scheduler for discrete beta schedules. The scheduler is ported from and created by [Katherine Crowson](https://github.com/crowsonkb/), and the original implementation can be found at [crowsonkb/k-diffusion](https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181). ## LMSDiscreteScheduler [[autodoc]] LMSDiscreteScheduler ## LMSDiscreteSchedulerOutput [[autodoc]] schedulers.scheduling_lms_discrete.LMSDiscreteSchedulerOutput
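## Example

Like other schedulers in the library, `LMSDiscreteScheduler` can be swapped into an existing pipeline via `from_config`. The checkpoint below is only an example.

```py
import torch
from diffusers import DiffusionPipeline, LMSDiscreteScheduler

pipeline = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Reuse the existing scheduler config so the beta schedule and timestep settings stay consistent.
pipeline.scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config)

image = pipeline("a photograph of an astronaut riding a horse").images[0]
```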
diffusers/docs/source/en/api/schedulers/lms_discrete.md/0
{ "file_path": "diffusers/docs/source/en/api/schedulers/lms_discrete.md", "repo_id": "diffusers", "token_count": 335 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # OpenVINO 🤗 [Optimum](https://github.com/huggingface/optimum-intel) provides Stable Diffusion pipelines compatible with OpenVINO to perform inference on a variety of Intel processors (see the [full list](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html) of supported devices). You'll need to install 🤗 Optimum Intel with the `--upgrade-strategy eager` option to ensure [`optimum-intel`](https://github.com/huggingface/optimum-intel) is using the latest version: ```bash pip install --upgrade-strategy eager optimum["openvino"] ``` This guide will show you how to use the Stable Diffusion and Stable Diffusion XL (SDXL) pipelines with OpenVINO. ## Stable Diffusion To load and run inference, use the [`~optimum.intel.OVStableDiffusionPipeline`]. If you want to load a PyTorch model and convert it to the OpenVINO format on-the-fly, set `export=True`: ```python from optimum.intel import OVStableDiffusionPipeline model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipeline = OVStableDiffusionPipeline.from_pretrained(model_id, export=True) prompt = "sailing ship in storm by Rembrandt" image = pipeline(prompt).images[0] # Don't forget to save the exported model pipeline.save_pretrained("openvino-sd-v1-5") ``` To further speed-up inference, statically reshape the model. If you change any parameters such as the outputs height or width, you’ll need to statically reshape your model again. ```python # Define the shapes related to the inputs and desired outputs batch_size, num_images, height, width = 1, 1, 512, 512 # Statically reshape the model pipeline.reshape(batch_size, height, width, num_images) # Compile the model before inference pipeline.compile() image = pipeline( prompt, height=height, width=width, num_images_per_prompt=num_images, ).images[0] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/intel/openvino/stable_diffusion_v1_5_sail_boat_rembrandt.png"> </div> You can find more examples in the 🤗 Optimum [documentation](https://huggingface.co/docs/optimum/intel/inference#stable-diffusion), and Stable Diffusion is supported for text-to-image, image-to-image, and inpainting. ## Stable Diffusion XL To load and run inference with SDXL, use the [`~optimum.intel.OVStableDiffusionXLPipeline`]: ```python from optimum.intel import OVStableDiffusionXLPipeline model_id = "stabilityai/stable-diffusion-xl-base-1.0" pipeline = OVStableDiffusionXLPipeline.from_pretrained(model_id) prompt = "sailing ship in storm by Rembrandt" image = pipeline(prompt).images[0] ``` To further speed-up inference, [statically reshape](#stable-diffusion) the model as shown in the Stable Diffusion section. You can find more examples in the 🤗 Optimum [documentation](https://huggingface.co/docs/optimum/intel/inference#stable-diffusion-xl), and running SDXL in OpenVINO is supported for text-to-image and image-to-image.
diffusers/docs/source/en/optimization/open_vino.md/0
{ "file_path": "diffusers/docs/source/en/optimization/open_vino.md", "repo_id": "diffusers", "token_count": 1115 }
# Create a dataset for training There are many datasets on the [Hub](https://huggingface.co/datasets?task_categories=task_categories:text-to-image&sort=downloads) to train a model on, but if you can't find one you're interested in or want to use your own, you can create a dataset with the 🤗 [Datasets](https://huggingface.co/docs/datasets) library. The dataset structure depends on the task you want to train your model on. The most basic dataset structure is a directory of images for tasks like unconditional image generation. Another dataset structure may be a directory of images and a text file containing their corresponding text captions for tasks like text-to-image generation. This guide will show you two ways to create a dataset to finetune on: - provide a folder of images to the `--train_data_dir` argument - upload a dataset to the Hub and pass the dataset repository id to the `--dataset_name` argument <Tip> 💡 Learn more about how to create an image dataset for training in the [Create an image dataset](https://huggingface.co/docs/datasets/image_dataset) guide. </Tip> ## Provide a dataset as a folder For unconditional generation, you can provide your own dataset as a folder of images. The training script uses the [`ImageFolder`](https://huggingface.co/docs/datasets/en/image_dataset#imagefolder) builder from 🤗 Datasets to automatically build a dataset from the folder. Your directory structure should look like: ```bash data_dir/xxx.png data_dir/xxy.png data_dir/[...]/xxz.png ``` Pass the path to the dataset directory to the `--train_data_dir` argument, and then you can start training: ```bash accelerate launch train_unconditional.py \ --train_data_dir <path-to-train-directory> \ <other-arguments> ``` ## Upload your data to the Hub <Tip> 💡 For more details and context about creating and uploading a dataset to the Hub, take a look at the [Image search with 🤗 Datasets](https://huggingface.co/blog/image-search-datasets) post. </Tip> Start by creating a dataset with the [`ImageFolder`](https://huggingface.co/docs/datasets/image_load#imagefolder) feature, which creates an `image` column containing the PIL-encoded images. You can use the `data_dir` or `data_files` parameters to specify the location of the dataset. 
The `data_files` parameter supports mapping specific files to dataset splits like `train` or `test`: ```python from datasets import load_dataset # example 1: local folder dataset = load_dataset("imagefolder", data_dir="path_to_your_folder") # example 2: local files (supported formats are tar, gzip, zip, xz, rar, zstd) dataset = load_dataset("imagefolder", data_files="path_to_zip_file") # example 3: remote files (supported formats are tar, gzip, zip, xz, rar, zstd) dataset = load_dataset( "imagefolder", data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip", ) # example 4: providing several splits dataset = load_dataset( "imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]} ) ``` Then use the [`~datasets.Dataset.push_to_hub`] method to upload the dataset to the Hub: ```python # assuming you have run the huggingface-cli login command in a terminal dataset.push_to_hub("name_of_your_dataset") # if you want to push to a private repo, simply pass private=True: dataset.push_to_hub("name_of_your_dataset", private=True) ``` Now the dataset is available for training by passing the dataset name to the `--dataset_name` argument: ```bash accelerate launch --mixed_precision="fp16" train_text_to_image.py \ --pretrained_model_name_or_path="stable-diffusion-v1-5/stable-diffusion-v1-5" \ --dataset_name="name_of_your_dataset" \ <other-arguments> ``` ## Next steps Now that you've created a dataset, you can plug it into the `train_data_dir` (if your dataset is local) or `dataset_name` (if your dataset is on the Hub) arguments of a training script. For your next steps, feel free to try and use your dataset to train a model for [unconditional generation](unconditional_training) or [text-to-image generation](text2image)!
diffusers/docs/source/en/training/create_dataset.md/0
{ "file_path": "diffusers/docs/source/en/training/create_dataset.md", "repo_id": "diffusers", "token_count": 1310 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # AutoPipeline Diffusers provides many pipelines for basic tasks like generating images, videos, audio, and inpainting. On top of these, there are specialized pipelines for adapters and features like upscaling, super-resolution, and more. Different pipeline classes can even use the same checkpoint because they share the same pretrained model! With so many different pipelines, it can be overwhelming to know which pipeline class to use. The [AutoPipeline](../api/pipelines/auto_pipeline) class is designed to simplify the variety of pipelines in Diffusers. It is a generic *task-first* pipeline that lets you focus on a task ([`AutoPipelineForText2Image`], [`AutoPipelineForImage2Image`], and [`AutoPipelineForInpainting`]) without needing to know the specific pipeline class. The [AutoPipeline](../api/pipelines/auto_pipeline) automatically detects the correct pipeline class to use. For example, let's use the [dreamlike-art/dreamlike-photoreal-2.0](https://hf.co/dreamlike-art/dreamlike-photoreal-2.0) checkpoint. Under the hood, [AutoPipeline](../api/pipelines/auto_pipeline): 1. Detects a `"stable-diffusion"` class from the [model_index.json](https://hf.co/dreamlike-art/dreamlike-photoreal-2.0/blob/main/model_index.json) file. 2. Depending on the task you're interested in, it loads the [`StableDiffusionPipeline`], [`StableDiffusionImg2ImgPipeline`], or [`StableDiffusionInpaintPipeline`]. Any parameter (`strength`, `num_inference_steps`, etc.) you would pass to these specific pipelines can also be passed to the [AutoPipeline](../api/pipelines/auto_pipeline). 
<hfoptions id="autopipeline"> <hfoption id="text-to-image"> ```py from diffusers import AutoPipelineForText2Image import torch pipe_txt2img = AutoPipelineForText2Image.from_pretrained( "dreamlike-art/dreamlike-photoreal-2.0", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") prompt = "cinematic photo of Godzilla eating sushi with a cat in a izakaya, 35mm photograph, film, professional, 4k, highly detailed" generator = torch.Generator(device="cpu").manual_seed(37) image = pipe_txt2img(prompt, generator=generator).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-text2img.png"/> </div> </hfoption> <hfoption id="image-to-image"> ```py from diffusers import AutoPipelineForImage2Image from diffusers.utils import load_image import torch pipe_img2img = AutoPipelineForImage2Image.from_pretrained( "dreamlike-art/dreamlike-photoreal-2.0", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-text2img.png") prompt = "cinematic photo of Godzilla eating burgers with a cat in a fast food restaurant, 35mm photograph, film, professional, 4k, highly detailed" generator = torch.Generator(device="cpu").manual_seed(53) image = pipe_img2img(prompt, image=init_image, generator=generator).images[0] image ``` Notice how the [dreamlike-art/dreamlike-photoreal-2.0](https://hf.co/dreamlike-art/dreamlike-photoreal-2.0) checkpoint is used for both text-to-image and image-to-image tasks? To save memory and avoid loading the checkpoint twice, use the [`~DiffusionPipeline.from_pipe`] method. ```py pipe_img2img = AutoPipelineForImage2Image.from_pipe(pipe_txt2img).to("cuda") image = pipeline(prompt, image=init_image, generator=generator).images[0] image ``` You can learn more about the [`~DiffusionPipeline.from_pipe`] method in the [Reuse a pipeline](../using-diffusers/loading#reuse-a-pipeline) guide. 
<div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-img2img.png"/> </div> </hfoption> <hfoption id="inpainting"> ```py from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image import torch pipeline = AutoPipelineForInpainting.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-img2img.png") mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-mask.png") prompt = "cinematic photo of a owl, 35mm photograph, film, professional, 4k, highly detailed" generator = torch.Generator(device="cpu").manual_seed(38) image = pipeline(prompt, image=init_image, mask_image=mask_image, generator=generator, strength=0.4).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/autopipeline-inpaint.png"/> </div> </hfoption> </hfoptions> ## Unsupported checkpoints The [AutoPipeline](../api/pipelines/auto_pipeline) supports [Stable Diffusion](../api/pipelines/stable_diffusion/overview), [Stable Diffusion XL](../api/pipelines/stable_diffusion/stable_diffusion_xl), [ControlNet](../api/pipelines/controlnet), [Kandinsky 2.1](../api/pipelines/kandinsky.md), [Kandinsky 2.2](../api/pipelines/kandinsky_v22), and [DeepFloyd IF](../api/pipelines/deepfloyd_if) checkpoints. If you try to load an unsupported checkpoint, you'll get an error. ```py from diffusers import AutoPipelineForImage2Image import torch pipeline = AutoPipelineForImage2Image.from_pretrained( "openai/shap-e-img2img", torch_dtype=torch.float16, use_safetensors=True ) "ValueError: AutoPipeline can't find a pipeline linked to ShapEImg2ImgPipeline for None" ```
diffusers/docs/source/en/tutorials/autopipeline.md/0
{ "file_path": "diffusers/docs/source/en/tutorials/autopipeline.md", "repo_id": "diffusers", "token_count": 2061 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Controlling image quality The components of a diffusion model, like the UNet and scheduler, can be optimized to improve the quality of generated images leading to better details. These techniques are especially useful if you don't have the resources to simply use a larger model for inference. You can enable these techniques during inference without any additional training. This guide will show you how to turn these techniques on in your pipeline and how to configure them to improve the quality of your generated images. ## Details [FreeU](https://hf.co/papers/2309.11497) improves image details by rebalancing the UNet's backbone and skip connection weights. The skip connections can cause the model to overlook some of the backbone semantics which may lead to unnatural image details in the generated image. This technique does not require any additional training and can be applied on the fly during inference for tasks like image-to-image and text-to-video. Use the [`~pipelines.StableDiffusionMixin.enable_freeu`] method on your pipeline and configure the scaling factors for the backbone (`b1` and `b2`) and skip connections (`s1` and `s2`). The number after each scaling factor corresponds to the stage in the UNet where the factor is applied. Take a look at the [FreeU](https://github.com/ChenyangSi/FreeU#parameters) repository for reference hyperparameters for different models. 
<hfoptions id="freeu"> <hfoption id="Stable Diffusion v1-5"> ```py import torch from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, safety_checker=None ).to("cuda") pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.5, b2=1.6) generator = torch.Generator(device="cpu").manual_seed(33) prompt = "" image = pipeline(prompt, generator=generator).images[0] image ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdv15-no-freeu.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU disabled</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdv15-freeu.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU enabled</figcaption> </div> </div> </hfoption> <hfoption id="Stable Diffusion v2-1"> ```py import torch from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, safety_checker=None ).to("cuda") pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.4, b2=1.6) generator = torch.Generator(device="cpu").manual_seed(80) prompt = "A squirrel eating a burger" image = pipeline(prompt, generator=generator).images[0] image ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdv21-no-freeu.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU disabled</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdv21-freeu.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU enabled</figcaption> </div> </div> </hfoption> <hfoption id="Stable Diffusion XL"> ```py import torch from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, ).to("cuda") pipeline.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4) generator = torch.Generator(device="cpu").manual_seed(13) prompt = "A squirrel eating a burger" image = pipeline(prompt, generator=generator).images[0] image ``` <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-no-freeu.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU disabled</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-freeu.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU enabled</figcaption> </div> </div> </hfoption> <hfoption id="Zeroscope"> ```py import torch from diffusers import DiffusionPipeline from diffusers.utils import export_to_video pipeline = DiffusionPipeline.from_pretrained( "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16 ).to("cuda") # values come from https://github.com/lyn-rgb/FreeU_Diffusers#video-pipelines pipeline.enable_freeu(b1=1.2, b2=1.4, s1=0.9, s2=0.2) prompt = "Confident teddy bear surfer rides the wave in the tropics" generator = torch.Generator(device="cpu").manual_seed(47) video_frames = pipeline(prompt, generator=generator).frames[0] export_to_video(video_frames, "teddy_bear.mp4", fps=10) ``` <div 
class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/video-no-freeu.gif"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU disabled</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/video-freeu.gif"/> <figcaption class="mt-2 text-center text-sm text-gray-500">FreeU enabled</figcaption> </div> </div> </hfoption> </hfoptions> Call the [`pipelines.StableDiffusionMixin.disable_freeu`] method to disable FreeU. ```py pipeline.disable_freeu() ```
diffusers/docs/source/en/using-diffusers/image_quality.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/image_quality.md", "repo_id": "diffusers", "token_count": 2199 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Scheduler features The scheduler is an important component of any diffusion model because it controls the entire denoising (or sampling) process. There are many types of schedulers, some are optimized for speed and some for quality. With Diffusers, you can modify the scheduler configuration to use custom noise schedules, sigmas, and rescale the noise schedule. Changing these parameters can have profound effects on inference quality and speed. This guide will demonstrate how to use these features to improve inference quality. > [!TIP] > Diffusers currently only supports the `timesteps` and `sigmas` parameters for a select list of schedulers and pipelines. Feel free to open a [feature request](https://github.com/huggingface/diffusers/issues/new/choose) if you want to extend these parameters to a scheduler and pipeline that does not currently support it! ## Timestep schedules The timestep or noise schedule determines the amount of noise at each sampling step. The scheduler uses this to generate an image with the corresponding amount of noise at each step. The timestep schedule is generated from the scheduler's default configuration, but you can customize the scheduler to use new and optimized sampling schedules that aren't in Diffusers yet. For example, [Align Your Steps (AYS)](https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/) is a method for optimizing a sampling schedule to generate a high-quality image in as little as 10 steps. The optimal [10-step schedule](https://github.com/huggingface/diffusers/blob/a7bf77fc284810483f1e60afe34d1d27ad91ce2e/src/diffusers/schedulers/scheduling_utils.py#L51) for Stable Diffusion XL is: ```py from diffusers.schedulers import AysSchedules sampling_schedule = AysSchedules["StableDiffusionXLTimesteps"] print(sampling_schedule) "[999, 845, 730, 587, 443, 310, 193, 116, 53, 13]" ``` You can use the AYS sampling schedule in a pipeline by passing it to the `timesteps` parameter. 
```py
import torch
from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, algorithm_type="sde-dpmsolver++")

prompt = "A cinematic shot of a cute little rabbit wearing a jacket and doing a thumbs up"
generator = torch.Generator(device="cpu").manual_seed(2487854446)
image = pipeline(
    prompt=prompt,
    negative_prompt="",
    generator=generator,
    timesteps=sampling_schedule,  # the AYS schedule from the previous snippet
).images[0]
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ays.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">AYS timestep schedule 10 steps</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/10.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">Linearly-spaced timestep schedule 10 steps</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/25.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">Linearly-spaced timestep schedule 25 steps</figcaption>
  </div>
</div>

## Timestep spacing

The way sample steps are selected in the schedule can affect the quality of the generated image, especially with respect to [rescaling the noise schedule](#rescale-noise-schedule), which can enable a model to generate much brighter or darker images. Diffusers provides three timestep spacing methods:

- `leading` creates evenly spaced steps
- `linspace` includes the first and last steps and evenly selects the remaining intermediate steps
- `trailing` only includes the last step and evenly selects the remaining intermediate steps starting from the end

It is recommended to use the `trailing` spacing method because it generates higher quality images with more details when there are fewer sample steps. But the difference in quality is not as obvious for more standard sample step values.

```py
import torch
from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, timestep_spacing="trailing")

prompt = "A cinematic shot of a cute little black cat sitting on a pumpkin at night"
generator = torch.Generator(device="cpu").manual_seed(2487854446)
image = pipeline(
    prompt=prompt,
    negative_prompt="",
    generator=generator,
    num_inference_steps=5,
).images[0]
image
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/trailing_spacing.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">trailing spacing after 5 steps</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/leading_spacing.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">leading spacing after 5 steps</figcaption>
  </div>
</div>

## Sigmas

The `sigmas` parameter is the amount of noise added at each timestep according to the timestep schedule. Like the `timesteps` parameter, you can customize the `sigmas` parameter to control how much noise is added at each step.
When you use a custom `sigmas` value, the `timesteps` are calculated from the custom `sigmas` value and the default scheduler configuration is ignored. For example, you can manually pass the [sigmas](https://github.com/huggingface/diffusers/blob/6529ee67ec02fcf58d2fd9242164ea002b351d75/src/diffusers/schedulers/scheduling_utils.py#L55) for something like the 10-step AYS schedule from before to the pipeline.

```py
import torch
from diffusers import DiffusionPipeline, EulerDiscreteScheduler

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
pipeline = DiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)

sigmas = [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.0]
prompt = "anthropomorphic capybara wearing a suit and working with a computer"
generator = torch.Generator(device='cuda').manual_seed(123)
image = pipeline(
    prompt=prompt,
    num_inference_steps=10,
    sigmas=sigmas,
    generator=generator
).images[0]
```

When you take a look at the scheduler's `timesteps` parameter, you'll see that it is the same as the AYS timestep schedule because the `timestep` schedule is calculated from the `sigmas`.

```py
print(f" timesteps: {pipeline.scheduler.timesteps}")
"timesteps: tensor([999., 845., 730., 587., 443., 310., 193., 116., 53., 13.], device='cuda:0')"
```

### Karras sigmas

> [!TIP]
> Refer to the scheduler API [overview](../api/schedulers/overview) for a list of schedulers that support Karras sigmas.
>
> Karras sigmas should not be used for models that weren't trained with them. For example, the base Stable Diffusion XL model shouldn't use Karras sigmas but the [DreamShaperXL](https://hf.co/Lykon/dreamshaper-xl-1-0) model can since they are trained with Karras sigmas.

Karras schedulers use the timestep schedule and sigmas from the [Elucidating the Design Space of Diffusion-Based Generative Models](https://hf.co/papers/2206.00364) paper. This scheduler variant applies a smaller amount of noise per step as it approaches the end of the sampling process compared to other schedulers, and can increase the level of details in the generated image.

Enable Karras sigmas by setting `use_karras_sigmas=True` in the scheduler.
```py
import torch
from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "SG161222/RealVisXL_V4.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, algorithm_type="sde-dpmsolver++", use_karras_sigmas=True)

prompt = "A cinematic shot of a cute little rabbit wearing a jacket and doing a thumbs up"
generator = torch.Generator(device="cpu").manual_seed(2487854446)
image = pipeline(
    prompt=prompt,
    negative_prompt="",
    generator=generator,
).images[0]
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/karras_sigmas_true.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">Karras sigmas enabled</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/stevhliu/testing-images/resolve/main/karras_sigmas_false.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">Karras sigmas disabled</figcaption>
  </div>
</div>

## Rescale noise schedule

In the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://hf.co/papers/2305.08891) paper, the authors discovered that common noise schedules allowed some signal to leak into the last timestep. This signal leakage at inference can cause models to only generate images with medium brightness. By enforcing a zero signal-to-noise ratio (SNR) for the timestep schedule and sampling from the last timestep, the model can be improved to generate very bright or dark images.

> [!TIP]
> For inference, you need a model that has been trained with *v_prediction*. To train your own model with *v_prediction*, add the following flag to the [train_text_to_image.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) or [train_text_to_image_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) scripts.
>
> ```bash
> --prediction_type="v_prediction"
> ```

For example, load the [ptx0/pseudo-journey-v2](https://hf.co/ptx0/pseudo-journey-v2) checkpoint which was trained with `v_prediction` and the [`DDIMScheduler`]. Configure the following parameters in the [`DDIMScheduler`]:

* `rescale_betas_zero_snr=True` to rescale the noise schedule to zero SNR
* `timestep_spacing="trailing"` to start sampling from the last timestep

Set `guidance_rescale` in the pipeline to prevent over-exposure. A lower value increases brightness but some of the details may appear washed out.
```py
import torch
from diffusers import DiffusionPipeline, DDIMScheduler

pipeline = DiffusionPipeline.from_pretrained("ptx0/pseudo-journey-v2", use_safetensors=True)

pipeline.scheduler = DDIMScheduler.from_config(
    pipeline.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing"
)
pipeline.to("cuda")
prompt = "cinematic photo of a snowy mountain at night with the northern lights aurora borealis overhead, 35mm photograph, film, professional, 4k, highly detailed"
generator = torch.Generator(device="cpu").manual_seed(23)
image = pipeline(prompt, guidance_rescale=0.7, generator=generator).images[0]
image
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/no-zero-snr.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">default Stable Diffusion v2-1 image</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/zero-snr.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">image with zero SNR and trailing timestep spacing enabled</figcaption>
  </div>
</div>
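Since `guidance_rescale` trades off exposure against detail, it can help to sweep a few values with a fixed seed and pick the one that looks best for your prompt. The following is a minimal sketch, reusing the `pipeline` and `prompt` variables from the example above; the list of rescale values is an arbitrary choice.

```py
import torch

images = {}
for guidance_rescale in [0.0, 0.5, 0.7, 0.9]:
    # Same seed for every run so only guidance_rescale changes
    generator = torch.Generator(device="cpu").manual_seed(23)
    images[guidance_rescale] = pipeline(
        prompt, guidance_rescale=guidance_rescale, generator=generator
    ).images[0]

for guidance_rescale, image in images.items():
    image.save(f"zero_snr_rescale_{guidance_rescale}.png")
```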
diffusers/docs/source/en/using-diffusers/scheduler_features.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/scheduler_features.md", "repo_id": "diffusers", "token_count": 4053 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # 簡単な案内 拡散モデル(Diffusion Model)は、ランダムな正規分布から段階的にノイズ除去するように学習され、画像や音声などの目的のものを生成できます。これは生成AIに多大な関心を呼び起こしました。インターネット上で拡散によって生成された画像の例を見たことがあるでしょう。🧨 Diffusersは、誰もが拡散モデルに広くアクセスできるようにすることを目的としたライブラリです。 この案内では、開発者または日常的なユーザーに関わらず、🧨 Diffusers を紹介し、素早く目的のものを生成できるようにします!このライブラリには3つの主要コンポーネントがあります: * [`DiffusionPipeline`]は事前に学習された拡散モデルからサンプルを迅速に生成するために設計された高レベルのエンドツーエンドクラス。 * 拡散システムを作成するためのビルディングブロックとして使用できる、人気のある事前学習された[モデル](./api/models)アーキテクチャとモジュール。 * 多くの異なる[スケジューラ](./api/schedulers/overview) - ノイズがどのようにトレーニングのために加えられるか、そして生成中にどのようにノイズ除去された画像を生成するかを制御するアルゴリズム。 この案内では、[`DiffusionPipeline`]を生成に使用する方法を紹介し、モデルとスケジューラを組み合わせて[`DiffusionPipeline`]の内部で起こっていることを再現する方法を説明します。 <Tip> この案内は🧨 Diffusers [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb)を簡略化したもので、すぐに使い始めることができます。Diffusers 🧨のゴール、設計哲学、コアAPIの詳細についてもっと知りたい方は、ノートブックをご覧ください! </Tip> 始める前に必要なライブラリーがすべてインストールされていることを確認してください: ```py # uncomment to install the necessary libraries in Colab #!pip install --upgrade diffusers accelerate transformers ``` - [🤗 Accelerate](https://huggingface.co/docs/accelerate/index)生成とトレーニングのためのモデルのロードを高速化します - [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview)ような最も一般的な拡散モデルを実行するには、[🤗 Transformers](https://huggingface.co/docs/transformers/index)が必要です。 ## 拡散パイプライン [`DiffusionPipeline`]は事前学習された拡散システムを生成に使用する最も簡単な方法です。これはモデルとスケジューラを含むエンドツーエンドのシステムです。[`DiffusionPipeline`]は多くの作業/タスクにすぐに使用することができます。また、サポートされているタスクの完全なリストについては[🧨Diffusersの概要](./api/pipelines/overview#diffusers-summary)の表を参照してください。 | **タスク** | **説明** | **パイプライン** |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------| | Unconditional Image Generation | 正規分布から画像生成 | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) | | Text-Guided Image Generation | 文章から画像生成 | [conditional_image_generation](./using-diffusers/conditional_image_generation) | | Text-Guided Image-to-Image Translation | 画像と文章から新たな画像生成 | [img2img](./using-diffusers/img2img) | | Text-Guided Image-Inpainting | 画像、マスク、および文章が指定された場合に、画像のマスクされた部分を文章をもとに修復 | [inpaint](./using-diffusers/inpaint) | | Text-Guided Depth-to-Image Translation | 文章と深度推定によって構造を保持しながら画像生成 | [depth2img](./using-diffusers/depth2img) | まず、[`DiffusionPipeline`]のインスタンスを作成し、ダウンロードしたいパイプラインのチェックポイントを指定します。 この[`DiffusionPipeline`]はHugging Face Hubに保存されている任意の[チェックポイント](https://huggingface.co/models?library=diffusers&sort=downloads)を使用することができます。 この案内では、[`stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5)チェックポイントでテキストから画像へ生成します。 <Tip warning={true}> [Stable Diffusion]モデルについては、モデルを実行する前にまず[ライセンス](https://huggingface.co/spaces/CompVis/stable-diffusion-license)を注意深くお読みください。🧨 Diffusers は、攻撃的または有害なコンテンツを防ぐために 
[`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) を実装していますが、モデルの改良された画像生成機能により、潜在的に有害なコンテンツが生成される可能性があります。 </Tip> モデルを[`~DiffusionPipeline.from_pretrained`]メソッドでロードします: ```python >>> from diffusers import DiffusionPipeline >>> pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) ``` [`DiffusionPipeline`]は全てのモデリング、トークン化、スケジューリングコンポーネントをダウンロードしてキャッシュします。Stable Diffusionパイプラインは[`UNet2DConditionModel`]と[`PNDMScheduler`]などで構成されています: ```py >>> pipeline StableDiffusionPipeline { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.13.1", ..., "scheduler": [ "diffusers", "PNDMScheduler" ], ..., "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` このモデルはおよそ14億個のパラメータで構成されているため、GPU上でパイプラインを実行することを強く推奨します。 PyTorchと同じように、ジェネレータオブジェクトをGPUに移すことができます: ```python >>> pipeline.to("cuda") ``` これで、文章を `pipeline` に渡して画像を生成し、ノイズ除去された画像にアクセスできるようになりました。デフォルトでは、画像出力は[`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class)オブジェクトでラップされます。 ```python >>> image = pipeline("An image of a squirrel in Picasso style").images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/> </div> `save`関数で画像を保存できます: ```python >>> image.save("image_of_squirrel_painting.png") ``` ### ローカルパイプライン ローカルでパイプラインを使用することもできます。唯一の違いは、最初にウェイトをダウンロードする必要があることです: ```bash !git lfs install !git clone https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5 ``` 保存したウェイトをパイプラインにロードします: ```python >>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", use_safetensors=True) ``` これで、上のセクションと同じようにパイプラインを動かすことができます。 ### スケジューラの交換 スケジューラーによって、ノイズ除去のスピードや品質のトレードオフが異なります。どれが自分に最適かを知る最善の方法は、実際に試してみることです!Diffusers 🧨の主な機能の1つは、スケジューラを簡単に切り替えることができることです。例えば、デフォルトの[`PNDMScheduler`]を[`EulerDiscreteScheduler`]に置き換えるには、[`~diffusers.ConfigMixin.from_config`]メソッドでロードできます: ```py >>> from diffusers import EulerDiscreteScheduler >>> pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True) >>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) ``` 新しいスケジューラを使って画像を生成し、その違いに気づくかどうか試してみてください! 
次のセクションでは、[`DiffusionPipeline`]を構成するコンポーネント(モデルとスケジューラ)を詳しく見て、これらのコンポーネントを使って猫の画像を生成する方法を学びます。 ## モデル ほとんどのモデルはノイズの多いサンプルを取り、各タイムステップで*残りのノイズ*を予測します(他のモデルは前のサンプルを直接予測するか、速度または[`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)を予測するように学習します)。モデルを混ぜて他の拡散システムを作ることもできます。 モデルは[`~ModelMixin.from_pretrained`]メソッドで開始されます。このメソッドはモデルをローカルにキャッシュするので、次にモデルをロードするときに高速になります。この案内では、[`UNet2DModel`]をロードします。これは基本的な画像生成モデルであり、猫画像で学習されたチェックポイントを使います: ```py >>> from diffusers import UNet2DModel >>> repo_id = "google/ddpm-cat-256" >>> model = UNet2DModel.from_pretrained(repo_id, use_safetensors=True) ``` モデルのパラメータにアクセスするには、`model.config` を呼び出せます: ```py >>> model.config ``` モデル構成は🧊凍結🧊されたディクショナリであり、モデル作成後にこれらのパラメー タを変更することはできません。これは意図的なもので、最初にモデル・アーキテクチャを定義するために使用されるパラメータが同じままであることを保証します。他のパラメータは生成中に調整することができます。 最も重要なパラメータは以下の通りです: * sample_size`: 入力サンプルの高さと幅。 * `in_channels`: 入力サンプルの入力チャンネル数。 * down_block_types` と `up_block_types`: UNet アーキテクチャを作成するために使用されるダウンサンプリングブロックとアップサンプリングブロックのタイプ。 * block_out_channels`: ダウンサンプリングブロックの出力チャンネル数。逆順でアップサンプリングブロックの入力チャンネル数にも使用されます。 * layer_per_block`: 各 UNet ブロックに含まれる ResNet ブロックの数。 このモデルを生成に使用するには、ランダムな画像の形の正規分布を作成します。このモデルは複数のランダムな正規分布を受け取ることができるため`batch`軸を入れます。入力チャンネル数に対応する`channel`軸も必要です。画像の高さと幅に対応する`sample_size`軸を持つ必要があります: ```py >>> import torch >>> torch.manual_seed(0) >>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) >>> noisy_sample.shape torch.Size([1, 3, 256, 256]) ``` 画像生成には、ノイズの多い画像と `timestep` をモデルに渡します。`timestep`は入力画像がどの程度ノイズが多いかを示します。これは、モデルが拡散プロセスにおける自分の位置を決定するのに役立ちます。モデルの出力を得るには `sample` メソッドを使用します: ```py >>> with torch.no_grad(): ... noisy_residual = model(sample=noisy_sample, timestep=2).sample ``` しかし、実際の例を生成するには、ノイズ除去プロセスをガイドするスケジューラが必要です。次のセクションでは、モデルをスケジューラと組み合わせる方法を学びます。 ## スケジューラ スケジューラは、モデルの出力(この場合は `noisy_residual` )が与えられたときに、ノイズの多いサンプルからノイズの少ないサンプルへの移行を管理します。 <Tip> 🧨 Diffusersは拡散システムを構築するためのツールボックスです。[`DiffusionPipeline`]は事前に構築された拡散システムを使い始めるのに便利な方法ですが、独自のモデルとスケジューラコンポーネントを個別に選択してカスタム拡散システムを構築することもできます。 </Tip> この案内では、[`DDPMScheduler`]を[`~diffusers.ConfigMixin.from_config`]メソッドでインスタンス化します: ```py >>> from diffusers import DDPMScheduler >>> scheduler = DDPMScheduler.from_config(repo_id) >>> scheduler DDPMScheduler { "_class_name": "DDPMScheduler", "_diffusers_version": "0.13.1", "beta_end": 0.02, "beta_schedule": "linear", "beta_start": 0.0001, "clip_sample": true, "clip_sample_range": 1.0, "num_train_timesteps": 1000, "prediction_type": "epsilon", "trained_betas": null, "variance_type": "fixed_small" } ``` <Tip> 💡 スケジューラがどのようにコンフィギュレーションからインスタンス化されるかに注目してください。モデルとは異なり、スケジューラは学習可能な重みを持たず、パラメーターを持ちません! </Tip> 最も重要なパラメータは以下の通りです: * num_train_timesteps`: ノイズ除去処理の長さ、言い換えれば、ランダムな正規分布をデータサンプルに処理するのに必要なタイムステップ数です。 * `beta_schedule`: 生成とトレーニングに使用するノイズスケジュールのタイプ。 * `beta_start` と `beta_end`: ノイズスケジュールの開始値と終了値。 少しノイズの少ない画像を予測するには、スケジューラの [`~diffusers.DDPMScheduler.step`] メソッドに以下を渡します: モデルの出力、`timestep`、現在の `sample`。 ```py >>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample >>> less_noisy_sample.shape ``` `less_noisy_sample`は次の`timestep`に渡すことができ、そこでさらにノイズが少なくなります! では、すべてをまとめて、ノイズ除去プロセス全体を視覚化してみましょう。 まず、ノイズ除去された画像を後処理して `PIL.Image` として表示する関数を作成します: ```py >>> import PIL.Image >>> import numpy as np >>> def display_sample(sample, i): ... image_processed = sample.cpu().permute(0, 2, 3, 1) ... 
image_processed = (image_processed + 1.0) * 127.5 ... image_processed = image_processed.numpy().astype(np.uint8) ... image_pil = PIL.Image.fromarray(image_processed[0]) ... display(f"Image at step {i}") ... display(image_pil) ``` ノイズ除去処理を高速化するために入力とモデルをGPUに移します: ```py >>> model.to("cuda") >>> noisy_sample = noisy_sample.to("cuda") ``` ここで、ノイズが少なくなったサンプルの残りのノイズを予測するノイズ除去ループを作成し、スケジューラを使ってさらにノイズの少ないサンプルを計算します: ```py >>> import tqdm >>> sample = noisy_sample >>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): ... # 1. predict noise residual ... with torch.no_grad(): ... residual = model(sample, t).sample ... # 2. compute less noisy image and set x_t -> x_t-1 ... sample = scheduler.step(residual, t, sample).prev_sample ... # 3. optionally look at image ... if (i + 1) % 50 == 0: ... display_sample(sample, i + 1) ``` 何もないところから猫が生成されるのを、座って見てください!😻 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/> </div> ## 次のステップ このクイックツアーで、🧨ディフューザーを使ったクールな画像をいくつか作成できたと思います!次のステップとして * モデルをトレーニングまたは微調整については、[training](./tutorials/basic_training)チュートリアルを参照してください。 * 様々な使用例については、公式およびコミュニティの[training or finetuning scripts](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples)の例を参照してください。 * スケジューラのロード、アクセス、変更、比較については[Using different Schedulers](./using-diffusers/schedulers)ガイドを参照してください。 * プロンプトエンジニアリング、スピードとメモリの最適化、より高品質な画像を生成するためのヒントやトリックについては、[Stable Diffusion](./stable_diffusion)ガイドを参照してください。 * 🧨 Diffusers の高速化については、最適化された [PyTorch on a GPU](./optimization/fp16)のガイド、[Stable Diffusion on Apple Silicon (M1/M2)](./optimization/mps)と[ONNX Runtime](./optimization/onnx)を参照してください。
diffusers/docs/source/ja/quicktour.md/0
{ "file_path": "diffusers/docs/source/ja/quicktour.md", "repo_id": "diffusers", "token_count": 7859 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Apple Silicon (M1/M2)에서 Stable Diffusion을 사용하는 방법 Diffusers는 Stable Diffusion 추론을 위해 PyTorch `mps`를 사용해 Apple 실리콘과 호환됩니다. 다음은 Stable Diffusion이 있는 M1 또는 M2 컴퓨터를 사용하기 위해 따라야 하는 단계입니다. ## 요구 사항 - Apple silicon (M1/M2) 하드웨어의 Mac 컴퓨터. - macOS 12.6 또는 이후 (13.0 또는 이후 추천). - Python arm64 버전 - PyTorch 2.0(추천) 또는 1.13(`mps`를 지원하는 최소 버전). Yhttps://pytorch.org/get-started/locally/의 지침에 따라 `pip` 또는 `conda`로 설치할 수 있습니다. ## 추론 파이프라인 아래 코도는 익숙한 `to()` 인터페이스를 사용하여 `mps` 백엔드로 Stable Diffusion 파이프라인을 M1 또는 M2 장치로 이동하는 방법을 보여줍니다. <Tip warning={true}> **PyTorch 1.13을 사용 중일 때 ** 추가 일회성 전달을 사용하여 파이프라인을 "프라이밍"하는 것을 추천합니다. 이것은 발견한 이상한 문제에 대한 임시 해결 방법입니다. 첫 번째 추론 전달은 후속 전달와 약간 다른 결과를 생성합니다. 이 전달은 한 번만 수행하면 되며 추론 단계를 한 번만 사용하고 결과를 폐기해도 됩니다. </Tip> 이전 팁에서 설명한 것들을 포함한 여러 문제를 해결하므로 PyTorch 2 이상을 사용하는 것이 좋습니다. ```python # `huggingface-cli login`에 로그인되어 있음을 확인 from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") pipe = pipe.to("mps") # 컴퓨터가 64GB 이하의 RAM 램일 때 추천 pipe.enable_attention_slicing() prompt = "a photo of an astronaut riding a horse on mars" # 처음 "워밍업" 전달 (위 설명을 보세요) _ = pipe(prompt, num_inference_steps=1) # 결과는 워밍업 전달 후의 CPU 장치의 결과와 일치합니다. image = pipe(prompt).images[0] ``` ## 성능 추천 M1/M2 성능은 메모리 압력에 매우 민감합니다. 시스템은 필요한 경우 자동으로 스왑되지만 스왑할 때 성능이 크게 저하됩니다. 특히 컴퓨터의 시스템 RAM이 64GB 미만이거나 512 × 512픽셀보다 큰 비표준 해상도에서 이미지를 생성하는 경우, 추론 중에 메모리 압력을 줄이고 스와핑을 방지하기 위해 *어텐션 슬라이싱*을 사용하는 것이 좋습니다. 어텐션 슬라이싱은 비용이 많이 드는 어텐션 작업을 한 번에 모두 수행하는 대신 여러 단계로 수행합니다. 일반적으로 범용 메모리가 없는 컴퓨터에서 ~20%의 성능 영향을 미치지만 64GB 이상이 아닌 경우 대부분의 Apple Silicon 컴퓨터에서 *더 나은 성능*이 관찰되었습니다. ```python pipeline.enable_attention_slicing() ``` ## Known Issues - 여러 프롬프트를 배치로 생성하는 것은 [충돌이 발생하거나 안정적으로 작동하지 않습니다](https://github.com/huggingface/diffusers/issues/363). 우리는 이것이 [PyTorch의 `mps` 백엔드](https://github.com/pytorch/pytorch/issues/84039)와 관련이 있다고 생각합니다. 이 문제는 해결되고 있지만 지금은 배치 대신 반복 방법을 사용하는 것이 좋습니다.
diffusers/docs/source/ko/optimization/mps.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/mps.md", "repo_id": "diffusers", "token_count": 2538 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 다양한 Stable Diffusion 포맷 불러오기 Stable Diffusion 모델들은 학습 및 저장된 프레임워크와 다운로드 위치에 따라 다양한 형식으로 제공됩니다. 이러한 형식을 🤗 Diffusers에서 사용할 수 있도록 변환하면 추론을 위한 [다양한 스케줄러 사용](schedulers), 사용자 지정 파이프라인 구축, 추론 속도 최적화를 위한 다양한 기법과 방법 등 라이브러리에서 지원하는 모든 기능을 사용할 수 있습니다. <Tip> 우리는 `.safetensors` 형식을 추천합니다. 왜냐하면 기존의 pickled 파일은 취약하고 머신에서 코드를 실행할 때 악용될 수 있는 것에 비해 훨씬 더 안전합니다. (safetensors 불러오기 가이드에서 자세히 알아보세요.) </Tip> 이 가이드에서는 다른 Stable Diffusion 형식을 🤗 Diffusers와 호환되도록 변환하는 방법을 설명합니다. ## PyTorch .ckpt 체크포인트 또는 `.ckpt` 형식은 일반적으로 모델을 저장하는 데 사용됩니다. `.ckpt` 파일은 전체 모델을 포함하며 일반적으로 크기가 몇 GB입니다. `.ckpt` 파일을 [~StableDiffusionPipeline.from_ckpt] 메서드를 사용하여 직접 불러와서 사용할 수도 있지만, 일반적으로 두 가지 형식을 모두 사용할 수 있도록 `.ckpt` 파일을 🤗 Diffusers로 변환하는 것이 더 좋습니다. `.ckpt` 파일을 변환하는 두 가지 옵션이 있습니다. Space를 사용하여 체크포인트를 변환하거나 스크립트를 사용하여 `.ckpt` 파일을 변환합니다. ### Space로 변환하기 `.ckpt` 파일을 변환하는 가장 쉽고 편리한 방법은 SD에서 Diffusers로 스페이스를 사용하는 것입니다. Space의 지침에 따라 .ckpt 파일을 변환 할 수 있습니다. 이 접근 방식은 기본 모델에서는 잘 작동하지만 더 많은 사용자 정의 모델에서는 어려움을 겪을 수 있습니다. 빈 pull request나 오류를 반환하면 Space가 실패한 것입니다. 이 경우 스크립트를 사용하여 `.ckpt` 파일을 변환해 볼 수 있습니다. ### 스크립트로 변환하기 🤗 Diffusers는 `.ckpt`  파일 변환을 위한 변환 스크립트를 제공합니다. 이 접근 방식은 위의 Space보다 더 안정적입니다. 시작하기 전에 스크립트를 실행할 🤗 Diffusers의 로컬 클론(clone)이 있는지 확인하고 Hugging Face 계정에 로그인하여 pull request를 열고 변환된 모델을 허브에 푸시할 수 있도록 하세요. ```bash huggingface-cli login ``` 스크립트를 사용하려면: 1. 변환하려는 `.ckpt`  파일이 포함된 리포지토리를 Git으로 클론(clone)합니다. 이 예제에서는 TemporalNet .ckpt 파일을 변환해 보겠습니다: ```bash git lfs install git clone https://huggingface.co/CiaraRowles/TemporalNet ``` 2. 체크포인트를 변환할 리포지토리에서 pull request를 엽니다: ```bash cd TemporalNet && git fetch origin refs/pr/13:pr/13 git checkout pr/13 ``` 3. 변환 스크립트에서 구성할 입력 인수는 여러 가지가 있지만 가장 중요한 인수는 다음과 같습니다: - `checkpoint_path`: 변환할 `.ckpt` 파일의 경로를 입력합니다. - `original_config_file`: 원래 아키텍처의 구성을 정의하는 YAML 파일입니다. 이 파일을 찾을 수 없는 경우 `.ckpt` 파일을 찾은 GitHub 리포지토리에서 YAML 파일을 검색해 보세요. - `dump_path`: 변환된 모델의 경로 예를 들어, TemporalNet 모델은 Stable Diffusion v1.5 및 ControlNet 모델이기 때문에 ControlNet 리포지토리에서 cldm_v15.yaml 파일을 가져올 수 있습니다. 4. 이제 스크립트를 실행하여 .ckpt 파일을 변환할 수 있습니다: ```bash python ../diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path temporalnetv3.ckpt --original_config_file cldm_v15.yaml --dump_path ./ --controlnet ``` 5. 변환이 완료되면 변환된 모델을 업로드하고 결과물을 pull request [pull request](https://huggingface.co/CiaraRowles/TemporalNet/discussions/13)를 테스트하세요! ```bash git push origin pr/13:refs/pr/13 ``` ## **Keras .pb or .h5** 🧪 이 기능은 실험적인 기능입니다. 현재로서는 Stable Diffusion v1 체크포인트만 변환 KerasCV Space에서 지원됩니다. [KerasCV](https://keras.io/keras_cv/)는 [Stable Diffusion](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion)  v1 및 v2에 대한 학습을 지원합니다. 
그러나 추론 및 배포를 위한 Stable Diffusion 모델 실험을 제한적으로 지원하는 반면, 🤗 Diffusers는 다양한 [noise schedulers](https://huggingface.co/docs/diffusers/using-diffusers/schedulers), [flash attention](https://huggingface.co/docs/diffusers/optimization/xformers), and [other optimization techniques](https://huggingface.co/docs/diffusers/optimization/fp16) 등 이러한 목적을 위한 보다 완벽한 기능을 갖추고 있습니다. [Convert KerasCV](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) Space 변환은 `.pb` 또는 `.h5`을 PyTorch로 변환한 다음, 추론할 수 있도록 [`StableDiffusionPipeline`] 으로 감싸서 준비합니다. 변환된 체크포인트는 Hugging Face Hub의 리포지토리에 저장됩니다. 예제로, textual-inversion으로 학습된 `[sayakpaul/textual-inversion-kerasio](https://huggingface.co/sayakpaul/textual-inversion-kerasio/tree/main)` 체크포인트를 변환해 보겠습니다. 이것은 특수 토큰  `<my-funny-cat>`을 사용하여 고양이로 이미지를 개인화합니다. KerasCV Space 변환에서는 다음을 입력할 수 있습니다: - Hugging Face 토큰. - UNet 과 텍스트 인코더(text encoder) 가중치를 다운로드하는 경로입니다. 모델을 어떻게 학습할지 방식에 따라, UNet과 텍스트 인코더의 경로를 모두 제공할 필요는 없습니다. 예를 들어, textual-inversion에는 텍스트 인코더의 임베딩만 필요하고 텍스트-이미지(text-to-image) 모델 변환에는 UNet 가중치만 필요합니다. - Placeholder 토큰은 textual-inversion 모델에만 적용됩니다. - `output_repo_prefix`는 변환된 모델이 저장되는 리포지토리의 이름입니다. **Submit** (제출) 버튼을 클릭하면 KerasCV 체크포인트가 자동으로 변환됩니다! 체크포인트가 성공적으로 변환되면, 변환된 체크포인트가 포함된 새 리포지토리로 연결되는 링크가 표시됩니다. 새 리포지토리로 연결되는 링크를 따라가면 변환된 모델을 사용해 볼 수 있는 추론 위젯이 포함된 모델 카드가 생성된 KerasCV Space 변환을 확인할 수 있습니다. 코드를 사용하여 추론을 실행하려면 모델 카드의 오른쪽 상단 모서리에 있는 **Use in Diffusers**  버튼을 클릭하여 예시 코드를 복사하여 붙여넣습니다: ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline") ``` 그러면 다음과 같은 이미지를 생성할 수 있습니다: ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained("sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline") pipeline.to("cuda") placeholder_token = "<my-funny-cat-token>" prompt = f"two {placeholder_token} getting married, photorealistic, high quality" image = pipeline(prompt, num_inference_steps=50).images[0] ``` ## **A1111 LoRA files** [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (A1111)은 Stable Diffusion을 위해 널리 사용되는 웹 UI로, [Civitai](https://civitai.com/) 와 같은 모델 공유 플랫폼을 지원합니다. 특히 LoRA 기법으로 학습된 모델은 학습 속도가 빠르고 완전히 파인튜닝된 모델보다 파일 크기가 훨씬 작기 때문에 인기가 높습니다. 🤗 Diffusers는 [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]:를 사용하여 A1111 LoRA 체크포인트 불러오기를 지원합니다: ```py from diffusers import DiffusionPipeline, UniPCMultistepScheduler import torch pipeline = DiffusionPipeline.from_pretrained( "andite/anything-v4.0", torch_dtype=torch.float16, safety_checker=None ).to("cuda") pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config) ``` Civitai에서 LoRA 체크포인트를 다운로드하세요; 이 예제에서는  [Howls Moving Castle,Interior/Scenery LoRA (Ghibli Stlye)](https://civitai.com/models/14605?modelVersionId=19998) 체크포인트를 사용했지만, 어떤 LoRA 체크포인트든 자유롭게 사용해 보세요! 
```bash !wget https://civitai.com/api/download/models/19998 -O howls_moving_castle.safetensors ``` 메서드를 사용하여 파이프라인에 LoRA 체크포인트를 불러옵니다: ```py pipeline.load_lora_weights(".", weight_name="howls_moving_castle.safetensors") ``` 이제 파이프라인을 사용하여 이미지를 생성할 수 있습니다: ```py prompt = "masterpiece, illustration, ultra-detailed, cityscape, san francisco, golden gate bridge, california, bay area, in the snow, beautiful detailed starry sky" negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture" images = pipeline( prompt=prompt, negative_prompt=negative_prompt, width=512, height=512, num_inference_steps=25, num_images_per_prompt=4, generator=torch.manual_seed(0), ).images ``` 마지막으로, 디스플레이에 이미지를 표시하는 헬퍼 함수를 만듭니다: ```py from PIL import Image def image_grid(imgs, rows=2, cols=2): w, h = imgs[0].size grid = Image.new("RGB", size=(cols * w, rows * h)) for i, img in enumerate(imgs): grid.paste(img, box=(i % cols * w, i // cols * h)) return grid image_grid(images) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/a1111-lora-sf.png" /> </div>
diffusers/docs/source/ko/using-diffusers/other-formats.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/other-formats.md", "repo_id": "diffusers", "token_count": 6831 }
# LoRA finetuning example for CogVideoX

Low-Rank Adaptation of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.

In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:

- Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
- Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
- LoRA attention layers allow control over the extent to which the model is adapted toward new training images via a `scale` parameter.

At the moment, LoRA finetuning has only been tested for [CogVideoX-2b](https://huggingface.co/THUDM/CogVideoX-2b).

> [!NOTE]
> The scripts for CogVideoX come with limited support and may not be fully compatible with different training techniques. They are not feature-rich either and simply serve as minimal examples of finetuning to take inspiration from and improve.
>
> A repository containing memory-optimized finetuning scripts with support for multiple resolutions, dataset preparation, captioning, etc. is available [here](https://github.com/a-r-r-o-w/cogvideox-factory), which will be maintained jointly by the CogVideoX and Diffusers team.

## Data Preparation

The training scripts accept data in two formats.

**First data format**

Two files where one file contains line-separated prompts and another file contains line-separated paths to video data (the path to video files must be relative to the path you pass when specifying `--instance_data_root`). Let's take a look at an example to understand this better!

Assume you've specified `--instance_data_root` as `/dataset`, and that this directory contains the files: `prompts.txt` and `videos.txt`.

The `prompts.txt` file should contain line-separated prompts:

```
A black and white animated sequence featuring a rabbit, named Rabbity Ribfried, and an anthropomorphic goat in a musical, playful environment, showcasing their evolving interaction.
A black and white animated sequence on a ship's deck features a bulldog character, named Bully Bulldoger, showcasing exaggerated facial expressions and body language. The character progresses from confident to focused, then to strained and distressed, displaying a range of emotions as it navigates challenges. The ship's interior remains static in the background, with minimalistic details such as a bell and open door. The character's dynamic movements and changing expressions drive the narrative, with no camera movement to distract from its evolving reactions and physical gestures.
...
```

The `videos.txt` file should contain line-separated paths to video files. Note that the path should be _relative_ to the `--instance_data_root` directory.

```
videos/00000.mp4
videos/00001.mp4
...
```

Overall, this is how your dataset would look if you ran the `tree` command on the dataset root directory:

```
/dataset
├── prompts.txt
├── videos.txt
├── videos
    ├── videos/00000.mp4
    ├── videos/00001.mp4
    ├── ...
```

When using this format, the `--caption_column` must be `prompts.txt` and `--video_column` must be `videos.txt`.
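Before launching training, it can be worth sanity-checking that `prompts.txt` and `videos.txt` line up and that every referenced video actually exists. The helper below is a hypothetical convenience sketch, not part of the training scripts; the `/dataset` path is a placeholder.

```py
from pathlib import Path

def validate_dataset(instance_data_root, caption_file="prompts.txt", video_file="videos.txt"):
    root = Path(instance_data_root)
    prompts = (root / caption_file).read_text().strip().splitlines()
    videos = (root / video_file).read_text().strip().splitlines()

    # Every prompt must have a matching video path, and vice versa.
    assert len(prompts) == len(videos), f"{len(prompts)} prompts vs {len(videos)} video paths"

    # Video paths are relative to --instance_data_root.
    missing = [v for v in videos if not (root / v).is_file()]
    assert not missing, f"missing video files: {missing[:5]}"

    print(f"OK: {len(videos)} prompt/video pairs found")

validate_dataset("/dataset")
```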
**Second data format**

You could use a single CSV file. For the sake of this example, assume you have a `metadata.csv` file. The expected format is:

```
<CAPTION_COLUMN>,<PATH_TO_VIDEO_COLUMN>
"""A black and white animated sequence featuring a rabbit, named Rabbity Ribfried, and an anthropomorphic goat in a musical, playful environment, showcasing their evolving interaction.""","""00000.mp4"""
"""A black and white animated sequence on a ship's deck features a bulldog character, named Bully Bulldoger, showcasing exaggerated facial expressions and body language. The character progresses from confident to focused, then to strained and distressed, displaying a range of emotions as it navigates challenges. The ship's interior remains static in the background, with minimalistic details such as a bell and open door. The character's dynamic movements and changing expressions drive the narrative, with no camera movement to distract from its evolving reactions and physical gestures.""","""00001.mp4"""
...
```

In this case, the `--instance_data_root` should be the location where the videos are stored and `--dataset_name` should be either a path to a local folder or a `load_dataset`-compatible hosted HF Dataset repository or URL. Assuming you have videos of your Minecraft gameplay at `https://huggingface.co/datasets/my-awesome-username/minecraft-videos`, you would have to specify `my-awesome-username/minecraft-videos`.

When using this format, the `--caption_column` must be `<CAPTION_COLUMN>` and `--video_column` must be `<PATH_TO_VIDEO_COLUMN>`.

You are not strictly restricted to the CSV format. As long as the `load_dataset` method supports the file format to load a basic `<PATH_TO_VIDEO_COLUMN>` and `<CAPTION_COLUMN>`, you should be good to go. The reason for going through these dataset organization gymnastics for loading video data is that we found `load_dataset` from the datasets library to not fully support all kinds of video formats. This will undoubtedly be improved in the future.

> [!NOTE]
> CogVideoX works best with long and descriptive LLM-augmented prompts for video generation. We recommend pre-processing your videos by first generating a summary using a VLM and then augmenting the prompts with an LLM. To generate the above captions, we use [MiniCPM-V-26](https://huggingface.co/openbmb/MiniCPM-V-2_6) and [Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct). A very barebones and no-frills example for this is available [here](https://gist.github.com/a-r-r-o-w/4dee20250e82f4e44690a02351324a4a). The official recommendation for augmenting prompts is [ChatGLM](https://huggingface.co/THUDM?search_models=chatglm) and a length of 50-100 words is considered good.

> [!NOTE]
> It is expected that your dataset is already pre-processed. If not, some basic pre-processing can be done by playing with the following parameters:
> `--height`, `--width`, `--fps`, `--max_num_frames`, `--skip_frames_start` and `--skip_frames_end`.
> Presently, all videos in your dataset should contain the same number of video frames when using a training batch size > 1.

<!-- TODO: Implement frame packing in future to address above issue. -->

## Training

You need to set up your development environment by installing the necessary requirements.
The following packages are required: - Torch 2.0 or above based on the training features you are utilizing (might require latest or nightly versions for quantized/deepspeed training) - `pip install diffusers transformers accelerate peft huggingface_hub` for all things modeling and training related - `pip install datasets decord` for loading video training data - `pip install bitsandbytes` for using 8-bit Adam or AdamW optimizers for memory-optimized training - `pip install wandb` optionally for monitoring training logs - `pip install deepspeed` optionally for [DeepSpeed](https://github.com/microsoft/DeepSpeed) training - `pip install prodigyopt` optionally if you would like to use the Prodigy optimizer for training To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` And initialize an [🤗 Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` Or for a default accelerate configuration without answering questions about your environment ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell (e.g., a notebook) ```python from accelerate.utils import write_basic_config write_basic_config() ``` When running `accelerate config`, if we specify torch compile mode to True there can be dramatic speedups. Note also that we use PEFT library as backend for LoRA training, make sure to have `peft>=0.6.0` installed in your environment. If you would like to push your model to the HF Hub after training is completed with a neat model card, make sure you're logged in: ``` huggingface-cli login # Alternatively, you could upload your model manually using: # huggingface-cli upload my-cool-account-name/my-cool-lora-name /path/to/awesome/lora ``` Make sure your data is prepared as described in [Data Preparation](#data-preparation). When ready, you can begin training! Assuming you are training on 50 videos of a similar concept, we have found 1500-2000 steps to work well. The official recommendation, however, is 100 videos with a total of 4000 steps. Assuming you are training on a single GPU with a `--train_batch_size` of `1`: - 1500 steps on 50 videos would correspond to `30` training epochs - 4000 steps on 100 videos would correspond to `40` training epochs The following bash script launches training for text-to-video lora. ```bash #!/bin/bash GPU_IDS="0" accelerate launch --gpu_ids $GPU_IDS examples/cogvideo/train_cogvideox_lora.py \ --pretrained_model_name_or_path THUDM/CogVideoX-2b \ --cache_dir <CACHE_DIR> \ --instance_data_root <PATH_TO_WHERE_VIDEO_FILES_ARE_STORED> \ --dataset_name my-awesome-name/my-awesome-dataset \ --caption_column <CAPTION_COLUMN> \ --video_column <PATH_TO_VIDEO_COLUMN> \ --id_token <ID_TOKEN> \ --validation_prompt "<ID_TOKEN> Spiderman swinging over buildings:::A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, casting a gentle glow on the scene. 
The panda's face is expressive, showing concentration and joy as it plays. The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical atmosphere of this unique musical performance" \ --validation_prompt_separator ::: \ --num_validation_videos 1 \ --validation_epochs 10 \ --seed 42 \ --rank 64 \ --lora_alpha 64 \ --mixed_precision fp16 \ --output_dir /raid/aryan/cogvideox-lora \ --height 480 --width 720 --fps 8 --max_num_frames 49 --skip_frames_start 0 --skip_frames_end 0 \ --train_batch_size 1 \ --num_train_epochs 30 \ --checkpointing_steps 1000 \ --gradient_accumulation_steps 1 \ --learning_rate 1e-3 \ --lr_scheduler cosine_with_restarts \ --lr_warmup_steps 200 \ --lr_num_cycles 1 \ --enable_slicing \ --enable_tiling \ --optimizer Adam \ --adam_beta1 0.9 \ --adam_beta2 0.95 \ --max_grad_norm 1.0 \ --report_to wandb ``` For launching image-to-video finetuning instead, run the `train_cogvideox_image_to_video_lora.py` file instead. Additionally, you will have to pass `--validation_images` as paths to initial images corresponding to `--validation_prompts` for I2V validation to work. To better track our training experiments, we're using the following flags in the command above: * `--report_to wandb` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`. * `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected. Note that setting the `<ID_TOKEN>` is not necessary. From some limited experimentation, we found it to work better (as it resembles [Dreambooth](https://huggingface.co/docs/diffusers/en/training/dreambooth) like training) than without. When provided, the ID_TOKEN is appended to the beginning of each prompt. So, if your ID_TOKEN was `"DISNEY"` and your prompt was `"Spiderman swinging over buildings"`, the effective prompt used in training would be `"DISNEY Spiderman swinging over buildings"`. When not provided, you would either be training without any such additional token or could augment your dataset to apply the token where you wish before starting the training. > [!TIP] > You can pass `--use_8bit_adam` to reduce the memory requirements of training. > You can pass `--video_reshape_mode` video cropping functionality, supporting options: ['center', 'random', 'none']. See [this](https://gist.github.com/glide-the/7658dbfd5f555be0a1a687a4139dba40) notebook for examples. > [!IMPORTANT] > The following settings have been tested at the time of adding CogVideoX LoRA training support: > - Our testing was primarily done on CogVideoX-2b. We will work on CogVideoX-5b and CogVideoX-5b-I2V soon > - One dataset comprised of 70 training videos of resolutions `200 x 480 x 720` (F x H x W). From this, by using frame skipping in data preprocessing, we created two smaller 49-frame and 16-frame datasets for faster experimentation and because the maximum limit recommended by the CogVideoX team is 49 frames. Out of the 70 videos, we created three groups of 10, 25 and 50 videos. All videos were similar in nature of the concept being trained. > - 25+ videos worked best for training new concepts and styles. > - We found that it is better to train with an identifier token that can be specified as `--id_token`. This is similar to Dreambooth-like training but normal finetuning without such a token works too. 
> - The trained concept seemed to work decently well when combined with completely unrelated prompts. We expect even better results if CogVideoX-5B is finetuned.
> - The original repository uses a `lora_alpha` of `1`. We found this not suitable in many runs, possibly due to differences in modeling backends and training settings. Our recommendation is to set the `lora_alpha` to either `rank` or `rank // 2`.
> - If you're training on data whose captions generate bad results with the original model, a `rank` of 64 and above is good and also the recommendation by the team behind CogVideoX. If the generations are already moderately good on your training captions, a `rank` of 16/32 should work. We found that setting the rank too low, say `4`, is not ideal and doesn't produce promising results.
> - The authors of CogVideoX recommend 4000 training steps and 100 training videos overall to achieve the best result. While that might yield the best results, we found from our limited experimentation that 2000 steps and 25 videos could also be sufficient.
> - When using the Prodigy optimizer for training, one can follow the recommendations from [this](https://huggingface.co/blog/sdxl_lora_advanced_script) blog. Prodigy tends to overfit quickly. From our very limited testing, we found a learning rate of `0.5` to be suitable in addition to `--prodigy_use_bias_correction`, `prodigy_safeguard_warmup` and `--prodigy_decouple`.
> - The recommended learning rate by the CogVideoX authors and from our experimentation with Adam/AdamW is between `1e-3` and `1e-4` for a dataset of 25+ videos.
>
> Note that our testing is not exhaustive due to limited time for exploration. Our recommendation would be to play around with the different knobs and dials to find the best settings for your data.

## Inference

Once you have trained a LoRA model, inference can be done by simply loading the LoRA weights into the `CogVideoXPipeline`.

```python
import torch
from diffusers import CogVideoXPipeline
from diffusers.utils import export_to_video

pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16)
# pipe.load_lora_weights("/path/to/lora/weights", adapter_name="cogvideox-lora")
# Or,
pipe.load_lora_weights("my-awesome-hf-username/my-awesome-lora-name", adapter_name="cogvideox-lora")  # If loading from the HF Hub
pipe.to("cuda")

# Assuming lora_alpha=32 and rank=64 for training. If different, set accordingly
pipe.set_adapters(["cogvideox-lora"], [32 / 64])

prompt = (
    "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. The "
    "panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
    "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
    "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
    "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
    "atmosphere of this unique musical performance"
)
frames = pipe(prompt, guidance_scale=6, use_dynamic_cfg=True).frames[0]
export_to_video(frames, "output.mp4", fps=8)
```

If you've trained a LoRA for `CogVideoXImageToVideoPipeline` instead, everything in the above example remains the same except you must also pass an image as initial condition for generation.
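For an image-to-video LoRA, the same loading pattern applies to `CogVideoXImageToVideoPipeline`; you additionally pass the conditioning image. The following is a minimal sketch under stated assumptions: the LoRA repository name, image path, and `lora_alpha`/`rank` values are placeholders, and the base checkpoint is assumed to be [THUDM/CogVideoX-5b-I2V](https://huggingface.co/THUDM/CogVideoX-5b-I2V).

```python
import torch
from diffusers import CogVideoXImageToVideoPipeline
from diffusers.utils import export_to_video, load_image

pipe = CogVideoXImageToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16)
pipe.load_lora_weights("my-awesome-hf-username/my-awesome-i2v-lora-name", adapter_name="cogvideox-lora")
pipe.to("cuda")

# Assuming lora_alpha=64 and rank=64 for training. If different, set accordingly
pipe.set_adapters(["cogvideox-lora"], [64 / 64])

# The first frame used as the initial condition for the generated video
image = load_image("path/to/first_frame.png")
prompt = "A panda, dressed in a small, red jacket and a tiny hat, plays an acoustic guitar in a bamboo forest"

frames = pipe(image=image, prompt=prompt, guidance_scale=6, use_dynamic_cfg=True).frames[0]
export_to_video(frames, "output.mp4", fps=8)
```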
diffusers/examples/cogvideo/README.md/0
{ "file_path": "diffusers/examples/cogvideo/README.md", "repo_id": "diffusers", "token_count": 4734 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import inspect from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F import torch.utils.model_zoo from einops import rearrange, repeat from gmflow.gmflow import GMFlow from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel from diffusers.models.attention_processor import AttnProcessor2_0 from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.models.unets.unet_2d_condition import UNet2DConditionOutput from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.controlnet.pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import is_compiled_module, randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name def clear_cache(): gc.collect() torch.cuda.empty_cache() def coords_grid(b, h, w, homogeneous=False, device=None): y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) # [H, W] stacks = [x, y] if homogeneous: ones = torch.ones_like(x) # [H, W] stacks.append(ones) grid = torch.stack(stacks, dim=0).float() # [2, H, W] or [3, H, W] grid = grid[None].repeat(b, 1, 1, 1) # [B, 2, H, W] or [B, 3, H, W] if device is not None: grid = grid.to(device) return grid def bilinear_sample(img, sample_coords, mode="bilinear", padding_mode="zeros", return_mask=False): # img: [B, C, H, W] # sample_coords: [B, 2, H, W] in image scale if sample_coords.size(1) != 2: # [B, H, W, 2] sample_coords = sample_coords.permute(0, 3, 1, 2) b, _, h, w = sample_coords.shape # Normalize to [-1, 1] x_grid = 2 * sample_coords[:, 0] / (w - 1) - 1 y_grid = 2 * sample_coords[:, 1] / (h - 1) - 1 grid = torch.stack([x_grid, y_grid], dim=-1) # [B, H, W, 2] img = F.grid_sample(img, grid, mode=mode, padding_mode=padding_mode, align_corners=True) if return_mask: mask = (x_grid >= -1) & (y_grid >= -1) & (x_grid <= 1) & (y_grid <= 1) # [B, H, W] return img, mask return img class Dilate: def __init__(self, kernel_size=7, channels=1, device="cpu"): self.kernel_size = kernel_size self.channels = channels gaussian_kernel = torch.ones(1, 1, self.kernel_size, self.kernel_size) gaussian_kernel = gaussian_kernel.repeat(self.channels, 1, 1, 1) self.mean = 
(self.kernel_size - 1) // 2 gaussian_kernel = gaussian_kernel.to(device) self.gaussian_filter = gaussian_kernel def __call__(self, x): x = F.pad(x, (self.mean, self.mean, self.mean, self.mean), "replicate") return torch.clamp(F.conv2d(x, self.gaussian_filter, bias=None), 0, 1) def flow_warp(feature, flow, mask=False, mode="bilinear", padding_mode="zeros"): b, c, h, w = feature.size() assert flow.size(1) == 2 grid = coords_grid(b, h, w).to(flow.device) + flow # [B, 2, H, W] grid = grid.to(feature.dtype) return bilinear_sample(feature, grid, mode=mode, padding_mode=padding_mode, return_mask=mask) def forward_backward_consistency_check(fwd_flow, bwd_flow, alpha=0.01, beta=0.5): # fwd_flow, bwd_flow: [B, 2, H, W] # alpha and beta values are following UnFlow # (https://arxiv.org/abs/1711.07837) assert fwd_flow.dim() == 4 and bwd_flow.dim() == 4 assert fwd_flow.size(1) == 2 and bwd_flow.size(1) == 2 flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1) # [B, H, W] warped_bwd_flow = flow_warp(bwd_flow, fwd_flow) # [B, 2, H, W] warped_fwd_flow = flow_warp(fwd_flow, bwd_flow) # [B, 2, H, W] diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1) # [B, H, W] diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1) threshold = alpha * flow_mag + beta fwd_occ = (diff_fwd > threshold).float() # [B, H, W] bwd_occ = (diff_bwd > threshold).float() return fwd_occ, bwd_occ def numpy2tensor(img): x0 = torch.from_numpy(img.copy()).float().cuda() / 255.0 * 2.0 - 1.0 x0 = torch.stack([x0], dim=0) # einops.rearrange(x0, 'b h w c -> b c h w').clone() return x0.permute(0, 3, 1, 2) def calc_mean_std(feat, eps=1e-5, chunk=1): size = feat.size() assert len(size) == 4 if chunk == 2: feat = torch.cat(feat.chunk(2), dim=3) N, C = size[:2] feat_var = feat.view(N // chunk, C, -1).var(dim=2) + eps feat_std = feat_var.sqrt().view(N, C, 1, 1) feat_mean = feat.view(N // chunk, C, -1).mean(dim=2).view(N // chunk, C, 1, 1) return feat_mean.repeat(chunk, 1, 1, 1), feat_std.repeat(chunk, 1, 1, 1) def adaptive_instance_normalization(content_feat, style_feat, chunk=1): assert content_feat.size()[:2] == style_feat.size()[:2] size = content_feat.size() style_mean, style_std = calc_mean_std(style_feat, chunk) content_mean, content_std = calc_mean_std(content_feat) normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size) return normalized_feat * style_std.expand(size) + style_mean.expand(size) def optimize_feature( sample, flows, occs, correlation_matrix=[], intra_weight=1e2, iters=20, unet_chunk_size=2, optimize_temporal=True ): """ FRESO-guided latent feature optimization * optimize spatial correspondence (match correlation_matrix) * optimize temporal correspondence (match warped_image) """ if (flows is None or occs is None or (not optimize_temporal)) and ( intra_weight == 0 or len(correlation_matrix) == 0 ): return sample # flows=[fwd_flows, bwd_flows]: (N-1)*2*H1*W1 # occs=[fwd_occs, bwd_occs]: (N-1)*H1*W1 # sample: 2N*C*H*W torch.cuda.empty_cache() video_length = sample.shape[0] // unet_chunk_size latent = rearrange(sample.to(torch.float32), "(b f) c h w -> b f c h w", f=video_length) cs = torch.nn.Parameter((latent.detach().clone())) optimizer = torch.optim.Adam([cs], lr=0.2) # unify resolution if flows is not None and occs is not None: scale = sample.shape[2] * 1.0 / flows[0].shape[2] kernel = int(1 / scale) bwd_flow_ = F.interpolate(flows[1] * scale, scale_factor=scale, mode="bilinear").repeat( unet_chunk_size, 1, 1, 1 ) bwd_occ_ = F.max_pool2d(occs[1].unsqueeze(1), 
kernel_size=kernel).repeat( unet_chunk_size, 1, 1, 1 ) # 2(N-1)*1*H1*W1 fwd_flow_ = F.interpolate(flows[0] * scale, scale_factor=scale, mode="bilinear").repeat( unet_chunk_size, 1, 1, 1 ) fwd_occ_ = F.max_pool2d(occs[0].unsqueeze(1), kernel_size=kernel).repeat( unet_chunk_size, 1, 1, 1 ) # 2(N-1)*1*H1*W1 # match frame 0,1,2,3 and frame 1,2,3,0 reshuffle_list = list(range(1, video_length)) + [0] # attention_probs is the GRAM matrix of the normalized feature attention_probs = None for tmp in correlation_matrix: if sample.shape[2] * sample.shape[3] == tmp.shape[1]: attention_probs = tmp # 2N*HW*HW break n_iter = [0] while n_iter[0] < iters: def closure(): optimizer.zero_grad() loss = 0 # temporal consistency loss if optimize_temporal and flows is not None and occs is not None: c1 = rearrange(cs[:, :], "b f c h w -> (b f) c h w") c2 = rearrange(cs[:, reshuffle_list], "b f c h w -> (b f) c h w") warped_image1 = flow_warp(c1, bwd_flow_) warped_image2 = flow_warp(c2, fwd_flow_) loss = ( abs((c2 - warped_image1) * (1 - bwd_occ_)) + abs((c1 - warped_image2) * (1 - fwd_occ_)) ).mean() * 2 # spatial consistency loss if attention_probs is not None and intra_weight > 0: cs_vector = rearrange(cs, "b f c h w -> (b f) (h w) c") # attention_scores = torch.bmm(cs_vector, cs_vector.transpose(-1, -2)) # cs_attention_probs = attention_scores.softmax(dim=-1) cs_vector = cs_vector / ((cs_vector**2).sum(dim=2, keepdims=True) ** 0.5) cs_attention_probs = torch.bmm(cs_vector, cs_vector.transpose(-1, -2)) tmp = F.l1_loss(cs_attention_probs, attention_probs) * intra_weight loss = tmp + loss loss.backward() n_iter[0] += 1 return loss optimizer.step(closure) torch.cuda.empty_cache() return adaptive_instance_normalization(rearrange(cs.data.to(sample.dtype), "b f c h w -> (b f) c h w"), sample) @torch.no_grad() def warp_tensor(sample, flows, occs, saliency, unet_chunk_size): """ Warp images or features based on optical flow Fuse the warped imges or features based on occusion masks and saliency map """ scale = sample.shape[2] * 1.0 / flows[0].shape[2] kernel = int(1 / scale) bwd_flow_ = F.interpolate(flows[1] * scale, scale_factor=scale, mode="bilinear") bwd_occ_ = F.max_pool2d(occs[1].unsqueeze(1), kernel_size=kernel) # (N-1)*1*H1*W1 if scale == 1: bwd_occ_ = Dilate(kernel_size=13, device=sample.device)(bwd_occ_) fwd_flow_ = F.interpolate(flows[0] * scale, scale_factor=scale, mode="bilinear") fwd_occ_ = F.max_pool2d(occs[0].unsqueeze(1), kernel_size=kernel) # (N-1)*1*H1*W1 if scale == 1: fwd_occ_ = Dilate(kernel_size=13, device=sample.device)(fwd_occ_) scale2 = sample.shape[2] * 1.0 / saliency.shape[2] saliency = F.interpolate(saliency, scale_factor=scale2, mode="bilinear") latent = sample.to(torch.float32) video_length = sample.shape[0] // unet_chunk_size warp_saliency = flow_warp(saliency, bwd_flow_) warp_saliency_ = flow_warp(saliency[0:1], fwd_flow_[video_length - 1 : video_length]) for j in range(unet_chunk_size): for ii in range(video_length - 1): i = video_length * j + ii warped_image = flow_warp(latent[i : i + 1], bwd_flow_[ii : ii + 1]) mask = (1 - bwd_occ_[ii : ii + 1]) * saliency[ii + 1 : ii + 2] * warp_saliency[ii : ii + 1] latent[i + 1 : i + 2] = latent[i + 1 : i + 2] * (1 - mask) + warped_image * mask i = video_length * j ii = video_length - 1 warped_image = flow_warp(latent[i : i + 1], fwd_flow_[ii : ii + 1]) mask = (1 - fwd_occ_[ii : ii + 1]) * saliency[ii : ii + 1] * warp_saliency_ latent[ii + i : ii + i + 1] = latent[ii + i : ii + i + 1] * (1 - mask) + warped_image * mask return latent.to(sample.dtype) 
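# Illustrative sketch (hypothetical helper, not used by the pipeline): `flow` is expressed in
# pixel units, so warping with an all-zero flow reduces `flow_warp` to identity bilinear
# sampling and should return the input feature unchanged (for H, W >= 2).
def _flow_warp_zero_flow_check(feature):
    b, _, h, w = feature.shape
    zero_flow = torch.zeros(b, 2, h, w, device=feature.device, dtype=feature.dtype)
    return torch.allclose(flow_warp(feature, zero_flow), feature, atol=1e-4)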
def my_forward( self, steps=[], layers=[0, 1, 2, 3], flows=None, occs=None, correlation_matrix=[], intra_weight=1e2, iters=20, optimize_temporal=True, saliency=None, ): """ Hacked pipe.unet.forward() copied from https://github.com/huggingface/diffusers/blob/v0.19.3/src/diffusers/models/unet_2d_condition.py#L700 if you are using a new version of diffusers, please copy the source code and modify it accordingly (find [HACK] in the code) * restore and return the decoder features * optimize the decoder features * perform background smoothing """ def forward( sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, mid_block_additional_residual: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ) -> Union[UNet2DConditionOutput, Tuple]: r""" The [`UNet2DConditionModel`] forward method. Args: sample (`torch.FloatTensor`): The noisy input tensor with the following shape `(batch, channel, height, width)`. timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.FloatTensor`): The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. encoder_attention_mask (`torch.Tensor`): A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttnProcessor`]. added_cond_kwargs: (`dict`, *optional*): A kwargs dictionary containin additional embeddings that if specified are added to the embeddings that are passed along to the UNet blocks. Returns: [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. 
default_overall_up_factor = 2**self.num_upsamplers # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): logger.info("Forward upsample size to force interpolation output size.") forward_upsample_size = True # ensure attention_mask is a bias, and give it a singleton query_tokens dimension # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None: encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 0. center input if necessary if self.config.center_input_sample: sample = 2 * sample - 1.0 # 1. time timesteps = timestep if not torch.is_tensor(timesteps): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) is_mps = sample.device.type == "mps" is_npu = sample.device.type == "npu" if isinstance(timestep, float): dtype = torch.float32 if (is_mps or is_npu) else torch.float64 else: dtype = torch.int32 if (is_mps or is_npu) else torch.int64 timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) elif len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = timesteps.expand(sample.shape[0]) t_emb = self.time_proj(timesteps) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, timestep_cond) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when num_class_embeds > 0") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) # `Timesteps` does not contain any weights and will always return f32 tensors # there might be better ways to encapsulate this. 
class_labels = class_labels.to(dtype=sample.dtype) class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) if self.config.class_embeddings_concat: emb = torch.cat([emb, class_emb], dim=-1) else: emb = emb + class_emb if self.config.addition_embed_type == "text": aug_emb = self.add_embedding(encoder_hidden_states) elif self.config.addition_embed_type == "text_image": # Kandinsky 2.1 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states) aug_emb = self.add_embedding(text_embs, image_embs) elif self.config.addition_embed_type == "text_time": # SDXL - style if "text_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" ) text_embeds = added_cond_kwargs.get("text_embeds") if "time_ids" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" ) time_ids = added_cond_kwargs.get("time_ids") time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) elif self.config.addition_embed_type == "image": # Kandinsky 2.2 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") aug_emb = self.add_embedding(image_embs) elif self.config.addition_embed_type == "image_hint": # Kandinsky 2.2 - style if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") hint = added_cond_kwargs.get("hint") aug_emb, hint = self.add_embedding(image_embs, hint) sample = torch.cat([sample, hint], dim=1) emb = emb + aug_emb if aug_emb is not None else emb if self.time_embed_act is not None: emb = self.time_embed_act(emb) if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj": encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj": # Kadinsky 2.1 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" ) image_embeds = added_cond_kwargs.get("image_embeds") encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 - style if "image_embeds" not in 
added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" ) image_embeds = added_cond_kwargs.get("image_embeds") encoder_hidden_states = self.encoder_hid_proj(image_embeds) # 2. pre-process sample = self.conv_in(sample) # 3. down is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None is_adapter = mid_block_additional_residual is None and down_block_additional_residuals is not None down_block_res_samples = (sample,) for downsample_block in self.down_blocks: if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: # For t2i-adapter CrossAttnDownBlock2D additional_residuals = {} if is_adapter and len(down_block_additional_residuals) > 0: additional_residuals["additional_residuals"] = down_block_additional_residuals.pop(0) sample, res_samples = downsample_block( hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, **additional_residuals, ) else: sample, res_samples = downsample_block(hidden_states=sample, temb=emb) if is_adapter and len(down_block_additional_residuals) > 0: sample += down_block_additional_residuals.pop(0) down_block_res_samples += res_samples if is_controlnet: new_down_block_res_samples = () for down_block_res_sample, down_block_additional_residual in zip( down_block_res_samples, down_block_additional_residuals ): down_block_res_sample = down_block_res_sample + down_block_additional_residual new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) down_block_res_samples = new_down_block_res_samples # 4. mid if self.mid_block is not None: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask, ) if is_controlnet: sample = sample + mid_block_additional_residual # 5. 
up """ [HACK] restore the decoder features in up_samples """ up_samples = () # down_samples = () for i, upsample_block in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] """ [HACK] restore the decoder features in up_samples [HACK] optimize the decoder features [HACK] perform background smoothing """ if i in layers: up_samples += (sample,) if timestep in steps and i in layers: sample = optimize_feature( sample, flows, occs, correlation_matrix, intra_weight, iters, optimize_temporal=optimize_temporal ) if saliency is not None: sample = warp_tensor(sample, flows, occs, saliency, 2) # if we have not reached the final block and need to forward the # upsample size, we do it here if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, upsample_size=upsample_size, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size ) # 6. post-process if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) """ [HACK] return the output feature as well as the decoder features """ if not return_dict: return (sample,) + up_samples return UNet2DConditionOutput(sample=sample) return forward @torch.no_grad() def get_single_mapping_ind(bwd_flow, bwd_occ, imgs, scale=1.0): """ FLATTEN: Optical fLow-guided attention (Temoporal-guided attention) Find the correspondence between every pixels in a pair of frames [input] bwd_flow: 1*2*H*W bwd_occ: 1*H*W i.e., f2 = warp(f1, bwd_flow) * bwd_occ imgs: 2*3*H*W i.e., [f1,f2] [output] mapping_ind: pixel index correspondence unlinkedmask: indicate whether a pixel has no correspondence i.e., f2 = f1[mapping_ind] * unlinkedmask """ flows = F.interpolate(bwd_flow, scale_factor=1.0 / scale, mode="bilinear")[0][[1, 0]] / scale # 2*H*W _, H, W = flows.shape masks = torch.logical_not(F.interpolate(bwd_occ[None], scale_factor=1.0 / scale, mode="bilinear") > 0.5)[ 0 ] # 1*H*W frames = F.interpolate(imgs, scale_factor=1.0 / scale, mode="bilinear").view(2, 3, -1) # 2*3*HW grid = torch.stack(torch.meshgrid([torch.arange(H), torch.arange(W)]), dim=0).to(flows.device) # 2*H*W warp_grid = torch.round(grid + flows) mask = torch.logical_and( torch.logical_and( torch.logical_and(torch.logical_and(warp_grid[0] >= 0, warp_grid[0] < H), warp_grid[1] >= 0), warp_grid[1] < W, ), masks[0], ).view(-1) # HW warp_grid = warp_grid.view(2, -1) # 2*HW warp_ind = (warp_grid[0] * W + warp_grid[1]).to(torch.long) # HW mapping_ind = torch.zeros_like(warp_ind) - 1 # HW for f0ind, f1ind in enumerate(warp_ind): if mask[f0ind]: if mapping_ind[f1ind] == -1: mapping_ind[f1ind] = f0ind else: targetv = frames[0, :, f1ind] pref0ind = mapping_ind[f1ind] prev = frames[1, :, pref0ind] v = frames[1, :, f0ind] if ((prev - targetv) ** 2).mean() > ((v - targetv) ** 2).mean(): mask[pref0ind] = False mapping_ind[f1ind] = f0ind else: mask[f0ind] = False unusedind = torch.arange(len(mask)).to(mask.device)[~mask] unlinkedmask = 
mapping_ind == -1 mapping_ind[unlinkedmask] = unusedind return mapping_ind, unlinkedmask @torch.no_grad() def get_mapping_ind(bwd_flows, bwd_occs, imgs, scale=1.0): """ FLATTEN: Optical fLow-guided attention (Temoporal-guided attention) Find pixel correspondence between every consecutive frames in a batch [input] bwd_flow: (N-1)*2*H*W bwd_occ: (N-1)*H*W imgs: N*3*H*W [output] fwd_mappings: N*1*HW bwd_mappings: N*1*HW flattn_mask: HW*1*N*N i.e., imgs[i,:,fwd_mappings[i]] corresponds to imgs[0] i.e., imgs[i,:,fwd_mappings[i]][:,bwd_mappings[i]] restore the original imgs[i] """ N, H, W = imgs.shape[0], int(imgs.shape[2] // scale), int(imgs.shape[3] // scale) iterattn_mask = torch.ones(H * W, N, N, dtype=torch.bool).to(imgs.device) for i in range(len(imgs) - 1): one_mask = torch.ones(N, N, dtype=torch.bool).to(imgs.device) one_mask[: i + 1, i + 1 :] = False one_mask[i + 1 :, : i + 1] = False mapping_ind, unlinkedmask = get_single_mapping_ind( bwd_flows[i : i + 1], bwd_occs[i : i + 1], imgs[i : i + 2], scale ) if i == 0: fwd_mapping = [torch.arange(len(mapping_ind)).to(mapping_ind.device)] bwd_mapping = [torch.arange(len(mapping_ind)).to(mapping_ind.device)] iterattn_mask[unlinkedmask[fwd_mapping[-1]]] = torch.logical_and( iterattn_mask[unlinkedmask[fwd_mapping[-1]]], one_mask ) fwd_mapping += [mapping_ind[fwd_mapping[-1]]] bwd_mapping += [torch.sort(fwd_mapping[-1])[1]] fwd_mappings = torch.stack(fwd_mapping, dim=0).unsqueeze(1) bwd_mappings = torch.stack(bwd_mapping, dim=0).unsqueeze(1) return fwd_mappings, bwd_mappings, iterattn_mask.unsqueeze(1) def apply_FRESCO_opt( pipe, steps=[], layers=[0, 1, 2, 3], flows=None, occs=None, correlation_matrix=[], intra_weight=1e2, iters=20, optimize_temporal=True, saliency=None, ): """ Apply FRESCO-based optimization to a StableDiffusionPipeline """ pipe.unet.forward = my_forward( pipe.unet, steps, layers, flows, occs, correlation_matrix, intra_weight, iters, optimize_temporal, saliency ) @torch.no_grad() def get_intraframe_paras(pipe, imgs, frescoProc, prompt_embeds, do_classifier_free_guidance=True, generator=None): """ Get parameters for spatial-guided attention and optimization * perform one step denoising * collect attention feature, stored in frescoProc.controller.stored_attn['decoder_attn'] * compute the gram matrix of the normalized feature for spatial consistency loss """ noise_scheduler = pipe.scheduler timestep = noise_scheduler.timesteps[-1] device = pipe._execution_device B, C, H, W = imgs.shape frescoProc.controller.disable_controller() apply_FRESCO_opt(pipe) frescoProc.controller.clear_store() frescoProc.controller.enable_store() latents = pipe.prepare_latents( imgs.to(pipe.unet.dtype), timestep, B, 1, prompt_embeds.dtype, device, generator=generator, repeat_noise=False ) latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents model_output = pipe.unet( latent_model_input, timestep, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=None, return_dict=False, ) frescoProc.controller.disable_store() # gram matrix of the normalized feature for spatial consistency loss correlation_matrix = [] for tmp in model_output[1:]: latent_vector = rearrange(tmp, "b c h w -> b (h w) c") latent_vector = latent_vector / ((latent_vector**2).sum(dim=2, keepdims=True) ** 0.5) attention_probs = torch.bmm(latent_vector, latent_vector.transpose(-1, -2)) correlation_matrix += [attention_probs.detach().clone().to(torch.float32)] del attention_probs, latent_vector, tmp del model_output clear_cache() return correlation_matrix 
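# Illustrative sketch (hypothetical helper, not used by the pipeline): the "correlation matrix"
# collected by `get_intraframe_paras` above is the gram matrix of the L2-normalized decoder
# feature, i.e. a per-pixel cosine-similarity map of shape B*(HW)*(HW), which serves as the
# spatial-consistency target in `optimize_feature`.
def _normalized_gram_matrix(feature):
    vector = rearrange(feature, "b c h w -> b (h w) c")
    vector = vector / ((vector**2).sum(dim=2, keepdim=True) ** 0.5)
    return torch.bmm(vector, vector.transpose(-1, -2))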
@torch.no_grad() def get_flow_and_interframe_paras(flow_model, imgs): """ Get parameters for temporal-guided attention and optimization * predict optical flow and occlusion mask * compute pixel index correspondence for FLATTEN """ images = torch.stack([torch.from_numpy(img).permute(2, 0, 1).float() for img in imgs], dim=0).cuda() imgs_torch = torch.cat([numpy2tensor(img) for img in imgs], dim=0) reshuffle_list = list(range(1, len(images))) + [0] results_dict = flow_model( images, images[reshuffle_list], attn_splits_list=[2], corr_radius_list=[-1], prop_radius_list=[-1], pred_bidir_flow=True, ) flow_pr = results_dict["flow_preds"][-1] # [2*B, 2, H, W] fwd_flows, bwd_flows = flow_pr.chunk(2) # [B, 2, H, W] fwd_occs, bwd_occs = forward_backward_consistency_check(fwd_flows, bwd_flows) # [B, H, W] warped_image1 = flow_warp(images, bwd_flows) bwd_occs = torch.clamp( bwd_occs + (abs(images[reshuffle_list] - warped_image1).mean(dim=1) > 255 * 0.25).float(), 0, 1 ) warped_image2 = flow_warp(images[reshuffle_list], fwd_flows) fwd_occs = torch.clamp(fwd_occs + (abs(images - warped_image2).mean(dim=1) > 255 * 0.25).float(), 0, 1) attn_mask = [] for scale in [8.0, 16.0, 32.0]: bwd_occs_ = F.interpolate(bwd_occs[:-1].unsqueeze(1), scale_factor=1.0 / scale, mode="bilinear") attn_mask += [ torch.cat((bwd_occs_[0:1].reshape(1, -1) > -1, bwd_occs_.reshape(bwd_occs_.shape[0], -1) > 0.5), dim=0) ] fwd_mappings = [] bwd_mappings = [] interattn_masks = [] for scale in [8.0, 16.0]: fwd_mapping, bwd_mapping, interattn_mask = get_mapping_ind(bwd_flows, bwd_occs, imgs_torch, scale=scale) fwd_mappings += [fwd_mapping] bwd_mappings += [bwd_mapping] interattn_masks += [interattn_mask] interattn_paras = {} interattn_paras["fwd_mappings"] = fwd_mappings interattn_paras["bwd_mappings"] = bwd_mappings interattn_paras["interattn_masks"] = interattn_masks clear_cache() return [fwd_flows, bwd_flows], [fwd_occs, bwd_occs], attn_mask, interattn_paras class AttentionControl: """ Control FRESCO-based attention * enable/diable spatial-guided attention * enable/diable temporal-guided attention * enable/diable cross-frame attention * collect intermediate attention feature (for spatial-guided attention) """ def __init__(self): self.stored_attn = self.get_empty_store() self.store = False self.index = 0 self.attn_mask = None self.interattn_paras = None self.use_interattn = False self.use_cfattn = False self.use_intraattn = False self.intraattn_bias = 0 self.intraattn_scale_factor = 0.2 self.interattn_scale_factor = 0.2 @staticmethod def get_empty_store(): return { "decoder_attn": [], } def clear_store(self): del self.stored_attn torch.cuda.empty_cache() gc.collect() self.stored_attn = self.get_empty_store() self.disable_intraattn() # store attention feature of the input frame for spatial-guided attention def enable_store(self): self.store = True def disable_store(self): self.store = False # spatial-guided attention def enable_intraattn(self): self.index = 0 self.use_intraattn = True self.disable_store() if len(self.stored_attn["decoder_attn"]) == 0: self.use_intraattn = False def disable_intraattn(self): self.index = 0 self.use_intraattn = False self.disable_store() def disable_cfattn(self): self.use_cfattn = False # cross frame attention def enable_cfattn(self, attn_mask=None): if attn_mask: if self.attn_mask: del self.attn_mask torch.cuda.empty_cache() self.attn_mask = attn_mask self.use_cfattn = True else: if self.attn_mask: self.use_cfattn = True else: print("Warning: no valid cross-frame attention parameters available!") 
self.disable_cfattn() def disable_interattn(self): self.use_interattn = False # temporal-guided attention def enable_interattn(self, interattn_paras=None): if interattn_paras: if self.interattn_paras: del self.interattn_paras torch.cuda.empty_cache() self.interattn_paras = interattn_paras self.use_interattn = True else: if self.interattn_paras: self.use_interattn = True else: print("Warning: no valid temporal-guided attention parameters available!") self.disable_interattn() def disable_controller(self): self.disable_intraattn() self.disable_interattn() self.disable_cfattn() def enable_controller(self, interattn_paras=None, attn_mask=None): self.enable_intraattn() self.enable_interattn(interattn_paras) self.enable_cfattn(attn_mask) def forward(self, context): if self.store: self.stored_attn["decoder_attn"].append(context.detach()) if self.use_intraattn and len(self.stored_attn["decoder_attn"]) > 0: tmp = self.stored_attn["decoder_attn"][self.index] self.index = self.index + 1 if self.index >= len(self.stored_attn["decoder_attn"]): self.index = 0 self.disable_store() return tmp return context def __call__(self, context): context = self.forward(context) return context class FRESCOAttnProcessor2_0: """ Hack self attention to FRESCO-based attention * adding spatial-guided attention * adding temporal-guided attention * adding cross-frame attention Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). Usage frescoProc = FRESCOAttnProcessor2_0(2, attn_mask) attnProc = AttnProcessor2_0() attn_processor_dict = {} for k in pipe.unet.attn_processors.keys(): if k.startswith("up_blocks.2") or k.startswith("up_blocks.3"): attn_processor_dict[k] = frescoProc else: attn_processor_dict[k] = attnProc pipe.unet.set_attn_processor(attn_processor_dict) """ def __init__(self, unet_chunk_size=2, controller=None): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.unet_chunk_size = unet_chunk_size self.controller = controller def __call__( self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, ): residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) crossattn = False if encoder_hidden_states is None: encoder_hidden_states = hidden_states if self.controller and self.controller.store: self.controller(hidden_states.detach().clone()) else: crossattn = True if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) # BC * HW * 8D key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query_raw, key_raw = None, None if self.controller 
and self.controller.use_interattn and (not crossattn): query_raw, key_raw = query.clone(), key.clone() inner_dim = key.shape[-1] # 8D head_dim = inner_dim // attn.heads # D """for efficient cross-frame attention""" if self.controller and self.controller.use_cfattn and (not crossattn): video_length = key.size()[0] // self.unet_chunk_size former_frame_index = [0] * video_length attn_mask = None if self.controller.attn_mask is not None: for m in self.controller.attn_mask: if m.shape[1] == key.shape[1]: attn_mask = m # BC * HW * 8D --> B * C * HW * 8D key = rearrange(key, "(b f) d c -> b f d c", f=video_length) # B * C * HW * 8D --> B * C * HW * 8D if attn_mask is None: key = key[:, former_frame_index] else: key = repeat(key[:, attn_mask], "b d c -> b f d c", f=video_length) # B * C * HW * 8D --> BC * HW * 8D key = rearrange(key, "b f d c -> (b f) d c").detach() value = rearrange(value, "(b f) d c -> b f d c", f=video_length) if attn_mask is None: value = value[:, former_frame_index] else: value = repeat(value[:, attn_mask], "b d c -> b f d c", f=video_length) value = rearrange(value, "b f d c -> (b f) d c").detach() # BC * HW * 8D --> BC * HW * 8 * D --> BC * 8 * HW * D query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # BC * 8 * HW2 * D key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # BC * 8 * HW2 * D2 value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) """for spatial-guided intra-frame attention""" if self.controller and self.controller.use_intraattn and (not crossattn): ref_hidden_states = self.controller(None) assert ref_hidden_states.shape == encoder_hidden_states.shape query_ = attn.to_q(ref_hidden_states) key_ = attn.to_k(ref_hidden_states) # BC * 8 * HW * D query_ = query_.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key_ = key_.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) query = F.scaled_dot_product_attention( query_, key_ * self.controller.intraattn_scale_factor, query, attn_mask=torch.eye(query_.size(-2), key_.size(-2), dtype=query.dtype, device=query.device) * self.controller.intraattn_bias, ).detach() del query_, key_ torch.cuda.empty_cache() # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 # output: BC * 8 * HW * D2 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) """for temporal-guided inter-frame attention (FLATTEN)""" if self.controller and self.controller.use_interattn and (not crossattn): del query, key, value torch.cuda.empty_cache() bwd_mapping = None fwd_mapping = None for i, f in enumerate(self.controller.interattn_paras["fwd_mappings"]): if f.shape[2] == hidden_states.shape[2]: fwd_mapping = f bwd_mapping = self.controller.interattn_paras["bwd_mappings"][i] interattn_mask = self.controller.interattn_paras["interattn_masks"][i] video_length = key_raw.size()[0] // self.unet_chunk_size # BC * HW * 8D --> C * 8BD * HW key = rearrange(key_raw, "(b f) d c -> f (b c) d", f=video_length) query = rearrange(query_raw, "(b f) d c -> f (b c) d", f=video_length) # BC * 8 * HW * D --> C * 8BD * HW # key = rearrange(hidden_states, "(b f) h d c -> f (b h c) d", f=video_length) ######## # query = rearrange(hidden_states, "(b f) h d c -> f (b h c) d", f=video_length) ####### value = rearrange(hidden_states, "(b f) h d c -> f (b h c) d", f=video_length) key = torch.gather(key, 2, fwd_mapping.expand(-1, key.shape[1], -1)) query = torch.gather(query, 
2, fwd_mapping.expand(-1, query.shape[1], -1)) value = torch.gather(value, 2, fwd_mapping.expand(-1, value.shape[1], -1)) # C * 8BD * HW --> BHW, C, 8D key = rearrange(key, "f (b c) d -> (b d) f c", b=self.unet_chunk_size) query = rearrange(query, "f (b c) d -> (b d) f c", b=self.unet_chunk_size) value = rearrange(value, "f (b c) d -> (b d) f c", b=self.unet_chunk_size) # BHW * C * 8D --> BHW * C * 8 * D--> BHW * 8 * C * D query = query.view(-1, video_length, attn.heads, head_dim).transpose(1, 2).detach() key = key.view(-1, video_length, attn.heads, head_dim).transpose(1, 2).detach() value = value.view(-1, video_length, attn.heads, head_dim).transpose(1, 2).detach() hidden_states_ = F.scaled_dot_product_attention( query, key * self.controller.interattn_scale_factor, value, # .to(query.dtype)-1.0) * 1e6 - attn_mask=(interattn_mask.repeat(self.unet_chunk_size, 1, 1, 1)), # torch.eye(interattn_mask.shape[2]).to(query.device).to(query.dtype) * 1e4, ) # BHW * 8 * C * D --> C * 8BD * HW hidden_states_ = rearrange(hidden_states_, "(b d) h f c -> f (b h c) d", b=self.unet_chunk_size) hidden_states_ = torch.gather( hidden_states_, 2, bwd_mapping.expand(-1, hidden_states_.shape[1], -1) ).detach() # C * 8BD * HW --> BC * 8 * HW * D hidden_states = rearrange( hidden_states_, "f (b h c) d -> (b f) h d c", b=self.unet_chunk_size, h=attn.heads ) # BC * 8 * HW * D --> BC * HW * 8D hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states def apply_FRESCO_attn(pipe): """ Apply FRESCO-guided attention to a StableDiffusionPipeline """ frescoProc = FRESCOAttnProcessor2_0(2, AttentionControl()) attnProc = AttnProcessor2_0() attn_processor_dict = {} for k in pipe.unet.attn_processors.keys(): if k.startswith("up_blocks.2") or k.startswith("up_blocks.3"): attn_processor_dict[k] = frescoProc else: attn_processor_dict[k] = attnProc pipe.unet.set_attn_processor(attn_processor_dict) return frescoProc def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") def prepare_image(image): if isinstance(image, torch.Tensor): # Batch single image if image.ndim == 3: image = image.unsqueeze(0) image = image.to(dtype=torch.float32) else: # preprocess image if isinstance(image, (PIL.Image.Image, np.ndarray)): image = [image] if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): image = [np.array(i.convert("RGB"))[None, :] for i in image] image = np.concatenate(image, axis=0) elif isinstance(image, list) and isinstance(image[0], np.ndarray): image = np.concatenate([i[None, :] for i in image], axis=0) image = image.transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 return 
image class FrescoV2VPipeline(StableDiffusionControlNetImg2ImgPipeline): r""" Pipeline for video-to-video translation using Stable Diffusion with FRESCO Algorithm. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. controlnet ([`ControlNetModel`] or `List[ControlNetModel]`): Provides additional conditioning to the `unet` during the denoising process. If you set multiple ControlNets as a list, the outputs from each ControlNet are added together to create one combined additional conditioning. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor", "image_encoder"] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel], scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, image_encoder: CLIPVisionModelWithProjection = None, requires_safety_checker: bool = True, ): super().__init__( vae, text_encoder, tokenizer, unet, controlnet, scheduler, safety_checker, feature_extractor, image_encoder, requires_safety_checker, ) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. 
Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) if isinstance(controlnet, (list, tuple)): controlnet = MultiControlNetModel(controlnet) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True) self.control_image_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False ) self.register_to_config(requires_safety_checker=requires_safety_checker) frescoProc = FRESCOAttnProcessor2_0(2, AttentionControl()) attnProc = AttnProcessor2_0() attn_processor_dict = {} for k in self.unet.attn_processors.keys(): if k.startswith("up_blocks.2") or k.startswith("up_blocks.3"): attn_processor_dict[k] = frescoProc else: attn_processor_dict[k] = attnProc self.unet.set_attn_processor(attn_processor_dict) self.frescoProc = frescoProc flow_model = GMFlow( feature_channels=128, num_scales=1, upsample_factor=8, num_head=1, attention_type="swin", ffn_dim_expansion=4, num_transformer_layers=6, ).to(self.device) checkpoint = torch.utils.model_zoo.load_url( "https://huggingface.co/Anonymous-sub/Rerender/resolve/main/models/gmflow_sintel-0c07dcb3.pth", map_location=lambda storage, loc: storage, ) weights = checkpoint["model"] if "model" in checkpoint else checkpoint flow_model.load_state_dict(weights, strict=False) flow_model.eval() self.flow_model = flow_model # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." 
deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds def prepare_ip_adapter_image_embeds( self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance ): if ip_adapter_image_embeds is None: if not isinstance(ip_adapter_image, list): ip_adapter_image = [ip_adapter_image] if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers): raise ValueError( f"`ip_adapter_image` must have same length as the number of IP Adapters. 
Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters." ) image_embeds = [] for single_ip_adapter_image, image_proj_layer in zip( ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers ): output_hidden_state = not isinstance(image_proj_layer, ImageProjection) single_image_embeds, single_negative_image_embeds = self.encode_image( single_ip_adapter_image, device, 1, output_hidden_state ) single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0) single_negative_image_embeds = torch.stack( [single_negative_image_embeds] * num_images_per_prompt, dim=0 ) if do_classifier_free_guidance: single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) single_image_embeds = single_image_embeds.to(device) image_embeds.append(single_image_embeds) else: repeat_dims = [1] image_embeds = [] for single_image_embeds in ip_adapter_image_embeds: if do_classifier_free_guidance: single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2) single_image_embeds = single_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) ) single_negative_image_embeds = single_negative_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:])) ) single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds]) else: single_image_embeds = single_image_embeds.repeat( num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:])) ) image_embeds.append(single_image_embeds) return image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, image, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, ip_adapter_image=None, ip_adapter_image_embeds=None, controlnet_conditioning_scale=1.0, control_guidance_start=0.0, control_guidance_end=1.0, callback_on_step_end_tensor_inputs=None, ): if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # `prompt` needs more sophisticated handling when there are multiple # conditionings. if isinstance(self.controlnet, MultiControlNetModel): if isinstance(prompt, list): logger.warning( f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}" " prompts. The conditionings will be fixed across the prompts." ) # Check `image` is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance( self.controlnet, torch._dynamo.eval_frame.OptimizedModule ) if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): self.check_image(image, prompt, prompt_embeds) elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if not isinstance(image, list): raise TypeError("For multiple controlnets: `image` must be type `list`") # When `image` is a nested list: # (e.g. 
[[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]]) elif any(isinstance(i, list) for i in image): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif len(image) != len(self.controlnet.nets): raise ValueError( f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets." ) for image_ in image: self.check_image(image_, prompt, prompt_embeds) else: assert False # Check `controlnet_conditioning_scale` if ( isinstance(self.controlnet, ControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, ControlNetModel) ): if not isinstance(controlnet_conditioning_scale, float): raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.") elif ( isinstance(self.controlnet, MultiControlNetModel) or is_compiled and isinstance(self.controlnet._orig_mod, MultiControlNetModel) ): if isinstance(controlnet_conditioning_scale, list): if any(isinstance(i, list) for i in controlnet_conditioning_scale): raise ValueError("A single batch of multiple conditionings are supported at the moment.") elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len( self.controlnet.nets ): raise ValueError( "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have" " the same length as the number of controlnets" ) else: assert False if len(control_guidance_start) != len(control_guidance_end): raise ValueError( f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list." ) if isinstance(self.controlnet, MultiControlNetModel): if len(control_guidance_start) != len(self.controlnet.nets): raise ValueError( f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}." ) for start, end in zip(control_guidance_start, control_guidance_end): if start >= end: raise ValueError( f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}." ) if start < 0.0: raise ValueError(f"control guidance start: {start} can't be smaller than 0.") if end > 1.0: raise ValueError(f"control guidance end: {end} can't be larger than 1.0.") if ip_adapter_image is not None and ip_adapter_image_embeds is not None: raise ValueError( "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined." 
) if ip_adapter_image_embeds is not None: if not isinstance(ip_adapter_image_embeds, list): raise ValueError( f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}" ) elif ip_adapter_image_embeds[0].ndim not in [3, 4]: raise ValueError( f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D" ) # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image def check_image(self, image, prompt, prompt_embeds): image_is_pil = isinstance(image, PIL.Image.Image) image_is_tensor = isinstance(image, torch.Tensor) image_is_np = isinstance(image, np.ndarray) image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image) image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor) image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray) if ( not image_is_pil and not image_is_tensor and not image_is_np and not image_is_pil_list and not image_is_tensor_list and not image_is_np_list ): raise TypeError( f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}" ) if image_is_pil: image_batch_size = 1 else: image_batch_size = len(image) if prompt is not None and isinstance(prompt, str): prompt_batch_size = 1 elif prompt is not None and isinstance(prompt, list): prompt_batch_size = len(prompt) elif prompt_embeds is not None: prompt_batch_size = prompt_embeds.shape[0] if image_batch_size != 1 and image_batch_size != prompt_batch_size: raise ValueError( f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}" ) # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image def prepare_control_image( self, image, width, height, batch_size, num_images_per_prompt, device, dtype, do_classifier_free_guidance=False, guess_mode=False, ): image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) image_batch_size = image.shape[0] if image_batch_size == 1: repeat_by = batch_size else: # image batch size is the same as prompt batch size repeat_by = num_images_per_prompt image = image.repeat_interleave(repeat_by, dim=0) image = image.to(device=device, dtype=dtype) if do_classifier_free_guidance and not guess_mode: image = torch.cat([image] * 2) return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] if hasattr(self.scheduler, "set_begin_index"): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps, num_inference_steps - t_start # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents def prepare_latents( self, image, timestep, batch_size, num_images_per_prompt, dtype, device, repeat_noise, generator=None ): if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type 
`torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size deprecation_message = ( f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape if repeat_noise: noise = randn_tensor((1, *shape[1:]), generator=generator, device=device, dtype=dtype) one_tuple = (1,) * (len(shape) - 1) noise = noise.repeat(batch_size, *one_tuple) else: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents @property def guidance_scale(self): return self._guidance_scale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
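    # Concretely, the denoising loop below combines the two noise predictions as
    #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    # so guidance_scale > 1 steers the sample towards the text-conditioned prediction.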
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, frames: Union[List[np.ndarray], torch.FloatTensor] = None, control_frames: Union[List[np.ndarray], torch.FloatTensor] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 0.8, guess_mode: bool = False, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], end_opt_step=15, num_intraattn_steps=1, step_interattn_end=350, **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. frames (`List[np.ndarray]` or `torch.FloatTensor`): The input images to be used as the starting point for the image generation process. control_frames (`List[np.ndarray]` or `torch.FloatTensor`): The ControlNet input images condition to provide guidance to the `unet` for generation. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. strength (`float`, *optional*, defaults to 0.8): Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a starting point and more noise is added the higher the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of 1 essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). 
num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*): Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not provided, embeddings are computed from the `ip_adapter_image` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set the corresponding scale as a list. guess_mode (`bool`, *optional*, defaults to `False`): The ControlNet encoder tries to recognize the content of the input image even if you remove all prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the ControlNet starts applying. control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the ControlNet stops applying. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. end_opt_step: The feature optimization is activated from strength * num_inference_step to end_opt_step. num_intraattn_steps: Apply num_interattn_steps steps of spatial-guided attention. step_interattn_end: Apply temporal-guided attention in [step_interattn_end, 1000] steps Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, control_frames[0], callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs # 2. 
Define call parameters batch_size = len(frames) device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = guess_mode or global_pool_conditions # 3. Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) prompt_embeds = prompt_embeds.repeat(batch_size, 1, 1) negative_prompt_embeds = negative_prompt_embeds.repeat(batch_size, 1, 1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) if ip_adapter_image is not None or ip_adapter_image_embeds is not None: image_embeds = self.prepare_ip_adapter_image_embeds( ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_images_per_prompt, self.do_classifier_free_guidance, ) # 4. Prepare image imgs_np = [] for frame in frames: if isinstance(frame, PIL.Image.Image): imgs_np.append(np.asarray(frame)) else: # np.ndarray imgs_np.append(frame) images_pt = self.image_processor.preprocess(frames).to(dtype=torch.float32) # 5. 
Prepare controlnet_conditioning_image if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image( image=control_frames, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) elif isinstance(controlnet, MultiControlNetModel): control_images = [] for control_image_ in control_frames: control_image_ = self.prepare_control_image( image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) control_images.append(control_image_) control_image = control_images else: assert False self.flow_model.to(device) flows, occs, attn_mask, interattn_paras = get_flow_and_interframe_paras(self.flow_model, imgs_np) correlation_matrix = get_intraframe_paras(self, images_pt, self.frescoProc, prompt_embeds, generator) """ Flexible settings for attention: * Turn off FRESCO-guided attention: frescoProc.controller.disable_controller() Then you can turn on one specific attention submodule * Turn on Cross-frame attention: frescoProc.controller.enable_cfattn(attn_mask) * Turn on Spatial-guided attention: frescoProc.controller.enable_intraattn() * Turn on Temporal-guided attention: frescoProc.controller.enable_interattn(interattn_paras) Flexible settings for optimization: * Turn off Spatial-guided optimization: set optimize_temporal = False in apply_FRESCO_opt() * Turn off Temporal-guided optimization: set correlation_matrix = [] in apply_FRESCO_opt() * Turn off FRESCO-guided optimization: disable_FRESCO_opt(pipe) Flexible settings for background smoothing: * Turn off background smoothing: set saliency = None in apply_FRESCO_opt() """ self.frescoProc.controller.enable_controller(interattn_paras=interattn_paras, attn_mask=attn_mask) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps apply_FRESCO_opt( self, steps=timesteps[:end_opt_step], flows=flows, occs=occs, correlation_matrix=correlation_matrix, saliency=None, optimize_temporal=True, ) clear_cache() # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) self._num_timesteps = len(timesteps) # 6. Prepare latent variables latents = self.prepare_latents( images_pt, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator=generator, repeat_noise=True, ) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Add image embeds for IP-Adapter added_cond_kwargs = ( {"image_embeds": image_embeds} if ip_adapter_image is not None or ip_adapter_image_embeds is not None else None ) # 7.2 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): if i >= num_intraattn_steps: self.frescoProc.controller.disable_intraattn() if t < step_interattn_end: self.frescoProc.controller.disable_interattn() # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # controlnet(s) inference if guess_mode and self.do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input, t, encoder_hidden_states=controlnet_prompt_embeds, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=guess_mode, return_dict=False, ) if guess_mode and self.do_classifier_free_guidance: # Inferred ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. 
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # If we do sequential model offloading, let's offload unet and controlnet # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") self.controlnet.to("cpu") torch.cuda.empty_cache() if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[ 0 ] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
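# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline): how `strength` and
# `control_guidance_start` / `control_guidance_end` shape the schedule. It
# mirrors the logic of `get_timesteps` and of the `controlnet_keep` list built
# in `__call__` above, using plain Python only, so it can be checked without
# loading any model. The default arguments are an assumed configuration.
def _demo_schedules(num_inference_steps=50, strength=0.8, start=0.0, end=0.5):
    # get_timesteps: with strength=0.8 only the last 80% of the schedule is
    # used, i.e. denoising starts from a partially noised version of `frames`.
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    steps_run = num_inference_steps - t_start  # 40 of 50 steps for strength=0.8

    # controlnet_keep: a per-step multiplier that is 1.0 while the step index
    # falls inside the [start, end] window (expressed as fractions of the
    # schedule) and 0.0 outside it, so the ControlNet residuals are switched
    # off outside that window. `__call__` builds one such value per ControlNet
    # and per step; this sketch assumes a single ControlNet.
    controlnet_keep = [
        1.0 - float(i / steps_run < start or (i + 1) / steps_run > end)
        for i in range(steps_run)
    ]
    return steps_run, controlnet_keep
# ---------------------------------------------------------------------------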
diffusers/examples/community/fresco_v2v.py/0
{ "file_path": "diffusers/examples/community/fresco_v2v.py", "repo_id": "diffusers", "token_count": 53457 }
## ---------------------------------------------------------- # A SDXL pipeline can take unlimited weighted prompt # # Author: Andrew Zhu # GitHub: https://github.com/xhinker # Medium: https://medium.com/@xhinker ## ----------------------------------------------------------- import inspect import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection, ) from diffusers import DiffusionPipeline, StableDiffusionXLPipeline from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel from diffusers.models.attention_processor import AttnProcessor2_0, XFormersAttnProcessor from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, is_accelerate_available, is_accelerate_version, is_invisible_watermark_available, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker def parse_prompt_attention(text): """ Parses a string with attention tokens and returns a list of pairs: text and its associated weight. 
Accepted tokens are: (abc) - increases attention to abc by a multiplier of 1.1 (abc:3.12) - increases attention to abc by a multiplier of 3.12 [abc] - decreases attention to abc by a multiplier of 1.1 \\( - literal character '(' \\[ - literal character '[' \\) - literal character ')' \\] - literal character ']' \\ - literal character '\' anything else - just text >>> parse_prompt_attention('normal text') [['normal text', 1.0]] >>> parse_prompt_attention('an (important) word') [['an ', 1.0], ['important', 1.1], [' word', 1.0]] >>> parse_prompt_attention('(unbalanced') [['unbalanced', 1.1]] >>> parse_prompt_attention('\\(literal\\]') [['(literal]', 1.0]] >>> parse_prompt_attention('(unnecessary)(parens)') [['unnecessaryparens', 1.1]] >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') [['a ', 1.0], ['house', 1.5730000000000004], [' ', 1.1], ['on', 1.0], [' a ', 1.1], ['hill', 0.55], [', sun, ', 1.1], ['sky', 1.4641000000000006], ['.', 1.1]] """ import re re_attention = re.compile( r""" \\\(|\\\)|\\\[|\\]|\\\\|\\|\(|\[|:([+-]?[.\d]+)\)| \)|]|[^\\()\[\]:]+|: """, re.X, ) re_break = re.compile(r"\s*\bBREAK\b\s*", re.S) res = [] round_brackets = [] square_brackets = [] round_bracket_multiplier = 1.1 square_bracket_multiplier = 1 / 1.1 def multiply_range(start_position, multiplier): for p in range(start_position, len(res)): res[p][1] *= multiplier for m in re_attention.finditer(text): text = m.group(0) weight = m.group(1) if text.startswith("\\"): res.append([text[1:], 1.0]) elif text == "(": round_brackets.append(len(res)) elif text == "[": square_brackets.append(len(res)) elif weight is not None and len(round_brackets) > 0: multiply_range(round_brackets.pop(), float(weight)) elif text == ")" and len(round_brackets) > 0: multiply_range(round_brackets.pop(), round_bracket_multiplier) elif text == "]" and len(square_brackets) > 0: multiply_range(square_brackets.pop(), square_bracket_multiplier) else: parts = re.split(re_break, text) for i, part in enumerate(parts): if i > 0: res.append(["BREAK", -1]) res.append([part, 1.0]) for pos in round_brackets: multiply_range(pos, round_bracket_multiplier) for pos in square_brackets: multiply_range(pos, square_bracket_multiplier) if len(res) == 0: res = [["", 1.0]] # merge runs of identical weights i = 0 while i + 1 < len(res): if res[i][1] == res[i + 1][1]: res[i][0] += res[i + 1][0] res.pop(i + 1) else: i += 1 return res def get_prompts_tokens_with_weights(clip_tokenizer: CLIPTokenizer, prompt: str): """ Get prompt token ids and weights, this function works for both prompt and negative prompt Args: pipe (CLIPTokenizer) A CLIPTokenizer prompt (str) A prompt string with weights Returns: text_tokens (list) A list contains token ids text_weight (list) A list contains the correspondent weight of token ids Example: import torch from transformers import CLIPTokenizer clip_tokenizer = CLIPTokenizer.from_pretrained( "stablediffusionapi/deliberate-v2" , subfolder = "tokenizer" , dtype = torch.float16 ) token_id_list, token_weight_list = get_prompts_tokens_with_weights( clip_tokenizer = clip_tokenizer ,prompt = "a (red:1.5) cat"*70 ) """ texts_and_weights = parse_prompt_attention(prompt) text_tokens, text_weights = [], [] for word, weight in texts_and_weights: # tokenize and discard the starting and the ending token token = clip_tokenizer(word, truncation=False).input_ids[1:-1] # so that tokenize whatever length prompt # the returned token is a 1d list: [320, 1125, 539, 320] # merge the new tokens to the all tokens holder: text_tokens 
text_tokens = [*text_tokens, *token] # each token chunk will come with one weight, like ['red cat', 2.0] # need to expand weight for each token. chunk_weights = [weight] * len(token) # append the weight back to the weight holder: text_weights text_weights = [*text_weights, *chunk_weights] return text_tokens, text_weights def group_tokens_and_weights(token_ids: list, weights: list, pad_last_block=False): """ Produce tokens and weights in groups and pad the missing tokens Args: token_ids (list) The token ids from tokenizer weights (list) The weights list from function get_prompts_tokens_with_weights pad_last_block (bool) Control if fill the last token list to 75 tokens with eos Returns: new_token_ids (2d list) new_weights (2d list) Example: token_groups,weight_groups = group_tokens_and_weights( token_ids = token_id_list , weights = token_weight_list ) """ bos, eos = 49406, 49407 # this will be a 2d list new_token_ids = [] new_weights = [] while len(token_ids) >= 75: # get the first 75 tokens head_75_tokens = [token_ids.pop(0) for _ in range(75)] head_75_weights = [weights.pop(0) for _ in range(75)] # extract token ids and weights temp_77_token_ids = [bos] + head_75_tokens + [eos] temp_77_weights = [1.0] + head_75_weights + [1.0] # add 77 token and weights chunk to the holder list new_token_ids.append(temp_77_token_ids) new_weights.append(temp_77_weights) # padding the left if len(token_ids) > 0: padding_len = 75 - len(token_ids) if pad_last_block else 0 temp_77_token_ids = [bos] + token_ids + [eos] * padding_len + [eos] new_token_ids.append(temp_77_token_ids) temp_77_weights = [1.0] + weights + [1.0] * padding_len + [1.0] new_weights.append(temp_77_weights) return new_token_ids, new_weights def get_weighted_text_embeddings_sdxl( pipe: StableDiffusionXLPipeline, prompt: str = "", prompt_2: str = None, neg_prompt: str = "", neg_prompt_2: str = None, num_images_per_prompt: int = 1, device: Optional[torch.device] = None, clip_skip: Optional[int] = None, lora_scale: Optional[int] = None, ): """ This function can process long prompt with weights, no length limitation for Stable Diffusion XL Args: pipe (StableDiffusionPipeline) prompt (str) prompt_2 (str) neg_prompt (str) neg_prompt_2 (str) num_images_per_prompt (int) device (torch.device) clip_skip (int) Returns: prompt_embeds (torch.Tensor) neg_prompt_embeds (torch.Tensor) """ device = device or pipe._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(pipe, StableDiffusionXLLoraLoaderMixin): pipe._lora_scale = lora_scale # dynamically adjust the LoRA scale if pipe.text_encoder is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(pipe.text_encoder, lora_scale) else: scale_lora_layers(pipe.text_encoder, lora_scale) if pipe.text_encoder_2 is not None: if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(pipe.text_encoder_2, lora_scale) else: scale_lora_layers(pipe.text_encoder_2, lora_scale) if prompt_2: prompt = f"{prompt} {prompt_2}" if neg_prompt_2: neg_prompt = f"{neg_prompt} {neg_prompt_2}" prompt_t1 = prompt_t2 = prompt neg_prompt_t1 = neg_prompt_t2 = neg_prompt if isinstance(pipe, TextualInversionLoaderMixin): prompt_t1 = pipe.maybe_convert_prompt(prompt_t1, pipe.tokenizer) neg_prompt_t1 = pipe.maybe_convert_prompt(neg_prompt_t1, pipe.tokenizer) prompt_t2 = pipe.maybe_convert_prompt(prompt_t2, pipe.tokenizer_2) neg_prompt_t2 = pipe.maybe_convert_prompt(neg_prompt_t2, pipe.tokenizer_2) eos = pipe.tokenizer.eos_token_id # 
tokenizer 1
    prompt_tokens, prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, prompt_t1)
    neg_prompt_tokens, neg_prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, neg_prompt_t1)

    # tokenizer 2
    prompt_tokens_2, prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, prompt_t2)
    neg_prompt_tokens_2, neg_prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, neg_prompt_t2)

    # padding the shorter one for prompt set 1
    prompt_token_len = len(prompt_tokens)
    neg_prompt_token_len = len(neg_prompt_tokens)

    if prompt_token_len > neg_prompt_token_len:
        # padding the neg_prompt with eos token
        neg_prompt_tokens = neg_prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len)
        neg_prompt_weights = neg_prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len)
    else:
        # padding the prompt
        prompt_tokens = prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len)
        prompt_weights = prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len)

    # padding the shorter one for token set 2
    prompt_token_len_2 = len(prompt_tokens_2)
    neg_prompt_token_len_2 = len(neg_prompt_tokens_2)

    if prompt_token_len_2 > neg_prompt_token_len_2:
        # padding the neg_prompt with eos token
        neg_prompt_tokens_2 = neg_prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
        neg_prompt_weights_2 = neg_prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
    else:
        # padding the prompt
        prompt_tokens_2 = prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
        # pad the tokenizer-2 weights here (not the tokenizer-1 weights)
        prompt_weights_2 = prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2)

    embeds = []
    neg_embeds = []

    prompt_token_groups, prompt_weight_groups = group_tokens_and_weights(prompt_tokens.copy(), prompt_weights.copy())
    neg_prompt_token_groups, neg_prompt_weight_groups = group_tokens_and_weights(
        neg_prompt_tokens.copy(), neg_prompt_weights.copy()
    )
    prompt_token_groups_2, prompt_weight_groups_2 = group_tokens_and_weights(
        prompt_tokens_2.copy(), prompt_weights_2.copy()
    )
    neg_prompt_token_groups_2, neg_prompt_weight_groups_2 = group_tokens_and_weights(
        neg_prompt_tokens_2.copy(), neg_prompt_weights_2.copy()
    )

    # encode the prompt chunk by chunk and collect the embeddings
    for i in range(len(prompt_token_groups)):
        # get positive prompt embeddings with weights
        token_tensor = torch.tensor([prompt_token_groups[i]], dtype=torch.long, device=device)
        weight_tensor = torch.tensor(prompt_weight_groups[i], dtype=torch.float16, device=device)

        token_tensor_2 = torch.tensor([prompt_token_groups_2[i]], dtype=torch.long, device=device)

        # use first text encoder
        prompt_embeds_1 = pipe.text_encoder(token_tensor.to(device), output_hidden_states=True)

        # use second text encoder
        prompt_embeds_2 = pipe.text_encoder_2(token_tensor_2.to(device), output_hidden_states=True)
        pooled_prompt_embeds = prompt_embeds_2[0]

        if clip_skip is None:
            prompt_embeds_1_hidden_states = prompt_embeds_1.hidden_states[-2]
            prompt_embeds_2_hidden_states = prompt_embeds_2.hidden_states[-2]
        else:
            # "2" because SDXL always indexes from the penultimate layer.
prompt_embeds_1_hidden_states = prompt_embeds_1.hidden_states[-(clip_skip + 2)] prompt_embeds_2_hidden_states = prompt_embeds_2.hidden_states[-(clip_skip + 2)] prompt_embeds_list = [prompt_embeds_1_hidden_states, prompt_embeds_2_hidden_states] token_embedding = torch.concat(prompt_embeds_list, dim=-1).squeeze(0) for j in range(len(weight_tensor)): if weight_tensor[j] != 1.0: token_embedding[j] = ( token_embedding[-1] + (token_embedding[j] - token_embedding[-1]) * weight_tensor[j] ) token_embedding = token_embedding.unsqueeze(0) embeds.append(token_embedding) # get negative prompt embeddings with weights neg_token_tensor = torch.tensor([neg_prompt_token_groups[i]], dtype=torch.long, device=device) neg_token_tensor_2 = torch.tensor([neg_prompt_token_groups_2[i]], dtype=torch.long, device=device) neg_weight_tensor = torch.tensor(neg_prompt_weight_groups[i], dtype=torch.float16, device=device) # use first text encoder neg_prompt_embeds_1 = pipe.text_encoder(neg_token_tensor.to(device), output_hidden_states=True) neg_prompt_embeds_1_hidden_states = neg_prompt_embeds_1.hidden_states[-2] # use second text encoder neg_prompt_embeds_2 = pipe.text_encoder_2(neg_token_tensor_2.to(device), output_hidden_states=True) neg_prompt_embeds_2_hidden_states = neg_prompt_embeds_2.hidden_states[-2] negative_pooled_prompt_embeds = neg_prompt_embeds_2[0] neg_prompt_embeds_list = [neg_prompt_embeds_1_hidden_states, neg_prompt_embeds_2_hidden_states] neg_token_embedding = torch.concat(neg_prompt_embeds_list, dim=-1).squeeze(0) for z in range(len(neg_weight_tensor)): if neg_weight_tensor[z] != 1.0: neg_token_embedding[z] = ( neg_token_embedding[-1] + (neg_token_embedding[z] - neg_token_embedding[-1]) * neg_weight_tensor[z] ) neg_token_embedding = neg_token_embedding.unsqueeze(0) neg_embeds.append(neg_token_embedding) prompt_embeds = torch.cat(embeds, dim=1) negative_prompt_embeds = torch.cat(neg_embeds, dim=1) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1).view( bs_embed * num_images_per_prompt, -1 ) negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1).view( bs_embed * num_images_per_prompt, -1 ) if pipe.text_encoder is not None: if isinstance(pipe, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(pipe.text_encoder, lora_scale) if pipe.text_encoder_2 is not None: if isinstance(pipe, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(pipe.text_encoder_2, lora_scale) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # ------------------------------------------------------------------------------------------------------------------------------- # reuse the backbone code from StableDiffusionXLPipeline # ------------------------------------------------------------------------------------------------------------------------------- 
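# Illustrative sketch (not part of the original file): how the weighted-prompt
# helpers above fit together. `parse_prompt_attention` turns the "(word:1.5)"
# syntax into (text, weight) pairs, `get_prompts_tokens_with_weights` tokenizes
# each piece and repeats its weight once per token id (it needs a real
# CLIPTokenizer, so it is only referenced here), and `group_tokens_and_weights`
# packs the flat lists into 77-token chunks (BOS + 75 tokens + EOS) that the
# text encoders consume one chunk at a time. Inside
# `get_weighted_text_embeddings_sdxl`, each token embedding e_j with weight w_j
# is then moved relative to the EOS embedding e_eos via
# e_j <- e_eos + (e_j - e_eos) * w_j before the chunks are concatenated along
# the sequence dimension.
def _demo_weighted_prompt_helpers():
    # "a (red:1.5) cat" -> [["a ", 1.0], ["red", 1.5], [" cat", 1.0]]
    pairs = parse_prompt_attention("a (red:1.5) cat")

    # 100 dummy token ids are split into two 77-long chunks; the second chunk
    # is padded with EOS tokens because pad_last_block=True.
    token_groups, weight_groups = group_tokens_and_weights(
        token_ids=list(range(100)), weights=[1.0] * 100, pad_last_block=True
    )
    return pairs, token_groups, weight_groups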
logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0" , torch_dtype = torch.float16 , use_safetensors = True , variant = "fp16" , custom_pipeline = "lpw_stable_diffusion_xl", ) prompt = "a white cat running on the grass"*20 prompt2 = "play a football"*20 prompt = f"{prompt},{prompt2}" neg_prompt = "blur, low quality" pipe.to("cuda") images = pipe( prompt = prompt , negative_prompt = neg_prompt ).images[0] pipe.to("cpu") torch.cuda.empty_cache() images ``` """ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents def retrieve_latents( encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample" ): if hasattr(encoder_output, "latent_dist") and sample_mode == "sample": return encoder_output.latent_dist.sample(generator) elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax": return encoder_output.latent_dist.mode() elif hasattr(encoder_output, "latents"): return encoder_output.latents else: raise AttributeError("Could not access latents of provided encoder_output") # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps def retrieve_timesteps( scheduler, num_inference_steps: Optional[int] = None, device: Optional[Union[str, torch.device]] = None, timesteps: Optional[List[int]] = None, **kwargs, ): """ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. Args: scheduler (`SchedulerMixin`): The scheduler to get timesteps from. num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` must be `None`. Returns: `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the second element is the number of inference steps. 
""" if timesteps is not None: accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) if not accepts_timesteps: raise ValueError( f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" f" timestep schedules. Please check whether you are using the correct scheduler." ) scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) timesteps = scheduler.timesteps num_inference_steps = len(timesteps) else: scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) timesteps = scheduler.timesteps return timesteps, num_inference_steps class SDXLLongPromptWeightingPipeline( DiffusionPipeline, StableDiffusionMixin, FromSingleFileMixin, IPAdapterMixin, StableDiffusionXLLoraLoaderMixin, TextualInversionLoaderMixin, ): r""" Pipeline for text-to-image generation using Stable Diffusion XL. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. 
""" model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae" _optional_components = [ "tokenizer", "tokenizer_2", "text_encoder", "text_encoder_2", "image_encoder", "feature_extractor", ] _callback_tensor_inputs = [ "latents", "prompt_embeds", "negative_prompt_embeds", "add_text_embeds", "add_time_ids", "negative_pooled_prompt_embeds", "negative_add_time_ids", ] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, feature_extractor: Optional[CLIPImageProcessor] = None, image_encoder: Optional[CLIPVisionModelWithProjection] = None, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, image_encoder=image_encoder, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.mask_processor = VaeImageProcessor( vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True ) self.default_sample_size = ( self.unet.config.sample_size if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size") else 128 ) add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None def enable_model_cpu_offload(self, gpu_id=0): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. """ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") device = torch.device(f"cuda:{gpu_id}") if self.device.type != "cpu": self.to("cpu", silence_dtype_warnings=True) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) model_sequence = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) model_sequence.extend([self.unet, self.vae]) hook = None for cpu_offloaded_model in model_sequence: _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) # We'll offload the last model manually. 
self.final_offload_hook = hook # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
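
        Example (an illustrative sketch, not part of the original documentation; `pipe` is assumed to be an
        instance of this pipeline and the prompt strings are placeholders):

        ```py
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = pipe.encode_prompt(
            prompt="a photo of an astronaut riding a horse",
            negative_prompt="low quality",
            num_images_per_prompt=1,
            do_classifier_free_guidance=True,
        )
        # These tensors can be fed back to `__call__` via `prompt_embeds=...`, `pooled_prompt_embeds=...`, etc.
        ```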
""" device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin): self._lora_scale = lora_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt # textual inversion: process multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder( text_input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2: pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt, negative_prompt_2] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2: negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None): dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) if output_hidden_states: image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2] image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_enc_hidden_states = self.image_encoder( torch.zeros_like(image), output_hidden_states=True ).hidden_states[-2] uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave( num_images_per_prompt, dim=0 ) return image_enc_hidden_states, uncond_image_enc_hidden_states else: image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare 
extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, prompt_2, height, width, strength, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None): # get the original timestep using init_timestep if denoising_start is None: init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) else: t_start = 0 timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] # Strength is irrelevant if we directly request a timestep to start at; # that is, strength is determined by the denoising_start instead. if denoising_start is not None: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_start * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item() if self.scheduler.order == 2 and num_inference_steps % 2 == 0: # if the scheduler is a 2nd order scheduler we might have to do +1 # because `num_inference_steps` might be even given that every timestep # (except the highest one) is duplicated. If `num_inference_steps` is even it would # mean that we cut the timesteps in the middle of the denoising step # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1 # we ensure that the denoising process always ends after the 2nd derivate step of the scheduler num_inference_steps = num_inference_steps + 1 # because t_n+1 >= t_n, we slice the timesteps starting from the end timesteps = timesteps[-num_inference_steps:] return timesteps, num_inference_steps return timesteps, num_inference_steps - t_start def prepare_latents( self, image, mask, width, height, num_channels_latents, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True, latents=None, is_strength_max=True, return_noise=False, return_image_latents=False, ): batch_size *= num_images_per_prompt if image is None: shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents elif mask is None: if not isinstance(image, (torch.Tensor, Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) # Offload text encoder if `enable_model_cpu_offload` was enabled if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.text_encoder_2.to("cpu") torch.cuda.empty_cache() image = image.to(device=device, dtype=dtype) if image.shape[1] == 4: init_latents = image else: # make sure the VAE is in float32 mode, as it overflows in float16 if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) elif isinstance(generator, list): init_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) init_latents = init_latents.to(dtype) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) if add_noise: shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents else: shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if (image is None or timestep is None) and not is_strength_max: raise ValueError( "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise." "However, either the image or the noise timestep has not been provided." 
) if image.shape[1] == 4: image_latents = image.to(device=device, dtype=dtype) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) elif return_image_latents or (latents is None and not is_strength_max): image = image.to(device=device, dtype=dtype) image_latents = self._encode_vae_image(image=image, generator=generator) image_latents = image_latents.repeat(batch_size // image_latents.shape[0], 1, 1, 1) if latents is None and add_noise: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # if strength is 1. then initialise the latents to noise, else initial to image + noise latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep) # if pure noise then scale the initial latents by the Scheduler's init sigma latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents elif add_noise: noise = latents.to(device) latents = noise * self.scheduler.init_noise_sigma else: noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = image_latents.to(device) outputs = (latents,) if return_noise: outputs += (noise,) if return_image_latents: outputs += (image_latents,) return outputs def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator): dtype = image.dtype if self.vae.config.force_upcast: image = image.float() self.vae.to(dtype=torch.float32) if isinstance(generator, list): image_latents = [ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(image.shape[0]) ] image_latents = torch.cat(image_latents, dim=0) else: image_latents = retrieve_latents(self.vae.encode(image), generator=generator) if self.vae.config.force_upcast: self.vae.to(dtype) image_latents = image_latents.to(dtype) image_latents = self.vae.config.scaling_factor * image_latents return image_latents def prepare_mask_latents( self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance ): # resize the mask to latents shape as we concatenate the mask to the latents # we do that before converting to dtype to avoid breaking in case we're using cpu_offload # and half precision mask = torch.nn.functional.interpolate( mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) ) mask = mask.to(device=device, dtype=dtype) # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method if mask.shape[0] < batch_size: if not batch_size % mask.shape[0] == 0: raise ValueError( "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" " of masks that you pass is divisible by the total requested batch size." ) mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1) mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask if masked_image is not None and masked_image.shape[1] == 4: masked_image_latents = masked_image else: masked_image_latents = None if masked_image is not None: if masked_image_latents is None: masked_image = masked_image.to(device=device, dtype=dtype) masked_image_latents = self._encode_vae_image(masked_image, generator=generator) if masked_image_latents.shape[0] < batch_size: if not batch_size % masked_image_latents.shape[0] == 0: raise ValueError( "The passed images and the required batch size don't match. 
Images are supposed to be duplicated" f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." " Make sure the number of images that you pass is divisible by the total requested batch size." ) masked_image_latents = masked_image_latents.repeat( batch_size // masked_image_latents.shape[0], 1, 1, 1 ) masked_image_latents = ( torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents ) # aligning device to prevent device errors when concating it with the latent model input masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) return mask, masked_image_latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." ) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, (AttnProcessor2_0, XFormersAttnProcessor), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.Tensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb @property def guidance_scale(self): return self._guidance_scale @property def guidance_rescale(self): return self._guidance_rescale @property def clip_skip(self): return self._clip_skip # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
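    # (Editorial illustration: in the denoising loop below this translates to
    #     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    # so `guidance_scale = 1` simply returns the text-conditional prediction, i.e. no guidance is applied.)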
@property def do_classifier_free_guidance(self): return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def denoising_end(self): return self._denoising_end @property def denoising_start(self): return self._denoising_start @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: str = None, prompt_2: Optional[str] = None, image: Optional[PipelineImageInput] = None, mask_image: Optional[PipelineImageInput] = None, masked_image_latents: Optional[torch.Tensor] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str`): The prompt to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str`): The prompt to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders image (`PipelineImageInput`, *optional*): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. mask_image (`PipelineImageInput`, *optional*): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. 
When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed will be used. Must be in descending order. denoising_start (`float`, *optional*): When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refine Image Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise (ca. final 20% of timesteps still needed) and should be denoised by a successor pipeline that has `denoising_start` set to 0.8 so that it only denoises the final 20% of the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refine Image Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str`): The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str`): The prompt not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. 
latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.0): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. 
If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) # 0. Default height and width to unet height = height or self.default_sample_size * self.vae_scale_factor width = width or self.default_sample_size * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, height, width, strength, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._guidance_rescale = guidance_rescale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs self._denoising_end = denoising_end self._denoising_start = denoising_start # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if ip_adapter_image is not None: output_hidden_state = False if isinstance(self.unet.encoder_hid_proj, ImageProjection) else True image_embeds, negative_image_embeds = self.encode_image( ip_adapter_image, device, num_images_per_prompt, output_hidden_state ) if self.do_classifier_free_guidance: image_embeds = torch.cat([negative_image_embeds, image_embeds]) # 3. 
Encode input prompt lora_scale = ( self._cross_attention_kwargs.get("scale", None) if self._cross_attention_kwargs is not None else None ) negative_prompt = negative_prompt if negative_prompt is not None else "" ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = get_weighted_text_embeddings_sdxl( pipe=self, prompt=prompt, neg_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, clip_skip=clip_skip, lora_scale=lora_scale, ) dtype = prompt_embeds.dtype if isinstance(image, Image.Image): image = self.image_processor.preprocess(image, height=height, width=width) if image is not None: image = image.to(device=self.device, dtype=dtype) if isinstance(mask_image, Image.Image): mask = self.mask_processor.preprocess(mask_image, height=height, width=width) else: mask = mask_image if mask_image is not None: mask = mask.to(device=self.device, dtype=dtype) if masked_image_latents is not None: masked_image = masked_image_latents elif image.shape[1] == 4: # if image is in latent space, we can't mask it masked_image = None else: masked_image = image * (mask < 0.5) else: mask = None # 4. Prepare timesteps def denoising_value_valid(dnv): return isinstance(dnv, float) and 0 < dnv < 1 timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) if image is not None: timesteps, num_inference_steps = self.get_timesteps( num_inference_steps, strength, device, denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None, ) # check that number of inference steps is not < 1 - as this doesn't make sense if num_inference_steps < 1: raise ValueError( f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline" f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline." ) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) is_strength_max = strength == 1.0 add_noise = True if self.denoising_start is None else False # 5. Prepare latent variables num_channels_latents = self.vae.config.latent_channels num_channels_unet = self.unet.config.in_channels return_image_latents = num_channels_unet == 4 latents = self.prepare_latents( image=image, mask=mask, width=width, height=height, num_channels_latents=num_channels_unet, timestep=latent_timestep, batch_size=batch_size, num_images_per_prompt=num_images_per_prompt, dtype=prompt_embeds.dtype, device=device, generator=generator, add_noise=add_noise, latents=latents, is_strength_max=is_strength_max, return_noise=True, return_image_latents=return_image_latents, ) if mask is not None: if return_image_latents: latents, noise, image_latents = latents else: latents, noise = latents # 5.1 Prepare mask latent variables if mask is not None: mask, masked_image_latents = self.prepare_mask_latents( mask=mask, masked_image=masked_image, batch_size=batch_size * num_images_per_prompt, height=height, width=width, dtype=prompt_embeds.dtype, device=device, generator=generator, do_classifier_free_guidance=self.do_classifier_free_guidance, ) # Check that sizes of mask, masked image and latents match if num_channels_unet == 9: # default case for runwayml/stable-diffusion-inpainting num_channels_mask = mask.shape[1] num_channels_masked_image = masked_image_latents.shape[1] if num_channels_latents + num_channels_mask + num_channels_masked_image != num_channels_unet: raise ValueError( f"Incorrect configuration settings! 
The config of `pipeline.unet`: {self.unet.config} expects" f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" " `pipeline.unet` or your `mask_image` or `image` input." ) elif num_channels_unet != 4: raise ValueError( f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}." ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 6.1 Add image embeds for IP-Adapter added_cond_kwargs = {"image_embeds": image_embeds} if ip_adapter_image is not None else {} height, width = latents.shape[-2:] height = height * self.vae_scale_factor width = width * self.vae_scale_factor original_size = original_size or (height, width) target_size = target_size or (height, width) # 7. Prepare added time ids & embeddings add_text_embeds = pooled_prompt_embeds add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype ) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) # 7.1 Apply denoising_end if ( self.denoising_end is not None and self.denoising_start is not None and denoising_value_valid(self.denoising_end) and denoising_value_valid(self.denoising_start) and self.denoising_start >= self.denoising_end ): raise ValueError( f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: " + f" {self.denoising_end} when using type float." ) elif self.denoising_end is not None and denoising_value_valid(self.denoising_end): discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (self.denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] # 8. Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) self._num_timesteps = len(timesteps) # 9. 
Denoising loop with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) if mask is not None and num_channels_unet == 9: latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1) # predict the noise residual added_cond_kwargs.update({"text_embeds": add_text_embeds, "time_ids": add_time_ids}) noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) if self.do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if mask is not None and num_channels_unet == 4: init_latents_proper = image_latents if self.do_classifier_free_guidance: init_mask, _ = mask.chunk(2) else: init_mask = mask if i < len(timesteps) - 1: noise_timestep = timesteps[i + 1] init_latents_proper = self.scheduler.add_noise( init_latents_proper, noise, torch.tensor([noise_timestep]) ) latents = (1 - init_mask) * init_latents_proper + init_mask * latents if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds) negative_pooled_prompt_embeds = callback_outputs.pop( "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds ) add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents return StableDiffusionXLPipelineOutput(images=image) # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload last model to CPU if hasattr(self, 
"final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image) def text2img( self, prompt: str = None, prompt_2: Optional[str] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling pipeline for text-to-image. Refer to the documentation of the `__call__` method for parameter descriptions. """ return self.__call__( prompt=prompt, prompt_2=prompt_2, height=height, width=width, num_inference_steps=num_inference_steps, timesteps=timesteps, denoising_start=denoising_start, denoising_end=denoising_end, guidance_scale=guidance_scale, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, ip_adapter_image=ip_adapter_image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, output_type=output_type, return_dict=return_dict, cross_attention_kwargs=cross_attention_kwargs, guidance_rescale=guidance_rescale, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, clip_skip=clip_skip, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs, ) def img2img( self, prompt: str = None, prompt_2: Optional[str] = None, image: Optional[PipelineImageInput] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = 
"pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling pipeline for image-to-image. Refer to the documentation of the `__call__` method for parameter descriptions. """ return self.__call__( prompt=prompt, prompt_2=prompt_2, image=image, height=height, width=width, strength=strength, num_inference_steps=num_inference_steps, timesteps=timesteps, denoising_start=denoising_start, denoising_end=denoising_end, guidance_scale=guidance_scale, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, ip_adapter_image=ip_adapter_image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, output_type=output_type, return_dict=return_dict, cross_attention_kwargs=cross_attention_kwargs, guidance_rescale=guidance_rescale, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, clip_skip=clip_skip, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs, ) def inpaint( self, prompt: str = None, prompt_2: Optional[str] = None, image: Optional[PipelineImageInput] = None, mask_image: Optional[PipelineImageInput] = None, masked_image_latents: Optional[torch.Tensor] = None, height: Optional[int] = None, width: Optional[int] = None, strength: float = 0.8, num_inference_steps: int = 50, timesteps: List[int] = None, denoising_start: Optional[float] = None, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, ip_adapter_image: Optional[PipelineImageInput] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" Function invoked when calling pipeline for inpainting. Refer to the documentation of the `__call__` method for parameter descriptions. 
""" return self.__call__( prompt=prompt, prompt_2=prompt_2, image=image, mask_image=mask_image, masked_image_latents=masked_image_latents, height=height, width=width, strength=strength, num_inference_steps=num_inference_steps, timesteps=timesteps, denoising_start=denoising_start, denoising_end=denoising_end, guidance_scale=guidance_scale, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, ip_adapter_image=ip_adapter_image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, output_type=output_type, return_dict=return_dict, cross_attention_kwargs=cross_attention_kwargs, guidance_rescale=guidance_rescale, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, clip_skip=clip_skip, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs, ) # Override to properly handle the loading and unloading of the additional text encoder. def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs): # We could have accessed the unet config from `lora_state_dict()` too. We pass # it here explicitly to be able to tell that it's coming from an SDXL # pipeline. state_dict, network_alphas = self.lora_state_dict( pretrained_model_name_or_path_or_dict, unet_config=self.unet.config, **kwargs, ) self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet) text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k} if len(text_encoder_state_dict) > 0: self.load_lora_into_text_encoder( text_encoder_state_dict, network_alphas=network_alphas, text_encoder=self.text_encoder, prefix="text_encoder", lora_scale=self.lora_scale, ) text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." 
in k} if len(text_encoder_2_state_dict) > 0: self.load_lora_into_text_encoder( text_encoder_2_state_dict, network_alphas=network_alphas, text_encoder=self.text_encoder_2, prefix="text_encoder_2", lora_scale=self.lora_scale, ) @classmethod def save_lora_weights( cls, save_directory: Union[str, os.PathLike], unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None, is_main_process: bool = True, weight_name: str = None, save_function: Callable = None, safe_serialization: bool = False, ): state_dict = {} def pack_weights(layers, prefix): layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()} return layers_state_dict state_dict.update(pack_weights(unet_lora_layers, "unet")) if text_encoder_lora_layers and text_encoder_2_lora_layers: state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder")) state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2")) cls.write_lora_layers( state_dict=state_dict, save_directory=save_directory, is_main_process=is_main_process, weight_name=weight_name, save_function=save_function, safe_serialization=safe_serialization, ) def _remove_text_encoder_monkey_patch(self): self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder) self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
diffusers/examples/community/lpw_stable_diffusion_xl.py/0
{ "file_path": "diffusers/examples/community/lpw_stable_diffusion_xl.py", "repo_id": "diffusers", "token_count": 48188 }
# Copyright 2024 The InstantX Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Any, Callable, Dict, List, Optional, Tuple, Union import cv2 import numpy as np import PIL.Image import torch import torch.nn as nn from diffusers import StableDiffusionXLControlNetImg2ImgPipeline from diffusers.image_processor import PipelineImageInput from diffusers.models import ControlNetModel from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput from diffusers.utils import ( deprecate, logging, replace_example_docstring, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module, is_torch_version try: import xformers import xformers.ops xformers_available = True except Exception: xformers_available = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name logger.warning( "To use instant id pipelines, please make sure you have the `insightface` library installed: `pip install insightface`." "Please refer to: https://huggingface.co/InstantX/InstantID for further instructions regarding inference" ) def FeedForward(dim, mult=4): inner_dim = int(dim * mult) return nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, inner_dim, bias=False), nn.GELU(), nn.Linear(inner_dim, dim, bias=False), ) def reshape_tensor(x, heads): bs, length, width = x.shape # (bs, length, width) --> (bs, length, n_heads, dim_per_head) x = x.view(bs, length, heads, -1) # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head) x = x.transpose(1, 2) # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head) x = x.reshape(bs, heads, length, -1) return x class PerceiverAttention(nn.Module): def __init__(self, *, dim, dim_head=64, heads=8): super().__init__() self.scale = dim_head**-0.5 self.dim_head = dim_head self.heads = heads inner_dim = dim_head * heads self.norm1 = nn.LayerNorm(dim) self.norm2 = nn.LayerNorm(dim) self.to_q = nn.Linear(dim, inner_dim, bias=False) self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False) self.to_out = nn.Linear(inner_dim, dim, bias=False) def forward(self, x, latents): """ Args: x (torch.Tensor): image features shape (b, n1, D) latent (torch.Tensor): latent features shape (b, n2, D) """ x = self.norm1(x) latents = self.norm2(latents) b, l, _ = latents.shape q = self.to_q(latents) kv_input = torch.cat((x, latents), dim=-2) k, v = self.to_kv(kv_input).chunk(2, dim=-1) q = reshape_tensor(q, self.heads) k = reshape_tensor(k, self.heads) v = reshape_tensor(v, self.heads) # attention scale = 1 / math.sqrt(math.sqrt(self.dim_head)) weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype) out = weight @ v out = out.permute(0, 2, 1, 3).reshape(b, l, -1) return self.to_out(out) class Resampler(nn.Module): def __init__( self, dim=1024, depth=8, 
dim_head=64, heads=16, num_queries=8, embedding_dim=768, output_dim=1024, ff_mult=4, ): super().__init__() self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5) self.proj_in = nn.Linear(embedding_dim, dim) self.proj_out = nn.Linear(dim, output_dim) self.norm_out = nn.LayerNorm(output_dim) self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append( nn.ModuleList( [ PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads), FeedForward(dim=dim, mult=ff_mult), ] ) ) def forward(self, x): latents = self.latents.repeat(x.size(0), 1, 1) x = self.proj_in(x) for attn, ff in self.layers: latents = attn(x, latents) + latents latents = ff(latents) + latents latents = self.proj_out(latents) return self.norm_out(latents) class AttnProcessor(nn.Module): r""" Default processor for performing attention-related computations. """ def __init__( self, hidden_size=None, cross_attention_dim=None, ): super().__init__() def __call__( self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, ): residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class IPAttnProcessor(nn.Module): r""" Attention processor for IP-Adapater. Args: hidden_size (`int`): The hidden size of the attention layer. cross_attention_dim (`int`): The number of channels in the `encoder_hidden_states`. scale (`float`, defaults to 1.0): the weight scale of image prompt. num_tokens (`int`, defaults to 4 when do ip_adapter_plus it should be 16): The context length of the image features. 
""" def __init__(self, hidden_size, cross_attention_dim=None, scale=1.0, num_tokens=4): super().__init__() self.hidden_size = hidden_size self.cross_attention_dim = cross_attention_dim self.scale = scale self.num_tokens = num_tokens self.to_k_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) self.to_v_ip = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False) def __call__( self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, ): residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if attn.group_norm is not None: hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2) query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states else: # get encoder_hidden_states, ip_hidden_states end_pos = encoder_hidden_states.shape[1] - self.num_tokens encoder_hidden_states, ip_hidden_states = ( encoder_hidden_states[:, :end_pos, :], encoder_hidden_states[:, end_pos:, :], ) if attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) if xformers_available: hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask) else: attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # for ip-adapter ip_key = self.to_k_ip(ip_hidden_states) ip_value = self.to_v_ip(ip_hidden_states) ip_key = attn.head_to_batch_dim(ip_key) ip_value = attn.head_to_batch_dim(ip_value) if xformers_available: ip_hidden_states = self._memory_efficient_attention_xformers(query, ip_key, ip_value, None) else: ip_attention_probs = attn.get_attention_scores(query, ip_key, None) ip_hidden_states = torch.bmm(ip_attention_probs, ip_value) ip_hidden_states = attn.batch_to_head_dim(ip_hidden_states) hidden_states = hidden_states + self.scale * ip_hidden_states # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states def _memory_efficient_attention_xformers(self, query, key, value, attention_mask): # TODO attention_mask query = query.contiguous() key = key.contiguous() value = value.contiguous() hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask) return hidden_states EXAMPLE_DOC_STRING = """ Examples: ```py >>> # !pip install opencv-python transformers accelerate insightface >>> import diffusers >>> from diffusers.utils import load_image >>> from diffusers.models import ControlNetModel >>> import cv2 >>> import 
torch >>> import numpy as np >>> from PIL import Image >>> from insightface.app import FaceAnalysis >>> from pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline, draw_kps >>> # download 'antelopev2' under ./models >>> app = FaceAnalysis(name='antelopev2', root='./', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']) >>> app.prepare(ctx_id=0, det_size=(640, 640)) >>> # download models under ./checkpoints >>> face_adapter = f'./checkpoints/ip-adapter.bin' >>> controlnet_path = f'./checkpoints/ControlNetModel' >>> # load IdentityNet >>> controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16) >>> pipe = StableDiffusionXLInstantIDPipeline.from_pretrained( ... "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16 ... ) >>> pipe.cuda() >>> # load adapter >>> pipe.load_ip_adapter_instantid(face_adapter) >>> prompt = "analog film photo of a man. faded film, desaturated, 35mm photo, grainy, vignette, vintage, Kodachrome, Lomography, stained, highly detailed, found footage, masterpiece, best quality" >>> negative_prompt = "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch, deformed, mutated, cross-eyed, ugly, disfigured (lowres, low quality, worst quality:1.2), (text:1.2), watermark, painting, drawing, illustration, glitch,deformed, mutated, cross-eyed, ugly, disfigured" >>> # load an image >>> face_image = load_image("your-example.jpg") >>> face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))[-1] >>> face_emb = face_info['embedding'] >>> face_kps = draw_kps(face_image, face_info['kps']) >>> pipe.set_ip_adapter_scale(0.8) >>> # generate image >>> image = pipe( ... prompt, image_embeds=face_emb, image=face_kps, controlnet_conditioning_scale=0.8 ... ).images[0] ``` """ def draw_kps(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]): stickwidth = 4 limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]]) kps = np.array(kps) w, h = image_pil.size out_img = np.zeros([h, w, 3]) for i in range(len(limbSeq)): index = limbSeq[i] color = color_list[index[0]] x = kps[index][:, 0] y = kps[index][:, 1] length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5 angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1])) polygon = cv2.ellipse2Poly( (int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1 ) out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color) out_img = (out_img * 0.6).astype(np.uint8) for idx_kp, kp in enumerate(kps): color = color_list[idx_kp] x, y = kp out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1) out_img_pil = PIL.Image.fromarray(out_img.astype(np.uint8)) return out_img_pil class StableDiffusionXLInstantIDImg2ImgPipeline(StableDiffusionXLControlNetImg2ImgPipeline): def cuda(self, dtype=torch.float16, use_xformers=False): self.to("cuda", dtype) if hasattr(self, "image_proj_model"): self.image_proj_model.to(self.unet.device).to(self.unet.dtype) if use_xformers: if is_xformers_available(): import xformers from packaging import version xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
) self.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") def load_ip_adapter_instantid(self, model_ckpt, image_emb_dim=512, num_tokens=16, scale=0.5): self.set_image_proj_model(model_ckpt, image_emb_dim, num_tokens) self.set_ip_adapter(model_ckpt, num_tokens, scale) def set_image_proj_model(self, model_ckpt, image_emb_dim=512, num_tokens=16): image_proj_model = Resampler( dim=1280, depth=4, dim_head=64, heads=20, num_queries=num_tokens, embedding_dim=image_emb_dim, output_dim=self.unet.config.cross_attention_dim, ff_mult=4, ) image_proj_model.eval() self.image_proj_model = image_proj_model.to(self.device, dtype=self.dtype) state_dict = torch.load(model_ckpt, map_location="cpu") if "image_proj" in state_dict: state_dict = state_dict["image_proj"] self.image_proj_model.load_state_dict(state_dict) self.image_proj_model_in_features = image_emb_dim def set_ip_adapter(self, model_ckpt, num_tokens, scale): unet = self.unet attn_procs = {} for name in unet.attn_processors.keys(): cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = unet.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(unet.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = unet.config.block_out_channels[block_id] if cross_attention_dim is None: attn_procs[name] = AttnProcessor().to(unet.device, dtype=unet.dtype) else: attn_procs[name] = IPAttnProcessor( hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=scale, num_tokens=num_tokens, ).to(unet.device, dtype=unet.dtype) unet.set_attn_processor(attn_procs) state_dict = torch.load(model_ckpt, map_location="cpu") ip_layers = torch.nn.ModuleList(self.unet.attn_processors.values()) if "ip_adapter" in state_dict: state_dict = state_dict["ip_adapter"] ip_layers.load_state_dict(state_dict) def set_ip_adapter_scale(self, scale): unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet for attn_processor in unet.attn_processors.values(): if isinstance(attn_processor, IPAttnProcessor): attn_processor.scale = scale def _encode_prompt_image_emb(self, prompt_image_emb, device, dtype, do_classifier_free_guidance): if isinstance(prompt_image_emb, torch.Tensor): prompt_image_emb = prompt_image_emb.clone().detach() else: prompt_image_emb = torch.tensor(prompt_image_emb) prompt_image_emb = prompt_image_emb.to(device=device, dtype=dtype) prompt_image_emb = prompt_image_emb.reshape([1, -1, self.image_proj_model_in_features]) if do_classifier_free_guidance: prompt_image_emb = torch.cat([torch.zeros_like(prompt_image_emb), prompt_image_emb], dim=0) else: prompt_image_emb = torch.cat([prompt_image_emb], dim=0) prompt_image_emb = self.image_proj_model(prompt_image_emb) return prompt_image_emb @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, image: PipelineImageInput = None, control_image: PipelineImageInput = None, strength: float = 0.8, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 
1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, pooled_prompt_embeds: Optional[torch.Tensor] = None, negative_pooled_prompt_embeds: Optional[torch.Tensor] = None, image_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, controlnet_conditioning_scale: Union[float, List[float]] = 1.0, guess_mode: bool = False, control_guidance_start: Union[float, List[float]] = 0.0, control_guidance_end: Union[float, List[float]] = 1.0, original_size: Tuple[int, int] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Tuple[int, int] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, aesthetic_score: float = 6.0, negative_aesthetic_score: float = 2.5, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, `List[np.ndarray]`,: `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`): The ControlNet input condition to provide guidance to the `unet` for generation. If the type is specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If height and/or width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`, images must be passed as a list such that each element of the list can be correctly batched for input to a single ControlNet. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 5.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. This is sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders. num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, pooled text embeddings are generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, pooled `negative_prompt_embeds` are generated from `negative_prompt` input argument. image_embeds (`torch.Tensor`, *optional*): Pre-generated image embeddings. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0): The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set the corresponding scale as a list. guess_mode (`bool`, *optional*, defaults to `False`): The ControlNet encoder tries to recognize the content of the input image even if you remove all prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended. 
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0): The percentage of total steps at which the ControlNet starts applying. control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0): The percentage of total steps at which the ControlNet stops applying. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a target image resolution. It should be as same as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. 
The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned containing the output images. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet # align format for control guidance if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list): control_guidance_start = len(control_guidance_end) * [control_guidance_start] elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list): control_guidance_end = len(control_guidance_start) * [control_guidance_end] elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list): mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1 control_guidance_start, control_guidance_end = ( mult * [control_guidance_start], mult * [control_guidance_end], ) # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, prompt_2, control_image, strength, num_inference_steps, callback_steps, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, None, None, controlnet_conditioning_scale, control_guidance_start, control_guidance_end, callback_on_step_end_tensor_inputs, ) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs # 2. 
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float): controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets) global_pool_conditions = ( controlnet.config.global_pool_conditions if isinstance(controlnet, ControlNetModel) else controlnet.nets[0].config.global_pool_conditions ) guess_mode = guess_mode or global_pool_conditions # 3.1 Encode input prompt text_encoder_lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt, prompt_2, device, num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=self.clip_skip, ) # 3.2 Encode image prompt prompt_image_emb = self._encode_prompt_image_emb( image_embeds, device, self.unet.dtype, self.do_classifier_free_guidance ) bs_embed, seq_len, _ = prompt_image_emb.shape prompt_image_emb = prompt_image_emb.repeat(1, num_images_per_prompt, 1) prompt_image_emb = prompt_image_emb.view(bs_embed * num_images_per_prompt, seq_len, -1) # 4. Prepare image and controlnet_conditioning_image image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32) if isinstance(controlnet, ControlNetModel): control_image = self.prepare_control_image( image=control_image, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) height, width = control_image.shape[-2:] elif isinstance(controlnet, MultiControlNetModel): control_images = [] for control_image_ in control_image: control_image_ = self.prepare_control_image( image=control_image_, width=width, height=height, batch_size=batch_size * num_images_per_prompt, num_images_per_prompt=num_images_per_prompt, device=device, dtype=controlnet.dtype, do_classifier_free_guidance=self.do_classifier_free_guidance, guess_mode=guess_mode, ) control_images.append(control_image_) control_image = control_images height, width = control_image[0].shape[-2:] else: assert False # 5. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) self._num_timesteps = len(timesteps) # 6. 
Prepare latent variables latents = self.prepare_latents( image, latent_timestep, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator, True, ) # # 6.5 Optionally get Guidance Scale Embedding timestep_cond = None if self.unet.config.time_cond_proj_dim is not None: guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt) timestep_cond = self.get_guidance_scale_embedding( guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents.dtype) # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7.1 Create tensor stating which controlnets to keep controlnet_keep = [] for i in range(len(timesteps)): keeps = [ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e) for s, e in zip(control_guidance_start, control_guidance_end) ] controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps) # 7.2 Prepare added time ids & embeddings if isinstance(control_image, list): original_size = original_size or control_image[0].shape[-2:] else: original_size = original_size or control_image.shape[-2:] target_size = target_size or (height, width) if negative_original_size is None: negative_original_size = original_size if negative_target_size is None: negative_target_size = target_size add_text_embeds = pooled_prompt_embeds if self.text_encoder_2 is None: text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1]) else: text_encoder_projection_dim = self.text_encoder_2.config.projection_dim add_time_ids, add_neg_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, aesthetic_score, negative_aesthetic_score, negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, text_encoder_projection_dim=text_encoder_projection_dim, ) add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1) if self.do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1) add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) encoder_hidden_states = torch.cat([prompt_embeds, prompt_image_emb], dim=1) # 8. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order is_unet_compiled = is_compiled_module(self.unet) is_controlnet_compiled = is_compiled_module(self.controlnet) is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1") with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # Relevant thread: # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428 if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1: torch._inductor.cudagraph_mark_step_begin() # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} # controlnet(s) inference if guess_mode and self.do_classifier_free_guidance: # Infer ControlNet only for the conditional batch. control_model_input = latents control_model_input = self.scheduler.scale_model_input(control_model_input, t) controlnet_prompt_embeds = prompt_embeds.chunk(2)[1] controlnet_added_cond_kwargs = { "text_embeds": add_text_embeds.chunk(2)[1], "time_ids": add_time_ids.chunk(2)[1], } else: control_model_input = latent_model_input controlnet_prompt_embeds = prompt_embeds controlnet_added_cond_kwargs = added_cond_kwargs if isinstance(controlnet_keep[i], list): cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])] else: controlnet_cond_scale = controlnet_conditioning_scale if isinstance(controlnet_cond_scale, list): controlnet_cond_scale = controlnet_cond_scale[0] cond_scale = controlnet_cond_scale * controlnet_keep[i] down_block_res_samples, mid_block_res_sample = self.controlnet( control_model_input, t, encoder_hidden_states=prompt_image_emb, controlnet_cond=control_image, conditioning_scale=cond_scale, guess_mode=guess_mode, added_cond_kwargs=controlnet_added_cond_kwargs, return_dict=False, ) if guess_mode and self.do_classifier_free_guidance: # Inferred ControlNet only for the conditional batch. # To apply the output of ControlNet to both the unconditional and conditional batches, # add 0 to the unconditional batch to keep it unchanged. 
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples] mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample]) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=encoder_hidden_states, timestep_cond=timestep_cond, cross_attention_kwargs=self.cross_attention_kwargs, down_block_additional_residuals=down_block_res_samples, mid_block_additional_residual=mid_block_res_sample, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) else: image = latents if not output_type == "latent": # apply watermark if available if self.watermark is not None: image = self.watermark.apply_watermark(image) image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return StableDiffusionXLPipelineOutput(images=image)
diffusers/examples/community/pipeline_stable_diffusion_xl_instandid_img2img.py/0
{ "file_path": "diffusers/examples/community/pipeline_stable_diffusion_xl_instandid_img2img.py", "repo_id": "diffusers", "token_count": 22982 }
import inspect import os import random import re from dataclasses import dataclass from typing import Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name global_re_wildcard = re.compile(r"__([^_]*)__") def get_filename(path: str): # this doesn't work on Windows return os.path.basename(path).split(".txt")[0] def read_wildcard_values(path: str): with open(path, encoding="utf8") as f: return f.read().splitlines() def grab_wildcard_values(wildcard_option_dict: Dict[str, List[str]] = {}, wildcard_files: List[str] = []): for wildcard_file in wildcard_files: filename = get_filename(wildcard_file) read_values = read_wildcard_values(wildcard_file) if filename not in wildcard_option_dict: wildcard_option_dict[filename] = [] wildcard_option_dict[filename].extend(read_values) return wildcard_option_dict def replace_prompt_with_wildcards( prompt: str, wildcard_option_dict: Dict[str, List[str]] = {}, wildcard_files: List[str] = [] ): new_prompt = prompt # get wildcard options wildcard_option_dict = grab_wildcard_values(wildcard_option_dict, wildcard_files) for m in global_re_wildcard.finditer(new_prompt): wildcard_value = m.group() replace_value = random.choice(wildcard_option_dict[wildcard_value.strip("__")]) new_prompt = new_prompt.replace(wildcard_value, replace_value, 1) return new_prompt @dataclass class WildcardStableDiffusionOutput(StableDiffusionPipelineOutput): prompts: List[str] class WildcardStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): r""" Example Usage: pipe = WildcardStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, ) prompt = "__animal__ sitting on a __object__ wearing a __clothing__" out = pipe( prompt, wildcard_option_dict={ "clothing":["hat", "shirt", "scarf", "beret"] }, wildcard_files=["object.txt", "animal.txt"], num_prompt_samples=1 ) Pipeline for text-to-image generation with wild cards using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. 
scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ): super().__init__() if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, wildcard_option_dict: Dict[str, List[str]] = {}, wildcard_files: List[str] = [], num_prompt_samples: Optional[int] = 1, **kwargs, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. 
num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. wildcard_option_dict (Dict[str, List[str]]): dict with key as `wildcard` and values as a list of possible replacements. For example if a prompt, "A __animal__ sitting on a chair". A wildcard_option_dict can provide possible values for "animal" like this: {"animal":["dog", "cat", "fox"]} wildcard_files: (List[str]) List of filenames of txt files for wildcard replacements. For example if a prompt, "A __animal__ sitting on a chair". A file can be provided ["animal.txt"] num_prompt_samples: int Number of times to sample wildcards for each prompt provided Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. 
""" if isinstance(prompt, str): prompt = [ replace_prompt_with_wildcards(prompt, wildcard_option_dict, wildcard_files) for i in range(num_prompt_samples) ] batch_size = len(prompt) elif isinstance(prompt, list): prompt_list = [] for p in prompt: for i in range(num_prompt_samples): prompt_list.append(replace_prompt_with_wildcards(p, wildcard_option_dict, wildcard_files)) prompt = prompt_list batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ) text_input_ids = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) latents_dtype = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( self.device ) else: latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") latents = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(num_inference_steps) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand timesteps_tensor = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta for i, t in enumerate(self.progress_bar(timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) latents = 1 / 0.18215 * latents image = self.vae.decode(latents).sample image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() if self.safety_checker is not None: safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to( self.device ) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype) ) else: has_nsfw_concept = None if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image, has_nsfw_concept) return WildcardStableDiffusionOutput(images=image, 
nsfw_content_detected=has_nsfw_concept, prompts=prompt)
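# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original pipeline code): assuming this
# module is loaded as a diffusers community pipeline via the `custom_pipeline`
# argument, the wildcard-specific arguments documented in the `__call__`
# docstring above could be exercised as shown. The base model id, wildcard
# values, and file name below are illustrative placeholders only.
#
# import torch
# from diffusers import DiffusionPipeline
#
# pipe = DiffusionPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4",
#     custom_pipeline="wildcard_stable_diffusion",
#     torch_dtype=torch.float16,
# ).to("cuda")
# output = pipe(
#     prompt="A __animal__ sitting on a chair",
#     wildcard_option_dict={"animal": ["dog", "cat", "fox"]},
#     wildcard_files=["animal.txt"],  # hypothetical wildcard replacement file
#     num_prompt_samples=1,
# )
# output.images[0].save("wildcard_sample.png")
# ---------------------------------------------------------------------------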
diffusers/examples/community/wildcard_stable_diffusion.py/0
{ "file_path": "diffusers/examples/community/wildcard_stable_diffusion.py", "repo_id": "diffusers", "token_count": 9006 }
# DreamBooth training example for FLUX.1 [dev]

[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text-to-image models like Stable Diffusion given just a few (3~5) images of a subject.

The `train_dreambooth_flux.py` script shows how to implement the training procedure and adapt it for [FLUX.1 [dev]](https://blackforestlabs.ai/announcing-black-forest-labs/). We also provide a LoRA implementation in the `train_dreambooth_lora_flux.py` script.

> [!NOTE]
> **Memory consumption**
>
> Flux can be quite expensive to run on consumer hardware devices and as a result finetuning it comes with high memory requirements:
> a LoRA with a rank of 16 (with all components trained) can exceed 40GB of VRAM for training.
> For more tips & guidance on training on a resource-constrained device and general good practices, please check out these great guides and trainers for FLUX:
> 1) [`@bghira`'s guide](https://github.com/bghira/SimpleTuner/blob/main/documentation/quickstart/FLUX.md)
> 2) [`ostris`'s guide](https://github.com/ostris/ai-toolkit?tab=readme-ov-file#flux1-training)

> [!NOTE]
> **Gated model**
>
> As the model is gated, before using it with diffusers you first need to go to the [FLUX.1 [dev] Hugging Face page](https://huggingface.co/black-forest-labs/FLUX.1-dev), fill in the form and accept the gate. Once you are in, you need to log in so that your system knows you've accepted the gate. Use the command below to log in:

```bash
huggingface-cli login
```

This will also allow us to push the trained model parameters to the Hugging Face Hub platform. (If you prefer to authenticate from Python, for example in a notebook, see the short sketch at the end of the "Dog toy example" section below.)

## Running locally with PyTorch

### Installing the dependencies

Before running the scripts, make sure to install the library's training dependencies:

**Important**

To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date, as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then cd into the `examples/dreambooth` folder and run:

```bash
pip install -r requirements_flux.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:

```bash
accelerate config
```

Or, for a default accelerate configuration without answering questions about your environment:

```bash
accelerate config default
```

Or, if your environment doesn't support an interactive shell (e.g., a notebook):

```python
from accelerate.utils import write_basic_config
write_basic_config()
```

When running `accelerate config`, specifying torch compile mode as True can yield dramatic speedups.

Note also that we use the PEFT library as the backend for LoRA training, so make sure to have `peft>=0.6.0` installed in your environment.

### Dog toy example

Now let's get our dataset. For this example we will use some dog images: https://huggingface.co/datasets/diffusers/dog-example.

Let's first download it locally:

```python
from huggingface_hub import snapshot_download

local_dir = "./dog"
snapshot_download(
    "diffusers/dog-example",
    local_dir=local_dir,
    repo_type="dataset",
    ignore_patterns=".gitattributes",
)
```
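As mentioned in the gated-model note above, you need to be logged in before the scripts can download FLUX.1 [dev]. If you prefer to authenticate from Python rather than the CLI, a minimal sketch using `huggingface_hub.login` follows; the token string is a placeholder for a token that has access to the gated repository:

```python
from huggingface_hub import login

# Either pass a token explicitly (placeholder shown) or call login() with no
# arguments to be prompted for it interactively.
login(token="hf_xxx_your_token_here")
```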
Now, we can launch training using:

```bash
export MODEL_NAME="black-forest-labs/FLUX.1-dev"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="trained-flux"

accelerate launch train_dreambooth_flux.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --mixed_precision="bf16" \
  --instance_prompt="a photo of sks dog" \
  --resolution=1024 \
  --train_batch_size=1 \
  --guidance_scale=1 \
  --gradient_accumulation_steps=4 \
  --optimizer="prodigy" \
  --learning_rate=1. \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=25 \
  --seed="0" \
  --push_to_hub
```

To better track our training experiments, we're using the following flags in the command above:

* `report_to="wandb"` will ensure the training runs are tracked on [Weights and Biases](https://wandb.ai/site). To use it, be sure to install `wandb` with `pip install wandb`. Don't forget to call `wandb login <your_api_key>` before training if you haven't done it before.
* `validation_prompt` and `validation_epochs` allow the script to do a few validation inference runs. This lets us qualitatively check if the training is progressing as expected.

> [!NOTE]
> If you want to train using long prompts with the T5 text encoder, you can use `--max_sequence_length` to set the token limit. The default is 77, but it can be increased to as high as 512. Note that this will use more resources and may slow down the training in some cases.

## LoRA + DreamBooth

[LoRA](https://huggingface.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora) is a popular parameter-efficient fine-tuning technique that allows you to achieve full-finetuning-like performance with only a fraction of the learnable parameters.

Note also that we use the PEFT library as the backend for LoRA training, so make sure to have `peft>=0.6.0` installed in your environment.

### Prodigy Optimizer

Prodigy is an adaptive optimizer that dynamically adjusts the learning rate of the learned parameters based on past gradients, allowing for more efficient convergence. By using Prodigy we can "eliminate" the need for manual learning rate tuning. Read more [here](https://huggingface.co/blog/sdxl_lora_advanced_script#adaptive-optimizers).

To use Prodigy, specify:

```bash
--optimizer="prodigy"
```

> [!TIP]
> When using Prodigy, it's generally good practice to set `--learning_rate=1.0`.

To perform DreamBooth with LoRA, run:

```bash
export MODEL_NAME="black-forest-labs/FLUX.1-dev"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="trained-flux-lora"

accelerate launch train_dreambooth_lora_flux.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --mixed_precision="bf16" \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --guidance_scale=1 \
  --gradient_accumulation_steps=4 \
  --optimizer="prodigy" \
  --learning_rate=1. \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --validation_epochs=25 \
  --seed="0" \
  --push_to_hub
```

### Target Modules

When LoRA was first adapted from language models to diffusion models, it was applied to the cross-attention layers in the Unet that relate the image representations with the prompts that describe them. More recently, SOTA text-to-image diffusion models replaced the Unet with a diffusion Transformer (DiT).
With this change, we may also want to explore applying LoRA training to different types of layers and blocks. To allow more flexibility and control over the targeted modules, we added `--lora_layers`, in which you can specify, in a comma-separated string, the exact modules for LoRA training. Here are some examples of target modules you can provide:

- for attention-only layers: `--lora_layers="attn.to_k,attn.to_q,attn.to_v,attn.to_out.0"`
- to train the same modules as in the fal trainer: `--lora_layers="attn.to_k,attn.to_q,attn.to_v,attn.to_out.0,attn.add_k_proj,attn.add_q_proj,attn.add_v_proj,attn.to_add_out,ff.net.0.proj,ff.net.2,ff_context.net.0.proj,ff_context.net.2"`
- to train the same modules as in the ostris ai-toolkit / replicate trainer: `--lora_layers="attn.to_k,attn.to_q,attn.to_v,attn.to_out.0,attn.add_k_proj,attn.add_q_proj,attn.add_v_proj,attn.to_add_out,ff.net.0.proj,ff.net.2,ff_context.net.0.proj,ff_context.net.2,norm1_context.linear,norm1.linear,norm.linear,proj_mlp,proj_out"`

> [!NOTE]
> `--lora_layers` can also be used to specify which **blocks** to apply LoRA training to. To do so, simply add a block prefix to each layer in the comma-separated string:
> **single DiT blocks**: to target the ith single transformer block, add the prefix `single_transformer_blocks.i`, e.g. `single_transformer_blocks.i.attn.to_k`
> **MMDiT blocks**: to target the ith MMDiT block, add the prefix `transformer_blocks.i`, e.g. `transformer_blocks.i.attn.to_k`

> [!NOTE]
> Keep in mind that while training more layers can improve quality and expressiveness, it also increases the size of the output LoRA weights.

### Text Encoder Training

Alongside the transformer, fine-tuning of the CLIP text encoder is also supported. To do so, just specify `--train_text_encoder` while launching training. Please keep the following points in mind:

> [!NOTE]
> This is still an experimental feature.
> FLUX.1 has 2 text encoders (CLIP L/14 and T5-v1.1-XXL). By enabling `--train_text_encoder`, fine-tuning of the **CLIP encoder** is performed.
> At the moment, T5 fine-tuning is not supported and its weights remain frozen when text encoder training is enabled.

To perform DreamBooth LoRA with text-encoder training, run:

```bash
export MODEL_NAME="black-forest-labs/FLUX.1-dev"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="trained-flux-dev-dreambooth-lora"

accelerate launch train_dreambooth_lora_flux.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --mixed_precision="bf16" \
  --train_text_encoder \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --guidance_scale=1 \
  --gradient_accumulation_steps=4 \
  --optimizer="prodigy" \
  --learning_rate=1. \
  --report_to="wandb" \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --validation_prompt="A photo of sks dog in a bucket" \
  --seed="0" \
  --push_to_hub
```

## Memory Optimizations

As mentioned, Flux DreamBooth LoRA training is very memory intensive. Here are some options (some still experimental) for more memory-efficient training.

### Image Resolution

An easy way to mitigate some of the memory requirements is through `--resolution`. `--resolution` refers to the resolution of the input images; all the images in the train/validation dataset are resized to this resolution. Note that by default, images are resized to a resolution of 512, which is good to keep in mind in case you're accustomed to training at higher resolutions.
### Gradient Checkpointing and Accumulation

* `--gradient_accumulation_steps` refers to the number of update steps to accumulate before performing a backward/update pass. By passing a value > 1 you can reduce the number of backward/update passes and hence also the memory requirements.
* With `--gradient_checkpointing` we can save memory by not storing all intermediate activations during the forward pass. Instead, only a subset of these activations (the checkpoints) is stored and the rest is recomputed as needed during the backward pass. Note that this comes at the expense of a slower backward pass.

### 8-bit-Adam Optimizer

When training with `AdamW` (this doesn't apply to Prodigy), you can pass `--use_8bit_adam` to reduce the memory requirements of training. Make sure to install `bitsandbytes` if you want to do so.

### Latent caching

When training without validation runs, we can pre-encode the training images with the VAE and then delete it to free up some memory. To enable latent caching, simply pass `--cache_latents`.

### Precision of saved LoRA layers

By default, trained transformer layers are saved in the precision dtype in which training was performed. E.g., when training in mixed precision is enabled with `--mixed_precision="bf16"`, the final finetuned layers will be saved in `torch.bfloat16` as well. This reduces memory requirements significantly without a significant quality loss. Note that if you do wish to save the final layers in float32 at the expense of more memory usage, you can do so by passing `--upcast_before_saving`.

A combined launch command using these memory-saving options is sketched after the notes below.

## Other notes

Thanks to `bghira` and `ostris` for their help with reviewing & insight sharing ♥️
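As referenced in the Memory Optimizations section above, here is a sketch of how the memory-saving options might be combined in a single launch command. This is an illustrative, untested example: in particular, the `--optimizer="AdamW"` spelling and the `1e-4` learning rate are assumptions, so check `python train_dreambooth_lora_flux.py --help` for the exact argument names and defaults before running.

```bash
export MODEL_NAME="black-forest-labs/FLUX.1-dev"
export INSTANCE_DIR="dog"
export OUTPUT_DIR="trained-flux-lora-low-mem"

accelerate launch train_dreambooth_lora_flux.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --output_dir=$OUTPUT_DIR \
  --mixed_precision="bf16" \
  --instance_prompt="a photo of sks dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --guidance_scale=1 \
  --gradient_accumulation_steps=4 \
  --gradient_checkpointing \
  --cache_latents \
  --optimizer="AdamW" \
  --use_8bit_adam \
  --learning_rate=1e-4 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --max_train_steps=500 \
  --seed="0"
```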
diffusers/examples/dreambooth/README_flux.md/0
{ "file_path": "diffusers/examples/dreambooth/README_flux.md", "repo_id": "diffusers", "token_count": 3793 }
# coding=utf-8 # Copyright 2025 suzukimain # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re import types from collections import OrderedDict from dataclasses import asdict, dataclass, field from typing import Dict, List, Optional, Union import requests import torch from huggingface_hub import hf_api, hf_hub_download from huggingface_hub.file_download import http_get from huggingface_hub.utils import validate_hf_hub_args from diffusers.loaders.single_file_utils import ( VALID_URL_PREFIXES, _extract_repo_id_and_weights_name, infer_diffusers_model_type, load_single_file_checkpoint, ) from diffusers.pipelines.animatediff import AnimateDiffPipeline, AnimateDiffSDXLPipeline from diffusers.pipelines.auto_pipeline import ( AutoPipelineForImage2Image, AutoPipelineForInpainting, AutoPipelineForText2Image, ) from diffusers.pipelines.controlnet import ( StableDiffusionControlNetImg2ImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionXLControlNetImg2ImgPipeline, StableDiffusionXLControlNetPipeline, ) from diffusers.pipelines.flux import FluxImg2ImgPipeline, FluxPipeline from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion import ( StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline, StableDiffusionUpscalePipeline, ) from diffusers.pipelines.stable_diffusion_3 import StableDiffusion3Img2ImgPipeline, StableDiffusion3Pipeline from diffusers.pipelines.stable_diffusion_xl import ( StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline, ) from diffusers.utils import logging logger = logging.get_logger(__name__) SINGLE_FILE_CHECKPOINT_TEXT2IMAGE_PIPELINE_MAPPING = OrderedDict( [ ("animatediff_rgb", AnimateDiffPipeline), ("animatediff_scribble", AnimateDiffPipeline), ("animatediff_sdxl_beta", AnimateDiffSDXLPipeline), ("animatediff_v1", AnimateDiffPipeline), ("animatediff_v2", AnimateDiffPipeline), ("animatediff_v3", AnimateDiffPipeline), ("autoencoder-dc-f128c512", None), ("autoencoder-dc-f32c32", None), ("autoencoder-dc-f32c32-sana", None), ("autoencoder-dc-f64c128", None), ("controlnet", StableDiffusionControlNetPipeline), ("controlnet_xl", StableDiffusionXLControlNetPipeline), ("controlnet_xl_large", StableDiffusionXLControlNetPipeline), ("controlnet_xl_mid", StableDiffusionXLControlNetPipeline), ("controlnet_xl_small", StableDiffusionXLControlNetPipeline), ("flux-depth", FluxPipeline), ("flux-dev", FluxPipeline), ("flux-fill", FluxPipeline), ("flux-schnell", FluxPipeline), ("hunyuan-video", None), ("inpainting", None), ("inpainting_v2", None), ("ltx-video", None), ("ltx-video-0.9.1", None), ("mochi-1-preview", None), ("playground-v2-5", StableDiffusionXLPipeline), ("sd3", StableDiffusion3Pipeline), ("sd35_large", StableDiffusion3Pipeline), ("sd35_medium", StableDiffusion3Pipeline), ("stable_cascade_stage_b", None), ("stable_cascade_stage_b_lite", None), ("stable_cascade_stage_c", None), ("stable_cascade_stage_c_lite", 
None), ("upscale", StableDiffusionUpscalePipeline), ("v1", StableDiffusionPipeline), ("v2", StableDiffusionPipeline), ("xl_base", StableDiffusionXLPipeline), ("xl_inpaint", None), ("xl_refiner", StableDiffusionXLPipeline), ] ) SINGLE_FILE_CHECKPOINT_IMAGE2IMAGE_PIPELINE_MAPPING = OrderedDict( [ ("animatediff_rgb", AnimateDiffPipeline), ("animatediff_scribble", AnimateDiffPipeline), ("animatediff_sdxl_beta", AnimateDiffSDXLPipeline), ("animatediff_v1", AnimateDiffPipeline), ("animatediff_v2", AnimateDiffPipeline), ("animatediff_v3", AnimateDiffPipeline), ("autoencoder-dc-f128c512", None), ("autoencoder-dc-f32c32", None), ("autoencoder-dc-f32c32-sana", None), ("autoencoder-dc-f64c128", None), ("controlnet", StableDiffusionControlNetImg2ImgPipeline), ("controlnet_xl", StableDiffusionXLControlNetImg2ImgPipeline), ("controlnet_xl_large", StableDiffusionXLControlNetImg2ImgPipeline), ("controlnet_xl_mid", StableDiffusionXLControlNetImg2ImgPipeline), ("controlnet_xl_small", StableDiffusionXLControlNetImg2ImgPipeline), ("flux-depth", FluxImg2ImgPipeline), ("flux-dev", FluxImg2ImgPipeline), ("flux-fill", FluxImg2ImgPipeline), ("flux-schnell", FluxImg2ImgPipeline), ("hunyuan-video", None), ("inpainting", None), ("inpainting_v2", None), ("ltx-video", None), ("ltx-video-0.9.1", None), ("mochi-1-preview", None), ("playground-v2-5", StableDiffusionXLImg2ImgPipeline), ("sd3", StableDiffusion3Img2ImgPipeline), ("sd35_large", StableDiffusion3Img2ImgPipeline), ("sd35_medium", StableDiffusion3Img2ImgPipeline), ("stable_cascade_stage_b", None), ("stable_cascade_stage_b_lite", None), ("stable_cascade_stage_c", None), ("stable_cascade_stage_c_lite", None), ("upscale", StableDiffusionUpscalePipeline), ("v1", StableDiffusionImg2ImgPipeline), ("v2", StableDiffusionImg2ImgPipeline), ("xl_base", StableDiffusionXLImg2ImgPipeline), ("xl_inpaint", None), ("xl_refiner", StableDiffusionXLImg2ImgPipeline), ] ) SINGLE_FILE_CHECKPOINT_INPAINT_PIPELINE_MAPPING = OrderedDict( [ ("animatediff_rgb", None), ("animatediff_scribble", None), ("animatediff_sdxl_beta", None), ("animatediff_v1", None), ("animatediff_v2", None), ("animatediff_v3", None), ("autoencoder-dc-f128c512", None), ("autoencoder-dc-f32c32", None), ("autoencoder-dc-f32c32-sana", None), ("autoencoder-dc-f64c128", None), ("controlnet", StableDiffusionControlNetInpaintPipeline), ("controlnet_xl", None), ("controlnet_xl_large", None), ("controlnet_xl_mid", None), ("controlnet_xl_small", None), ("flux-depth", None), ("flux-dev", None), ("flux-fill", None), ("flux-schnell", None), ("hunyuan-video", None), ("inpainting", StableDiffusionInpaintPipeline), ("inpainting_v2", StableDiffusionInpaintPipeline), ("ltx-video", None), ("ltx-video-0.9.1", None), ("mochi-1-preview", None), ("playground-v2-5", None), ("sd3", None), ("sd35_large", None), ("sd35_medium", None), ("stable_cascade_stage_b", None), ("stable_cascade_stage_b_lite", None), ("stable_cascade_stage_c", None), ("stable_cascade_stage_c_lite", None), ("upscale", StableDiffusionUpscalePipeline), ("v1", None), ("v2", None), ("xl_base", None), ("xl_inpaint", StableDiffusionXLInpaintPipeline), ("xl_refiner", None), ] ) CONFIG_FILE_LIST = [ "pytorch_model.bin", "pytorch_model.fp16.bin", "diffusion_pytorch_model.bin", "diffusion_pytorch_model.fp16.bin", "diffusion_pytorch_model.safetensors", "diffusion_pytorch_model.fp16.safetensors", "diffusion_pytorch_model.ckpt", "diffusion_pytorch_model.fp16.ckpt", "diffusion_pytorch_model.non_ema.bin", "diffusion_pytorch_model.non_ema.safetensors", ] DIFFUSERS_CONFIG_DIR = [ 
"safety_checker", "unet", "vae", "text_encoder", "text_encoder_2", ] TOKENIZER_SHAPE_MAP = { 768: [ "SD 1.4", "SD 1.5", "SD 1.5 LCM", "SDXL 0.9", "SDXL 1.0", "SDXL 1.0 LCM", "SDXL Distilled", "SDXL Turbo", "SDXL Lightning", "PixArt a", "Playground v2", "Pony", ], 1024: ["SD 2.0", "SD 2.0 768", "SD 2.1", "SD 2.1 768", "SD 2.1 Unclip"], } EXTENSION = [".safetensors", ".ckpt", ".bin"] CACHE_HOME = os.path.expanduser("~/.cache") @dataclass class RepoStatus: r""" Data class for storing repository status information. Attributes: repo_id (`str`): The name of the repository. repo_hash (`str`): The hash of the repository. version (`str`): The version ID of the repository. """ repo_id: str = "" repo_hash: str = "" version: str = "" @dataclass class ModelStatus: r""" Data class for storing model status information. Attributes: search_word (`str`): The search word used to find the model. download_url (`str`): The URL to download the model. file_name (`str`): The name of the model file. local (`bool`): Whether the model exists locally site_url (`str`): The URL of the site where the model is hosted. """ search_word: str = "" download_url: str = "" file_name: str = "" local: bool = False site_url: str = "" @dataclass class ExtraStatus: r""" Data class for storing extra status information. Attributes: trained_words (`str`): The words used to trigger the model """ trained_words: Union[List[str], None] = None @dataclass class SearchResult: r""" Data class for storing model data. Attributes: model_path (`str`): The path to the model. loading_method (`str`): The type of loading method used for the model ( None or 'from_single_file' or 'from_pretrained') checkpoint_format (`str`): The format of the model checkpoint (`single_file` or `diffusers`). repo_status (`RepoStatus`): The status of the repository. model_status (`ModelStatus`): The status of the model. """ model_path: str = "" loading_method: Union[str, None] = None checkpoint_format: Union[str, None] = None repo_status: RepoStatus = field(default_factory=RepoStatus) model_status: ModelStatus = field(default_factory=ModelStatus) extra_status: ExtraStatus = field(default_factory=ExtraStatus) @validate_hf_hub_args def load_pipeline_from_single_file(pretrained_model_or_path, pipeline_mapping, **kwargs): r""" Instantiate a [`DiffusionPipeline`] from pretrained pipeline weights saved in the `.ckpt` or `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default. Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A link to the `.ckpt` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub. - A path to a *file* containing all pipeline weights. pipeline_mapping (`dict`): A mapping of model types to their corresponding pipeline classes. This is used to determine which pipeline class to instantiate based on the model type inferred from the checkpoint. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. 
proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. original_config_file (`str`, *optional*): The path to the original config file that was used to train the model. If not provided, the config file will be inferred from the checkpoint file. config (`str`, *optional*): Can be either: - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing the pipeline component configs in Diffusers format. checkpoint (`dict`, *optional*): The loaded state dictionary of the model. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. """ # Load the checkpoint from the provided link or path checkpoint = load_single_file_checkpoint(pretrained_model_or_path) # Infer the model type from the loaded checkpoint model_type = infer_diffusers_model_type(checkpoint) # Get the corresponding pipeline class from the pipeline mapping pipeline_class = pipeline_mapping[model_type] # For tasks not supported by this pipeline if pipeline_class is None: raise ValueError( f"{model_type} is not supported in this pipeline." "For `Text2Image`, please use `AutoPipelineForText2Image.from_pretrained`, " "for `Image2Image` , please use `AutoPipelineForImage2Image.from_pretrained`, " "and `inpaint` is only supported in `AutoPipelineForInpainting.from_pretrained`" ) else: # Instantiate and return the pipeline with the loaded checkpoint and any additional kwargs return pipeline_class.from_single_file(pretrained_model_or_path, **kwargs) def get_keyword_types(keyword): r""" Determine the type and loading method for a given keyword. Parameters: keyword (`str`): The input keyword to classify. Returns: `dict`: A dictionary containing the model format, loading method, and various types and extra types flags. 
""" # Initialize the status dictionary with default values status = { "checkpoint_format": None, "loading_method": None, "type": { "other": False, "hf_url": False, "hf_repo": False, "civitai_url": False, "local": False, }, "extra_type": { "url": False, "missing_model_index": None, }, } # Check if the keyword is an HTTP or HTTPS URL status["extra_type"]["url"] = bool(re.search(r"^(https?)://", keyword)) # Check if the keyword is a file if os.path.isfile(keyword): status["type"]["local"] = True status["checkpoint_format"] = "single_file" status["loading_method"] = "from_single_file" # Check if the keyword is a directory elif os.path.isdir(keyword): status["type"]["local"] = True status["checkpoint_format"] = "diffusers" status["loading_method"] = "from_pretrained" if not os.path.exists(os.path.join(keyword, "model_index.json")): status["extra_type"]["missing_model_index"] = True # Check if the keyword is a Civitai URL elif keyword.startswith("https://civitai.com/"): status["type"]["civitai_url"] = True status["checkpoint_format"] = "single_file" status["loading_method"] = None # Check if the keyword starts with any valid URL prefixes elif any(keyword.startswith(prefix) for prefix in VALID_URL_PREFIXES): repo_id, weights_name = _extract_repo_id_and_weights_name(keyword) if weights_name: status["type"]["hf_url"] = True status["checkpoint_format"] = "single_file" status["loading_method"] = "from_single_file" else: status["type"]["hf_repo"] = True status["checkpoint_format"] = "diffusers" status["loading_method"] = "from_pretrained" # Check if the keyword matches a Hugging Face repository format elif re.match(r"^[^/]+/[^/]+$", keyword): status["type"]["hf_repo"] = True status["checkpoint_format"] = "diffusers" status["loading_method"] = "from_pretrained" # If none of the above apply else: status["type"]["other"] = True status["checkpoint_format"] = None status["loading_method"] = None return status def file_downloader( url, save_path, **kwargs, ) -> None: """ Downloads a file from a given URL and saves it to the specified path. parameters: url (`str`): The URL of the file to download. save_path (`str`): The local path where the file will be saved. resume (`bool`, *optional*, defaults to `False`): Whether to resume an incomplete download. headers (`dict`, *optional*, defaults to `None`): Dictionary of HTTP Headers to send with the request. proxies (`dict`, *optional*, defaults to `None`): Dictionary mapping protocol to the URL of the proxy passed to `requests.request`. force_download (`bool`, *optional*, defaults to `False`): Whether to force the download even if the file already exists. displayed_filename (`str`, *optional*): The filename of the file that is being downloaded. Value is used only to display a nice progress bar. If not set, the filename is guessed from the URL or the `Content-Disposition` header. 
returns: None """ # Get optional parameters from kwargs, with their default values resume = kwargs.pop("resume", False) headers = kwargs.pop("headers", None) proxies = kwargs.pop("proxies", None) force_download = kwargs.pop("force_download", False) displayed_filename = kwargs.pop("displayed_filename", None) # Default mode for file writing and initial file size mode = "wb" file_size = 0 # Create directory os.makedirs(os.path.dirname(save_path), exist_ok=True) # Check if the file already exists at the save path if os.path.exists(save_path): if not force_download: # If the file exists and force_download is False, skip the download logger.info(f"File already exists: {save_path}, skipping download.") return None elif resume: # If resuming, set mode to append binary and get current file size mode = "ab" file_size = os.path.getsize(save_path) # Open the file in the appropriate mode (write or append) with open(save_path, mode) as model_file: # Call the http_get function to perform the file download return http_get( url=url, temp_file=model_file, resume_size=file_size, displayed_filename=displayed_filename, headers=headers, proxies=proxies, **kwargs, ) def search_huggingface(search_word: str, **kwargs) -> Union[str, SearchResult, None]: r""" Downloads a model from Hugging Face. Parameters: search_word (`str`): The search query string. revision (`str`, *optional*): The specific version of the model to download. checkpoint_format (`str`, *optional*, defaults to `"single_file"`): The format of the model checkpoint. download (`bool`, *optional*, defaults to `False`): Whether to download the model. force_download (`bool`, *optional*, defaults to `False`): Whether to force the download if the model already exists. include_params (`bool`, *optional*, defaults to `False`): Whether to include parameters in the returned data. pipeline_tag (`str`, *optional*): Tag to filter models by pipeline. token (`str`, *optional*): API token for Hugging Face authentication. gated (`bool`, *optional*, defaults to `False` ): A boolean to filter models on the Hub that are gated or not. skip_error (`bool`, *optional*, defaults to `False`): Whether to skip errors and return None. Returns: `Union[str, SearchResult, None]`: The model path or SearchResult or None. 
""" # Extract additional parameters from kwargs revision = kwargs.pop("revision", None) checkpoint_format = kwargs.pop("checkpoint_format", "single_file") download = kwargs.pop("download", False) force_download = kwargs.pop("force_download", False) include_params = kwargs.pop("include_params", False) pipeline_tag = kwargs.pop("pipeline_tag", None) token = kwargs.pop("token", None) gated = kwargs.pop("gated", False) skip_error = kwargs.pop("skip_error", False) file_list = [] hf_repo_info = {} hf_security_info = {} model_path = "" repo_id, file_name = "", "" diffusers_model_exists = False # Get the type and loading method for the keyword search_word_status = get_keyword_types(search_word) if search_word_status["type"]["hf_repo"]: hf_repo_info = hf_api.model_info(repo_id=search_word, securityStatus=True) if download: model_path = DiffusionPipeline.download( search_word, revision=revision, token=token, force_download=force_download, **kwargs, ) else: model_path = search_word elif search_word_status["type"]["hf_url"]: repo_id, weights_name = _extract_repo_id_and_weights_name(search_word) if download: model_path = hf_hub_download( repo_id=repo_id, filename=weights_name, force_download=force_download, token=token, ) else: model_path = search_word elif search_word_status["type"]["local"]: model_path = search_word elif search_word_status["type"]["civitai_url"]: if skip_error: return None else: raise ValueError("The URL for Civitai is invalid with `for_hf`. Please use `for_civitai` instead.") else: # Get model data from HF API hf_models = hf_api.list_models( search=search_word, direction=-1, limit=100, fetch_config=True, pipeline_tag=pipeline_tag, full=True, gated=gated, token=token, ) model_dicts = [asdict(value) for value in list(hf_models)] # Loop through models to find a suitable candidate for repo_info in model_dicts: repo_id = repo_info["id"] file_list = [] hf_repo_info = hf_api.model_info(repo_id=repo_id, securityStatus=True) # Lists files with security issues. hf_security_info = hf_repo_info.security_repo_status exclusion = [issue["path"] for issue in hf_security_info["filesWithIssues"]] # Checks for multi-folder diffusers model or valid files (models with security issues are excluded). 
if hf_security_info["scansDone"]: for info in repo_info["siblings"]: file_path = info["rfilename"] if "model_index.json" == file_path and checkpoint_format in [ "diffusers", "all", ]: diffusers_model_exists = True break elif ( any(file_path.endswith(ext) for ext in EXTENSION) and not any(config in file_path for config in CONFIG_FILE_LIST) and not any(exc in file_path for exc in exclusion) and os.path.basename(os.path.dirname(file_path)) not in DIFFUSERS_CONFIG_DIR ): file_list.append(file_path) # Exit from the loop if a multi-folder diffusers model or valid file is found if diffusers_model_exists or file_list: break else: # Handle case where no models match the criteria if skip_error: return None else: raise ValueError("No models matching your criteria were found on huggingface.") if diffusers_model_exists: if download: model_path = DiffusionPipeline.download( repo_id, token=token, **kwargs, ) else: model_path = repo_id elif file_list: # Sort and find the safest model file_name = next( (model for model in sorted(file_list, reverse=True) if re.search(r"(?i)[-_](safe|sfw)", model)), file_list[0], ) if download: model_path = hf_hub_download( repo_id=repo_id, filename=file_name, revision=revision, token=token, force_download=force_download, ) # `pathlib.PosixPath` may be returned if model_path: model_path = str(model_path) if file_name: download_url = f"https://huggingface.co/{repo_id}/blob/main/{file_name}" else: download_url = f"https://huggingface.co/{repo_id}" output_info = get_keyword_types(model_path) if include_params: return SearchResult( model_path=model_path or download_url, loading_method=output_info["loading_method"], checkpoint_format=output_info["checkpoint_format"], repo_status=RepoStatus(repo_id=repo_id, repo_hash=hf_repo_info.sha, version=revision), model_status=ModelStatus( search_word=search_word, site_url=download_url, download_url=download_url, file_name=file_name, local=download, ), extra_status=ExtraStatus(trained_words=None), ) else: return model_path def search_civitai(search_word: str, **kwargs) -> Union[str, SearchResult, None]: r""" Downloads a model from Civitai. Parameters: search_word (`str`): The search query string. model_type (`str`, *optional*, defaults to `Checkpoint`): The type of model to search for. sort (`str`, *optional*): The order in which you wish to sort the results(for example, `Highest Rated`, `Most Downloaded`, `Newest`). base_model (`str`, *optional*): The base model to filter by. download (`bool`, *optional*, defaults to `False`): Whether to download the model. force_download (`bool`, *optional*, defaults to `False`): Whether to force the download if the model already exists. token (`str`, *optional*): API token for Civitai authentication. include_params (`bool`, *optional*, defaults to `False`): Whether to include parameters in the returned data. cache_dir (`str`, `Path`, *optional*): Path to the folder where cached files are stored. resume (`bool`, *optional*, defaults to `False`): Whether to resume an incomplete download. skip_error (`bool`, *optional*, defaults to `False`): Whether to skip errors and return None. Returns: `Union[str, SearchResult, None]`: The model path or ` SearchResult` or None. 
""" # Extract additional parameters from kwargs model_type = kwargs.pop("model_type", "Checkpoint") sort = kwargs.pop("sort", None) download = kwargs.pop("download", False) base_model = kwargs.pop("base_model", None) force_download = kwargs.pop("force_download", False) token = kwargs.pop("token", None) include_params = kwargs.pop("include_params", False) resume = kwargs.pop("resume", False) cache_dir = kwargs.pop("cache_dir", None) skip_error = kwargs.pop("skip_error", False) # Initialize additional variables with default values model_path = "" repo_name = "" repo_id = "" version_id = "" trainedWords = "" models_list = [] selected_repo = {} selected_model = {} selected_version = {} civitai_cache_dir = cache_dir or os.path.join(CACHE_HOME, "Civitai") # Set up parameters and headers for the CivitAI API request params = { "query": search_word, "types": model_type, "limit": 20, } if base_model is not None: if not isinstance(base_model, list): base_model = [base_model] params["baseModel"] = base_model if sort is not None: params["sort"] = sort headers = {} if token: headers["Authorization"] = f"Bearer {token}" try: # Make the request to the CivitAI API response = requests.get("https://civitai.com/api/v1/models", params=params, headers=headers) response.raise_for_status() except requests.exceptions.HTTPError as err: raise requests.HTTPError(f"Could not get elements from the URL: {err}") else: try: data = response.json() except AttributeError: if skip_error: return None else: raise ValueError("Invalid JSON response") # Sort repositories by download count in descending order sorted_repos = sorted(data["items"], key=lambda x: x["stats"]["downloadCount"], reverse=True) for selected_repo in sorted_repos: repo_name = selected_repo["name"] repo_id = selected_repo["id"] # Sort versions within the selected repo by download count sorted_versions = sorted( selected_repo["modelVersions"], key=lambda x: x["stats"]["downloadCount"], reverse=True, ) for selected_version in sorted_versions: version_id = selected_version["id"] trainedWords = selected_version["trainedWords"] models_list = [] # When searching for textual inversion, results other than the values entered for the base model may come up, so check again. if base_model is None or selected_version["baseModel"] in base_model: for model_data in selected_version["files"]: # Check if the file passes security scans and has a valid extension file_name = model_data["name"] if ( model_data["pickleScanResult"] == "Success" and model_data["virusScanResult"] == "Success" and any(file_name.endswith(ext) for ext in EXTENSION) and os.path.basename(os.path.dirname(file_name)) not in DIFFUSERS_CONFIG_DIR ): file_status = { "filename": file_name, "download_url": model_data["downloadUrl"], } models_list.append(file_status) if models_list: # Sort the models list by filename and find the safest model sorted_models = sorted(models_list, key=lambda x: x["filename"], reverse=True) selected_model = next( ( model_data for model_data in sorted_models if bool(re.search(r"(?i)[-_](safe|sfw)", model_data["filename"])) ), sorted_models[0], ) break else: continue break # Exception handling when search candidates are not found if not selected_model: if skip_error: return None else: raise ValueError("No model found. Please try changing the word you are searching for.") # Define model file status file_name = selected_model["filename"] download_url = selected_model["download_url"] # Handle file download and setting model information if download: # The path where the model is to be saved. 
model_path = os.path.join(str(civitai_cache_dir), str(repo_id), str(version_id), str(file_name)) # Download Model File file_downloader( url=download_url, save_path=model_path, resume=resume, force_download=force_download, displayed_filename=file_name, headers=headers, **kwargs, ) else: model_path = download_url output_info = get_keyword_types(model_path) if not include_params: return model_path else: return SearchResult( model_path=model_path, loading_method=output_info["loading_method"], checkpoint_format=output_info["checkpoint_format"], repo_status=RepoStatus(repo_id=repo_name, repo_hash=repo_id, version=version_id), model_status=ModelStatus( search_word=search_word, site_url=f"https://civitai.com/models/{repo_id}?modelVersionId={version_id}", download_url=download_url, file_name=file_name, local=output_info["type"]["local"], ), extra_status=ExtraStatus(trained_words=trainedWords or None), ) def add_methods(pipeline): r""" Add methods from `AutoConfig` to the pipeline. Parameters: pipeline (`Pipeline`): The pipeline to which the methods will be added. """ for attr_name in dir(AutoConfig): attr_value = getattr(AutoConfig, attr_name) if callable(attr_value) and not attr_name.startswith("__"): setattr(pipeline, attr_name, types.MethodType(attr_value, pipeline)) return pipeline class AutoConfig: def auto_load_textual_inversion( self, pretrained_model_name_or_path: Union[str, List[str]], token: Optional[Union[str, List[str]]] = None, base_model: Optional[Union[str, List[str]]] = None, tokenizer=None, text_encoder=None, **kwargs, ): r""" Load Textual Inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and Automatic1111 formats are supported). Parameters: pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`): Can be either one of the following or a list of them: - Search keywords for pretrained model (for example `EasyNegative`). - A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual inversion weights. - A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). token (`str` or `List[str]`, *optional*): Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a list, then `token` must also be a list of equal length. text_encoder ([`~transformers.CLIPTextModel`], *optional*): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). If not specified, function will take self.tokenizer. tokenizer ([`~transformers.CLIPTokenizer`], *optional*): A `CLIPTokenizer` to tokenize text. If not specified, function will take self.tokenizer. weight_name (`str`, *optional*): Name of a custom weight file. This should be used when: - The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight name such as `text_inv.bin`. - The saved textual inversion file is in the Automatic1111 format. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. 
force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you're downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. Examples: ```py >>> from auto_diffusers import EasyPipelineForText2Image >>> pipeline = EasyPipelineForText2Image.from_huggingface("stable-diffusion-v1-5") >>> pipeline.auto_load_textual_inversion("EasyNegative", token="EasyNegative") >>> image = pipeline(prompt).images[0] ``` """ # 1. Set tokenizer and text encoder tokenizer = tokenizer or getattr(self, "tokenizer", None) text_encoder = text_encoder or getattr(self, "text_encoder", None) # Check if tokenizer and text encoder are provided if tokenizer is None or text_encoder is None: raise ValueError("Tokenizer and text encoder must be provided.") # 2. Normalize inputs pretrained_model_name_or_paths = ( [pretrained_model_name_or_path] if not isinstance(pretrained_model_name_or_path, list) else pretrained_model_name_or_path ) # 2.1 Normalize tokens tokens = [token] if not isinstance(token, list) else token if tokens[0] is None: tokens = tokens * len(pretrained_model_name_or_paths) for check_token in tokens: # Check if token is already in tokenizer vocabulary if check_token in tokenizer.get_vocab(): raise ValueError( f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder." ) expected_shape = text_encoder.get_input_embeddings().weight.shape[-1] # Expected shape of tokenizer for search_word in pretrained_model_name_or_paths: if isinstance(search_word, str): # Update kwargs to ensure the model is downloaded and parameters are included _status = { "download": True, "include_params": True, "skip_error": False, "model_type": "TextualInversion", } # Get tags for the base model of textual inversion compatible with tokenizer. # If the tokenizer is 768-dimensional, set tags for SD 1.x and SDXL. # If the tokenizer is 1024-dimensional, set tags for SD 2.x. 
if expected_shape in TOKENIZER_SHAPE_MAP: # Retrieve the appropriate tags from the TOKENIZER_SHAPE_MAP based on the expected shape tags = TOKENIZER_SHAPE_MAP[expected_shape] if base_model is not None: if isinstance(base_model, list): tags.extend(base_model) else: tags.append(base_model) _status["base_model"] = tags kwargs.update(_status) # Search for the model on Civitai and get the model status textual_inversion_path = search_civitai(search_word, **kwargs) logger.warning( f"textual_inversion_path: {search_word} -> {textual_inversion_path.model_status.site_url}" ) pretrained_model_name_or_paths[ pretrained_model_name_or_paths.index(search_word) ] = textual_inversion_path.model_path self.load_textual_inversion( pretrained_model_name_or_paths, token=tokens, tokenizer=tokenizer, text_encoder=text_encoder, **kwargs ) def auto_load_lora_weights( self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs ): r""" Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into `self.unet`. See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded into `self.text_encoder`. Parameters: pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. adapter_name (`str`, *optional*): Adapter name to be used for referencing the loaded adapter model. If not specified, it will use `default_{i}` where i is the total number of adapters being loaded. low_cpu_mem_usage (`bool`, *optional*): Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights. kwargs (`dict`, *optional*): See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`]. """ if isinstance(pretrained_model_name_or_path_or_dict, str): # Update kwargs to ensure the model is downloaded and parameters are included _status = { "download": True, "include_params": True, "skip_error": False, "model_type": "LORA", } kwargs.update(_status) # Search for the model on Civitai and get the model status lora_path = search_civitai(pretrained_model_name_or_path_or_dict, **kwargs) logger.warning(f"lora_path: {lora_path.model_status.site_url}") logger.warning(f"trained_words: {lora_path.extra_status.trained_words}") pretrained_model_name_or_path_or_dict = lora_path.model_path self.load_lora_weights(pretrained_model_name_or_path_or_dict, adapter_name=adapter_name, **kwargs) class EasyPipelineForText2Image(AutoPipelineForText2Image): r""" [`EasyPipelineForText2Image`] is a generic pipeline class that instantiates a text-to-image pipeline class. The specific underlying pipeline class is automatically selected from either the [`~EasyPipelineForText2Image.from_pretrained`], [`~EasyPipelineForText2Image.from_pipe`], [`~EasyPipelineForText2Image.from_huggingface`] or [`~EasyPipelineForText2Image.from_civitai`] methods. This class cannot be instantiated using `__init__()` (throws an error). Class attributes: - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the diffusion pipeline's components. 
""" config_name = "model_index.json" def __init__(self, *args, **kwargs): # EnvironmentError is returned super().__init__() @classmethod @validate_hf_hub_args def from_huggingface(cls, pretrained_model_link_or_path, **kwargs): r""" Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A keyword to search for Hugging Face (for example `Stable Diffusion`) - Link to `.ckpt` or `.safetensors` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.safetensors"`) on the Hub. - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. checkpoint_format (`str`, *optional*, defaults to `"single_file"`): The format of the model checkpoint. pipeline_tag (`str`, *optional*): Tag to filter models by pipeline. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. custom_revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id similar to `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if device_map contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. gated (`bool`, *optional*, defaults to `False` ): A boolean to filter models on the Hub that are gated or not. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. variant (`str`, *optional*): Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. <Tip> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `huggingface-cli login`. 
</Tip> Examples: ```py >>> from auto_diffusers import EasyPipelineForText2Image >>> pipeline = EasyPipelineForText2Image.from_huggingface("stable-diffusion-v1-5") >>> image = pipeline(prompt).images[0] ``` """ # Update kwargs to ensure the model is downloaded and parameters are included _status = { "download": True, "include_params": True, "skip_error": False, "pipeline_tag": "text-to-image", } kwargs.update(_status) # Search for the model on Hugging Face and get the model status hf_checkpoint_status = search_huggingface(pretrained_model_link_or_path, **kwargs) logger.warning(f"checkpoint_path: {hf_checkpoint_status.model_status.download_url}") checkpoint_path = hf_checkpoint_status.model_path # Check the format of the model checkpoint if hf_checkpoint_status.loading_method == "from_single_file": # Load the pipeline from a single file checkpoint pipeline = load_pipeline_from_single_file( pretrained_model_or_path=checkpoint_path, pipeline_mapping=SINGLE_FILE_CHECKPOINT_TEXT2IMAGE_PIPELINE_MAPPING, **kwargs, ) else: pipeline = cls.from_pretrained(checkpoint_path, **kwargs) return add_methods(pipeline) @classmethod def from_civitai(cls, pretrained_model_link_or_path, **kwargs): r""" Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A keyword to search for Hugging Face (for example `Stable Diffusion`) - Link to `.ckpt` or `.safetensors` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.safetensors"`) on the Hub. - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. model_type (`str`, *optional*, defaults to `Checkpoint`): The type of model to search for. (for example `Checkpoint`, `TextualInversion`, `LORA`, `Controlnet`) base_model (`str`, *optional*): The base model to filter by. cache_dir (`str`, `Path`, *optional*): Path to the folder where cached files are stored. resume (`bool`, *optional*, defaults to `False`): Whether to resume an incomplete download. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str`, *optional*): The token to use as HTTP bearer authorization for remote files. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
                For more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
                each GPU and the available CPU RAM if unset.
            offload_folder (`str` or `os.PathLike`, *optional*):
                The path to offload weights if device_map contains the value `"disk"`.
            offload_state_dict (`bool`, *optional*):
                If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
                the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
                when there is some disk offload.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading only loading the pretrained weights and not initializing the weights. This also
                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the safetensors weights are downloaded if they're available **and** if the
                safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
                weights. If set to `False`, safetensors weights are not loaded.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
                class). The overwritten components are passed directly to the pipelines `__init__` method. See example
                below for more information.

        <Tip>

        To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
        `huggingface-cli login`.

        </Tip>

        Examples:

        ```py
        >>> from auto_diffusers import EasyPipelineForText2Image

        >>> pipeline = EasyPipelineForText2Image.from_civitai("stable-diffusion-v1-5")
        >>> image = pipeline(prompt).images[0]
        ```
        """
        # Update kwargs to ensure the model is downloaded and parameters are included
        _status = {
            "download": True,
            "include_params": True,
            "skip_error": False,
            "model_type": "Checkpoint",
        }
        kwargs.update(_status)

        # Search for the model on Civitai and get the model status
        checkpoint_status = search_civitai(pretrained_model_link_or_path, **kwargs)
        logger.warning(f"checkpoint_path: {checkpoint_status.model_status.site_url}")

        checkpoint_path = checkpoint_status.model_path

        # Load the pipeline from a single file checkpoint
        pipeline = load_pipeline_from_single_file(
            pretrained_model_or_path=checkpoint_path,
            pipeline_mapping=SINGLE_FILE_CHECKPOINT_TEXT2IMAGE_PIPELINE_MAPPING,
            **kwargs,
        )

        return add_methods(pipeline)


class EasyPipelineForImage2Image(AutoPipelineForImage2Image):
    r"""

    [`EasyPipelineForImage2Image`] is a generic pipeline class that instantiates an image-to-image pipeline class. The
    specific underlying pipeline class is automatically selected from either the
    [`~EasyPipelineForImage2Image.from_pretrained`], [`~EasyPipelineForImage2Image.from_pipe`],
    [`~EasyPipelineForImage2Image.from_huggingface`] or [`~EasyPipelineForImage2Image.from_civitai`] methods.

    This class cannot be instantiated using `__init__()` (throws an error).

    Class attributes:

        - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the
          diffusion pipeline's components.
""" config_name = "model_index.json" def __init__(self, *args, **kwargs): # EnvironmentError is returned super().__init__() @classmethod @validate_hf_hub_args def from_huggingface(cls, pretrained_model_link_or_path, **kwargs): r""" Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A keyword to search for Hugging Face (for example `Stable Diffusion`) - Link to `.ckpt` or `.safetensors` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.safetensors"`) on the Hub. - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. checkpoint_format (`str`, *optional*, defaults to `"single_file"`): The format of the model checkpoint. pipeline_tag (`str`, *optional*): Tag to filter models by pipeline. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. custom_revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id similar to `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if device_map contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. gated (`bool`, *optional*, defaults to `False` ): A boolean to filter models on the Hub that are gated or not. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. variant (`str`, *optional*): Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. <Tip> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `huggingface-cli login`. 
</Tip> Examples: ```py >>> from auto_diffusers import EasyPipelineForImage2Image >>> pipeline = EasyPipelineForImage2Image.from_huggingface("stable-diffusion-v1-5") >>> image = pipeline(prompt, image).images[0] ``` """ # Update kwargs to ensure the model is downloaded and parameters are included _parmas = { "download": True, "include_params": True, "skip_error": False, "pipeline_tag": "image-to-image", } kwargs.update(_parmas) # Search for the model on Hugging Face and get the model status hf_checkpoint_status = search_huggingface(pretrained_model_link_or_path, **kwargs) logger.warning(f"checkpoint_path: {hf_checkpoint_status.model_status.download_url}") checkpoint_path = hf_checkpoint_status.model_path # Check the format of the model checkpoint if hf_checkpoint_status.loading_method == "from_single_file": # Load the pipeline from a single file checkpoint pipeline = load_pipeline_from_single_file( pretrained_model_or_path=checkpoint_path, pipeline_mapping=SINGLE_FILE_CHECKPOINT_IMAGE2IMAGE_PIPELINE_MAPPING, **kwargs, ) else: pipeline = cls.from_pretrained(checkpoint_path, **kwargs) return add_methods(pipeline) @classmethod def from_civitai(cls, pretrained_model_link_or_path, **kwargs): r""" Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A keyword to search for Hugging Face (for example `Stable Diffusion`) - Link to `.ckpt` or `.safetensors` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.safetensors"`) on the Hub. - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. model_type (`str`, *optional*, defaults to `Checkpoint`): The type of model to search for. (for example `Checkpoint`, `TextualInversion`, `LORA`, `Controlnet`) base_model (`str`, *optional*): The base model to filter by. cache_dir (`str`, `Path`, *optional*): Path to the folder where cached files are stored. resume (`bool`, *optional*, defaults to `False`): Whether to resume an incomplete download. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str`, *optional*): The token to use as HTTP bearer authorization for remote files. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
                For more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
                each GPU and the available CPU RAM if unset.
            offload_folder (`str` or `os.PathLike`, *optional*):
                The path to offload weights if device_map contains the value `"disk"`.
            offload_state_dict (`bool`, *optional*):
                If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
                the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
                when there is some disk offload.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading only loading the pretrained weights and not initializing the weights. This also
                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the safetensors weights are downloaded if they're available **and** if the
                safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
                weights. If set to `False`, safetensors weights are not loaded.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
                class). The overwritten components are passed directly to the pipelines `__init__` method. See example
                below for more information.

        <Tip>

        To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
        `huggingface-cli login`.

        </Tip>

        Examples:

        ```py
        >>> from auto_diffusers import EasyPipelineForImage2Image

        >>> pipeline = EasyPipelineForImage2Image.from_civitai("stable-diffusion-v1-5")
        >>> image = pipeline(prompt, image).images[0]
        ```
        """
        # Update kwargs to ensure the model is downloaded and parameters are included
        _status = {
            "download": True,
            "include_params": True,
            "skip_error": False,
            "model_type": "Checkpoint",
        }
        kwargs.update(_status)

        # Search for the model on Civitai and get the model status
        checkpoint_status = search_civitai(pretrained_model_link_or_path, **kwargs)
        logger.warning(f"checkpoint_path: {checkpoint_status.model_status.site_url}")

        checkpoint_path = checkpoint_status.model_path

        # Load the pipeline from a single file checkpoint
        pipeline = load_pipeline_from_single_file(
            pretrained_model_or_path=checkpoint_path,
            pipeline_mapping=SINGLE_FILE_CHECKPOINT_IMAGE2IMAGE_PIPELINE_MAPPING,
            **kwargs,
        )

        return add_methods(pipeline)


class EasyPipelineForInpainting(AutoPipelineForInpainting):
    r"""

    [`EasyPipelineForInpainting`] is a generic pipeline class that instantiates an inpainting pipeline class. The
    specific underlying pipeline class is automatically selected from either the
    [`~EasyPipelineForInpainting.from_pretrained`], [`~EasyPipelineForInpainting.from_pipe`],
    [`~EasyPipelineForInpainting.from_huggingface`] or [`~EasyPipelineForInpainting.from_civitai`] methods.

    This class cannot be instantiated using `__init__()` (throws an error).

    Class attributes:

        - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the
          diffusion pipeline's components.
""" config_name = "model_index.json" def __init__(self, *args, **kwargs): # EnvironmentError is returned super().__init__() @classmethod @validate_hf_hub_args def from_huggingface(cls, pretrained_model_link_or_path, **kwargs): r""" Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A keyword to search for Hugging Face (for example `Stable Diffusion`) - Link to `.ckpt` or `.safetensors` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.safetensors"`) on the Hub. - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. checkpoint_format (`str`, *optional*, defaults to `"single_file"`): The format of the model checkpoint. pipeline_tag (`str`, *optional*): Tag to filter models by pipeline. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. custom_revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id similar to `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
For more information about each option see [designing a device map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier for the maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. offload_folder (`str` or `os.PathLike`, *optional*): The path to offload weights if device_map contains the value `"disk"`. offload_state_dict (`bool`, *optional*): If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): Speed up model loading only loading the pretrained weights and not initializing the weights. This also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this argument to `True` will raise an error. use_safetensors (`bool`, *optional*, defaults to `None`): If set to `None`, the safetensors weights are downloaded if they're available **and** if the safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors weights. If set to `False`, safetensors weights are not loaded. gated (`bool`, *optional*, defaults to `False` ): A boolean to filter models on the Hub that are gated or not. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline class). The overwritten components are passed directly to the pipelines `__init__` method. See example below for more information. variant (`str`, *optional*): Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when loading `from_flax`. <Tip> To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with `huggingface-cli login`. 
</Tip> Examples: ```py >>> from auto_diffusers import EasyPipelineForInpainting >>> pipeline = EasyPipelineForInpainting.from_huggingface("stable-diffusion-2-inpainting") >>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0] ``` """ # Update kwargs to ensure the model is downloaded and parameters are included _status = { "download": True, "include_params": True, "skip_error": False, "pipeline_tag": "image-to-image", } kwargs.update(_status) # Search for the model on Hugging Face and get the model status hf_checkpoint_status = search_huggingface(pretrained_model_link_or_path, **kwargs) logger.warning(f"checkpoint_path: {hf_checkpoint_status.model_status.download_url}") checkpoint_path = hf_checkpoint_status.model_path # Check the format of the model checkpoint if hf_checkpoint_status.loading_method == "from_single_file": # Load the pipeline from a single file checkpoint pipeline = load_pipeline_from_single_file( pretrained_model_or_path=checkpoint_path, pipeline_mapping=SINGLE_FILE_CHECKPOINT_INPAINT_PIPELINE_MAPPING, **kwargs, ) else: pipeline = cls.from_pretrained(checkpoint_path, **kwargs) return add_methods(pipeline) @classmethod def from_civitai(cls, pretrained_model_link_or_path, **kwargs): r""" Parameters: pretrained_model_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A keyword to search for Hugging Face (for example `Stable Diffusion`) - Link to `.ckpt` or `.safetensors` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.safetensors"`) on the Hub. - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline hosted on the Hub. - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights saved using [`~DiffusionPipeline.save_pretrained`]. model_type (`str`, *optional*, defaults to `Checkpoint`): The type of model to search for. (for example `Checkpoint`, `TextualInversion`, `LORA`, `Controlnet`) base_model (`str`, *optional*): The base model to filter by. cache_dir (`str`, `Path`, *optional*): Path to the folder where cached files are stored. resume (`bool`, *optional*, defaults to `False`): Whether to resume an incomplete download. torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. token (`str`, *optional*): The token to use as HTTP bearer authorization for remote files. device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn’t need to be defined for each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
                For more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
                each GPU and the available CPU RAM if unset.
            offload_folder (`str` or `os.PathLike`, *optional*):
                The path to offload weights if device_map contains the value `"disk"`.
            offload_state_dict (`bool`, *optional*):
                If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
                the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
                when there is some disk offload.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading only loading the pretrained weights and not initializing the weights. This also
                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the safetensors weights are downloaded if they're available **and** if the
                safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
                weights. If set to `False`, safetensors weights are not loaded.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
                class). The overwritten components are passed directly to the pipelines `__init__` method. See example
                below for more information.

        <Tip>

        To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
        `huggingface-cli login`.

        </Tip>

        Examples:

        ```py
        >>> from auto_diffusers import EasyPipelineForInpainting

        >>> pipeline = EasyPipelineForInpainting.from_civitai("stable-diffusion-2-inpainting")
        >>> image = pipeline(prompt, image=init_image, mask_image=mask_image).images[0]
        ```
        """
        # Update kwargs to ensure the model is downloaded and parameters are included
        _status = {
            "download": True,
            "include_params": True,
            "skip_error": False,
            "model_type": "Checkpoint",
        }
        kwargs.update(_status)

        # Search for the model on Civitai and get the model status
        checkpoint_status = search_civitai(pretrained_model_link_or_path, **kwargs)
        logger.warning(f"checkpoint_path: {checkpoint_status.model_status.site_url}")

        checkpoint_path = checkpoint_status.model_path

        # Load the pipeline from a single file checkpoint
        pipeline = load_pipeline_from_single_file(
            pretrained_model_or_path=checkpoint_path,
            pipeline_mapping=SINGLE_FILE_CHECKPOINT_INPAINT_PIPELINE_MAPPING,
            **kwargs,
        )

        return add_methods(pipeline)
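For quick orientation, here is a minimal end-to-end sketch of the Civitai path described above (illustrative only, not taken from the repository): the search keywords are placeholders, network access to Civitai is assumed, and it is assumed that `add_methods` exposes the `auto_load_*` helpers shown earlier on the returned pipeline.

```py
import torch

from auto_diffusers import EasyPipelineForText2Image

# Placeholder keyword: any Civitai search keyword, model page URL, or file URL should work here.
pipeline = EasyPipelineForText2Image.from_civitai(
    "<civitai keyword or URL>",
    torch_dtype=torch.float16,
)
pipeline.to("cuda")

# Optionally pull a LoRA from Civitai as well (again a placeholder keyword),
# assuming `add_methods` attached `auto_load_lora_weights` to the pipeline.
pipeline.auto_load_lora_weights("<civitai lora keyword or URL>")

image = pipeline("a photo of an astronaut riding a horse").images[0]
image.save("civitai_text2image.png")
```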
diffusers/examples/model_search/pipeline_easy.py/0
{ "file_path": "diffusers/examples/model_search/pipeline_easy.py", "repo_id": "diffusers", "token_count": 39434 }
compute_environment: LOCAL_MACHINE debug: false deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 1.0 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: false zero_stage: 2 distributed_type: DEEPSPEED downcast_bf16: 'no' enable_cpu_affinity: false machine_rank: 0 main_training_function: main mixed_precision: 'no' num_machines: 1 num_processes: 1 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false
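This file is an `accelerate` config and is meant to be passed to the launcher, for example `accelerate launch --config_file ds2.yaml <training_script>.py ...` (the script name is illustrative). Inside the launched process, the resulting DeepSpeed settings can be inspected through the accelerator state, as in this minimal sketch:

```python
# Minimal sketch: assumes the process was started with `accelerate launch --config_file ds2.yaml ...`.
from accelerate import Accelerator
from accelerate.state import AcceleratorState

accelerator = Accelerator()  # picks up the DeepSpeed plugin built from the YAML above
plugin = AcceleratorState().deepspeed_plugin
if plugin is not None:
    print(plugin.zero_stage)                # 2
    print(plugin.gradient_clipping)         # 1.0
    print(plugin.offload_optimizer_device)  # cpu
    print(plugin.offload_param_device)      # cpu
```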
diffusers/examples/research_projects/flux_lora_quantization/ds2.yaml/0
{ "file_path": "diffusers/examples/research_projects/flux_lora_quantization/ds2.yaml", "repo_id": "diffusers", "token_count": 196 }
# Generating images using Flux and PyTorch/XLA

The `flux_inference` script shows how to generate images with Flux on TPU devices using PyTorch/XLA. It uses the Pallas kernel for flash attention for faster generation.

It has been tested on [Trillium](https://cloud.google.com/blog/products/compute/introducing-trillium-6th-gen-tpus) TPU versions. No other TPU types have been tested.

## Create TPU

To create a TPU on Google Cloud, follow [this guide](https://cloud.google.com/tpu/docs/v6e)

## Setup TPU environment

SSH into the VM and install PyTorch and PyTorch/XLA

```bash
pip install torch~=2.5.0 torch_xla[tpu]~=2.5.0 -f https://storage.googleapis.com/libtpu-releases/index.html -f https://storage.googleapis.com/libtpu-wheels/index.html
pip install torch_xla[pallas] -f https://storage.googleapis.com/jax-releases/jax_nightly_releases.html -f https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html
```

Verify that PyTorch and PyTorch/XLA were installed correctly:

```bash
python3 -c "import torch; import torch_xla;"
```

Install dependencies

```bash
pip install transformers accelerate sentencepiece structlog
pushd ../../..
pip install .
popd
```

## Run the inference job

### Authenticate

Run the following command to authenticate your token in order to download Flux weights.

```bash
huggingface-cli login
```

Then run:

```bash
python flux_inference.py
```

The script loads the text encoders onto the CPU and the Flux transformer and VAE models onto the TPU. The first time the script runs, compilation takes longer and the compiled programs are written to a cache; on subsequent runs, compilation is much faster, and the later inference passes are the fastest.

On a Trillium v6e-4, you should expect ~9 sec / 4 images or 2.25 sec / image (as devices run generation in parallel):

```bash
WARNING:root:libtpu.so and TPU device found. Setting PJRT_DEVICE=TPU.
Loading checkpoint shards: 100%|███████████████████████████████| 2/2 [00:00<00:00, 7.01it/s]
Loading pipeline components...: 40%|██████████▍ | 2/5 [00:00<00:00, 3.78it/s]You set `add_prefix_space`. The tokenizer needs to be converted from the slow tokenizers
Loading pipeline components...: 100%|██████████████████████████| 5/5 [00:00<00:00, 6.72it/s]
2025-01-10 00:51:25 [info ] loading flux from black-forest-labs/FLUX.1-dev
2025-01-10 00:51:25 [info ] loading flux from black-forest-labs/FLUX.1-dev
2025-01-10 00:51:26 [info ] loading flux from black-forest-labs/FLUX.1-dev
2025-01-10 00:51:26 [info ] loading flux from black-forest-labs/FLUX.1-dev
Loading pipeline components...: 100%|██████████████████████████| 3/3 [00:00<00:00, 4.29it/s]
Loading pipeline components...: 100%|██████████████████████████| 3/3 [00:00<00:00, 3.26it/s]
Loading pipeline components...: 100%|██████████████████████████| 3/3 [00:00<00:00, 3.27it/s]
Loading pipeline components...: 100%|██████████████████████████| 3/3 [00:00<00:00, 3.25it/s]
2025-01-10 00:51:34 [info ] starting compilation run...
2025-01-10 00:51:35 [info ] starting compilation run...
2025-01-10 00:51:37 [info ] starting compilation run...
2025-01-10 00:51:37 [info ] starting compilation run...
2025-01-10 00:52:52 [info ] compilation took 78.5155531649998 sec.
2025-01-10 00:52:53 [info ] starting inference run...
2025-01-10 00:52:57 [info ] compilation took 79.52986721400157 sec.
2025-01-10 00:52:57 [info ] compilation took 81.91776501700042 sec.
2025-01-10 00:52:57 [info ] compilation took 80.24951512600092 sec.
2025-01-10 00:52:57 [info ] starting inference run...
2025-01-10 00:52:57 [info ] starting inference run... 2025-01-10 00:52:58 [info ] starting inference run... 2025-01-10 00:53:22 [info ] inference time: 25.112665320000815 2025-01-10 00:53:30 [info ] inference time: 7.7019307739992655 2025-01-10 00:53:38 [info ] inference time: 7.693858365000779 2025-01-10 00:53:46 [info ] inference time: 7.690621814001133 2025-01-10 00:53:53 [info ] inference time: 7.679490454000188 2025-01-10 00:54:01 [info ] inference time: 7.68949568500102 2025-01-10 00:54:09 [info ] inference time: 7.686633744000574 2025-01-10 00:54:16 [info ] inference time: 7.696786873999372 2025-01-10 00:54:24 [info ] inference time: 7.691988694999964 2025-01-10 00:54:32 [info ] inference time: 7.700649563999832 2025-01-10 00:54:39 [info ] inference time: 7.684993574001055 2025-01-10 00:54:47 [info ] inference time: 7.68343457499941 2025-01-10 00:54:55 [info ] inference time: 7.667921153999487 2025-01-10 00:55:02 [info ] inference time: 7.683585194001353 2025-01-10 00:55:06 [info ] avg. inference over 15 iterations took 8.61202360273334 sec. 2025-01-10 00:55:07 [info ] avg. inference over 15 iterations took 8.952725123600006 sec. 2025-01-10 00:55:10 [info ] inference time: 7.673799695001435 2025-01-10 00:55:10 [info ] avg. inference over 15 iterations took 8.849190365400379 sec. 2025-01-10 00:55:10 [info ] saved metric information as /tmp/metrics_report.txt 2025-01-10 00:55:12 [info ] avg. inference over 15 iterations took 8.940161458400205 sec. ```
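The `/tmp/metrics_report.txt` file mentioned in the last log lines is a PyTorch/XLA metrics dump; a report like it can be produced with the debug metrics API, as in this short sketch (the helper name and output path are illustrative, not part of the example script):

```python
# Illustrative sketch: dump PyTorch/XLA compile/execute counters and timings to a text file.
import torch_xla.debug.metrics as met

def save_metrics_report(path: str = "/tmp/metrics_report.txt") -> None:
    with open(path, "w") as f:
        f.write(met.metrics_report())  # human-readable summary of XLA metrics
```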
diffusers/examples/research_projects/pytorch_xla/inference/flux/README.md/0
{ "file_path": "diffusers/examples/research_projects/pytorch_xla/inference/flux/README.md", "repo_id": "diffusers", "token_count": 1997 }
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import math import os import random import shutil from pathlib import Path import accelerate import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.state import AcceleratorState from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from transformers.utils import ContextManagers import diffusers from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel, compute_snr from diffusers.utils import check_min_version, deprecate, is_wandb_available, make_image_grid from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.torch_utils import is_compiled_module if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.28.0.dev0") logger = get_logger(__name__, log_level="INFO") DATASET_NAME_MAPPING = { "lambdalabs/naruto-blip-captions": ("image", "text"), } def save_model_card( args, repo_id: str, images: list = None, repo_folder: str = None, ): img_str = "" if len(images) > 0: image_grid = make_image_grid(images, 1, len(args.validation_prompts)) image_grid.save(os.path.join(repo_folder, "val_imgs_grid.png")) img_str += "![val_imgs_grid](./val_imgs_grid.png)\n" model_description = f""" # Text-to-image finetuning - {repo_id} This pipeline was finetuned from **{args.pretrained_model_name_or_path}** on the **{args.dataset_name}** dataset. 
Below are some example images generated with the finetuned pipeline using the following prompts: {args.validation_prompts}: \n {img_str} ## Pipeline usage You can use the pipeline like so: ```python from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained("{repo_id}", torch_dtype=torch.float16) prompt = "{args.validation_prompts[0]}" image = pipeline(prompt).images[0] image.save("my_image.png") ``` ## Training info These are the key hyperparameters used during training: * Epochs: {args.num_train_epochs} * Learning rate: {args.learning_rate} * Batch size: {args.train_batch_size} * Gradient accumulation steps: {args.gradient_accumulation_steps} * Image resolution: {args.resolution} * Mixed-precision: {args.mixed_precision} """ wandb_info = "" if is_wandb_available(): wandb_run_url = None if wandb.run is not None: wandb_run_url = wandb.run.url if wandb_run_url is not None: wandb_info = f""" More information on all the CLI arguments and the environment are available on your [`wandb` run page]({wandb_run_url}). """ model_description += wandb_info model_card = load_or_create_model_card( repo_id_or_path=repo_id, from_training=True, license="creativeml-openrail-m", base_model=args.pretrained_model_name_or_path, model_description=model_description, inference=True, ) tags = ["stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "diffusers", "diffusers-training"] model_card = populate_model_card(model_card, tags=tags) model_card.save(os.path.join(repo_folder, "README.md")) def log_validation(vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, epoch): logger.info("Running validation... ") pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, vae=accelerator.unwrap_model(vae), text_encoder=accelerator.unwrap_model(text_encoder), tokenizer=tokenizer, unet=accelerator.unwrap_model(unet), safety_checker=None, revision=args.revision, variant=args.variant, torch_dtype=weight_dtype, ) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) images = [] for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") elif tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompts[i]}") for i, image in enumerate(images) ] } ) else: logger.warning(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() return images def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--input_perturbation", type=float, default=0, help="The scale of input perturbation. Recommended 0.1." 
) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--variant", type=str, default=None, help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--validation_prompts", type=str, default=None, nargs="+", help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."), ) parser.add_argument( "--output_dir", type=str, default="sd-model-finetuned", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " "More details here: https://arxiv.org/abs/2303.09556.", ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument( "--non_ema_revision", type=str, default=None, required=False, help=( "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" " remote repository specified with --pretrained_model_name_or_path." ), ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--prediction_type", type=str, default=None, help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.", ) parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. 
Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=("Max number of checkpoints to store."), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") parser.add_argument( "--loss_type", type=str, default="l2", choices=["l2", "huber", "smooth_l1"], help="The type of loss to use and whether it's timestep-scheduled. See Issue #7488 for more info.", ) parser.add_argument( "--huber_schedule", type=str, default="snr", choices=["constant", "exponential", "snr"], help="The schedule to use for the huber losses parameter", ) parser.add_argument( "--huber_c", type=float, default=0.1, help="The huber loss parameter. 
Only used if one of the huber loss modes (huber or smooth l1) is selected with loss_type.", ) parser.add_argument( "--validation_epochs", type=int, default=5, help="Run validation every X epochs.", ) parser.add_argument( "--tracker_project_name", type=str, default="text2image-fine-tune", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") # default to using the same revision for the non-ema model if not specified if args.non_ema_revision is None: args.non_ema_revision = args.revision return args # NOTE: if you're using the scheduled version, huber_c has to depend on the timesteps already def conditional_loss( model_pred: torch.Tensor, target: torch.Tensor, reduction: str = "mean", loss_type: str = "l2", huber_c: float = 0.1, ): if loss_type == "l2": loss = F.mse_loss(model_pred, target, reduction=reduction) elif loss_type == "huber": loss = 2 * huber_c * (torch.sqrt((model_pred - target) ** 2 + huber_c**2) - huber_c) if reduction == "mean": loss = torch.mean(loss) elif reduction == "sum": loss = torch.sum(loss) elif loss_type == "smooth_l1": loss = 2 * (torch.sqrt((model_pred - target) ** 2 + huber_c**2) - huber_c) if reduction == "mean": loss = torch.mean(loss) elif reduction == "sum": loss = torch.sum(loss) else: raise NotImplementedError(f"Unsupported Loss Type {loss_type}") return loss def main(): args = parse_args() if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) if args.non_ema_revision is not None: deprecate( "non_ema_revision!=None", "0.15.0", message=( "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" " use `--variant=non_ema` instead." ), ) logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load scheduler, tokenizer and models. noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) def deepspeed_zero_init_disabled_context_manager(): """ returns either a context list that includes one that will disable zero.Init or an empty context list """ deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None if deepspeed_plugin is None: return [] return [deepspeed_plugin.zero3_init_context_manager(enable=False)] # Currently Accelerate doesn't know how to handle multiple models under Deepspeed ZeRO stage 3. # For this to work properly all models must be run through `accelerate.prepare`. But accelerate # will try to assign the same optimizer with the same weights to all models during # `deepspeed.initialize`, which of course doesn't work. # # For now the following workaround will partially support Deepspeed ZeRO-3, by excluding the 2 # frozen models from being partitioned during `zero.Init` which gets called during # `from_pretrained` So CLIPTextModel and AutoencoderKL will not enjoy the parameter sharding # across multiple gpus and only UNet2DConditionModel will get ZeRO sharded. with ContextManagers(deepspeed_zero_init_disabled_context_manager()): text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision ) # Freeze vae and text_encoder and set unet to trainable vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.train() # Create EMA for the unet. if args.use_ema: ema_unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant ) ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. 
Make sure it is installed correctly") # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for _ in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW optimizer = optimizer_cls( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, data_dir=args.train_data_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. 
dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) if args.image_column is None: image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" ) if args.caption_column is None: caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: caption_column = args.caption_column if caption_column not in column_names: raise ValueError( f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" ) # Preprocessing the datasets. # We need to tokenize input captions and transform the images. def tokenize_captions(examples, is_train=True): captions = [] for caption in examples[caption_column]: if isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) else: raise ValueError( f"Caption column `{caption_column}` should contain either strings or lists of strings." ) inputs = tokenizer( captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" ) return inputs.input_ids # Preprocessing the datasets. train_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["input_ids"] = tokenize_captions(examples) return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.stack([example["input_ids"] for example in examples]) return {"pixel_values": pixel_values, "input_ids": input_ids} # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. 
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) if args.use_ema: ema_unet.to(accelerator.device) # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision # as these weights are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 args.mixed_precision = accelerator.mixed_precision elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 args.mixed_precision = accelerator.mixed_precision # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) tracker_config.pop("validation_prompts") accelerator.init_trackers(args.tracker_project_name, tracker_config) # Function for unwrapping if model was compiled with `torch.compile`. def unwrap_model(model): model = accelerator.unwrap_model(model) model = model._orig_mod if is_compiled_module(model) else model return model # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None initial_global_step = 0 else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) initial_global_step = global_step first_epoch = global_step // num_update_steps_per_epoch else: initial_global_step = 0 progress_bar = tqdm( range(0, args.max_train_steps), initial=initial_global_step, desc="Steps", # Only show the progress bar once on each machine. 
disable=not accelerator.is_local_main_process, ) for epoch in range(first_epoch, args.num_train_epochs): train_loss = 0.0 for step, batch in enumerate(train_dataloader): with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) if args.noise_offset: # https://www.crosslabs.org//blog/diffusion-with-offset-noise noise += args.noise_offset * torch.randn( (latents.shape[0], latents.shape[1], 1, 1), device=latents.device ) if args.input_perturbation: new_noise = noise + args.input_perturbation * torch.randn_like(noise) bsz = latents.shape[0] # Sample a random timestep for each image if args.loss_type == "huber" or args.loss_type == "smooth_l1": timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (1,), device="cpu") timestep = timesteps.item() if args.huber_schedule == "exponential": alpha = -math.log(args.huber_c) / noise_scheduler.config.num_train_timesteps huber_c = math.exp(-alpha * timestep) elif args.huber_schedule == "snr": alphas_cumprod = noise_scheduler.alphas_cumprod[timestep] sigmas = ((1.0 - alphas_cumprod) / alphas_cumprod) ** 0.5 huber_c = (1 - args.huber_c) / (1 + sigmas) ** 2 + args.huber_c elif args.huber_schedule == "constant": huber_c = args.huber_c else: raise NotImplementedError(f"Unknown Huber loss schedule {args.huber_schedule}!") timesteps = timesteps.repeat(bsz).to(latents.device) elif args.loss_type == "l2": timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ) huber_c = 1 # may be anything, as it's not used else: raise NotImplementedError(f"Unknown loss type {args.loss_type}") timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) if args.input_perturbation: noisy_latents = noise_scheduler.add_noise(latents, new_noise, timesteps) else: noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"], return_dict=False)[0] # Get the target for loss depending on the prediction type if args.prediction_type is not None: # set prediction_type of scheduler if defined noise_scheduler.register_to_config(prediction_type=args.prediction_type) if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") # Predict the noise residual and compute loss model_pred = unet(noisy_latents, timesteps, encoder_hidden_states, return_dict=False)[0] if args.snr_gamma is None: loss = conditional_loss( model_pred.float(), target.float(), reduction="mean", loss_type=args.loss_type, huber_c=huber_c ) else: # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. # Since we predict the noise instead of x_0, the original formulation is slightly changed. # This is discussed in Section 4.2 of the same paper. 
snr = compute_snr(noise_scheduler, timesteps) mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( dim=1 )[0] if noise_scheduler.config.prediction_type == "epsilon": mse_loss_weights = mse_loss_weights / snr elif noise_scheduler.config.prediction_type == "v_prediction": mse_loss_weights = mse_loss_weights / (snr + 1) loss = conditional_loss( model_pred.float(), target.float(), reduction="none", loss_type=args.loss_type, huber_c=huber_c ) loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights loss = loss.mean() # Gather the losses across all processes for logging (if we use distributed training). avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: # _before_ saving state, check if this save would set us over the `checkpoints_total_limit` if args.checkpoints_total_limit is not None: checkpoints = os.listdir(args.output_dir) checkpoints = [d for d in checkpoints if d.startswith("checkpoint")] checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1])) # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints if len(checkpoints) >= args.checkpoints_total_limit: num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1 removing_checkpoints = checkpoints[0:num_to_remove] logger.info( f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints" ) logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}") for removing_checkpoint in removing_checkpoints: removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint) shutil.rmtree(removing_checkpoint) save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompts is not None and epoch % args.validation_epochs == 0: if args.use_ema: # Store the UNet parameters temporarily and load the EMA parameters to perform inference. ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) log_validation( vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, global_step, ) if args.use_ema: # Switch back to the original UNet parameters. ema_unet.restore(unet.parameters()) # Create the pipeline using the trained modules and save it. 
accelerator.wait_for_everyone() if accelerator.is_main_process: unet = unwrap_model(unet) if args.use_ema: ema_unet.copy_to(unet.parameters()) pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, revision=args.revision, variant=args.variant, ) pipeline.save_pretrained(args.output_dir) # Run a final round of inference. images = [] if args.validation_prompts is not None: logger.info("Running inference for collecting generated images...") pipeline = pipeline.to(accelerator.device) pipeline.torch_dtype = weight_dtype pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) if args.push_to_hub: save_model_card(args, repo_id, images, repo_folder=args.output_dir) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
diffusers/examples/research_projects/scheduled_huber_loss_training/text_to_image/train_text_to_image.py/0
{ "file_path": "diffusers/examples/research_projects/scheduled_huber_loss_training/text_to_image/train_text_to_image.py", "repo_id": "diffusers", "token_count": 21063 }
import argparse import logging import math import os import random from pathlib import Path import jax import jax.numpy as jnp import numpy as np import optax import PIL import torch import torch.utils.checkpoint import transformers from flax import jax_utils from flax.training import train_state from flax.training.common_utils import shard from huggingface_hub import create_repo, upload_folder # TODO: remove and import from diffusers.utils when the new version of diffusers is released from packaging import version from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed from diffusers import ( FlaxAutoencoderKL, FlaxDDPMScheduler, FlaxPNDMScheduler, FlaxStableDiffusionPipeline, FlaxUNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker from diffusers.utils import check_min_version if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } # ------------------------------------------------------------------------------ # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.33.0.dev0") logger = logging.getLogger(__name__) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data." ) parser.add_argument( "--placeholder_token", type=str, default=None, required=True, help="A token to use as a placeholder for the concept.", ) parser.add_argument( "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word." ) parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'") parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.") parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution." ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." 
) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=5000, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--save_steps", type=int, default=500, help="Save learned_embeds.bin every X updates steps.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=True, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--use_auth_token", action="store_true", help=( "Will use the token generated when running `huggingface-cli login` (necessary to use this script with" " private models)." ), ) parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." 
), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.train_data_dir is None: raise ValueError("You must specify a train data directory.") return args imagenet_templates_small = [ "a photo of a {}", "a rendering of a {}", "a cropped photo of the {}", "the photo of a {}", "a photo of a clean {}", "a photo of a dirty {}", "a dark photo of the {}", "a photo of my {}", "a photo of the cool {}", "a close-up photo of a {}", "a bright photo of the {}", "a cropped photo of a {}", "a photo of the {}", "a good photo of the {}", "a photo of one {}", "a close-up photo of the {}", "a rendition of the {}", "a photo of the clean {}", "a rendition of a {}", "a photo of a nice {}", "a good photo of a {}", "a photo of the nice {}", "a photo of the small {}", "a photo of the weird {}", "a photo of the large {}", "a photo of a cool {}", "a photo of a small {}", ] imagenet_style_templates_small = [ "a painting in the style of {}", "a rendering in the style of {}", "a cropped painting in the style of {}", "the painting in the style of {}", "a clean painting in the style of {}", "a dirty painting in the style of {}", "a dark painting in the style of {}", "a picture in the style of {}", "a cool painting in the style of {}", "a close-up painting in the style of {}", "a bright painting in the style of {}", "a cropped painting in the style of {}", "a good painting in the style of {}", "a close-up painting in the style of {}", "a rendition in the style of {}", "a nice painting in the style of {}", "a small painting in the style of {}", "a weird painting in the style of {}", "a large painting in the style of {}", ] class TextualInversionDataset(Dataset): def __init__( self, data_root, tokenizer, learnable_property="object", # [object, style] size=512, repeats=100, interpolation="bicubic", flip_p=0.5, set="train", placeholder_token="*", center_crop=False, ): self.data_root = data_root self.tokenizer = tokenizer self.learnable_property = learnable_property self.size = size self.placeholder_token = placeholder_token self.center_crop = center_crop self.flip_p = flip_p self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] self.num_images = len(self.image_paths) self._length = self.num_images if set == "train": self._length = self.num_images * repeats self.interpolation = { "linear": PIL_INTERPOLATION["linear"], "bilinear": PIL_INTERPOLATION["bilinear"], "bicubic": PIL_INTERPOLATION["bicubic"], "lanczos": PIL_INTERPOLATION["lanczos"], }[interpolation] self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p) def __len__(self): return self._length def __getitem__(self, i): example = {} image = Image.open(self.image_paths[i % self.num_images]) if not image.mode == "RGB": image = image.convert("RGB") placeholder_string = self.placeholder_token text = random.choice(self.templates).format(placeholder_string) example["input_ids"] = self.tokenizer( text, padding="max_length", truncation=True, max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids[0] # default to score-sde preprocessing img = np.array(image).astype(np.uint8) if self.center_crop: crop = min(img.shape[0], img.shape[1]) ( h, w, ) = ( 
img.shape[0], img.shape[1], ) img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2] image = Image.fromarray(img) image = image.resize((self.size, self.size), resample=self.interpolation) image = self.flip_transform(image) image = np.array(image).astype(np.uint8) image = (image / 127.5 - 1.0).astype(np.float32) example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1) return example def resize_token_embeddings(model, new_num_tokens, initializer_token_id, placeholder_token_id, rng): if model.config.vocab_size == new_num_tokens or new_num_tokens is None: return model.config.vocab_size = new_num_tokens params = model.params old_embeddings = params["text_model"]["embeddings"]["token_embedding"]["embedding"] old_num_tokens, emb_dim = old_embeddings.shape initializer = jax.nn.initializers.normal() new_embeddings = initializer(rng, (new_num_tokens, emb_dim)) new_embeddings = new_embeddings.at[:old_num_tokens].set(old_embeddings) new_embeddings = new_embeddings.at[placeholder_token_id].set(new_embeddings[initializer_token_id]) params["text_model"]["embeddings"]["token_embedding"]["embedding"] = new_embeddings model.params = params return model def get_params_to_save(params): return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params)) def main(): args = parse_args() if args.seed is not None: set_seed(args.seed) if jax.process_index() == 0: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: transformers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() # Load the tokenizer and add the placeholder token as a additional special token if args.tokenizer_name: tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) elif args.pretrained_model_name_or_path: tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") # Add the placeholder token in tokenizer num_added_tokens = tokenizer.add_tokens(args.placeholder_token) if num_added_tokens == 0: raise ValueError( f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different" " `placeholder_token` that is not already in the tokenizer." 
) # Convert the initializer_token, placeholder_token to ids token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False) # Check if initializer_token is a single token or a sequence of tokens if len(token_ids) > 1: raise ValueError("The initializer token must be a single token.") initializer_token_id = token_ids[0] placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token) # Load models and create wrapper for stable diffusion text_encoder = FlaxCLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae, vae_params = FlaxAutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision ) unet, unet_params = FlaxUNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) # Create sampling rng rng = jax.random.PRNGKey(args.seed) rng, _ = jax.random.split(rng) # Resize the token embeddings as we are adding new special tokens to the tokenizer text_encoder = resize_token_embeddings( text_encoder, len(tokenizer), initializer_token_id, placeholder_token_id, rng ) original_token_embeds = text_encoder.params["text_model"]["embeddings"]["token_embedding"]["embedding"] train_dataset = TextualInversionDataset( data_root=args.train_data_dir, tokenizer=tokenizer, size=args.resolution, placeholder_token=args.placeholder_token, repeats=args.repeats, learnable_property=args.learnable_property, center_crop=args.center_crop, set="train", ) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) input_ids = torch.stack([example["input_ids"] for example in examples]) batch = {"pixel_values": pixel_values, "input_ids": input_ids} batch = {k: v.numpy() for k, v in batch.items()} return batch total_train_batch_size = args.train_batch_size * jax.local_device_count() train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=total_train_batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn ) # Optimization if args.scale_lr: args.learning_rate = args.learning_rate * total_train_batch_size constant_scheduler = optax.constant_schedule(args.learning_rate) optimizer = optax.adamw( learning_rate=constant_scheduler, b1=args.adam_beta1, b2=args.adam_beta2, eps=args.adam_epsilon, weight_decay=args.adam_weight_decay, ) def create_mask(params, label_fn): def _map(params, mask, label_fn): for k in params: if label_fn(k): mask[k] = "token_embedding" else: if isinstance(params[k], dict): mask[k] = {} _map(params[k], mask[k], label_fn) else: mask[k] = "zero" mask = {} _map(params, mask, label_fn) return mask def zero_grads(): # from https://github.com/deepmind/optax/issues/159#issuecomment-896459491 def init_fn(_): return () def update_fn(updates, state, params=None): return jax.tree_util.tree_map(jnp.zeros_like, updates), () return optax.GradientTransformation(init_fn, update_fn) # Zero out gradients of layers other than the token embedding layer tx = optax.multi_transform( {"token_embedding": optimizer, "zero": zero_grads()}, create_mask(text_encoder.params, lambda s: s == "token_embedding"), ) state = train_state.TrainState.create(apply_fn=text_encoder.__call__, params=text_encoder.params, tx=tx) noise_scheduler = FlaxDDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000 ) noise_scheduler_state = noise_scheduler.create_state() # Initialize our training train_rngs = jax.random.split(rng, 
jax.local_device_count()) # Define gradient train step fn def train_step(state, vae_params, unet_params, batch, train_rng): dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3) def compute_loss(params): vae_outputs = vae.apply( {"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode ) latents = vae_outputs.latent_dist.sample(sample_rng) # (NHWC) -> (NCHW) latents = jnp.transpose(latents, (0, 3, 1, 2)) latents = latents * vae.config.scaling_factor noise_rng, timestep_rng = jax.random.split(sample_rng) noise = jax.random.normal(noise_rng, latents.shape) bsz = latents.shape[0] timesteps = jax.random.randint( timestep_rng, (bsz,), 0, noise_scheduler.config.num_train_timesteps, ) noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps) encoder_hidden_states = state.apply_fn( batch["input_ids"], params=params, dropout_rng=dropout_rng, train=True )[0] # Predict the noise residual and compute loss model_pred = unet.apply( {"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, train=False ).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") loss = (target - model_pred) ** 2 loss = loss.mean() return loss grad_fn = jax.value_and_grad(compute_loss) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) # Keep the token embeddings fixed except the newly added embeddings for the concept, # as we only want to optimize the concept embeddings token_embeds = original_token_embeds.at[placeholder_token_id].set( new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"][placeholder_token_id] ) new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"] = token_embeds metrics = {"loss": loss} metrics = jax.lax.pmean(metrics, axis_name="batch") return new_state, metrics, new_train_rng # Create parallel version of the train and eval step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) vae_params = jax_utils.replicate(vae_params) unet_params = jax_utils.replicate(unet_params) # Train! num_update_steps_per_epoch = math.ceil(len(train_dataloader)) # Scheduler and math around the number of training steps. if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 epochs = tqdm(range(args.num_train_epochs), desc=f"Epoch ... 
(1/{args.num_train_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ train_metrics = [] steps_per_epoch = len(train_dataset) // total_train_batch_size train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False) # train for batch in train_dataloader: batch = shard(batch) state, train_metric, train_rngs = p_train_step(state, vae_params, unet_params, batch, train_rngs) train_metrics.append(train_metric) train_step_progress_bar.update(1) global_step += 1 if global_step >= args.max_train_steps: break if global_step % args.save_steps == 0: learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"][ "embedding" ][placeholder_token_id] learned_embeds_dict = {args.placeholder_token: learned_embeds} jnp.save( os.path.join(args.output_dir, "learned_embeds-" + str(global_step) + ".npy"), learned_embeds_dict ) train_metric = jax_utils.unreplicate(train_metric) train_step_progress_bar.close() epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})") # Create the pipeline using using the trained modules and save it. if jax.process_index() == 0: scheduler = FlaxPNDMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True ) safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained( "CompVis/stable-diffusion-safety-checker", from_pt=True ) pipeline = FlaxStableDiffusionPipeline( text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"), ) pipeline.save_pretrained( args.output_dir, params={ "text_encoder": get_params_to_save(state.params), "vae": get_params_to_save(vae_params), "unet": get_params_to_save(unet_params), "safety_checker": safety_checker.params, }, ) # Also save the newly trained embeddings learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"]["embedding"][ placeholder_token_id ] learned_embeds_dict = {args.placeholder_token: learned_embeds} jnp.save(os.path.join(args.output_dir, "learned_embeds.npy"), learned_embeds_dict) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) if __name__ == "__main__": main()
diffusers/examples/textual_inversion/textual_inversion_flax.py/0
{ "file_path": "diffusers/examples/textual_inversion/textual_inversion_flax.py", "repo_id": "diffusers", "token_count": 11076 }
import argparse import os import torch from huggingface_hub import create_repo, upload_folder from safetensors.torch import load_file, save_file def convert_motion_module(original_state_dict): converted_state_dict = {} for k, v in original_state_dict.items(): if "pos_encoder" in k: continue else: converted_state_dict[ k.replace(".norms.0", ".norm1") .replace(".norms.1", ".norm2") .replace(".ff_norm", ".norm3") .replace(".attention_blocks.0", ".attn1") .replace(".attention_blocks.1", ".attn2") .replace(".temporal_transformer", "") ] = v return converted_state_dict def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--ckpt_path", type=str, required=True, help="Path to checkpoint") parser.add_argument("--output_path", type=str, required=True, help="Path to output directory") parser.add_argument( "--push_to_hub", action="store_true", default=False, help="Whether to push the converted model to the HF or not", ) return parser.parse_args() if __name__ == "__main__": args = get_args() if args.ckpt_path.endswith(".safetensors"): state_dict = load_file(args.ckpt_path) else: state_dict = torch.load(args.ckpt_path, map_location="cpu") if "state_dict" in state_dict.keys(): state_dict = state_dict["state_dict"] conv_state_dict = convert_motion_module(state_dict) # convert to new format output_dict = {} for module_name, params in conv_state_dict.items(): if type(params) is not torch.Tensor: continue output_dict.update({f"unet.{module_name}": params}) os.makedirs(args.output_path, exist_ok=True) filepath = os.path.join(args.output_path, "diffusion_pytorch_model.safetensors") save_file(output_dict, filepath) if args.push_to_hub: repo_id = create_repo(args.output_path, exist_ok=True).repo_id upload_folder(repo_id=repo_id, folder_path=args.output_path, repo_type="model")
diffusers/scripts/convert_animatediff_motion_lora_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_animatediff_motion_lora_to_diffusers.py", "repo_id": "diffusers", "token_count": 931 }
import argparse import os import torch from torchvision.datasets.utils import download_url from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, Transformer2DModel pretrained_models = {512: "DiT-XL-2-512x512.pt", 256: "DiT-XL-2-256x256.pt"} def download_model(model_name): """ Downloads a pre-trained DiT model from the web. """ local_path = f"pretrained_models/{model_name}" if not os.path.isfile(local_path): os.makedirs("pretrained_models", exist_ok=True) web_path = f"https://dl.fbaipublicfiles.com/DiT/models/{model_name}" download_url(web_path, "pretrained_models") model = torch.load(local_path, map_location=lambda storage, loc: storage) return model def main(args): state_dict = download_model(pretrained_models[args.image_size]) state_dict["pos_embed.proj.weight"] = state_dict["x_embedder.proj.weight"] state_dict["pos_embed.proj.bias"] = state_dict["x_embedder.proj.bias"] state_dict.pop("x_embedder.proj.weight") state_dict.pop("x_embedder.proj.bias") for depth in range(28): state_dict[f"transformer_blocks.{depth}.norm1.emb.timestep_embedder.linear_1.weight"] = state_dict[ "t_embedder.mlp.0.weight" ] state_dict[f"transformer_blocks.{depth}.norm1.emb.timestep_embedder.linear_1.bias"] = state_dict[ "t_embedder.mlp.0.bias" ] state_dict[f"transformer_blocks.{depth}.norm1.emb.timestep_embedder.linear_2.weight"] = state_dict[ "t_embedder.mlp.2.weight" ] state_dict[f"transformer_blocks.{depth}.norm1.emb.timestep_embedder.linear_2.bias"] = state_dict[ "t_embedder.mlp.2.bias" ] state_dict[f"transformer_blocks.{depth}.norm1.emb.class_embedder.embedding_table.weight"] = state_dict[ "y_embedder.embedding_table.weight" ] state_dict[f"transformer_blocks.{depth}.norm1.linear.weight"] = state_dict[ f"blocks.{depth}.adaLN_modulation.1.weight" ] state_dict[f"transformer_blocks.{depth}.norm1.linear.bias"] = state_dict[ f"blocks.{depth}.adaLN_modulation.1.bias" ] q, k, v = torch.chunk(state_dict[f"blocks.{depth}.attn.qkv.weight"], 3, dim=0) q_bias, k_bias, v_bias = torch.chunk(state_dict[f"blocks.{depth}.attn.qkv.bias"], 3, dim=0) state_dict[f"transformer_blocks.{depth}.attn1.to_q.weight"] = q state_dict[f"transformer_blocks.{depth}.attn1.to_q.bias"] = q_bias state_dict[f"transformer_blocks.{depth}.attn1.to_k.weight"] = k state_dict[f"transformer_blocks.{depth}.attn1.to_k.bias"] = k_bias state_dict[f"transformer_blocks.{depth}.attn1.to_v.weight"] = v state_dict[f"transformer_blocks.{depth}.attn1.to_v.bias"] = v_bias state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.weight"] = state_dict[ f"blocks.{depth}.attn.proj.weight" ] state_dict[f"transformer_blocks.{depth}.attn1.to_out.0.bias"] = state_dict[f"blocks.{depth}.attn.proj.bias"] state_dict[f"transformer_blocks.{depth}.ff.net.0.proj.weight"] = state_dict[f"blocks.{depth}.mlp.fc1.weight"] state_dict[f"transformer_blocks.{depth}.ff.net.0.proj.bias"] = state_dict[f"blocks.{depth}.mlp.fc1.bias"] state_dict[f"transformer_blocks.{depth}.ff.net.2.weight"] = state_dict[f"blocks.{depth}.mlp.fc2.weight"] state_dict[f"transformer_blocks.{depth}.ff.net.2.bias"] = state_dict[f"blocks.{depth}.mlp.fc2.bias"] state_dict.pop(f"blocks.{depth}.attn.qkv.weight") state_dict.pop(f"blocks.{depth}.attn.qkv.bias") state_dict.pop(f"blocks.{depth}.attn.proj.weight") state_dict.pop(f"blocks.{depth}.attn.proj.bias") state_dict.pop(f"blocks.{depth}.mlp.fc1.weight") state_dict.pop(f"blocks.{depth}.mlp.fc1.bias") state_dict.pop(f"blocks.{depth}.mlp.fc2.weight") state_dict.pop(f"blocks.{depth}.mlp.fc2.bias") state_dict.pop(f"blocks.{depth}.adaLN_modulation.1.weight") 
state_dict.pop(f"blocks.{depth}.adaLN_modulation.1.bias") state_dict.pop("t_embedder.mlp.0.weight") state_dict.pop("t_embedder.mlp.0.bias") state_dict.pop("t_embedder.mlp.2.weight") state_dict.pop("t_embedder.mlp.2.bias") state_dict.pop("y_embedder.embedding_table.weight") state_dict["proj_out_1.weight"] = state_dict["final_layer.adaLN_modulation.1.weight"] state_dict["proj_out_1.bias"] = state_dict["final_layer.adaLN_modulation.1.bias"] state_dict["proj_out_2.weight"] = state_dict["final_layer.linear.weight"] state_dict["proj_out_2.bias"] = state_dict["final_layer.linear.bias"] state_dict.pop("final_layer.linear.weight") state_dict.pop("final_layer.linear.bias") state_dict.pop("final_layer.adaLN_modulation.1.weight") state_dict.pop("final_layer.adaLN_modulation.1.bias") # DiT XL/2 transformer = Transformer2DModel( sample_size=args.image_size // 8, num_layers=28, attention_head_dim=72, in_channels=4, out_channels=8, patch_size=2, attention_bias=True, num_attention_heads=16, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False, ) transformer.load_state_dict(state_dict, strict=True) scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", prediction_type="epsilon", clip_sample=False, ) vae = AutoencoderKL.from_pretrained(args.vae_model) pipeline = DiTPipeline(transformer=transformer, vae=vae, scheduler=scheduler) if args.save: pipeline.save_pretrained(args.checkpoint_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--image_size", default=256, type=int, required=False, help="Image size of pretrained model, either 256 or 512.", ) parser.add_argument( "--vae_model", default="stabilityai/sd-vae-ft-ema", type=str, required=False, help="Path to pretrained VAE model, either stabilityai/sd-vae-ft-mse or stabilityai/sd-vae-ft-ema.", ) parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted pipeline or not." ) parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the output pipeline." ) args = parser.parse_args() main(args)
diffusers/scripts/convert_dit_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_dit_to_diffusers.py", "repo_id": "diffusers", "token_count": 3037 }
import argparse import os import torch from safetensors.torch import load_file from transformers import AutoModel, AutoTokenizer from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, LuminaNextDiT2DModel, LuminaText2ImgPipeline def main(args): # checkpoint from https://huggingface.co/Alpha-VLLM/Lumina-Next-SFT or https://huggingface.co/Alpha-VLLM/Lumina-Next-T2I all_sd = load_file(args.origin_ckpt_path, device="cpu") converted_state_dict = {} # pad token converted_state_dict["pad_token"] = all_sd["pad_token"] # patch embed converted_state_dict["patch_embedder.weight"] = all_sd["x_embedder.weight"] converted_state_dict["patch_embedder.bias"] = all_sd["x_embedder.bias"] # time and caption embed converted_state_dict["time_caption_embed.timestep_embedder.linear_1.weight"] = all_sd["t_embedder.mlp.0.weight"] converted_state_dict["time_caption_embed.timestep_embedder.linear_1.bias"] = all_sd["t_embedder.mlp.0.bias"] converted_state_dict["time_caption_embed.timestep_embedder.linear_2.weight"] = all_sd["t_embedder.mlp.2.weight"] converted_state_dict["time_caption_embed.timestep_embedder.linear_2.bias"] = all_sd["t_embedder.mlp.2.bias"] converted_state_dict["time_caption_embed.caption_embedder.0.weight"] = all_sd["cap_embedder.0.weight"] converted_state_dict["time_caption_embed.caption_embedder.0.bias"] = all_sd["cap_embedder.0.bias"] converted_state_dict["time_caption_embed.caption_embedder.1.weight"] = all_sd["cap_embedder.1.weight"] converted_state_dict["time_caption_embed.caption_embedder.1.bias"] = all_sd["cap_embedder.1.bias"] for i in range(24): # adaln converted_state_dict[f"layers.{i}.gate"] = all_sd[f"layers.{i}.attention.gate"] converted_state_dict[f"layers.{i}.adaLN_modulation.1.weight"] = all_sd[f"layers.{i}.adaLN_modulation.1.weight"] converted_state_dict[f"layers.{i}.adaLN_modulation.1.bias"] = all_sd[f"layers.{i}.adaLN_modulation.1.bias"] # qkv converted_state_dict[f"layers.{i}.attn1.to_q.weight"] = all_sd[f"layers.{i}.attention.wq.weight"] converted_state_dict[f"layers.{i}.attn1.to_k.weight"] = all_sd[f"layers.{i}.attention.wk.weight"] converted_state_dict[f"layers.{i}.attn1.to_v.weight"] = all_sd[f"layers.{i}.attention.wv.weight"] # cap converted_state_dict[f"layers.{i}.attn2.to_q.weight"] = all_sd[f"layers.{i}.attention.wq.weight"] converted_state_dict[f"layers.{i}.attn2.to_k.weight"] = all_sd[f"layers.{i}.attention.wk_y.weight"] converted_state_dict[f"layers.{i}.attn2.to_v.weight"] = all_sd[f"layers.{i}.attention.wv_y.weight"] # output converted_state_dict[f"layers.{i}.attn2.to_out.0.weight"] = all_sd[f"layers.{i}.attention.wo.weight"] # attention # qk norm converted_state_dict[f"layers.{i}.attn1.norm_q.weight"] = all_sd[f"layers.{i}.attention.q_norm.weight"] converted_state_dict[f"layers.{i}.attn1.norm_q.bias"] = all_sd[f"layers.{i}.attention.q_norm.bias"] converted_state_dict[f"layers.{i}.attn1.norm_k.weight"] = all_sd[f"layers.{i}.attention.k_norm.weight"] converted_state_dict[f"layers.{i}.attn1.norm_k.bias"] = all_sd[f"layers.{i}.attention.k_norm.bias"] converted_state_dict[f"layers.{i}.attn2.norm_q.weight"] = all_sd[f"layers.{i}.attention.q_norm.weight"] converted_state_dict[f"layers.{i}.attn2.norm_q.bias"] = all_sd[f"layers.{i}.attention.q_norm.bias"] converted_state_dict[f"layers.{i}.attn2.norm_k.weight"] = all_sd[f"layers.{i}.attention.ky_norm.weight"] converted_state_dict[f"layers.{i}.attn2.norm_k.bias"] = all_sd[f"layers.{i}.attention.ky_norm.bias"] # attention norm converted_state_dict[f"layers.{i}.attn_norm1.weight"] = 
all_sd[f"layers.{i}.attention_norm1.weight"] converted_state_dict[f"layers.{i}.attn_norm2.weight"] = all_sd[f"layers.{i}.attention_norm2.weight"] converted_state_dict[f"layers.{i}.norm1_context.weight"] = all_sd[f"layers.{i}.attention_y_norm.weight"] # feed forward converted_state_dict[f"layers.{i}.feed_forward.linear_1.weight"] = all_sd[f"layers.{i}.feed_forward.w1.weight"] converted_state_dict[f"layers.{i}.feed_forward.linear_2.weight"] = all_sd[f"layers.{i}.feed_forward.w2.weight"] converted_state_dict[f"layers.{i}.feed_forward.linear_3.weight"] = all_sd[f"layers.{i}.feed_forward.w3.weight"] # feed forward norm converted_state_dict[f"layers.{i}.ffn_norm1.weight"] = all_sd[f"layers.{i}.ffn_norm1.weight"] converted_state_dict[f"layers.{i}.ffn_norm2.weight"] = all_sd[f"layers.{i}.ffn_norm2.weight"] # final layer converted_state_dict["final_layer.linear.weight"] = all_sd["final_layer.linear.weight"] converted_state_dict["final_layer.linear.bias"] = all_sd["final_layer.linear.bias"] converted_state_dict["final_layer.adaLN_modulation.1.weight"] = all_sd["final_layer.adaLN_modulation.1.weight"] converted_state_dict["final_layer.adaLN_modulation.1.bias"] = all_sd["final_layer.adaLN_modulation.1.bias"] # Lumina-Next-SFT 2B transformer = LuminaNextDiT2DModel( sample_size=128, patch_size=2, in_channels=4, hidden_size=2304, num_layers=24, num_attention_heads=32, num_kv_heads=8, multiple_of=256, ffn_dim_multiplier=None, norm_eps=1e-5, learn_sigma=True, qk_norm=True, cross_attention_dim=2048, scaling_factor=1.0, ) transformer.load_state_dict(converted_state_dict, strict=True) num_model_params = sum(p.numel() for p in transformer.parameters()) print(f"Total number of transformer parameters: {num_model_params}") if args.only_transformer: transformer.save_pretrained(os.path.join(args.dump_path, "transformer")) else: scheduler = FlowMatchEulerDiscreteScheduler() vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", torch_dtype=torch.float32) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b") text_encoder = AutoModel.from_pretrained("google/gemma-2b") pipeline = LuminaText2ImgPipeline( tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, vae=vae, scheduler=scheduler ) pipeline.save_pretrained(args.dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--origin_ckpt_path", default=None, type=str, required=False, help="Path to the checkpoint to convert." ) parser.add_argument( "--image_size", default=1024, type=int, choices=[256, 512, 1024], required=False, help="Image size of pretrained model, either 512 or 1024.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output pipeline.") parser.add_argument("--only_transformer", default=True, type=bool, required=True) args = parser.parse_args() main(args)
diffusers/scripts/convert_lumina_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_lumina_to_diffusers.py", "repo_id": "diffusers", "token_count": 3156 }
import argparse from contextlib import nullcontext import safetensors.torch import torch from accelerate import init_empty_weights from diffusers import AutoencoderKL, SD3Transformer2DModel from diffusers.loaders.single_file_utils import convert_ldm_vae_checkpoint from diffusers.models.modeling_utils import load_model_dict_into_meta from diffusers.utils.import_utils import is_accelerate_available CTX = init_empty_weights if is_accelerate_available() else nullcontext parser = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", type=str) parser.add_argument("--output_path", type=str) parser.add_argument("--dtype", type=str) args = parser.parse_args() def load_original_checkpoint(ckpt_path): original_state_dict = safetensors.torch.load_file(ckpt_path) keys = list(original_state_dict.keys()) for k in keys: if "model.diffusion_model." in k: original_state_dict[k.replace("model.diffusion_model.", "")] = original_state_dict.pop(k) return original_state_dict # in SD3 original implementation of AdaLayerNormContinuous, it split linear projection output into shift, scale; # while in diffusers it split into scale, shift. Here we swap the linear projection weights in order to be able to use diffusers implementation def swap_scale_shift(weight, dim): shift, scale = weight.chunk(2, dim=0) new_weight = torch.cat([scale, shift], dim=0) return new_weight def convert_sd3_transformer_checkpoint_to_diffusers( original_state_dict, num_layers, caption_projection_dim, dual_attention_layers, has_qk_norm ): converted_state_dict = {} # Positional and patch embeddings. converted_state_dict["pos_embed.pos_embed"] = original_state_dict.pop("pos_embed") converted_state_dict["pos_embed.proj.weight"] = original_state_dict.pop("x_embedder.proj.weight") converted_state_dict["pos_embed.proj.bias"] = original_state_dict.pop("x_embedder.proj.bias") # Timestep embeddings. converted_state_dict["time_text_embed.timestep_embedder.linear_1.weight"] = original_state_dict.pop( "t_embedder.mlp.0.weight" ) converted_state_dict["time_text_embed.timestep_embedder.linear_1.bias"] = original_state_dict.pop( "t_embedder.mlp.0.bias" ) converted_state_dict["time_text_embed.timestep_embedder.linear_2.weight"] = original_state_dict.pop( "t_embedder.mlp.2.weight" ) converted_state_dict["time_text_embed.timestep_embedder.linear_2.bias"] = original_state_dict.pop( "t_embedder.mlp.2.bias" ) # Context projections. converted_state_dict["context_embedder.weight"] = original_state_dict.pop("context_embedder.weight") converted_state_dict["context_embedder.bias"] = original_state_dict.pop("context_embedder.bias") # Pooled context projection. converted_state_dict["time_text_embed.text_embedder.linear_1.weight"] = original_state_dict.pop( "y_embedder.mlp.0.weight" ) converted_state_dict["time_text_embed.text_embedder.linear_1.bias"] = original_state_dict.pop( "y_embedder.mlp.0.bias" ) converted_state_dict["time_text_embed.text_embedder.linear_2.weight"] = original_state_dict.pop( "y_embedder.mlp.2.weight" ) converted_state_dict["time_text_embed.text_embedder.linear_2.bias"] = original_state_dict.pop( "y_embedder.mlp.2.bias" ) # Transformer blocks 🎸. 
for i in range(num_layers): # Q, K, V sample_q, sample_k, sample_v = torch.chunk( original_state_dict.pop(f"joint_blocks.{i}.x_block.attn.qkv.weight"), 3, dim=0 ) context_q, context_k, context_v = torch.chunk( original_state_dict.pop(f"joint_blocks.{i}.context_block.attn.qkv.weight"), 3, dim=0 ) sample_q_bias, sample_k_bias, sample_v_bias = torch.chunk( original_state_dict.pop(f"joint_blocks.{i}.x_block.attn.qkv.bias"), 3, dim=0 ) context_q_bias, context_k_bias, context_v_bias = torch.chunk( original_state_dict.pop(f"joint_blocks.{i}.context_block.attn.qkv.bias"), 3, dim=0 ) converted_state_dict[f"transformer_blocks.{i}.attn.to_q.weight"] = torch.cat([sample_q]) converted_state_dict[f"transformer_blocks.{i}.attn.to_q.bias"] = torch.cat([sample_q_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.to_k.weight"] = torch.cat([sample_k]) converted_state_dict[f"transformer_blocks.{i}.attn.to_k.bias"] = torch.cat([sample_k_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.to_v.weight"] = torch.cat([sample_v]) converted_state_dict[f"transformer_blocks.{i}.attn.to_v.bias"] = torch.cat([sample_v_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.add_q_proj.weight"] = torch.cat([context_q]) converted_state_dict[f"transformer_blocks.{i}.attn.add_q_proj.bias"] = torch.cat([context_q_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.add_k_proj.weight"] = torch.cat([context_k]) converted_state_dict[f"transformer_blocks.{i}.attn.add_k_proj.bias"] = torch.cat([context_k_bias]) converted_state_dict[f"transformer_blocks.{i}.attn.add_v_proj.weight"] = torch.cat([context_v]) converted_state_dict[f"transformer_blocks.{i}.attn.add_v_proj.bias"] = torch.cat([context_v_bias]) # qk norm if has_qk_norm: converted_state_dict[f"transformer_blocks.{i}.attn.norm_q.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.attn.ln_q.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn.norm_k.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.attn.ln_k.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn.norm_added_q.weight"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.attn.ln_q.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn.norm_added_k.weight"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.attn.ln_k.weight" ) # output projections. 
converted_state_dict[f"transformer_blocks.{i}.attn.to_out.0.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn.to_out.0.bias"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.attn.proj.bias" ) if not (i == num_layers - 1): converted_state_dict[f"transformer_blocks.{i}.attn.to_add_out.weight"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.attn.proj.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn.to_add_out.bias"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.attn.proj.bias" ) # attn2 if i in dual_attention_layers: # Q, K, V sample_q2, sample_k2, sample_v2 = torch.chunk( original_state_dict.pop(f"joint_blocks.{i}.x_block.attn2.qkv.weight"), 3, dim=0 ) sample_q2_bias, sample_k2_bias, sample_v2_bias = torch.chunk( original_state_dict.pop(f"joint_blocks.{i}.x_block.attn2.qkv.bias"), 3, dim=0 ) converted_state_dict[f"transformer_blocks.{i}.attn2.to_q.weight"] = torch.cat([sample_q2]) converted_state_dict[f"transformer_blocks.{i}.attn2.to_q.bias"] = torch.cat([sample_q2_bias]) converted_state_dict[f"transformer_blocks.{i}.attn2.to_k.weight"] = torch.cat([sample_k2]) converted_state_dict[f"transformer_blocks.{i}.attn2.to_k.bias"] = torch.cat([sample_k2_bias]) converted_state_dict[f"transformer_blocks.{i}.attn2.to_v.weight"] = torch.cat([sample_v2]) converted_state_dict[f"transformer_blocks.{i}.attn2.to_v.bias"] = torch.cat([sample_v2_bias]) # qk norm if has_qk_norm: converted_state_dict[f"transformer_blocks.{i}.attn2.norm_q.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.attn2.ln_q.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn2.norm_k.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.attn2.ln_k.weight" ) # output projections. converted_state_dict[f"transformer_blocks.{i}.attn2.to_out.0.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.attn2.proj.weight" ) converted_state_dict[f"transformer_blocks.{i}.attn2.to_out.0.bias"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.attn2.proj.bias" ) # norms. converted_state_dict[f"transformer_blocks.{i}.norm1.linear.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.adaLN_modulation.1.weight" ) converted_state_dict[f"transformer_blocks.{i}.norm1.linear.bias"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.adaLN_modulation.1.bias" ) if not (i == num_layers - 1): converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.weight"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.adaLN_modulation.1.weight" ) converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.bias"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.adaLN_modulation.1.bias" ) else: converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.weight"] = swap_scale_shift( original_state_dict.pop(f"joint_blocks.{i}.context_block.adaLN_modulation.1.weight"), dim=caption_projection_dim, ) converted_state_dict[f"transformer_blocks.{i}.norm1_context.linear.bias"] = swap_scale_shift( original_state_dict.pop(f"joint_blocks.{i}.context_block.adaLN_modulation.1.bias"), dim=caption_projection_dim, ) # ffs. 
converted_state_dict[f"transformer_blocks.{i}.ff.net.0.proj.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.mlp.fc1.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff.net.0.proj.bias"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.mlp.fc1.bias" ) converted_state_dict[f"transformer_blocks.{i}.ff.net.2.weight"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.mlp.fc2.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff.net.2.bias"] = original_state_dict.pop( f"joint_blocks.{i}.x_block.mlp.fc2.bias" ) if not (i == num_layers - 1): converted_state_dict[f"transformer_blocks.{i}.ff_context.net.0.proj.weight"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.mlp.fc1.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff_context.net.0.proj.bias"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.mlp.fc1.bias" ) converted_state_dict[f"transformer_blocks.{i}.ff_context.net.2.weight"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.mlp.fc2.weight" ) converted_state_dict[f"transformer_blocks.{i}.ff_context.net.2.bias"] = original_state_dict.pop( f"joint_blocks.{i}.context_block.mlp.fc2.bias" ) # Final blocks. converted_state_dict["proj_out.weight"] = original_state_dict.pop("final_layer.linear.weight") converted_state_dict["proj_out.bias"] = original_state_dict.pop("final_layer.linear.bias") converted_state_dict["norm_out.linear.weight"] = swap_scale_shift( original_state_dict.pop("final_layer.adaLN_modulation.1.weight"), dim=caption_projection_dim ) converted_state_dict["norm_out.linear.bias"] = swap_scale_shift( original_state_dict.pop("final_layer.adaLN_modulation.1.bias"), dim=caption_projection_dim ) return converted_state_dict def is_vae_in_checkpoint(original_state_dict): return ("first_stage_model.decoder.conv_in.weight" in original_state_dict) and ( "first_stage_model.encoder.conv_in.weight" in original_state_dict ) def get_attn2_layers(state_dict): attn2_layers = [] for key in state_dict.keys(): if "attn2." in key: # Extract the layer number from the key layer_num = int(key.split(".")[1]) attn2_layers.append(layer_num) return tuple(sorted(set(attn2_layers))) def get_pos_embed_max_size(state_dict): num_patches = state_dict["pos_embed"].shape[1] pos_embed_max_size = int(num_patches**0.5) return pos_embed_max_size def get_caption_projection_dim(state_dict): caption_projection_dim = state_dict["context_embedder.weight"].shape[0] return caption_projection_dim def main(args): original_ckpt = load_original_checkpoint(args.checkpoint_path) original_dtype = next(iter(original_ckpt.values())).dtype # Initialize dtype with a default value dtype = None if args.dtype is None: dtype = original_dtype elif args.dtype == "fp16": dtype = torch.float16 elif args.dtype == "bf16": dtype = torch.bfloat16 elif args.dtype == "fp32": dtype = torch.float32 else: raise ValueError(f"Unsupported dtype: {args.dtype}") if dtype != original_dtype: print( f"Checkpoint dtype {original_dtype} does not match requested dtype {dtype}. This can lead to unexpected results, proceed with caution." 
) num_layers = list(set(int(k.split(".", 2)[1]) for k in original_ckpt if "joint_blocks" in k))[-1] + 1 # noqa: C401 caption_projection_dim = get_caption_projection_dim(original_ckpt) # () for sd3.0; (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12) for sd3.5 attn2_layers = get_attn2_layers(original_ckpt) # sd3.5 uses qk norm ("rms_norm") has_qk_norm = any("ln_q" in key for key in original_ckpt.keys()) # sd3.5 2b uses pos_embed_max_size=384, while sd3.0 and sd3.5 8b use 192 pos_embed_max_size = get_pos_embed_max_size(original_ckpt) converted_transformer_state_dict = convert_sd3_transformer_checkpoint_to_diffusers( original_ckpt, num_layers, caption_projection_dim, attn2_layers, has_qk_norm ) with CTX(): transformer = SD3Transformer2DModel( sample_size=128, patch_size=2, in_channels=16, joint_attention_dim=4096, num_layers=num_layers, caption_projection_dim=caption_projection_dim, num_attention_heads=num_layers, pos_embed_max_size=pos_embed_max_size, qk_norm="rms_norm" if has_qk_norm else None, dual_attention_layers=attn2_layers, ) if is_accelerate_available(): load_model_dict_into_meta(transformer, converted_transformer_state_dict) else: transformer.load_state_dict(converted_transformer_state_dict, strict=True) print("Saving SD3 Transformer in Diffusers format.") transformer.to(dtype).save_pretrained(f"{args.output_path}/transformer") if is_vae_in_checkpoint(original_ckpt): with CTX(): vae = AutoencoderKL.from_config( "stabilityai/stable-diffusion-xl-base-1.0", subfolder="vae", latent_channels=16, use_post_quant_conv=False, use_quant_conv=False, scaling_factor=1.5305, shift_factor=0.0609, ) converted_vae_state_dict = convert_ldm_vae_checkpoint(original_ckpt, vae.config) if is_accelerate_available(): load_model_dict_into_meta(vae, converted_vae_state_dict) else: vae.load_state_dict(converted_vae_state_dict, strict=True) print("Saving SD3 Autoencoder in Diffusers format.") vae.to(dtype).save_pretrained(f"{args.output_path}/vae") if __name__ == "__main__": main(args)
diffusers/scripts/convert_sd3_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_sd3_to_diffusers.py", "repo_id": "diffusers", "token_count": 7598 }
# Run inside root directory of official source code: https://github.com/dome272/wuerstchen/ import os import torch from transformers import AutoTokenizer, CLIPTextModel from vqgan import VQModel from diffusers import ( DDPMWuerstchenScheduler, WuerstchenCombinedPipeline, WuerstchenDecoderPipeline, WuerstchenPriorPipeline, ) from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt, WuerstchenPrior model_path = "models/" device = "cpu" paella_vqmodel = VQModel() state_dict = torch.load(os.path.join(model_path, "vqgan_f4_v1_500k.pt"), map_location=device)["state_dict"] paella_vqmodel.load_state_dict(state_dict) state_dict["vquantizer.embedding.weight"] = state_dict["vquantizer.codebook.weight"] state_dict.pop("vquantizer.codebook.weight") vqmodel = PaellaVQModel(num_vq_embeddings=paella_vqmodel.codebook_size, latent_channels=paella_vqmodel.c_latent) vqmodel.load_state_dict(state_dict) # Clip Text encoder and tokenizer text_encoder = CLIPTextModel.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") tokenizer = AutoTokenizer.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") # Generator gen_text_encoder = CLIPTextModel.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K").to("cpu") gen_tokenizer = AutoTokenizer.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K") orig_state_dict = torch.load(os.path.join(model_path, "model_v2_stage_b.pt"), map_location=device)["state_dict"] state_dict = {} for key in orig_state_dict.keys(): if key.endswith("in_proj_weight"): weights = orig_state_dict[key].chunk(3, 0) state_dict[key.replace("attn.in_proj_weight", "to_q.weight")] = weights[0] state_dict[key.replace("attn.in_proj_weight", "to_k.weight")] = weights[1] state_dict[key.replace("attn.in_proj_weight", "to_v.weight")] = weights[2] elif key.endswith("in_proj_bias"): weights = orig_state_dict[key].chunk(3, 0) state_dict[key.replace("attn.in_proj_bias", "to_q.bias")] = weights[0] state_dict[key.replace("attn.in_proj_bias", "to_k.bias")] = weights[1] state_dict[key.replace("attn.in_proj_bias", "to_v.bias")] = weights[2] elif key.endswith("out_proj.weight"): weights = orig_state_dict[key] state_dict[key.replace("attn.out_proj.weight", "to_out.0.weight")] = weights elif key.endswith("out_proj.bias"): weights = orig_state_dict[key] state_dict[key.replace("attn.out_proj.bias", "to_out.0.bias")] = weights else: state_dict[key] = orig_state_dict[key] deocder = WuerstchenDiffNeXt() deocder.load_state_dict(state_dict) # Prior orig_state_dict = torch.load(os.path.join(model_path, "model_v3_stage_c.pt"), map_location=device)["ema_state_dict"] state_dict = {} for key in orig_state_dict.keys(): if key.endswith("in_proj_weight"): weights = orig_state_dict[key].chunk(3, 0) state_dict[key.replace("attn.in_proj_weight", "to_q.weight")] = weights[0] state_dict[key.replace("attn.in_proj_weight", "to_k.weight")] = weights[1] state_dict[key.replace("attn.in_proj_weight", "to_v.weight")] = weights[2] elif key.endswith("in_proj_bias"): weights = orig_state_dict[key].chunk(3, 0) state_dict[key.replace("attn.in_proj_bias", "to_q.bias")] = weights[0] state_dict[key.replace("attn.in_proj_bias", "to_k.bias")] = weights[1] state_dict[key.replace("attn.in_proj_bias", "to_v.bias")] = weights[2] elif key.endswith("out_proj.weight"): weights = orig_state_dict[key] state_dict[key.replace("attn.out_proj.weight", "to_out.0.weight")] = weights elif key.endswith("out_proj.bias"): weights = orig_state_dict[key] state_dict[key.replace("attn.out_proj.bias", "to_out.0.bias")] = weights else: 
state_dict[key] = orig_state_dict[key] prior_model = WuerstchenPrior(c_in=16, c=1536, c_cond=1280, c_r=64, depth=32, nhead=24).to(device) prior_model.load_state_dict(state_dict) # scheduler scheduler = DDPMWuerstchenScheduler() # Prior pipeline prior_pipeline = WuerstchenPriorPipeline( prior=prior_model, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler ) prior_pipeline.save_pretrained("warp-ai/wuerstchen-prior") decoder_pipeline = WuerstchenDecoderPipeline( text_encoder=gen_text_encoder, tokenizer=gen_tokenizer, vqgan=vqmodel, decoder=deocder, scheduler=scheduler ) decoder_pipeline.save_pretrained("warp-ai/wuerstchen") # Wuerstchen pipeline wuerstchen_pipeline = WuerstchenCombinedPipeline( # Decoder text_encoder=gen_text_encoder, tokenizer=gen_tokenizer, decoder=deocder, scheduler=scheduler, vqgan=vqmodel, # Prior prior_tokenizer=tokenizer, prior_text_encoder=text_encoder, prior=prior_model, prior_scheduler=scheduler, ) wuerstchen_pipeline.save_pretrained("warp-ai/WuerstchenCombinedPipeline")
diffusers/scripts/convert_wuerstchen.py/0
{ "file_path": "diffusers/scripts/convert_wuerstchen.py", "repo_id": "diffusers", "token_count": 2171 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, List, Optional, Union import safetensors import torch from huggingface_hub.utils import validate_hf_hub_args from torch import nn from ..models.modeling_utils import load_state_dict from ..utils import _get_model_file, is_accelerate_available, is_transformers_available, logging if is_transformers_available(): from transformers import PreTrainedModel, PreTrainedTokenizer if is_accelerate_available(): from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module logger = logging.get_logger(__name__) TEXT_INVERSION_NAME = "learned_embeds.bin" TEXT_INVERSION_NAME_SAFE = "learned_embeds.safetensors" @validate_hf_hub_args def load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs): cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", None) hf_token = kwargs.pop("hf_token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", None) weight_name = kwargs.pop("weight_name", None) use_safetensors = kwargs.pop("use_safetensors", None) allow_pickle = False if use_safetensors is None: use_safetensors = True allow_pickle = True user_agent = { "file_type": "text_inversion", "framework": "pytorch", } state_dicts = [] for pretrained_model_name_or_path in pretrained_model_name_or_paths: if not isinstance(pretrained_model_name_or_path, (dict, torch.Tensor)): # 3.1. Load textual inversion file model_file = None # Let's first try to load .safetensors weights if (use_safetensors and weight_name is None) or ( weight_name is not None and weight_name.endswith(".safetensors") ): try: model_file = _get_model_file( pretrained_model_name_or_path, weights_name=weight_name or TEXT_INVERSION_NAME_SAFE, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=hf_token, revision=revision, subfolder=subfolder, user_agent=user_agent, ) state_dict = safetensors.torch.load_file(model_file, device="cpu") except Exception as e: if not allow_pickle: raise e model_file = None if model_file is None: model_file = _get_model_file( pretrained_model_name_or_path, weights_name=weight_name or TEXT_INVERSION_NAME, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=hf_token, revision=revision, subfolder=subfolder, user_agent=user_agent, ) state_dict = load_state_dict(model_file) else: state_dict = pretrained_model_name_or_path state_dicts.append(state_dict) return state_dicts class TextualInversionLoaderMixin: r""" Load Textual Inversion tokens and embeddings to the tokenizer and text encoder. 
""" def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"): # noqa: F821 r""" Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual inversion token or if the textual inversion token is a single vector, the input prompt is returned. Parameters: prompt (`str` or list of `str`): The prompt or prompts to guide the image generation. tokenizer (`PreTrainedTokenizer`): The tokenizer responsible for encoding the prompt into input tokens. Returns: `str` or list of `str`: The converted prompt """ if not isinstance(prompt, List): prompts = [prompt] else: prompts = prompt prompts = [self._maybe_convert_prompt(p, tokenizer) for p in prompts] if not isinstance(prompt, List): return prompts[0] return prompts def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"): # noqa: F821 r""" Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds to a multi-vector textual inversion embedding, this function will process the prompt so that the special token is replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual inversion token or a textual inversion token that is a single vector, the input prompt is simply returned. Parameters: prompt (`str`): The prompt to guide the image generation. tokenizer (`PreTrainedTokenizer`): The tokenizer responsible for encoding the prompt into input tokens. Returns: `str`: The converted prompt """ tokens = tokenizer.tokenize(prompt) unique_tokens = set(tokens) for token in unique_tokens: if token in tokenizer.added_tokens_encoder: replacement = token i = 1 while f"{token}_{i}" in tokenizer.added_tokens_encoder: replacement += f" {token}_{i}" i += 1 prompt = prompt.replace(token, replacement) return prompt def _check_text_inv_inputs(self, tokenizer, text_encoder, pretrained_model_name_or_paths, tokens): if tokenizer is None: raise ValueError( f"{self.__class__.__name__} requires `self.tokenizer` or passing a `tokenizer` of type `PreTrainedTokenizer` for calling" f" `{self.load_textual_inversion.__name__}`" ) if text_encoder is None: raise ValueError( f"{self.__class__.__name__} requires `self.text_encoder` or passing a `text_encoder` of type `PreTrainedModel` for calling" f" `{self.load_textual_inversion.__name__}`" ) if len(pretrained_model_name_or_paths) > 1 and len(pretrained_model_name_or_paths) != len(tokens): raise ValueError( f"You have passed a list of models of length {len(pretrained_model_name_or_paths)}, and list of tokens of length {len(tokens)} " f"Make sure both lists have the same length." ) valid_tokens = [t for t in tokens if t is not None] if len(set(valid_tokens)) < len(valid_tokens): raise ValueError(f"You have passed a list of tokens that contains duplicates: {tokens}") @staticmethod def _retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer): all_tokens = [] all_embeddings = [] for state_dict, token in zip(state_dicts, tokens): if isinstance(state_dict, torch.Tensor): if token is None: raise ValueError( "You are trying to load a textual inversion embedding that has been saved as a PyTorch tensor. Make sure to pass the name of the corresponding token in this case: `token=...`." 
) loaded_token = token embedding = state_dict elif len(state_dict) == 1: # diffusers loaded_token, embedding = next(iter(state_dict.items())) elif "string_to_param" in state_dict: # A1111 loaded_token = state_dict["name"] embedding = state_dict["string_to_param"]["*"] else: raise ValueError( f"Loaded state dictionary is incorrect: {state_dict}. \n\n" "Please verify that the loaded state dictionary of the textual embedding either only has a single key or includes the `string_to_param`" " input key." ) if token is not None and loaded_token != token: logger.info(f"The loaded token: {loaded_token} is overwritten by the passed token {token}.") else: token = loaded_token if token in tokenizer.get_vocab(): raise ValueError( f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder." ) all_tokens.append(token) all_embeddings.append(embedding) return all_tokens, all_embeddings @staticmethod def _extend_tokens_and_embeddings(tokens, embeddings, tokenizer): all_tokens = [] all_embeddings = [] for embedding, token in zip(embeddings, tokens): if f"{token}_1" in tokenizer.get_vocab(): multi_vector_tokens = [token] i = 1 while f"{token}_{i}" in tokenizer.added_tokens_encoder: multi_vector_tokens.append(f"{token}_{i}") i += 1 raise ValueError( f"Multi-vector Token {multi_vector_tokens} already in tokenizer vocabulary. Please choose a different token name or remove the {multi_vector_tokens} and embedding from the tokenizer and text encoder." ) is_multi_vector = len(embedding.shape) > 1 and embedding.shape[0] > 1 if is_multi_vector: all_tokens += [token] + [f"{token}_{i}" for i in range(1, embedding.shape[0])] all_embeddings += [e for e in embedding] # noqa: C416 else: all_tokens += [token] all_embeddings += [embedding[0]] if len(embedding.shape) > 1 else [embedding] return all_tokens, all_embeddings @validate_hf_hub_args def load_textual_inversion( self, pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]], token: Optional[Union[str, List[str]]] = None, tokenizer: Optional["PreTrainedTokenizer"] = None, # noqa: F821 text_encoder: Optional["PreTrainedModel"] = None, # noqa: F821 **kwargs, ): r""" Load Textual Inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and Automatic1111 formats are supported). Parameters: pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`): Can be either one of the following or a list of them: - A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a pretrained model hosted on the Hub. - A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual inversion weights. - A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights. - A [torch state dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict). token (`str` or `List[str]`, *optional*): Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a list, then `token` must also be a list of equal length. text_encoder ([`~transformers.CLIPTextModel`], *optional*): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). If not specified, function will take self.tokenizer. tokenizer ([`~transformers.CLIPTokenizer`], *optional*): A `CLIPTokenizer` to tokenize text. 
If not specified, function will take self.tokenizer. weight_name (`str`, *optional*): Name of a custom weight file. This should be used when: - The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight name such as `text_inv.bin`. - The saved textual inversion file is in the Automatic1111 format. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to `True`, the model won't be downloaded from the Hub. hf_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. subfolder (`str`, *optional*, defaults to `""`): The subfolder location of a model file within a larger model repository on the Hub or locally. mirror (`str`, *optional*): Mirror source to resolve accessibility issues if you're downloading a model in China. We do not guarantee the timeliness or safety of the source, and you should refer to the mirror site for more information. Example: To load a Textual Inversion embedding vector in 🤗 Diffusers format: ```py from diffusers import StableDiffusionPipeline import torch model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") pipe.load_textual_inversion("sd-concepts-library/cat-toy") prompt = "A <cat-toy> backpack" image = pipe(prompt, num_inference_steps=50).images[0] image.save("cat-backpack.png") ``` To load a Textual Inversion embedding vector in Automatic1111 format, make sure to download the vector first (for example from [civitAI](https://civitai.com/models/3036?modelVersionId=9857)) and then load the vector locally: ```py from diffusers import StableDiffusionPipeline import torch model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2") prompt = "charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details." image = pipe(prompt, num_inference_steps=50).images[0] image.save("character.png") ``` """ # 1. Set correct tokenizer and text encoder tokenizer = tokenizer or getattr(self, "tokenizer", None) text_encoder = text_encoder or getattr(self, "text_encoder", None) # 2. 
Normalize inputs pretrained_model_name_or_paths = ( [pretrained_model_name_or_path] if not isinstance(pretrained_model_name_or_path, list) else pretrained_model_name_or_path ) tokens = [token] if not isinstance(token, list) else token if tokens[0] is None: tokens = tokens * len(pretrained_model_name_or_paths) # 3. Check inputs self._check_text_inv_inputs(tokenizer, text_encoder, pretrained_model_name_or_paths, tokens) # 4. Load state dicts of textual embeddings state_dicts = load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs) # 4.1 Handle the special case when state_dict is a tensor that contains n embeddings for n tokens if len(tokens) > 1 and len(state_dicts) == 1: if isinstance(state_dicts[0], torch.Tensor): state_dicts = list(state_dicts[0]) if len(tokens) != len(state_dicts): raise ValueError( f"You have passed a state_dict contains {len(state_dicts)} embeddings, and list of tokens of length {len(tokens)} " f"Make sure both have the same length." ) # 4. Retrieve tokens and embeddings tokens, embeddings = self._retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer) # 5. Extend tokens and embeddings for multi vector tokens, embeddings = self._extend_tokens_and_embeddings(tokens, embeddings, tokenizer) # 6. Make sure all embeddings have the correct size expected_emb_dim = text_encoder.get_input_embeddings().weight.shape[-1] if any(expected_emb_dim != emb.shape[-1] for emb in embeddings): raise ValueError( "Loaded embeddings are of incorrect shape. Expected each textual inversion embedding " "to be of shape {input_embeddings.shape[-1]}, but are {embeddings.shape[-1]} " ) # 7. Now we can be sure that loading the embedding matrix works # < Unsafe code: # 7.1 Offload all hooks in case the pipeline was cpu offloaded before make sure, we offload and onload again is_model_cpu_offload = False is_sequential_cpu_offload = False if self.hf_device_map is None: for _, component in self.components.items(): if isinstance(component, nn.Module): if hasattr(component, "_hf_hook"): is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload) is_sequential_cpu_offload = ( isinstance(getattr(component, "_hf_hook"), AlignDevicesHook) or hasattr(component._hf_hook, "hooks") and isinstance(component._hf_hook.hooks[0], AlignDevicesHook) ) logger.info( "Accelerate hooks detected. Since you have called `load_textual_inversion()`, the previous hooks will be first removed. Then the textual inversion parameters will be loaded and the hooks will be applied again." 
) remove_hook_from_module(component, recurse=is_sequential_cpu_offload) # 7.2 save expected device and dtype device = text_encoder.device dtype = text_encoder.dtype # 7.3 Increase token embedding matrix text_encoder.resize_token_embeddings(len(tokenizer) + len(tokens)) input_embeddings = text_encoder.get_input_embeddings().weight # 7.4 Load token and embedding for token, embedding in zip(tokens, embeddings): # add tokens and get ids tokenizer.add_tokens(token) token_id = tokenizer.convert_tokens_to_ids(token) input_embeddings.data[token_id] = embedding logger.info(f"Loaded textual inversion embedding for {token}.") input_embeddings.to(dtype=dtype, device=device) # 7.5 Offload the model again if is_model_cpu_offload: self.enable_model_cpu_offload() elif is_sequential_cpu_offload: self.enable_sequential_cpu_offload() # / Unsafe Code > def unload_textual_inversion( self, tokens: Optional[Union[str, List[str]]] = None, tokenizer: Optional["PreTrainedTokenizer"] = None, text_encoder: Optional["PreTrainedModel"] = None, ): r""" Unload Textual Inversion embeddings from the text encoder of [`StableDiffusionPipeline`] Example: ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") # Example 1 pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork") pipeline.load_textual_inversion("sd-concepts-library/moeb-style") # Remove all token embeddings pipeline.unload_textual_inversion() # Example 2 pipeline.load_textual_inversion("sd-concepts-library/moeb-style") pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork") # Remove just one token pipeline.unload_textual_inversion("<moe-bius>") # Example 3: unload from SDXL pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") embedding_path = hf_hub_download( repo_id="linoyts/web_y2k", filename="web_y2k_emb.safetensors", repo_type="model" ) # load embeddings to the text encoders state_dict = load_file(embedding_path) # load embeddings of text_encoder 1 (CLIP ViT-L/14) pipeline.load_textual_inversion( state_dict["clip_l"], tokens=["<s0>", "<s1>"], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer, ) # load embeddings of text_encoder 2 (CLIP ViT-G/14) pipeline.load_textual_inversion( state_dict["clip_g"], tokens=["<s0>", "<s1>"], text_encoder=pipeline.text_encoder_2, tokenizer=pipeline.tokenizer_2, ) # Unload explicitly from both text encoders and tokenizers pipeline.unload_textual_inversion( tokens=["<s0>", "<s1>"], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer ) pipeline.unload_textual_inversion( tokens=["<s0>", "<s1>"], text_encoder=pipeline.text_encoder_2, tokenizer=pipeline.tokenizer_2 ) ``` """ tokenizer = tokenizer or getattr(self, "tokenizer", None) text_encoder = text_encoder or getattr(self, "text_encoder", None) # Get textual inversion tokens and ids token_ids = [] last_special_token_id = None if tokens: if isinstance(tokens, str): tokens = [tokens] for added_token_id, added_token in tokenizer.added_tokens_decoder.items(): if not added_token.special: if added_token.content in tokens: token_ids.append(added_token_id) else: last_special_token_id = added_token_id if len(token_ids) == 0: raise ValueError("No tokens to remove found") else: tokens = [] for added_token_id, added_token in tokenizer.added_tokens_decoder.items(): if not added_token.special: token_ids.append(added_token_id) tokens.append(added_token.content) else: last_special_token_id = 
added_token_id # Delete from tokenizer for token_id, token_to_remove in zip(token_ids, tokens): del tokenizer._added_tokens_decoder[token_id] del tokenizer._added_tokens_encoder[token_to_remove] # Make all token ids sequential in tokenizer key_id = 1 for token_id in tokenizer.added_tokens_decoder: if token_id > last_special_token_id and token_id > last_special_token_id + key_id: token = tokenizer._added_tokens_decoder[token_id] tokenizer._added_tokens_decoder[last_special_token_id + key_id] = token del tokenizer._added_tokens_decoder[token_id] tokenizer._added_tokens_encoder[token.content] = last_special_token_id + key_id key_id += 1 tokenizer._update_trie() # set correct total vocab size after removing tokens tokenizer._update_total_vocab_size() # Delete from text encoder text_embedding_dim = text_encoder.get_input_embeddings().embedding_dim temp_text_embedding_weights = text_encoder.get_input_embeddings().weight text_embedding_weights = temp_text_embedding_weights[: last_special_token_id + 1] to_append = [] for i in range(last_special_token_id + 1, temp_text_embedding_weights.shape[0]): if i not in token_ids: to_append.append(temp_text_embedding_weights[i].unsqueeze(0)) if len(to_append) > 0: to_append = torch.cat(to_append, dim=0) text_embedding_weights = torch.cat([text_embedding_weights, to_append], dim=0) text_embeddings_filtered = nn.Embedding(text_embedding_weights.shape[0], text_embedding_dim) text_embeddings_filtered.weight.data = text_embedding_weights text_encoder.set_input_embeddings(text_embeddings_filtered)
diffusers/src/diffusers/loaders/textual_inversion.py/0
{ "file_path": "diffusers/src/diffusers/loaders/textual_inversion.py", "repo_id": "diffusers", "token_count": 12077 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...loaders import PeftAdapterMixin from ...loaders.single_file_model import FromOriginalModelMixin from ...utils import deprecate from ...utils.accelerate_utils import apply_forward_hook from ..attention_processor import ( ADDED_KV_ATTENTION_PROCESSORS, CROSS_ATTENTION_PROCESSORS, Attention, AttentionProcessor, AttnAddedKVProcessor, AttnProcessor, FusedAttnProcessor2_0, ) from ..modeling_outputs import AutoencoderKLOutput from ..modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalModelMixin, PeftAdapterMixin): r""" A VAE model with KL loss for encoding images into latents and decoding latent representations into images. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: in_channels (int, *optional*, defaults to 3): Number of channels in the input image. out_channels (int, *optional*, defaults to 3): Number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): Tuple of downsample block types. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`): Tuple of upsample block types. block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): Tuple of block output channels. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space. sample_size (`int`, *optional*, defaults to `32`): Sample input size. scaling_factor (`float`, *optional*, defaults to 0.18215): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. This is used to scale the latent space to have unit variance when training the diffusion model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. force_upcast (`bool`, *optional*, default to `True`): If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. 
VAE can be fine-tuned / trained to a lower range without loosing too much precision in which case `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix mid_block_add_attention (`bool`, *optional*, default to `True`): If enabled, the mid_block of the Encoder and Decoder will have attention blocks. If set to false, the mid_block will only have resnet blocks """ _supports_gradient_checkpointing = True _no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D"] @register_to_config def __init__( self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215, shift_factor: Optional[float] = None, latents_mean: Optional[Tuple[float]] = None, latents_std: Optional[Tuple[float]] = None, force_upcast: float = True, use_quant_conv: bool = True, use_post_quant_conv: bool = True, mid_block_add_attention: bool = True, ): super().__init__() # pass init params to Encoder self.encoder = Encoder( in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, mid_block_add_attention=mid_block_add_attention, ) # pass init params to Decoder self.decoder = Decoder( in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, mid_block_add_attention=mid_block_add_attention, ) self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) if use_quant_conv else None self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1) if use_post_quant_conv else None self.use_slicing = False self.use_tiling = False # only relevant if vae tiling is enabled self.tile_sample_min_size = self.config.sample_size sample_size = ( self.config.sample_size[0] if isinstance(self.config.sample_size, (list, tuple)) else self.config.sample_size ) self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1))) self.tile_overlap_factor = 0.25 def enable_tiling(self, use_tiling: bool = True): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.use_tiling = use_tiling def disable_tiling(self): r""" Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.enable_tiling(False) def enable_slicing(self): r""" Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.use_slicing = True def disable_slicing(self): r""" Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing decoding in one step. 
""" self.use_slicing = False @property # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "get_processor"): processors[f"{name}.processor"] = module.get_processor() for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnAddedKVProcessor() elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): processor = AttnProcessor() else: raise ValueError( f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" ) self.set_attn_processor(processor) def _encode(self, x: torch.Tensor) -> torch.Tensor: batch_size, num_channels, height, width = x.shape if self.use_tiling and (width > self.tile_sample_min_size or height > self.tile_sample_min_size): return self._tiled_encode(x) enc = self.encoder(x) if self.quant_conv is not None: enc = self.quant_conv(enc) return enc @apply_forward_hook def encode( self, x: torch.Tensor, return_dict: bool = True ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: """ Encode a batch of images into latents. Args: x (`torch.Tensor`): Input batch of images. 
return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: The latent representations of the encoded images. If `return_dict` is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and x.shape[0] > 1: encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)] h = torch.cat(encoded_slices) else: h = self._encode(x) posterior = DiagonalGaussianDistribution(h) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(z, return_dict=return_dict) if self.post_quant_conv is not None: z = self.post_quant_conv(z) dec = self.decoder(z) if not return_dict: return (dec,) return DecoderOutput(sample=dec) @apply_forward_hook def decode( self, z: torch.FloatTensor, return_dict: bool = True, generator=None ) -> Union[DecoderOutput, torch.FloatTensor]: """ Decode a batch of images. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ if self.use_slicing and z.shape[0] > 1: decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] decoded = torch.cat(decoded_slices) else: decoded = self._decode(z).sample if not return_dict: return (decoded,) return DecoderOutput(sample=decoded) def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[2], b.shape[2], blend_extent) for y in range(blend_extent): b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: blend_extent = min(a.shape[3], b.shape[3], blend_extent) for x in range(blend_extent): b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def _tiled_encode(self, x: torch.Tensor) -> torch.Tensor: r"""Encode a batch of images using a tiled encoder. When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the output, but they should be much less noticeable. Args: x (`torch.Tensor`): Input batch of images. Returns: `torch.Tensor`: The latent representation of the encoded videos. """ overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) row_limit = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
rows = [] for i in range(0, x.shape[2], overlap_size): row = [] for j in range(0, x.shape[3], overlap_size): tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] tile = self.encoder(tile) if self.config.use_quant_conv: tile = self.quant_conv(tile) row.append(tile) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) enc = torch.cat(result_rows, dim=2) return enc def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> AutoencoderKLOutput: r"""Encode a batch of images using a tiled encoder. When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the output, but they should be much less noticeable. Args: x (`torch.Tensor`): Input batch of images. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. Returns: [`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`: If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. """ deprecation_message = ( "The tiled_encode implementation supporting the `return_dict` parameter is deprecated. In the future, the " "implementation of this method will be replaced with that of `_tiled_encode` and you will no longer be able " "to pass `return_dict`. You will also have to create a `DiagonalGaussianDistribution()` from the returned value." ) deprecate("tiled_encode", "1.0.0", deprecation_message, standard_warn=False) overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) row_limit = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
rows = [] for i in range(0, x.shape[2], overlap_size): row = [] for j in range(0, x.shape[3], overlap_size): tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] tile = self.encoder(tile) if self.config.use_quant_conv: tile = self.quant_conv(tile) row.append(tile) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) moments = torch.cat(result_rows, dim=2) posterior = DiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=posterior) def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: r""" Decode a batch of images using a tiled decoder. Args: z (`torch.Tensor`): Input batch of latent vectors. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor) row_limit = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. rows = [] for i in range(0, z.shape[2], overlap_size): row = [] for j in range(0, z.shape[3], overlap_size): tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] if self.config.use_post_quant_conv: tile = self.post_quant_conv(tile) decoded = self.decoder(tile) row.append(decoded) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=3)) dec = torch.cat(result_rows, dim=2) if not return_dict: return (dec,) return DecoderOutput(sample=dec) def forward( self, sample: torch.Tensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, ) -> Union[DecoderOutput, torch.Tensor]: r""" Args: sample (`torch.Tensor`): Input sample. sample_posterior (`bool`, *optional*, defaults to `False`): Whether to sample from the posterior. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`DecoderOutput`] instead of a plain tuple. """ x = sample posterior = self.encode(x).latent_dist if sample_posterior: z = posterior.sample(generator=generator) else: z = posterior.mode() dec = self.decode(z).sample if not return_dict: return (dec,) return DecoderOutput(sample=dec) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections def fuse_qkv_projections(self): """ Enables fused QKV projections. 
For self-attention modules, all projection matrices (i.e., query, key, value) are fused. For cross-attention modules, key and value projection matrices are fused. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ self.original_attn_processors = None for _, attn_processor in self.attn_processors.items(): if "Added" in str(attn_processor.__class__.__name__): raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") self.original_attn_processors = self.attn_processors for module in self.modules(): if isinstance(module, Attention): module.fuse_projections(fuse=True) self.set_attn_processor(FusedAttnProcessor2_0()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections def unfuse_qkv_projections(self): """Disables the fused QKV projection if enabled. <Tip warning={true}> This API is 🧪 experimental. </Tip> """ if self.original_attn_processors is not None: self.set_attn_processor(self.original_attn_processors)
diffusers/src/diffusers/models/autoencoders/autoencoder_kl.py/0
{ "file_path": "diffusers/src/diffusers/models/autoencoders/autoencoder_kl.py", "repo_id": "diffusers", "token_count": 10722 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple, Union from ..utils import deprecate, logging from .controlnets.controlnet_sparsectrl import ( # noqa SparseControlNetConditioningEmbedding, SparseControlNetModel, SparseControlNetOutput, zero_module, ) logger = logging.get_logger(__name__) # pylint: disable=invalid-name class SparseControlNetOutput(SparseControlNetOutput): def __init__(self, *args, **kwargs): deprecation_message = "Importing `SparseControlNetOutput` from `diffusers.models.controlnet_sparsectrl` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sparsectrl import SparseControlNetOutput`, instead." deprecate("diffusers.models.controlnet_sparsectrl.SparseControlNetOutput", "0.34", deprecation_message) super().__init__(*args, **kwargs) class SparseControlNetConditioningEmbedding(SparseControlNetConditioningEmbedding): def __init__(self, *args, **kwargs): deprecation_message = "Importing `SparseControlNetConditioningEmbedding` from `diffusers.models.controlnet_sparsectrl` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sparsectrl import SparseControlNetConditioningEmbedding`, instead." deprecate( "diffusers.models.controlnet_sparsectrl.SparseControlNetConditioningEmbedding", "0.34", deprecation_message ) super().__init__(*args, **kwargs) class SparseControlNetModel(SparseControlNetModel): def __init__( self, in_channels: int = 4, conditioning_channels: int = 4, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str, ...] = ( "CrossAttnDownBlockMotion", "CrossAttnDownBlockMotion", "CrossAttnDownBlockMotion", "DownBlockMotion", ), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int, ...] 
= (320, 640, 1280, 1280), layers_per_block: int = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: int = 768, transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, transformer_layers_per_mid_block: Optional[Union[int, Tuple[int]]] = None, temporal_transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1, attention_head_dim: Union[int, Tuple[int, ...]] = 8, num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None, use_linear_projection: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256), global_pool_conditions: bool = False, controlnet_conditioning_channel_order: str = "rgb", motion_max_seq_length: int = 32, motion_num_attention_heads: int = 8, concat_conditioning_mask: bool = True, use_simplified_condition_embedding: bool = True, ): deprecation_message = "Importing `SparseControlNetModel` from `diffusers.models.controlnet_sparsectrl` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.controlnet_sparsectrl import SparseControlNetModel`, instead." deprecate("diffusers.models.controlnet_sparsectrl.SparseControlNetModel", "0.34", deprecation_message) super().__init__( in_channels=in_channels, conditioning_channels=conditioning_channels, flip_sin_to_cos=flip_sin_to_cos, freq_shift=freq_shift, down_block_types=down_block_types, only_cross_attention=only_cross_attention, block_out_channels=block_out_channels, layers_per_block=layers_per_block, downsample_padding=downsample_padding, mid_block_scale_factor=mid_block_scale_factor, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_eps=norm_eps, cross_attention_dim=cross_attention_dim, transformer_layers_per_block=transformer_layers_per_block, transformer_layers_per_mid_block=transformer_layers_per_mid_block, temporal_transformer_layers_per_block=temporal_transformer_layers_per_block, attention_head_dim=attention_head_dim, num_attention_heads=num_attention_heads, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, conditioning_embedding_out_channels=conditioning_embedding_out_channels, global_pool_conditions=global_pool_conditions, controlnet_conditioning_channel_order=controlnet_conditioning_channel_order, motion_max_seq_length=motion_max_seq_length, motion_num_attention_heads=motion_num_attention_heads, concat_conditioning_mask=concat_conditioning_mask, use_simplified_condition_embedding=use_simplified_condition_embedding, )
diffusers/src/diffusers/models/controlnet_sparsectrl.py/0
{ "file_path": "diffusers/src/diffusers/models/controlnet_sparsectrl.py", "repo_id": "diffusers", "token_count": 2376 }
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch - Flax general utilities.""" import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging logger = logging.get_logger(__name__) def rename_key(key): regex = r"\w+[.]\d+" pats = re.findall(regex, key) for pat in pats: key = key.replace(pat, "_".join(pat.split("."))) return key ##################### # PyTorch => Flax # ##################### # Adapted from https://github.com/huggingface/transformers/blob/c603c80f46881ae18b2ca50770ef65fa4033eacd/src/transformers/modeling_flax_pytorch_utils.py#L69 # and https://github.com/patil-suraj/stable-diffusion-jax/blob/main/stable_diffusion_jax/convert_diffusers_to_jax.py def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict): """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary""" # conv norm or layer norm renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) # rename attention layers if len(pt_tuple_key) > 1: for rename_from, rename_to in ( ("to_out_0", "proj_attn"), ("to_k", "key"), ("to_v", "value"), ("to_q", "query"), ): if pt_tuple_key[-2] == rename_from: weight_name = pt_tuple_key[-1] weight_name = "kernel" if weight_name == "weight" else weight_name renamed_pt_tuple_key = pt_tuple_key[:-2] + (rename_to, weight_name) if renamed_pt_tuple_key in random_flax_state_dict: assert random_flax_state_dict[renamed_pt_tuple_key].shape == pt_tensor.T.shape return renamed_pt_tuple_key, pt_tensor.T if ( any("norm" in str_ for str_ in pt_tuple_key) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: pt_tuple_key = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: pt_tensor = pt_tensor.transpose(2, 3, 1, 0) return renamed_pt_tuple_key, pt_tensor # linear layer renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": pt_tensor = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def 
convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42): # Step 1: Convert pytorch tensor to numpy pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params random_flax_params = flax_model.init_weights(PRNGKey(init_key)) random_flax_state_dict = flatten_dict(random_flax_params) flax_state_dict = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): renamed_pt_key = rename_key(pt_key) pt_tuple_key = tuple(renamed_pt_key.split(".")) # Correctly rename weight parameters flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown flax_state_dict[flax_key] = jnp.asarray(flax_tensor) return unflatten_dict(flax_state_dict)
diffusers/src/diffusers/models/modeling_flax_pytorch_utils.py/0
{ "file_path": "diffusers/src/diffusers/models/modeling_flax_pytorch_utils.py", "repo_id": "diffusers", "token_count": 2325 }
# Copyright 2024 Alpha-VLLM Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict, Optional import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import logging from ..attention import LuminaFeedForward from ..attention_processor import Attention, LuminaAttnProcessor2_0 from ..embeddings import ( LuminaCombinedTimestepCaptionEmbedding, LuminaPatchEmbed, ) from ..modeling_outputs import Transformer2DModelOutput from ..modeling_utils import ModelMixin from ..normalization import LuminaLayerNormContinuous, LuminaRMSNormZero, RMSNorm logger = logging.get_logger(__name__) # pylint: disable=invalid-name class LuminaNextDiTBlock(nn.Module): """ A LuminaNextDiTBlock for LuminaNextDiT2DModel. Parameters: dim (`int`): Embedding dimension of the input features. num_attention_heads (`int`): Number of attention heads. num_kv_heads (`int`): Number of attention heads in key and value features (if using GQA), or set to None for the same as query. multiple_of (`int`): The number of multiple of ffn layer. ffn_dim_multiplier (`float`): The multipier factor of ffn layer dimension. norm_eps (`float`): The eps for norm layer. qk_norm (`bool`): normalization for query and key. cross_attention_dim (`int`): Cross attention embedding dimension of the input text prompt hidden_states. 
norm_elementwise_affine (`bool`, *optional*, defaults to True), """ def __init__( self, dim: int, num_attention_heads: int, num_kv_heads: int, multiple_of: int, ffn_dim_multiplier: float, norm_eps: float, qk_norm: bool, cross_attention_dim: int, norm_elementwise_affine: bool = True, ) -> None: super().__init__() self.head_dim = dim // num_attention_heads self.gate = nn.Parameter(torch.zeros([num_attention_heads])) # Self-attention self.attn1 = Attention( query_dim=dim, cross_attention_dim=None, dim_head=dim // num_attention_heads, qk_norm="layer_norm_across_heads" if qk_norm else None, heads=num_attention_heads, kv_heads=num_kv_heads, eps=1e-5, bias=False, out_bias=False, processor=LuminaAttnProcessor2_0(), ) self.attn1.to_out = nn.Identity() # Cross-attention self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim, dim_head=dim // num_attention_heads, qk_norm="layer_norm_across_heads" if qk_norm else None, heads=num_attention_heads, kv_heads=num_kv_heads, eps=1e-5, bias=False, out_bias=False, processor=LuminaAttnProcessor2_0(), ) self.feed_forward = LuminaFeedForward( dim=dim, inner_dim=4 * dim, multiple_of=multiple_of, ffn_dim_multiplier=ffn_dim_multiplier, ) self.norm1 = LuminaRMSNormZero( embedding_dim=dim, norm_eps=norm_eps, norm_elementwise_affine=norm_elementwise_affine, ) self.ffn_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) self.norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) self.ffn_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) self.norm1_context = RMSNorm(cross_attention_dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, image_rotary_emb: torch.Tensor, encoder_hidden_states: torch.Tensor, encoder_mask: torch.Tensor, temb: torch.Tensor, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ): """ Perform a forward pass through the LuminaNextDiTBlock. Parameters: hidden_states (`torch.Tensor`): The input of hidden_states for LuminaNextDiTBlock. attention_mask (`torch.Tensor): The input of hidden_states corresponse attention mask. image_rotary_emb (`torch.Tensor`): Precomputed cosine and sine frequencies. encoder_hidden_states: (`torch.Tensor`): The hidden_states of text prompt are processed by Gemma encoder. encoder_mask (`torch.Tensor`): The hidden_states of text prompt attention mask. temb (`torch.Tensor`): Timestep embedding with text prompt embedding. cross_attention_kwargs (`Dict[str, Any]`): kwargs for cross attention. 
""" residual = hidden_states # Self-attention norm_hidden_states, gate_msa, scale_mlp, gate_mlp = self.norm1(hidden_states, temb) self_attn_output = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=norm_hidden_states, attention_mask=attention_mask, query_rotary_emb=image_rotary_emb, key_rotary_emb=image_rotary_emb, **cross_attention_kwargs, ) # Cross-attention norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states) cross_attn_output = self.attn2( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, attention_mask=encoder_mask, query_rotary_emb=image_rotary_emb, key_rotary_emb=None, **cross_attention_kwargs, ) cross_attn_output = cross_attn_output * self.gate.tanh().view(1, 1, -1, 1) mixed_attn_output = self_attn_output + cross_attn_output mixed_attn_output = mixed_attn_output.flatten(-2) # linear proj hidden_states = self.attn2.to_out[0](mixed_attn_output) hidden_states = residual + gate_msa.unsqueeze(1).tanh() * self.norm2(hidden_states) mlp_output = self.feed_forward(self.ffn_norm1(hidden_states) * (1 + scale_mlp.unsqueeze(1))) hidden_states = hidden_states + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(mlp_output) return hidden_states class LuminaNextDiT2DModel(ModelMixin, ConfigMixin): """ LuminaNextDiT: Diffusion model with a Transformer backbone. Inherit ModelMixin and ConfigMixin to be compatible with the sampler StableDiffusionPipeline of diffusers. Parameters: sample_size (`int`): The width of the latent images. This is fixed during training since it is used to learn a number of position embeddings. patch_size (`int`, *optional*, (`int`, *optional*, defaults to 2): The size of each patch in the image. This parameter defines the resolution of patches fed into the model. in_channels (`int`, *optional*, defaults to 4): The number of input channels for the model. Typically, this matches the number of channels in the input images. hidden_size (`int`, *optional*, defaults to 4096): The dimensionality of the hidden layers in the model. This parameter determines the width of the model's hidden representations. num_layers (`int`, *optional*, default to 32): The number of layers in the model. This defines the depth of the neural network. num_attention_heads (`int`, *optional*, defaults to 32): The number of attention heads in each attention layer. This parameter specifies how many separate attention mechanisms are used. num_kv_heads (`int`, *optional*, defaults to 8): The number of key-value heads in the attention mechanism, if different from the number of attention heads. If None, it defaults to num_attention_heads. multiple_of (`int`, *optional*, defaults to 256): A factor that the hidden size should be a multiple of. This can help optimize certain hardware configurations. ffn_dim_multiplier (`float`, *optional*): A multiplier for the dimensionality of the feed-forward network. If None, it uses a default value based on the model configuration. norm_eps (`float`, *optional*, defaults to 1e-5): A small value added to the denominator for numerical stability in normalization layers. learn_sigma (`bool`, *optional*, defaults to True): Whether the model should learn the sigma parameter, which might be related to uncertainty or variance in predictions. qk_norm (`bool`, *optional*, defaults to True): Indicates if the queries and keys in the attention mechanism should be normalized. cross_attention_dim (`int`, *optional*, defaults to 2048): The dimensionality of the text embeddings. 
This parameter defines the size of the text representations used in the model. scaling_factor (`float`, *optional*, defaults to 1.0): A scaling factor applied to certain parameters or layers in the model. This can be used for adjusting the overall scale of the model's operations. """ _skip_layerwise_casting_patterns = ["patch_embedder", "norm", "ffn_norm"] @register_to_config def __init__( self, sample_size: int = 128, patch_size: Optional[int] = 2, in_channels: Optional[int] = 4, hidden_size: Optional[int] = 2304, num_layers: Optional[int] = 32, num_attention_heads: Optional[int] = 32, num_kv_heads: Optional[int] = None, multiple_of: Optional[int] = 256, ffn_dim_multiplier: Optional[float] = None, norm_eps: Optional[float] = 1e-5, learn_sigma: Optional[bool] = True, qk_norm: Optional[bool] = True, cross_attention_dim: Optional[int] = 2048, scaling_factor: Optional[float] = 1.0, ) -> None: super().__init__() self.sample_size = sample_size self.patch_size = patch_size self.in_channels = in_channels self.out_channels = in_channels * 2 if learn_sigma else in_channels self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.head_dim = hidden_size // num_attention_heads self.scaling_factor = scaling_factor self.patch_embedder = LuminaPatchEmbed( patch_size=patch_size, in_channels=in_channels, embed_dim=hidden_size, bias=True ) self.pad_token = nn.Parameter(torch.empty(hidden_size)) self.time_caption_embed = LuminaCombinedTimestepCaptionEmbedding( hidden_size=min(hidden_size, 1024), cross_attention_dim=cross_attention_dim ) self.layers = nn.ModuleList( [ LuminaNextDiTBlock( hidden_size, num_attention_heads, num_kv_heads, multiple_of, ffn_dim_multiplier, norm_eps, qk_norm, cross_attention_dim, ) for _ in range(num_layers) ] ) self.norm_out = LuminaLayerNormContinuous( embedding_dim=hidden_size, conditioning_embedding_dim=min(hidden_size, 1024), elementwise_affine=False, eps=1e-6, bias=True, out_dim=patch_size * patch_size * self.out_channels, ) # self.final_layer = LuminaFinalLayer(hidden_size, patch_size, self.out_channels) assert (hidden_size // num_attention_heads) % 4 == 0, "2d rope needs head dim to be divisible by 4" def forward( self, hidden_states: torch.Tensor, timestep: torch.Tensor, encoder_hidden_states: torch.Tensor, encoder_mask: torch.Tensor, image_rotary_emb: torch.Tensor, cross_attention_kwargs: Dict[str, Any] = None, return_dict=True, ) -> torch.Tensor: """ Forward pass of LuminaNextDiT. Parameters: hidden_states (torch.Tensor): Input tensor of shape (N, C, H, W). timestep (torch.Tensor): Tensor of diffusion timesteps of shape (N,). encoder_hidden_states (torch.Tensor): Tensor of caption features of shape (N, D). encoder_mask (torch.Tensor): Tensor of caption masks of shape (N, L). 
""" hidden_states, mask, img_size, image_rotary_emb = self.patch_embedder(hidden_states, image_rotary_emb) image_rotary_emb = image_rotary_emb.to(hidden_states.device) temb = self.time_caption_embed(timestep, encoder_hidden_states, encoder_mask) encoder_mask = encoder_mask.bool() for layer in self.layers: hidden_states = layer( hidden_states, mask, image_rotary_emb, encoder_hidden_states, encoder_mask, temb=temb, cross_attention_kwargs=cross_attention_kwargs, ) hidden_states = self.norm_out(hidden_states, temb) # unpatchify height_tokens = width_tokens = self.patch_size height, width = img_size[0] batch_size = hidden_states.size(0) sequence_length = (height // height_tokens) * (width // width_tokens) hidden_states = hidden_states[:, :sequence_length].view( batch_size, height // height_tokens, width // width_tokens, height_tokens, width_tokens, self.out_channels ) output = hidden_states.permute(0, 5, 1, 3, 2, 4).flatten(4, 5).flatten(2, 3) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
diffusers/src/diffusers/models/transformers/lumina_nextdit2d.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/lumina_nextdit2d.py", "repo_id": "diffusers", "token_count": 6306 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput from ..embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from ..modeling_utils import ModelMixin from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class UNet1DOutput(BaseOutput): """ The output of [`UNet1DModel`]. Args: sample (`torch.Tensor` of shape `(batch_size, num_channels, sample_size)`): The hidden states output from the last layer of the model. """ sample: torch.Tensor class UNet1DModel(ModelMixin, ConfigMixin): r""" A 1D UNet model that takes a noisy sample and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int`, *optional*): Default length of sample. Should be adaptable at runtime. in_channels (`int`, *optional*, defaults to 2): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 2): Number of channels in the output. extra_in_channels (`int`, *optional*, defaults to 0): Number of additional channels to be added to the input of the first down block. Useful for cases where the input data has more channels than what the model was initially designed for. time_embedding_type (`str`, *optional*, defaults to `"fourier"`): Type of time embedding to use. freq_shift (`float`, *optional*, defaults to 0.0): Frequency shift for Fourier time embedding. flip_sin_to_cos (`bool`, *optional*, defaults to `False`): Whether to flip sin to cos for Fourier time embedding. down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D")`): Tuple of downsample block types. up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip")`): Tuple of upsample block types. block_out_channels (`Tuple[int]`, *optional*, defaults to `(32, 32, 64)`): Tuple of block output channels. mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock1D"`): Block type for middle of UNet. out_block_type (`str`, *optional*, defaults to `None`): Optional output processing block of UNet. act_fn (`str`, *optional*, defaults to `None`): Optional activation function in UNet blocks. norm_num_groups (`int`, *optional*, defaults to 8): The number of groups for normalization. layers_per_block (`int`, *optional*, defaults to 1): The number of layers per block. downsample_each_block (`int`, *optional*, defaults to `False`): Experimental feature for using a UNet without upsampling. 
""" _skip_layerwise_casting_patterns = ["norm"] @register_to_config def __init__( self, sample_size: int = 65536, sample_rate: Optional[int] = None, in_channels: int = 2, out_channels: int = 2, extra_in_channels: int = 0, time_embedding_type: str = "fourier", flip_sin_to_cos: bool = True, use_timestep_embedding: bool = False, freq_shift: float = 0.0, down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), mid_block_type: Tuple[str] = "UNetMidBlock1D", out_block_type: str = None, block_out_channels: Tuple[int] = (32, 32, 64), act_fn: str = None, norm_num_groups: int = 8, layers_per_block: int = 1, downsample_each_block: bool = False, ): super().__init__() self.sample_size = sample_size # time if time_embedding_type == "fourier": self.time_proj = GaussianFourierProjection( embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = 2 * block_out_channels[0] elif time_embedding_type == "positional": self.time_proj = Timesteps( block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift ) timestep_input_dim = block_out_channels[0] if use_timestep_embedding: time_embed_dim = block_out_channels[0] * 4 self.time_mlp = TimestepEmbedding( in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0], ) self.down_blocks = nn.ModuleList([]) self.mid_block = None self.up_blocks = nn.ModuleList([]) self.out_block = None # down output_channel = in_channels for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] if i == 0: input_channel += extra_in_channels is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, ) self.down_blocks.append(down_block) # mid self.mid_block = get_mid_block( mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block, ) # up reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] if out_block_type is None: final_upsample_channels = out_channels else: final_upsample_channels = block_out_channels[0] for i, up_block_type in enumerate(up_block_types): prev_output_channel = output_channel output_channel = ( reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels ) is_final_block = i == len(block_out_channels) - 1 up_block = get_up_block( up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32) self.out_block = get_out_block( out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4, ) def forward( self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True, ) -> 
Union[UNet1DOutput, Tuple]: r""" The [`UNet1DModel`] forward method. Args: sample (`torch.Tensor`): The noisy input tensor with the following shape `(batch_size, num_channels, sample_size)`. timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_1d.UNet1DOutput`] instead of a plain tuple. Returns: [`~models.unets.unet_1d.UNet1DOutput`] or `tuple`: If `return_dict` is True, an [`~models.unets.unet_1d.UNet1DOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # 1. time timesteps = timestep if not torch.is_tensor(timesteps): timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device) elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) timestep_embed = self.time_proj(timesteps) if self.config.use_timestep_embedding: timestep_embed = self.time_mlp(timestep_embed.to(sample.dtype)) else: timestep_embed = timestep_embed[..., None] timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) # 2. down down_block_res_samples = () for downsample_block in self.down_blocks: sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed) down_block_res_samples += res_samples # 3. mid if self.mid_block: sample = self.mid_block(sample, timestep_embed) # 4. up for i, upsample_block in enumerate(self.up_blocks): res_samples = down_block_res_samples[-1:] down_block_res_samples = down_block_res_samples[:-1] sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed) # 5. post-process if self.out_block: sample = self.out_block(sample, timestep_embed) if not return_dict: return (sample,) return UNet1DOutput(sample=sample)
diffusers/src/diffusers/models/unets/unet_1d.py/0
{ "file_path": "diffusers/src/diffusers/models/unets/unet_1d.py", "repo_id": "diffusers", "token_count": 4887 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # JAX implementation of VQGAN from taming-transformers https://github.com/CompVis/taming-transformers import math from functools import partial from typing import Tuple import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .modeling_flax_utils import FlaxModelMixin @flax.struct.dataclass class FlaxDecoderOutput(BaseOutput): """ Output of decoding method. Args: sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): The decoded output sample from the last layer of the model. dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): The `dtype` of the parameters. """ sample: jnp.ndarray @flax.struct.dataclass class FlaxAutoencoderKLOutput(BaseOutput): """ Output of AutoencoderKL encoding method. Args: latent_dist (`FlaxDiagonalGaussianDistribution`): Encoded outputs of `Encoder` represented as the mean and logvar of `FlaxDiagonalGaussianDistribution`. `FlaxDiagonalGaussianDistribution` allows for sampling latents from the distribution. """ latent_dist: "FlaxDiagonalGaussianDistribution" class FlaxUpsample2D(nn.Module): """ Flax implementation of 2D Upsample layer Args: in_channels (`int`): Input channels dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( self.in_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) def __call__(self, hidden_states): batch, height, width, channels = hidden_states.shape hidden_states = jax.image.resize( hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest", ) hidden_states = self.conv(hidden_states) return hidden_states class FlaxDownsample2D(nn.Module): """ Flax implementation of 2D Downsample layer Args: in_channels (`int`): Input channels dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int dtype: jnp.dtype = jnp.float32 def setup(self): self.conv = nn.Conv( self.in_channels, kernel_size=(3, 3), strides=(2, 2), padding="VALID", dtype=self.dtype, ) def __call__(self, hidden_states): pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim hidden_states = jnp.pad(hidden_states, pad_width=pad) hidden_states = self.conv(hidden_states) return hidden_states class FlaxResnetBlock2D(nn.Module): """ Flax implementation of 2D Resnet Block. Args: in_channels (`int`): Input channels out_channels (`int`): Output channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate groups (:obj:`int`, *optional*, defaults to `32`): The number of groups to use for group norm. use_nin_shortcut (:obj:`bool`, *optional*, defaults to `None`): Whether to use `nin_shortcut`. 
This activates a new layer inside ResNet block dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int = None dropout: float = 0.0 groups: int = 32 use_nin_shortcut: bool = None dtype: jnp.dtype = jnp.float32 def setup(self): out_channels = self.in_channels if self.out_channels is None else self.out_channels self.norm1 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6) self.conv1 = nn.Conv( out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) self.norm2 = nn.GroupNorm(num_groups=self.groups, epsilon=1e-6) self.dropout_layer = nn.Dropout(self.dropout) self.conv2 = nn.Conv( out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut self.conv_shortcut = None if use_nin_shortcut: self.conv_shortcut = nn.Conv( out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, ) def __call__(self, hidden_states, deterministic=True): residual = hidden_states hidden_states = self.norm1(hidden_states) hidden_states = nn.swish(hidden_states) hidden_states = self.conv1(hidden_states) hidden_states = self.norm2(hidden_states) hidden_states = nn.swish(hidden_states) hidden_states = self.dropout_layer(hidden_states, deterministic) hidden_states = self.conv2(hidden_states) if self.conv_shortcut is not None: residual = self.conv_shortcut(residual) return hidden_states + residual class FlaxAttentionBlock(nn.Module): r""" Flax Convolutional based multi-head attention block for diffusion-based VAE. Parameters: channels (:obj:`int`): Input channels num_head_channels (:obj:`int`, *optional*, defaults to `None`): Number of attention heads num_groups (:obj:`int`, *optional*, defaults to `32`): The number of groups to use for group norm dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ channels: int num_head_channels: int = None num_groups: int = 32 dtype: jnp.dtype = jnp.float32 def setup(self): self.num_heads = self.channels // self.num_head_channels if self.num_head_channels is not None else 1 dense = partial(nn.Dense, self.channels, dtype=self.dtype) self.group_norm = nn.GroupNorm(num_groups=self.num_groups, epsilon=1e-6) self.query, self.key, self.value = dense(), dense(), dense() self.proj_attn = dense() def transpose_for_scores(self, projection): new_projection_shape = projection.shape[:-1] + (self.num_heads, -1) # move heads to 2nd position (B, T, H * D) -> (B, T, H, D) new_projection = projection.reshape(new_projection_shape) # (B, T, H, D) -> (B, H, T, D) new_projection = jnp.transpose(new_projection, (0, 2, 1, 3)) return new_projection def __call__(self, hidden_states): residual = hidden_states batch, height, width, channels = hidden_states.shape hidden_states = self.group_norm(hidden_states) hidden_states = hidden_states.reshape((batch, height * width, channels)) query = self.query(hidden_states) key = self.key(hidden_states) value = self.value(hidden_states) # transpose query = self.transpose_for_scores(query) key = self.transpose_for_scores(key) value = self.transpose_for_scores(value) # compute attentions scale = 1 / math.sqrt(math.sqrt(self.channels / self.num_heads)) attn_weights = jnp.einsum("...qc,...kc->...qk", query * scale, key * scale) attn_weights = nn.softmax(attn_weights, axis=-1) # attend to values hidden_states = jnp.einsum("...kc,...qk->...qc", value, attn_weights) 
hidden_states = jnp.transpose(hidden_states, (0, 2, 1, 3)) new_hidden_states_shape = hidden_states.shape[:-2] + (self.channels,) hidden_states = hidden_states.reshape(new_hidden_states_shape) hidden_states = self.proj_attn(hidden_states) hidden_states = hidden_states.reshape((batch, height, width, channels)) hidden_states = hidden_states + residual return hidden_states class FlaxDownEncoderBlock2D(nn.Module): r""" Flax Resnet blocks-based Encoder block for diffusion-based VAE. Parameters: in_channels (:obj:`int`): Input channels out_channels (:obj:`int`): Output channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of Resnet layer block resnet_groups (:obj:`int`, *optional*, defaults to `32`): The number of groups to use for the Resnet block group norm add_downsample (:obj:`bool`, *optional*, defaults to `True`): Whether to add downsample layer dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int dropout: float = 0.0 num_layers: int = 1 resnet_groups: int = 32 add_downsample: bool = True dtype: jnp.dtype = jnp.float32 def setup(self): resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels res_block = FlaxResnetBlock2D( in_channels=in_channels, out_channels=self.out_channels, dropout=self.dropout, groups=self.resnet_groups, dtype=self.dtype, ) resnets.append(res_block) self.resnets = resnets if self.add_downsample: self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): for resnet in self.resnets: hidden_states = resnet(hidden_states, deterministic=deterministic) if self.add_downsample: hidden_states = self.downsamplers_0(hidden_states) return hidden_states class FlaxUpDecoderBlock2D(nn.Module): r""" Flax Resnet blocks-based Decoder block for diffusion-based VAE. Parameters: in_channels (:obj:`int`): Input channels out_channels (:obj:`int`): Output channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of Resnet layer block resnet_groups (:obj:`int`, *optional*, defaults to `32`): The number of groups to use for the Resnet block group norm add_upsample (:obj:`bool`, *optional*, defaults to `True`): Whether to add upsample layer dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int out_channels: int dropout: float = 0.0 num_layers: int = 1 resnet_groups: int = 32 add_upsample: bool = True dtype: jnp.dtype = jnp.float32 def setup(self): resnets = [] for i in range(self.num_layers): in_channels = self.in_channels if i == 0 else self.out_channels res_block = FlaxResnetBlock2D( in_channels=in_channels, out_channels=self.out_channels, dropout=self.dropout, groups=self.resnet_groups, dtype=self.dtype, ) resnets.append(res_block) self.resnets = resnets if self.add_upsample: self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): for resnet in self.resnets: hidden_states = resnet(hidden_states, deterministic=deterministic) if self.add_upsample: hidden_states = self.upsamplers_0(hidden_states) return hidden_states class FlaxUNetMidBlock2D(nn.Module): r""" Flax Unet Mid-Block module. 
Parameters: in_channels (:obj:`int`): Input channels dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate num_layers (:obj:`int`, *optional*, defaults to 1): Number of Resnet layer block resnet_groups (:obj:`int`, *optional*, defaults to `32`): The number of groups to use for the Resnet and Attention block group norm num_attention_heads (:obj:`int`, *optional*, defaults to `1`): Number of attention heads for each attention block dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int dropout: float = 0.0 num_layers: int = 1 resnet_groups: int = 32 num_attention_heads: int = 1 dtype: jnp.dtype = jnp.float32 def setup(self): resnet_groups = self.resnet_groups if self.resnet_groups is not None else min(self.in_channels // 4, 32) # there is always at least one resnet resnets = [ FlaxResnetBlock2D( in_channels=self.in_channels, out_channels=self.in_channels, dropout=self.dropout, groups=resnet_groups, dtype=self.dtype, ) ] attentions = [] for _ in range(self.num_layers): attn_block = FlaxAttentionBlock( channels=self.in_channels, num_head_channels=self.num_attention_heads, num_groups=resnet_groups, dtype=self.dtype, ) attentions.append(attn_block) res_block = FlaxResnetBlock2D( in_channels=self.in_channels, out_channels=self.in_channels, dropout=self.dropout, groups=resnet_groups, dtype=self.dtype, ) resnets.append(res_block) self.resnets = resnets self.attentions = attentions def __call__(self, hidden_states, deterministic=True): hidden_states = self.resnets[0](hidden_states, deterministic=deterministic) for attn, resnet in zip(self.attentions, self.resnets[1:]): hidden_states = attn(hidden_states) hidden_states = resnet(hidden_states, deterministic=deterministic) return hidden_states class FlaxEncoder(nn.Module): r""" Flax Implementation of VAE Encoder. This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. 
Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: in_channels (:obj:`int`, *optional*, defaults to 3): Input channels out_channels (:obj:`int`, *optional*, defaults to 3): Output channels down_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(DownEncoderBlock2D)`): DownEncoder block type block_out_channels (:obj:`Tuple[str]`, *optional*, defaults to `(64,)`): Tuple containing the number of output channels for each block layers_per_block (:obj:`int`, *optional*, defaults to `2`): Number of Resnet layer for each block norm_num_groups (:obj:`int`, *optional*, defaults to `32`): norm num group act_fn (:obj:`str`, *optional*, defaults to `silu`): Activation function double_z (:obj:`bool`, *optional*, defaults to `False`): Whether to double the last output channels dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ in_channels: int = 3 out_channels: int = 3 down_block_types: Tuple[str] = ("DownEncoderBlock2D",) block_out_channels: Tuple[int] = (64,) layers_per_block: int = 2 norm_num_groups: int = 32 act_fn: str = "silu" double_z: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): block_out_channels = self.block_out_channels # in self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # downsampling down_blocks = [] output_channel = block_out_channels[0] for i, _ in enumerate(self.down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = FlaxDownEncoderBlock2D( in_channels=input_channel, out_channels=output_channel, num_layers=self.layers_per_block, resnet_groups=self.norm_num_groups, add_downsample=not is_final_block, dtype=self.dtype, ) down_blocks.append(down_block) self.down_blocks = down_blocks # middle self.mid_block = FlaxUNetMidBlock2D( in_channels=block_out_channels[-1], resnet_groups=self.norm_num_groups, num_attention_heads=None, dtype=self.dtype, ) # end conv_out_channels = 2 * self.out_channels if self.double_z else self.out_channels self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-6) self.conv_out = nn.Conv( conv_out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) def __call__(self, sample, deterministic: bool = True): # in sample = self.conv_in(sample) # downsampling for block in self.down_blocks: sample = block(sample, deterministic=deterministic) # middle sample = self.mid_block(sample, deterministic=deterministic) # end sample = self.conv_norm_out(sample) sample = nn.swish(sample) sample = self.conv_out(sample) return sample class FlaxDecoder(nn.Module): r""" Flax Implementation of VAE Decoder. This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. 
Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: in_channels (:obj:`int`, *optional*, defaults to 3): Input channels out_channels (:obj:`int`, *optional*, defaults to 3): Output channels up_block_types (:obj:`Tuple[str]`, *optional*, defaults to `(UpDecoderBlock2D)`): UpDecoder block type block_out_channels (:obj:`Tuple[str]`, *optional*, defaults to `(64,)`): Tuple containing the number of output channels for each block layers_per_block (:obj:`int`, *optional*, defaults to `2`): Number of Resnet layer for each block norm_num_groups (:obj:`int`, *optional*, defaults to `32`): norm num group act_fn (:obj:`str`, *optional*, defaults to `silu`): Activation function double_z (:obj:`bool`, *optional*, defaults to `False`): Whether to double the last output channels dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): parameters `dtype` """ in_channels: int = 3 out_channels: int = 3 up_block_types: Tuple[str] = ("UpDecoderBlock2D",) block_out_channels: int = (64,) layers_per_block: int = 2 norm_num_groups: int = 32 act_fn: str = "silu" dtype: jnp.dtype = jnp.float32 def setup(self): block_out_channels = self.block_out_channels # z to block_in self.conv_in = nn.Conv( block_out_channels[-1], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # middle self.mid_block = FlaxUNetMidBlock2D( in_channels=block_out_channels[-1], resnet_groups=self.norm_num_groups, num_attention_heads=None, dtype=self.dtype, ) # upsampling reversed_block_out_channels = list(reversed(block_out_channels)) output_channel = reversed_block_out_channels[0] up_blocks = [] for i, _ in enumerate(self.up_block_types): prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 up_block = FlaxUpDecoderBlock2D( in_channels=prev_output_channel, out_channels=output_channel, num_layers=self.layers_per_block + 1, resnet_groups=self.norm_num_groups, add_upsample=not is_final_block, dtype=self.dtype, ) up_blocks.append(up_block) prev_output_channel = output_channel self.up_blocks = up_blocks # end self.conv_norm_out = nn.GroupNorm(num_groups=self.norm_num_groups, epsilon=1e-6) self.conv_out = nn.Conv( self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) def __call__(self, sample, deterministic: bool = True): # z to block_in sample = self.conv_in(sample) # middle sample = self.mid_block(sample, deterministic=deterministic) # upsampling for block in self.up_blocks: sample = block(sample, deterministic=deterministic) sample = self.conv_norm_out(sample) sample = nn.swish(sample) sample = self.conv_out(sample) return sample class FlaxDiagonalGaussianDistribution(object): def __init__(self, parameters, deterministic=False): # Last axis to account for channels-last self.mean, self.logvar = jnp.split(parameters, 2, axis=-1) self.logvar = jnp.clip(self.logvar, -30.0, 20.0) self.deterministic = deterministic self.std = jnp.exp(0.5 * self.logvar) self.var = jnp.exp(self.logvar) if self.deterministic: self.var = self.std = jnp.zeros_like(self.mean) def sample(self, key): return self.mean + 
self.std * jax.random.normal(key, self.mean.shape) def kl(self, other=None): if self.deterministic: return jnp.array([0.0]) if other is None: return 0.5 * jnp.sum(self.mean**2 + self.var - 1.0 - self.logvar, axis=[1, 2, 3]) return 0.5 * jnp.sum( jnp.square(self.mean - other.mean) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar, axis=[1, 2, 3], ) def nll(self, sample, axis=[1, 2, 3]): if self.deterministic: return jnp.array([0.0]) logtwopi = jnp.log(2.0 * jnp.pi) return 0.5 * jnp.sum(logtwopi + self.logvar + jnp.square(sample - self.mean) / self.var, axis=axis) def mode(self): return self.mean @flax_register_to_config class FlaxAutoencoderKL(nn.Module, FlaxModelMixin, ConfigMixin): r""" Flax implementation of a VAE model with KL loss for decoding latent representations. This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). This model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax Linen module and refer to the Flax documentation for all matter related to its general usage and behavior. Inherent JAX features such as the following are supported: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image. out_channels (`int`, *optional*, defaults to 3): Number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `(DownEncoderBlock2D)`): Tuple of downsample block types. up_block_types (`Tuple[str]`, *optional*, defaults to `(UpDecoderBlock2D)`): Tuple of upsample block types. block_out_channels (`Tuple[str]`, *optional*, defaults to `(64,)`): Tuple of block output channels. layers_per_block (`int`, *optional*, defaults to `2`): Number of ResNet layer for each block. act_fn (`str`, *optional*, defaults to `silu`): The activation function to use. latent_channels (`int`, *optional*, defaults to `4`): Number of channels in the latent space. norm_num_groups (`int`, *optional*, defaults to `32`): The number of groups for normalization. sample_size (`int`, *optional*, defaults to 32): Sample input size. scaling_factor (`float`, *optional*, defaults to 0.18215): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. This is used to scale the latent space to have unit variance when training the diffusion model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): The `dtype` of the parameters. 
""" in_channels: int = 3 out_channels: int = 3 down_block_types: Tuple[str] = ("DownEncoderBlock2D",) up_block_types: Tuple[str] = ("UpDecoderBlock2D",) block_out_channels: Tuple[int] = (64,) layers_per_block: int = 1 act_fn: str = "silu" latent_channels: int = 4 norm_num_groups: int = 32 sample_size: int = 32 scaling_factor: float = 0.18215 dtype: jnp.dtype = jnp.float32 def setup(self): self.encoder = FlaxEncoder( in_channels=self.config.in_channels, out_channels=self.config.latent_channels, down_block_types=self.config.down_block_types, block_out_channels=self.config.block_out_channels, layers_per_block=self.config.layers_per_block, act_fn=self.config.act_fn, norm_num_groups=self.config.norm_num_groups, double_z=True, dtype=self.dtype, ) self.decoder = FlaxDecoder( in_channels=self.config.latent_channels, out_channels=self.config.out_channels, up_block_types=self.config.up_block_types, block_out_channels=self.config.block_out_channels, layers_per_block=self.config.layers_per_block, norm_num_groups=self.config.norm_num_groups, act_fn=self.config.act_fn, dtype=self.dtype, ) self.quant_conv = nn.Conv( 2 * self.config.latent_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, ) self.post_quant_conv = nn.Conv( self.config.latent_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, ) def init_weights(self, rng: jax.Array) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) params_rng, dropout_rng, gaussian_rng = jax.random.split(rng, 3) rngs = {"params": params_rng, "dropout": dropout_rng, "gaussian": gaussian_rng} return self.init(rngs, sample)["params"] def encode(self, sample, deterministic: bool = True, return_dict: bool = True): sample = jnp.transpose(sample, (0, 2, 3, 1)) hidden_states = self.encoder(sample, deterministic=deterministic) moments = self.quant_conv(hidden_states) posterior = FlaxDiagonalGaussianDistribution(moments) if not return_dict: return (posterior,) return FlaxAutoencoderKLOutput(latent_dist=posterior) def decode(self, latents, deterministic: bool = True, return_dict: bool = True): if latents.shape[-1] != self.config.latent_channels: latents = jnp.transpose(latents, (0, 2, 3, 1)) hidden_states = self.post_quant_conv(latents) hidden_states = self.decoder(hidden_states, deterministic=deterministic) hidden_states = jnp.transpose(hidden_states, (0, 3, 1, 2)) if not return_dict: return (hidden_states,) return FlaxDecoderOutput(sample=hidden_states) def __call__(self, sample, sample_posterior=False, deterministic: bool = True, return_dict: bool = True): posterior = self.encode(sample, deterministic=deterministic, return_dict=return_dict) if sample_posterior: rng = self.make_rng("gaussian") hidden_states = posterior.latent_dist.sample(rng) else: hidden_states = posterior.latent_dist.mode() sample = self.decode(hidden_states, return_dict=return_dict).sample if not return_dict: return (sample,) return FlaxDecoderOutput(sample=sample)
diffusers/src/diffusers/models/vae_flax.py/0
{ "file_path": "diffusers/src/diffusers/models/vae_flax.py", "repo_id": "diffusers", "token_count": 14479 }
# Copyright 2024 Salesforce.com, inc. # Copyright 2024 The HuggingFace Team. All rights reserved.# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Union import PIL.Image import torch from transformers import CLIPTokenizer from ...models import AutoencoderKL, UNet2DConditionModel from ...schedulers import PNDMScheduler from ...utils import ( is_torch_xla_available, logging, replace_example_docstring, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .blip_image_processing import BlipImageProcessor from .modeling_blip2 import Blip2QFormerModel from .modeling_ctx_clip import ContextCLIPTextModel if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers.pipelines import BlipDiffusionPipeline >>> from diffusers.utils import load_image >>> import torch >>> blip_diffusion_pipe = BlipDiffusionPipeline.from_pretrained( ... "Salesforce/blipdiffusion", torch_dtype=torch.float16 ... ).to("cuda") >>> cond_subject = "dog" >>> tgt_subject = "dog" >>> text_prompt_input = "swimming underwater" >>> cond_image = load_image( ... "https://huggingface.co/datasets/ayushtues/blipdiffusion_images/resolve/main/dog.jpg" ... ) >>> guidance_scale = 7.5 >>> num_inference_steps = 25 >>> negative_prompt = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate" >>> output = blip_diffusion_pipe( ... text_prompt_input, ... cond_image, ... cond_subject, ... tgt_subject, ... guidance_scale=guidance_scale, ... num_inference_steps=num_inference_steps, ... neg_prompt=negative_prompt, ... height=512, ... width=512, ... ).images >>> output[0].save("image.png") ``` """ class BlipDiffusionPipeline(DiffusionPipeline): """ Pipeline for Zero-Shot Subject Driven Generation using Blip Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: tokenizer ([`CLIPTokenizer`]): Tokenizer for the text encoder text_encoder ([`ContextCLIPTextModel`]): Text encoder to encode the text prompt vae ([`AutoencoderKL`]): VAE model to map the latents to the image unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. scheduler ([`PNDMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. qformer ([`Blip2QFormerModel`]): QFormer model to get multi-modal embeddings from the text and image. image_processor ([`BlipImageProcessor`]): Image Processor to preprocess and postprocess the image. 
ctx_begin_pos (int, `optional`, defaults to 2): Position of the context token in the text encoder. """ model_cpu_offload_seq = "qformer->text_encoder->unet->vae" def __init__( self, tokenizer: CLIPTokenizer, text_encoder: ContextCLIPTextModel, vae: AutoencoderKL, unet: UNet2DConditionModel, scheduler: PNDMScheduler, qformer: Blip2QFormerModel, image_processor: BlipImageProcessor, ctx_begin_pos: int = 2, mean: List[float] = None, std: List[float] = None, ): super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, unet=unet, scheduler=scheduler, qformer=qformer, image_processor=image_processor, ) self.register_to_config(ctx_begin_pos=ctx_begin_pos, mean=mean, std=std) def get_query_embeddings(self, input_image, src_subject): return self.qformer(image_input=input_image, text_input=src_subject, return_dict=False) # from the original Blip Diffusion code, speciefies the target subject and augments the prompt by repeating it def _build_prompt(self, prompts, tgt_subjects, prompt_strength=1.0, prompt_reps=20): rv = [] for prompt, tgt_subject in zip(prompts, tgt_subjects): prompt = f"a {tgt_subject} {prompt.strip()}" # a trick to amplify the prompt rv.append(", ".join([prompt] * int(prompt_strength * prompt_reps))) return rv # Copied from diffusers.pipelines.consistency_models.pipeline_consistency_models.ConsistencyModelPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels, height, width) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device=device, dtype=dtype) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def encode_prompt(self, query_embeds, prompt, device=None): device = device or self._execution_device # embeddings for prompt, with query_embeds as context max_len = self.text_encoder.text_model.config.max_position_embeddings max_len -= self.qformer.config.num_query_tokens tokenized_prompt = self.tokenizer( prompt, padding="max_length", truncation=True, max_length=max_len, return_tensors="pt", ).to(device) batch_size = query_embeds.shape[0] ctx_begin_pos = [self.config.ctx_begin_pos] * batch_size text_embeddings = self.text_encoder( input_ids=tokenized_prompt.input_ids, ctx_embeddings=query_embeds, ctx_begin_pos=ctx_begin_pos, )[0] return text_embeddings @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: List[str], reference_image: PIL.Image.Image, source_subject_category: List[str], target_subject_category: List[str], latents: Optional[torch.Tensor] = None, guidance_scale: float = 7.5, height: int = 512, width: int = 512, num_inference_steps: int = 50, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, neg_prompt: Optional[str] = "", prompt_strength: float = 1.0, prompt_reps: int = 20, output_type: Optional[str] = "pil", return_dict: bool = True, ): """ Function invoked when calling the pipeline for generation. Args: prompt (`List[str]`): The prompt or prompts to guide the image generation. 
            reference_image (`PIL.Image.Image`):
                The reference image to condition the generation on.
            source_subject_category (`List[str]`):
                The source subject category.
            target_subject_category (`List[str]`):
                The target subject category.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a
                latents tensor will be generated by random sampling.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf).
                Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate
                images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
            height (`int`, *optional*, defaults to 512):
                The height of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            neg_prompt (`str`, *optional*, defaults to ""):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            prompt_strength (`float`, *optional*, defaults to 1.0):
                The strength of the prompt. Specifies the number of times the prompt is repeated along with prompt_reps
                to amplify the prompt.
            prompt_reps (`int`, *optional*, defaults to 20):
                The number of times the prompt is repeated along with prompt_strength to amplify the prompt.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ device = self._execution_device reference_image = self.image_processor.preprocess( reference_image, image_mean=self.config.mean, image_std=self.config.std, return_tensors="pt" )["pixel_values"] reference_image = reference_image.to(device) if isinstance(prompt, str): prompt = [prompt] if isinstance(source_subject_category, str): source_subject_category = [source_subject_category] if isinstance(target_subject_category, str): target_subject_category = [target_subject_category] batch_size = len(prompt) prompt = self._build_prompt( prompts=prompt, tgt_subjects=target_subject_category, prompt_strength=prompt_strength, prompt_reps=prompt_reps, ) query_embeds = self.get_query_embeddings(reference_image, source_subject_category) text_embeddings = self.encode_prompt(query_embeds, prompt, device) do_classifier_free_guidance = guidance_scale > 1.0 if do_classifier_free_guidance: max_length = self.text_encoder.text_model.config.max_position_embeddings uncond_input = self.tokenizer( [neg_prompt] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt", ) uncond_embeddings = self.text_encoder( input_ids=uncond_input.input_ids.to(device), ctx_embeddings=None, )[0] # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) scale_down_factor = 2 ** (len(self.unet.config.block_out_channels) - 1) latents = self.prepare_latents( batch_size=batch_size, num_channels=self.unet.config.in_channels, height=height // scale_down_factor, width=width // scale_down_factor, generator=generator, latents=latents, dtype=self.unet.dtype, device=device, ) # set timesteps extra_set_kwargs = {} self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): # expand the latents if we are doing classifier free guidance do_classifier_free_guidance = guidance_scale > 1.0 latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents noise_pred = self.unet( latent_model_input, timestep=t, encoder_hidden_states=text_embeddings, down_block_additional_residuals=None, mid_block_additional_residual=None, )["sample"] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) latents = self.scheduler.step( noise_pred, t, latents, )["prev_sample"] if XLA_AVAILABLE: xm.mark_step() image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image = self.image_processor.postprocess(image, output_type=output_type) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/blip_diffusion/pipeline_blip_diffusion.py", "repo_id": "diffusers", "token_count": 6599 }
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_flax_available, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["multicontrolnet"] = ["MultiControlNetModel"] _import_structure["pipeline_controlnet"] = ["StableDiffusionControlNetPipeline"] _import_structure["pipeline_controlnet_blip_diffusion"] = ["BlipDiffusionControlNetPipeline"] _import_structure["pipeline_controlnet_img2img"] = ["StableDiffusionControlNetImg2ImgPipeline"] _import_structure["pipeline_controlnet_inpaint"] = ["StableDiffusionControlNetInpaintPipeline"] _import_structure["pipeline_controlnet_inpaint_sd_xl"] = ["StableDiffusionXLControlNetInpaintPipeline"] _import_structure["pipeline_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPipeline"] _import_structure["pipeline_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetImg2ImgPipeline"] _import_structure["pipeline_controlnet_union_inpaint_sd_xl"] = ["StableDiffusionXLControlNetUnionInpaintPipeline"] _import_structure["pipeline_controlnet_union_sd_xl"] = ["StableDiffusionXLControlNetUnionPipeline"] _import_structure["pipeline_controlnet_union_sd_xl_img2img"] = ["StableDiffusionXLControlNetUnionImg2ImgPipeline"] try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_flax_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects)) else: _import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_blip_diffusion import BlipDiffusionControlNetPipeline from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline from .pipeline_controlnet_inpaint_sd_xl import StableDiffusionXLControlNetInpaintPipeline from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline from .pipeline_controlnet_sd_xl_img2img import StableDiffusionXLControlNetImg2ImgPipeline from .pipeline_controlnet_union_inpaint_sd_xl import StableDiffusionXLControlNetUnionInpaintPipeline from .pipeline_controlnet_union_sd_xl import StableDiffusionXLControlNetUnionPipeline from .pipeline_controlnet_union_sd_xl_img2img import StableDiffusionXLControlNetUnionImg2ImgPipeline try: if not (is_transformers_available() and is_flax_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, 
globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/controlnet/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/controlnet/__init__.py", "repo_id": "diffusers", "token_count": 1545 }
from typing import TYPE_CHECKING from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule _import_structure = {"pipeline_latent_diffusion_uncond": ["LDMPipeline"]} if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: from .pipeline_latent_diffusion_uncond import LDMPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, )
diffusers/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/latent_diffusion_uncond/__init__.py", "repo_id": "diffusers", "token_count": 190 }
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import PIL.Image import torch from packaging import version from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ....configuration_utils import FrozenDict from ....image_processor import VaeImageProcessor from ....loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ....models import AutoencoderKL, UNet2DConditionModel from ....models.lora import adjust_lora_scale_text_encoder from ....schedulers import KarrasDiffusionSchedulers from ....utils import PIL_INTERPOLATION, USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers from ....utils.torch_utils import randn_tensor from ...pipeline_utils import DiffusionPipeline from ...stable_diffusion import StableDiffusionPipelineOutput from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker logger = logging.get_logger(__name__) def preprocess_image(image, batch_size): w, h = image.size w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) image = np.array(image).astype(np.float32) / 255.0 image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) image = torch.from_numpy(image) return 2.0 * image - 1.0 def preprocess_mask(mask, batch_size, scale_factor=8): if not isinstance(mask, torch.Tensor): mask = mask.convert("L") w, h = mask.size w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"]) mask = np.array(mask).astype(np.float32) / 255.0 mask = np.tile(mask, (4, 1, 1)) mask = np.vstack([mask[None]] * batch_size) mask = 1 - mask # repaint white, keep black mask = torch.from_numpy(mask) return mask else: valid_mask_channel_sizes = [1, 3] # if mask channel is fourth tensor dimension, permute dimensions to pytorch standard (B, C, H, W) if mask.shape[3] in valid_mask_channel_sizes: mask = mask.permute(0, 3, 1, 2) elif mask.shape[1] not in valid_mask_channel_sizes: raise ValueError( f"Mask channel dimension of size in {valid_mask_channel_sizes} should be second or fourth dimension," f" but received mask of shape {tuple(mask.shape)}" ) # (potentially) reduce mask channel dimension from 3 to 1 for broadcasting to latent shape mask = mask.mean(dim=1, keepdim=True) h, w = mask.shape[-2:] h, w = (x - x % 8 for x in (h, w)) # resize to integer multiple of 8 mask = torch.nn.functional.interpolate(mask, (h // scale_factor, w // scale_factor)) return mask class StableDiffusionInpaintPipelineLegacy( DiffusionPipeline, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin ): r""" Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*. This model inherits from [`DiffusionPipeline`]. 
Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) In addition the pipeline inherits the following loading methods: - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`] - *LoRA*: [`loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`] as well as the following saving methods: - *LoRA*: [`loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. feature_extractor ([`CLIPImageProcessor`]): Model that extracts features from generated images to be used as inputs for the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["feature_extractor"] _exclude_from_cpu_offload = ["safety_checker"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() deprecation_message = ( f"The class {self.__class__} is deprecated and will be removed in v1.0.0. You can achieve exactly the same functionality" "by loading your model into `StableDiffusionInpaintPipeline` instead. See https://github.com/huggingface/diffusers/pull/3533" "for more information." ) deprecate("legacy is outdated", "1.0.0", deprecation_message, standard_warn=False) if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) is_unet_version_less_0_9_0 = ( unet is not None and hasattr(unet.config, "_diffusers_version") and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0") ) is_unet_sample_size_less_64 = ( unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 ) if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. 
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, **kwargs, ): deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple." deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False) prompt_embeds_tuple = self.encode_prompt( prompt=prompt, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=lora_scale, **kwargs, ) # concatenate for backwards comp prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) return prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if strength < 0 or strength > 1: raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." 
) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start def prepare_latents(self, image, timestep, num_images_per_prompt, dtype, device, generator): image = image.to(device=device, dtype=dtype) init_latent_dist = self.vae.encode(image).latent_dist init_latents = init_latent_dist.sample(generator=generator) init_latents = self.vae.config.scaling_factor * init_latents # Expand init_latents for batch_size and num_images_per_prompt init_latents = torch.cat([init_latents] * num_images_per_prompt, dim=0) init_latents_orig = init_latents # add noise to latents using the timesteps noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype) init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents, init_latents_orig, noise @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, image: Union[torch.Tensor, PIL.Image.Image] = None, mask_image: Union[torch.Tensor, PIL.Image.Image] = None, strength: float = 0.8, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, add_predicted_noise: Optional[bool] = False, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. This is the image whose masked region will be inpainted. mask_image (`torch.Tensor` or `PIL.Image.Image`): `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If mask is a tensor, the expected shape should be either `(B, H, W, C)` or `(B, C, H, W)`, where C is 1 or 3. strength (`float`, *optional*, defaults to 0.8): Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength` is 1, the denoising process will be run on the masked area for the full number of iterations specified in `num_inference_steps`. 
`image` will be used as a reference for the masked area, adding more noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur. num_inference_steps (`int`, *optional*, defaults to 50): The reference number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. This parameter will be modulated by `strength`, as explained above. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`. instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. add_predicted_noise (`bool`, *optional*, defaults to True): Use predicted noise instead of random noise when constructing noisy versions of the original image in the reverse diffusion process eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. 
A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images, and the second element is a list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" (nsfw) content, according to the `safety_checker`. """ # 1. Check inputs self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds) # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, clip_skip=clip_skip, ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) # 4. Preprocess image and mask if not isinstance(image, torch.Tensor): image = preprocess_image(image, batch_size) mask_image = preprocess_mask(mask_image, batch_size, self.vae_scale_factor) # 5. set timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) # 6. Prepare latent variables # encode the init image into latents and scale the latents latents, init_latents_orig, noise = self.prepare_latents( image, latent_timestep, num_images_per_prompt, prompt_embeds.dtype, device, generator ) # 7. Prepare mask latent mask = mask_image.to(device=device, dtype=latents.dtype) mask = torch.cat([mask] * num_images_per_prompt) # 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 9. 
Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # masking if add_predicted_noise: init_latents_proper = self.scheduler.add_noise( init_latents_orig, noise_pred_uncond, torch.tensor([t]) ) else: init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t])) latents = (init_latents_proper * mask) + (latents * (1 - mask)) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) # use original latents corresponding to unmasked portions of the image latents = (init_latents_orig * mask) + (latents * (1 - mask)) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload all models self.maybe_free_model_hooks() if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
diffusers/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/deprecated/stable_diffusion_variants/pipeline_stable_diffusion_inpaint_legacy.py", "repo_id": "diffusers", "token_count": 18223 }
from typing import TYPE_CHECKING from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _additional_imports = {} _import_structure = {"pipeline_output": ["FluxPipelineOutput", "FluxPriorReduxPipelineOutput"]} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["modeling_flux"] = ["ReduxImageEncoder"] _import_structure["pipeline_flux"] = ["FluxPipeline"] _import_structure["pipeline_flux_control"] = ["FluxControlPipeline"] _import_structure["pipeline_flux_control_img2img"] = ["FluxControlImg2ImgPipeline"] _import_structure["pipeline_flux_control_inpaint"] = ["FluxControlInpaintPipeline"] _import_structure["pipeline_flux_controlnet"] = ["FluxControlNetPipeline"] _import_structure["pipeline_flux_controlnet_image_to_image"] = ["FluxControlNetImg2ImgPipeline"] _import_structure["pipeline_flux_controlnet_inpainting"] = ["FluxControlNetInpaintPipeline"] _import_structure["pipeline_flux_fill"] = ["FluxFillPipeline"] _import_structure["pipeline_flux_img2img"] = ["FluxImg2ImgPipeline"] _import_structure["pipeline_flux_inpaint"] = ["FluxInpaintPipeline"] _import_structure["pipeline_flux_prior_redux"] = ["FluxPriorReduxPipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .modeling_flux import ReduxImageEncoder from .pipeline_flux import FluxPipeline from .pipeline_flux_control import FluxControlPipeline from .pipeline_flux_control_img2img import FluxControlImg2ImgPipeline from .pipeline_flux_control_inpaint import FluxControlInpaintPipeline from .pipeline_flux_controlnet import FluxControlNetPipeline from .pipeline_flux_controlnet_image_to_image import FluxControlNetImg2ImgPipeline from .pipeline_flux_controlnet_inpainting import FluxControlNetInpaintPipeline from .pipeline_flux_fill import FluxFillPipeline from .pipeline_flux_img2img import FluxImg2ImgPipeline from .pipeline_flux_inpaint import FluxInpaintPipeline from .pipeline_flux_prior_redux import FluxPriorReduxPipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value) for name, value in _additional_imports.items(): setattr(sys.modules[__name__], name, value)
diffusers/src/diffusers/pipelines/flux/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/flux/__init__.py", "repo_id": "diffusers", "token_count": 1276 }
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Callable, Dict, List, Optional, Union

import PIL.Image
import torch
from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection

from ...models import PriorTransformer, UNet2DConditionModel, VQModel
from ...schedulers import DDPMScheduler, UnCLIPScheduler
from ...utils import deprecate, logging, replace_example_docstring
from ..pipeline_utils import DiffusionPipeline
from .pipeline_kandinsky2_2 import KandinskyV22Pipeline
from .pipeline_kandinsky2_2_img2img import KandinskyV22Img2ImgPipeline
from .pipeline_kandinsky2_2_inpainting import KandinskyV22InpaintPipeline
from .pipeline_kandinsky2_2_prior import KandinskyV22PriorPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TEXT2IMAGE_EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipe = AutoPipelineForText2Image.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipe.enable_model_cpu_offload()

        prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"

        image = pipe(prompt=prompt, num_inference_steps=25).images[0]
        ```
"""

IMAGE2IMAGE_EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        from diffusers import AutoPipelineForImage2Image
        import torch
        import requests
        from io import BytesIO
        from PIL import Image
        import os

        pipe = AutoPipelineForImage2Image.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipe.enable_model_cpu_offload()

        prompt = "A fantasy landscape, Cinematic lighting"
        negative_prompt = "low quality, bad quality"

        url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"

        response = requests.get(url)
        original_image = Image.open(BytesIO(response.content)).convert("RGB")
        original_image.thumbnail((768, 768))

        image = pipe(prompt=prompt, image=original_image, num_inference_steps=25).images[0]
        ```
"""

INPAINT_EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        from diffusers import AutoPipelineForInpainting
        from diffusers.utils import load_image
        import torch
        import numpy as np

        pipe = AutoPipelineForInpainting.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipe.enable_model_cpu_offload()

        prompt = "A fantasy landscape, Cinematic lighting"
        negative_prompt = "low quality, bad quality"

        original_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )

        mask = np.zeros((768, 768), dtype=np.float32)
        # Let's mask out an area above the cat's head
        mask[:250, 250:-250] = 1

        image = pipe(prompt=prompt, image=original_image, mask_image=mask, num_inference_steps=25).images[0]
        ```
"""


class KandinskyV22CombinedPipeline(DiffusionPipeline):
    """
    Combined Pipeline for text-to-image generation using Kandinsky

    This model inherits from
[`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. prior_prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. prior_image_encoder ([`CLIPVisionModelWithProjection`]): Frozen image-encoder. prior_text_encoder ([`CLIPTextModelWithProjection`]): Frozen text-encoder. prior_tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). prior_scheduler ([`UnCLIPScheduler`]): A scheduler to be used in combination with `prior` to generate image embedding. prior_image_processor ([`CLIPImageProcessor`]): A image_processor to be used to preprocess image from clip. """ model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq" _load_connected_pipes = True _exclude_from_cpu_offload = ["prior_prior"] def __init__( self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor, ): super().__init__() self.register_modules( unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor, ) self.prior_pipe = KandinskyV22PriorPipeline( prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor, ) self.decoder_pipe = KandinskyV22Pipeline( unet=unet, scheduler=scheduler, movq=movq, ) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. Note that offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. 
""" self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. prior_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. prior_num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. 
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
            prior_callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during the inference of the prior
                pipeline. The function is called with the following arguments: `prior_callback_on_step_end(self:
                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`.
            prior_callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the
                list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in
                the `._callback_tensor_inputs` attribute of your prior pipeline class.
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during the inference of the decoder
                pipeline. The function is called with the following arguments: `callback_on_step_end(self:
                DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
                list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ prior_outputs = self.prior_pipe( prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type="pt", return_dict=False, callback_on_step_end=prior_callback_on_step_end, callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, ) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = (image_embeds.shape[0] // len(prompt)) * prompt outputs = self.decoder_pipe( image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, ) self.maybe_free_model_hooks() return outputs class KandinskyV22Img2ImgCombinedPipeline(DiffusionPipeline): """ Combined Pipeline for image-to-image generation using Kandinsky This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. prior_prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. prior_image_encoder ([`CLIPVisionModelWithProjection`]): Frozen image-encoder. prior_text_encoder ([`CLIPTextModelWithProjection`]): Frozen text-encoder. prior_tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). prior_scheduler ([`UnCLIPScheduler`]): A scheduler to be used in combination with `prior` to generate image embedding. prior_image_processor ([`CLIPImageProcessor`]): A image_processor to be used to preprocess image from clip. 
""" model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq" _load_connected_pipes = True _exclude_from_cpu_offload = ["prior_prior"] def __init__( self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor, ): super().__init__() self.register_modules( unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor, ) self.prior_pipe = KandinskyV22PriorPipeline( prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor, ) self.decoder_pipe = KandinskyV22Img2ImgPipeline( unet=unet, scheduler=scheduler, movq=movq, ) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): r""" Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. """ self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id, device=device) def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. Note that offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. 
""" self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(IMAGE2IMAGE_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, return_dict: bool = True, prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], ): """ Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded again. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. strength (`float`, *optional*, defaults to 0.3): Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` will be used as a starting point, adding more noise to it the larger the `strength`. The number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will be maximum and the denoising process will run for the full number of iterations specified in `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            prior_guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
                text `prompt`, usually at the expense of lower image quality.
            prior_num_inference_steps (`int`, *optional*, defaults to 25):
                The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
                image at the expense of slower inference.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            callback (`Callable`, *optional*):
                A function that is called every `callback_steps` steps during inference. The function is called with
                the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ prior_outputs = self.prior_pipe( prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type="pt", return_dict=False, callback_on_step_end=prior_callback_on_step_end, callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, ) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt image = [image] if isinstance(image, PIL.Image.Image) else image if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = (image_embeds.shape[0] // len(prompt)) * prompt if ( isinstance(image, (list, tuple)) and len(image) < image_embeds.shape[0] and image_embeds.shape[0] % len(image) == 0 ): image = (image_embeds.shape[0] // len(image)) * image outputs = self.decoder_pipe( image=image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, strength=strength, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, ) self.maybe_free_model_hooks() return outputs class KandinskyV22InpaintCombinedPipeline(DiffusionPipeline): """ Combined Pipeline for inpainting generation using Kandinsky This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents. unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding. movq ([`VQModel`]): MoVQ Decoder to generate the image from the latents. prior_prior ([`PriorTransformer`]): The canonical unCLIP prior to approximate the image embedding from the text embedding. prior_image_encoder ([`CLIPVisionModelWithProjection`]): Frozen image-encoder. prior_text_encoder ([`CLIPTextModelWithProjection`]): Frozen text-encoder. prior_tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). prior_scheduler ([`UnCLIPScheduler`]): A scheduler to be used in combination with `prior` to generate image embedding. prior_image_processor ([`CLIPImageProcessor`]): A image_processor to be used to preprocess image from clip. 
""" model_cpu_offload_seq = "prior_text_encoder->prior_image_encoder->unet->movq" _load_connected_pipes = True _exclude_from_cpu_offload = ["prior_prior"] def __init__( self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor, ): super().__init__() self.register_modules( unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor, ) self.prior_pipe = KandinskyV22PriorPipeline( prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor, ) self.decoder_pipe = KandinskyV22InpaintPipeline( unet=unet, scheduler=scheduler, movq=movq, ) def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op) def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"): r""" Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. Note that offloading happens on a submodule basis. Memory savings are higher than with `enable_model_cpu_offload`, but performance is lower. """ self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id, device=device) def progress_bar(self, iterable=None, total=None): self.prior_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.progress_bar(iterable=iterable, total=total) self.decoder_pipe.enable_model_cpu_offload() def set_progress_bar_config(self, **kwargs): self.prior_pipe.set_progress_bar_config(**kwargs) self.decoder_pipe.set_progress_bar_config(**kwargs) @torch.no_grad() @replace_example_docstring(INPAINT_EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]], image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], mask_image: Union[torch.Tensor, PIL.Image.Image, List[torch.Tensor], List[PIL.Image.Image]], negative_prompt: Optional[Union[str, List[str]]] = None, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, height: int = 512, width: int = 512, prior_guidance_scale: float = 4.0, prior_num_inference_steps: int = 25, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, prior_callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, prior_callback_on_step_end_tensor_inputs: List[str] = ["latents"], callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): """ Function invoked when calling the pipeline for generation. 
Args: prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`): `Image`, or tensor representing an image batch, that will be used as the starting point for the process. Can also accept image latents as `image`, if passing latents directly, it will not be encoded again. mask_image (`np.array`): Tensor representing an image batch, to mask `image`. White pixels in the mask will be repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. height (`int`, *optional*, defaults to 512): The height in pixels of the generated image. width (`int`, *optional*, defaults to 512): The width in pixels of the generated image. prior_guidance_scale (`float`, *optional*, defaults to 4.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. prior_num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. 
prior_callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `prior_callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. prior_callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `prior_callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` """ prior_kwargs = {} if kwargs.get("prior_callback", None) is not None: prior_kwargs["callback"] = kwargs.pop("prior_callback") deprecate( "prior_callback", "1.0.0", "Passing `prior_callback` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`", ) if kwargs.get("prior_callback_steps", None) is not None: deprecate( "prior_callback_steps", "1.0.0", "Passing `prior_callback_steps` as an input argument to `__call__` is deprecated, consider use `prior_callback_on_step_end`", ) prior_kwargs["callback_steps"] = kwargs.pop("prior_callback_steps") prior_outputs = self.prior_pipe( prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type="pt", return_dict=False, callback_on_step_end=prior_callback_on_step_end, callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs, **prior_kwargs, ) image_embeds = prior_outputs[0] negative_image_embeds = prior_outputs[1] prompt = [prompt] if not isinstance(prompt, (list, tuple)) else prompt image = [image] if isinstance(image, PIL.Image.Image) else image mask_image = [mask_image] if isinstance(mask_image, PIL.Image.Image) else mask_image if len(prompt) < image_embeds.shape[0] and image_embeds.shape[0] % len(prompt) == 0: prompt = (image_embeds.shape[0] // len(prompt)) * prompt if ( isinstance(image, (list, tuple)) and len(image) < image_embeds.shape[0] and image_embeds.shape[0] % len(image) == 0 ): image = (image_embeds.shape[0] // len(image)) * image if ( isinstance(mask_image, (list, tuple)) and len(mask_image) < image_embeds.shape[0] and image_embeds.shape[0] % len(mask_image) == 0 ): mask_image = (image_embeds.shape[0] // len(mask_image)) * mask_image outputs = self.decoder_pipe( image=image, mask_image=mask_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, return_dict=return_dict, 
callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, **kwargs, ) self.maybe_free_model_hooks() return outputs
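

# Illustrative sketch (not part of the upstream module): the combined pipelines above simply chain a
# `KandinskyV22PriorPipeline` (text -> image embedding) with a decoder pipeline (image embedding -> pixels).
# The equivalent manual flow looks roughly like the snippet below; the prior checkpoint name and the use of a
# CUDA device are assumptions made for the sake of the example.
if __name__ == "__main__":
    from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline

    prior = KandinskyV22PriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    decoder = KandinskyV22Pipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
    ).to("cuda")

    # 1. The unCLIP prior turns the prompt into an image embedding (plus a negative counterpart).
    image_embeds, negative_image_embeds = prior(
        "A lion in galaxies, intricate detail", guidance_scale=4.0, num_inference_steps=25, return_dict=False
    )

    # 2. The MoVQ-based decoder turns the image embedding into pixels.
    image = decoder(
        image_embeds=image_embeds,
        negative_image_embeds=negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=25,
    ).images[0]
    image.save("kandinsky_manual_chain.png")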
diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_combined.py", "repo_id": "diffusers", "token_count": 18875 }
# Copyright 2024 ChatGLM3-6B Model Team, Kwai-Kolors Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re from typing import Dict, List, Optional, Union from sentencepiece import SentencePieceProcessor from transformers import PreTrainedTokenizer from transformers.tokenization_utils_base import BatchEncoding, EncodedInput from transformers.utils import PaddingStrategy class SPTokenizer: def __init__(self, model_path: str): # reload tokenizer assert os.path.isfile(model_path), model_path self.sp_model = SentencePieceProcessor(model_file=model_path) # BOS / EOS token IDs self.n_words: int = self.sp_model.vocab_size() self.bos_id: int = self.sp_model.bos_id() self.eos_id: int = self.sp_model.eos_id() self.pad_id: int = self.sp_model.unk_id() assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens self.special_tokens = {} self.index_special_tokens = {} for token in special_tokens: self.special_tokens[token] = self.n_words self.index_special_tokens[self.n_words] = token self.n_words += 1 self.role_special_token_expression = "|".join([re.escape(token) for token in role_special_tokens]) def tokenize(self, s: str, encode_special_tokens=False): if encode_special_tokens: last_index = 0 t = [] for match in re.finditer(self.role_special_token_expression, s): if last_index < match.start(): t.extend(self.sp_model.EncodeAsPieces(s[last_index : match.start()])) t.append(s[match.start() : match.end()]) last_index = match.end() if last_index < len(s): t.extend(self.sp_model.EncodeAsPieces(s[last_index:])) return t else: return self.sp_model.EncodeAsPieces(s) def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: assert isinstance(s, str) t = self.sp_model.encode(s) if bos: t = [self.bos_id] + t if eos: t = t + [self.eos_id] return t def decode(self, t: List[int]) -> str: text, buffer = "", [] for token in t: if token in self.index_special_tokens: if buffer: text += self.sp_model.decode(buffer) buffer = [] text += self.index_special_tokens[token] else: buffer.append(token) if buffer: text += self.sp_model.decode(buffer) return text def decode_tokens(self, tokens: List[str]) -> str: text = self.sp_model.DecodePieces(tokens) return text def convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.special_tokens: return self.special_tokens[token] return self.sp_model.PieceToId(token) def convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index in self.index_special_tokens: return self.index_special_tokens[index] if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0: return "" return self.sp_model.IdToPiece(index) class ChatGLMTokenizer(PreTrainedTokenizer): vocab_files_names = {"vocab_file": "tokenizer.model"} model_input_names = 
["input_ids", "attention_mask", "position_ids"] def __init__( self, vocab_file, padding_side="left", clean_up_tokenization_spaces=False, encode_special_tokens=False, **kwargs, ): self.name = "GLMTokenizer" self.vocab_file = vocab_file self.tokenizer = SPTokenizer(vocab_file) self.special_tokens = { "<bos>": self.tokenizer.bos_id, "<eos>": self.tokenizer.eos_id, "<pad>": self.tokenizer.pad_id, } self.encode_special_tokens = encode_special_tokens super().__init__( padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces, encode_special_tokens=encode_special_tokens, **kwargs, ) def get_command(self, token): if token in self.special_tokens: return self.special_tokens[token] assert token in self.tokenizer.special_tokens, f"{token} is not a special token for {self.name}" return self.tokenizer.special_tokens[token] @property def unk_token(self) -> str: return "<unk>" @unk_token.setter def unk_token(self, value: str): self._unk_token = value @property def pad_token(self) -> str: return "<unk>" @pad_token.setter def pad_token(self, value: str): self._pad_token = value @property def pad_token_id(self): return self.get_command("<pad>") @property def eos_token(self) -> str: return "</s>" @eos_token.setter def eos_token(self, value: str): self._eos_token = value @property def eos_token_id(self): return self.get_command("<eos>") @property def vocab_size(self): return self.tokenizer.n_words def get_vocab(self): """Returns vocab as a dict""" vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text, **kwargs): return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.tokenizer.convert_token_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.tokenizer.convert_id_to_token(index) def convert_tokens_to_string(self, tokens: List[str]) -> str: return self.tokenizer.decode_tokens(tokens) def save_vocabulary(self, save_directory, filename_prefix=None): """ Save the vocabulary and special tokens file to a directory. Args: save_directory (`str`): The directory in which to save the vocabulary. filename_prefix (`str`, *optional*): An optional prefix to add to the named of the saved files. Returns: `Tuple(str)`: Paths to the files saved. 
""" if os.path.isdir(save_directory): vocab_file = os.path.join(save_directory, self.vocab_files_names["vocab_file"]) else: vocab_file = save_directory with open(self.vocab_file, "rb") as fin: proto_str = fin.read() with open(vocab_file, "wb") as writer: writer.write(proto_str) return (vocab_file,) def get_prefix_tokens(self): prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] return prefix_tokens def build_single_message(self, role, metadata, message): assert role in ["system", "user", "assistant", "observation"], role role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n") message_tokens = self.tokenizer.encode(message) tokens = role_tokens + message_tokens return tokens def build_chat_input(self, query, history=None, role="user"): if history is None: history = [] input_ids = [] for item in history: content = item["content"] if item["role"] == "system" and "tools" in item: content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False) input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content)) input_ids.extend(self.build_single_message(role, "", query)) input_ids.extend([self.get_command("<|assistant|>")]) return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ prefix_tokens = self.get_prefix_tokens() token_ids_0 = prefix_tokens + token_ids_0 if token_ids_1 is not None: token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("<eos>")] return token_ids_0 def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, padding_side: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). padding_side (`str`, *optional*): The side on which the model should have padding applied. 
Should be selected between ['right', 'left']. Default value is picked from the class attribute of the same name. return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults assert self.padding_side == "left" required_input = encoded_inputs[self.model_input_names[0]] seq_length = len(required_input) if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. if "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * seq_length if "position_ids" not in encoded_inputs: encoded_inputs["position_ids"] = list(range(seq_length)) if needs_to_be_padded: difference = max_length - len(required_input) if "attention_mask" in encoded_inputs: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "position_ids" in encoded_inputs: encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input return encoded_inputs
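

# Illustrative sketch (not part of the upstream module): `_pad` above always pads on the left, prefixing
# `input_ids` with the pad id and `attention_mask` / `position_ids` with zeros (the method asserts
# `padding_side == "left"`). The token ids below are made up purely to show the resulting layout.
if __name__ == "__main__":
    encoded = {"input_ids": [101, 102, 103], "attention_mask": [1, 1, 1], "position_ids": [0, 1, 2]}
    pad_token_id, max_length = 0, 6
    difference = max_length - len(encoded["input_ids"])

    padded = {
        "input_ids": [pad_token_id] * difference + encoded["input_ids"],
        "attention_mask": [0] * difference + encoded["attention_mask"],
        "position_ids": [0] * difference + encoded["position_ids"],
    }
    # Real tokens end up flush against the right edge:
    # {'input_ids': [0, 0, 0, 101, 102, 103], 'attention_mask': [0, 0, 0, 1, 1, 1], 'position_ids': [0, 0, 0, 0, 1, 2]}
    print(padded)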
diffusers/src/diffusers/pipelines/kolors/tokenizer.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/kolors/tokenizer.py", "repo_id": "diffusers", "token_count": 5907 }
from dataclasses import dataclass

import torch

from diffusers.utils import BaseOutput


@dataclass
class LTXPipelineOutput(BaseOutput):
    r"""
    Output class for LTX pipelines.

    Args:
        frames (`torch.Tensor`, `np.ndarray`, or `List[List[PIL.Image.Image]]`):
            List of video outputs - It can be a nested list of length `batch_size`, with each sub-list containing
            denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of shape
            `(batch_size, num_frames, channels, height, width)`.
    """

    frames: torch.Tensor
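

# Illustrative sketch (not part of the upstream module): when a pipeline is asked for tensor output, `frames`
# is typically shaped (batch_size, num_frames, channels, height, width). The shape below is made up purely
# for demonstration.
if __name__ == "__main__":
    dummy_frames = torch.randn(1, 8, 3, 64, 64)
    output = LTXPipelineOutput(frames=dummy_frames)
    print(output.frames.shape)  # torch.Size([1, 8, 3, 64, 64])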
diffusers/src/diffusers/pipelines/ltx/pipeline_output.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/ltx/pipeline_output.py", "repo_id": "diffusers", "token_count": 222 }
from dataclasses import dataclass from typing import TYPE_CHECKING, List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( DIFFUSERS_SLOW_IMPORT, OptionalDependencyNotAvailable, _LazyModule, get_objects_from_module, is_torch_available, is_transformers_available, ) _dummy_objects = {} _import_structure = {} try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils import dummy_torch_and_transformers_objects # noqa F403 _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) else: _import_structure["image_encoder"] = ["PaintByExampleImageEncoder"] _import_structure["pipeline_paint_by_example"] = ["PaintByExamplePipeline"] if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * else: from .image_encoder import PaintByExampleImageEncoder from .pipeline_paint_by_example import PaintByExamplePipeline else: import sys sys.modules[__name__] = _LazyModule( __name__, globals()["__file__"], _import_structure, module_spec=__spec__, ) for name, value in _dummy_objects.items(): setattr(sys.modules[__name__], name, value)
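

# Illustrative sketch (not part of the upstream module): with the `_LazyModule` wiring above, importing the
# package itself is cheap; the torch/transformers-backed submodules are only imported when an attribute such
# as `PaintByExamplePipeline` is first accessed.
if __name__ == "__main__":
    import diffusers.pipelines.paint_by_example as paint_by_example

    # No heavy submodule import has happened yet; this attribute access triggers it via `_LazyModule`.
    print(paint_by_example.PaintByExamplePipeline)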
diffusers/src/diffusers/pipelines/paint_by_example/__init__.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/paint_by_example/__init__.py", "repo_id": "diffusers", "token_count": 599 }
import inspect from itertools import repeat from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...image_processor import VaeImageProcessor from ...models import AutoencoderKL, UNet2DConditionModel from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from ...schedulers import KarrasDiffusionSchedulers from ...utils import deprecate, is_torch_xla_available, logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from .pipeline_output import SemanticStableDiffusionPipelineOutput if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name class SemanticStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin): r""" Pipeline for text-to-image generation using Stable Diffusion with latent editing. This model inherits from [`DiffusionPipeline`] and builds on the [`StableDiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`Q16SafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." 
) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents def decode_latents(self, latents): deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: int = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: int = 1, editing_prompt: Optional[Union[str, List[str]]] = None, editing_prompt_embeddings: Optional[torch.Tensor] = None, reverse_editing_direction: Optional[Union[bool, List[bool]]] = False, edit_guidance_scale: Optional[Union[float, List[float]]] = 5, edit_warmup_steps: Optional[Union[int, List[int]]] = 10, edit_cooldown_steps: Optional[Union[int, List[int]]] = None, edit_threshold: Optional[Union[float, List[float]]] = 0.9, edit_momentum_scale: Optional[float] = 0.1, edit_mom_beta: Optional[float] = 0.4, edit_weights: Optional[List[float]] = None, sem_guidance: Optional[List[torch.Tensor]] = None, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`): The prompt or prompts to guide image generation. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. 
return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. editing_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to use for semantic guidance. Semantic guidance is disabled by setting `editing_prompt = None`. Guidance direction of prompt should be specified via `reverse_editing_direction`. editing_prompt_embeddings (`torch.Tensor`, *optional*): Pre-computed embeddings to use for semantic guidance. Guidance direction of embedding should be specified via `reverse_editing_direction`. reverse_editing_direction (`bool` or `List[bool]`, *optional*, defaults to `False`): Whether the corresponding prompt in `editing_prompt` should be increased or decreased. edit_guidance_scale (`float` or `List[float]`, *optional*, defaults to 5): Guidance scale for semantic guidance. If provided as a list, values should correspond to `editing_prompt`. edit_warmup_steps (`float` or `List[float]`, *optional*, defaults to 10): Number of diffusion steps (for each prompt) for which semantic guidance is not applied. Momentum is calculated for those steps and applied once all warmup periods are over. edit_cooldown_steps (`float` or `List[float]`, *optional*, defaults to `None`): Number of diffusion steps (for each prompt) after which semantic guidance is longer applied. edit_threshold (`float` or `List[float]`, *optional*, defaults to 0.9): Threshold of semantic guidance. edit_momentum_scale (`float`, *optional*, defaults to 0.1): Scale of the momentum to be added to the semantic guidance at each diffusion step. If set to 0.0, momentum is disabled. Momentum is already built up during warmup (for diffusion steps smaller than `sld_warmup_steps`). Momentum is only added to latent guidance once all warmup periods are finished. edit_mom_beta (`float`, *optional*, defaults to 0.4): Defines how semantic guidance momentum builds up. `edit_mom_beta` indicates how much of the previous momentum is kept. Momentum is already built up during warmup (for diffusion steps smaller than `edit_warmup_steps`). edit_weights (`List[float]`, *optional*, defaults to `None`): Indicates how much each individual concept should influence the overall guidance. If no weights are provided all concepts are applied equally. sem_guidance (`List[torch.Tensor]`, *optional*): List of pre-generated guidance vectors to be applied at generation. Length of the list has to correspond to `num_inference_steps`. Examples: ```py >>> import torch >>> from diffusers import SemanticStableDiffusionPipeline >>> pipe = SemanticStableDiffusionPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> out = pipe( ... prompt="a photo of the face of a woman", ... num_images_per_prompt=1, ... guidance_scale=7, ... editing_prompt=[ ... "smiling, smile", # Concepts to apply ... "glasses, wearing glasses", ... "curls, wavy hair, curly hair", ... "beard, full beard, mustache", ... ], ... reverse_editing_direction=[ ... False, ... False, ... False, ... False, ... ], # Direction of guidance i.e. 
increase all concepts ... edit_warmup_steps=[10, 10, 10, 10], # Warmup period for each concept ... edit_guidance_scale=[4, 5, 5, 5.4], # Guidance scale for each concept ... edit_threshold=[ ... 0.99, ... 0.975, ... 0.925, ... 0.96, ... ], # Threshold for each concept. Threshold equals the percentile of the latent space that will be discarded. I.e. threshold=0.99 uses 1% of the latent dimensions ... edit_momentum_scale=0.3, # Momentum scale that will be added to the latent guidance ... edit_mom_beta=0.6, # Momentum beta ... edit_weights=[1, 1, 1, 1, 1], # Weights of the individual concepts against each other ... ) >>> image = out.images[0] ``` Returns: [`~pipelines.semantic_stable_diffusion.SemanticStableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.semantic_stable_diffusion.SemanticStableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # 2. Define call parameters batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device if editing_prompt: enable_edit_guidance = True if isinstance(editing_prompt, str): editing_prompt = [editing_prompt] enabled_editing_prompts = len(editing_prompt) elif editing_prompt_embeddings is not None: enable_edit_guidance = True enabled_editing_prompts = editing_prompt_embeddings.shape[0] else: enabled_editing_prompts = 0 enable_edit_guidance = False # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ) text_input_ids = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_embeddings = self.text_encoder(text_input_ids.to(device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if enable_edit_guidance: # get safety text embeddings if editing_prompt_embeddings is None: edit_concepts_input = self.tokenizer( [x for item in editing_prompt for x in repeat(item, batch_size)], padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ) edit_concepts_input_ids = edit_concepts_input.input_ids if edit_concepts_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode( edit_concepts_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) edit_concepts_input_ids = 
edit_concepts_input_ids[:, : self.tokenizer.model_max_length] edit_concepts = self.text_encoder(edit_concepts_input_ids.to(device))[0] else: edit_concepts = editing_prompt_embeddings.to(device).repeat(batch_size, 1, 1) # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed_edit, seq_len_edit, _ = edit_concepts.shape edit_concepts = edit_concepts.repeat(1, num_images_per_prompt, 1) edit_concepts = edit_concepts.view(bs_embed_edit * num_images_per_prompt, seq_len_edit, -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes if enable_edit_guidance: text_embeddings = torch.cat([uncond_embeddings, text_embeddings, edit_concepts]) else: text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, text_embeddings.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. 
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Initialize edit_momentum to None edit_momentum = None self.uncond_estimates = None self.text_estimates = None self.edit_estimates = None self.sem_guidance = None for i, t in enumerate(self.progress_bar(timesteps)): # expand the latents if we are doing classifier free guidance latent_model_input = ( torch.cat([latents] * (2 + enabled_editing_prompts)) if do_classifier_free_guidance else latents ) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # perform guidance if do_classifier_free_guidance: noise_pred_out = noise_pred.chunk(2 + enabled_editing_prompts) # [b,4, 64, 64] noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1] noise_pred_edit_concepts = noise_pred_out[2:] # default text guidance noise_guidance = guidance_scale * (noise_pred_text - noise_pred_uncond) # noise_guidance = (noise_pred_text - noise_pred_edit_concepts[0]) if self.uncond_estimates is None: self.uncond_estimates = torch.zeros((num_inference_steps + 1, *noise_pred_uncond.shape)) self.uncond_estimates[i] = noise_pred_uncond.detach().cpu() if self.text_estimates is None: self.text_estimates = torch.zeros((num_inference_steps + 1, *noise_pred_text.shape)) self.text_estimates[i] = noise_pred_text.detach().cpu() if self.edit_estimates is None and enable_edit_guidance: self.edit_estimates = torch.zeros( (num_inference_steps + 1, len(noise_pred_edit_concepts), *noise_pred_edit_concepts[0].shape) ) if self.sem_guidance is None: self.sem_guidance = torch.zeros((num_inference_steps + 1, *noise_pred_text.shape)) if edit_momentum is None: edit_momentum = torch.zeros_like(noise_guidance) if enable_edit_guidance: concept_weights = torch.zeros( (len(noise_pred_edit_concepts), noise_guidance.shape[0]), device=device, dtype=noise_guidance.dtype, ) noise_guidance_edit = torch.zeros( (len(noise_pred_edit_concepts), *noise_guidance.shape), device=device, dtype=noise_guidance.dtype, ) # noise_guidance_edit = torch.zeros_like(noise_guidance) warmup_inds = [] for c, noise_pred_edit_concept in enumerate(noise_pred_edit_concepts): self.edit_estimates[i, c] = noise_pred_edit_concept if isinstance(edit_guidance_scale, list): edit_guidance_scale_c = edit_guidance_scale[c] else: edit_guidance_scale_c = edit_guidance_scale if isinstance(edit_threshold, list): edit_threshold_c = edit_threshold[c] else: edit_threshold_c = edit_threshold if isinstance(reverse_editing_direction, list): reverse_editing_direction_c = reverse_editing_direction[c] else: reverse_editing_direction_c = reverse_editing_direction if edit_weights: edit_weight_c = edit_weights[c] else: edit_weight_c = 1.0 if isinstance(edit_warmup_steps, list): edit_warmup_steps_c = edit_warmup_steps[c] else: edit_warmup_steps_c = edit_warmup_steps if isinstance(edit_cooldown_steps, list): edit_cooldown_steps_c = edit_cooldown_steps[c] elif edit_cooldown_steps is None: edit_cooldown_steps_c = i + 1 else: edit_cooldown_steps_c = edit_cooldown_steps if i >= edit_warmup_steps_c: warmup_inds.append(c) if i >= edit_cooldown_steps_c: noise_guidance_edit[c, :, :, :, :] = torch.zeros_like(noise_pred_edit_concept) continue noise_guidance_edit_tmp = noise_pred_edit_concept - noise_pred_uncond # tmp_weights = (noise_pred_text - noise_pred_edit_concept).sum(dim=(1, 2, 3)) tmp_weights = (noise_guidance - noise_pred_edit_concept).sum(dim=(1, 2, 3)) tmp_weights = 
torch.full_like(tmp_weights, edit_weight_c) # * (1 / enabled_editing_prompts) if reverse_editing_direction_c: noise_guidance_edit_tmp = noise_guidance_edit_tmp * -1 concept_weights[c, :] = tmp_weights noise_guidance_edit_tmp = noise_guidance_edit_tmp * edit_guidance_scale_c # torch.quantile function expects float32 if noise_guidance_edit_tmp.dtype == torch.float32: tmp = torch.quantile( torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2), edit_threshold_c, dim=2, keepdim=False, ) else: tmp = torch.quantile( torch.abs(noise_guidance_edit_tmp).flatten(start_dim=2).to(torch.float32), edit_threshold_c, dim=2, keepdim=False, ).to(noise_guidance_edit_tmp.dtype) noise_guidance_edit_tmp = torch.where( torch.abs(noise_guidance_edit_tmp) >= tmp[:, :, None, None], noise_guidance_edit_tmp, torch.zeros_like(noise_guidance_edit_tmp), ) noise_guidance_edit[c, :, :, :, :] = noise_guidance_edit_tmp # noise_guidance_edit = noise_guidance_edit + noise_guidance_edit_tmp warmup_inds = torch.tensor(warmup_inds).to(device) if len(noise_pred_edit_concepts) > warmup_inds.shape[0] > 0: concept_weights = concept_weights.to("cpu") # Offload to cpu noise_guidance_edit = noise_guidance_edit.to("cpu") concept_weights_tmp = torch.index_select(concept_weights.to(device), 0, warmup_inds) concept_weights_tmp = torch.where( concept_weights_tmp < 0, torch.zeros_like(concept_weights_tmp), concept_weights_tmp ) concept_weights_tmp = concept_weights_tmp / concept_weights_tmp.sum(dim=0) # concept_weights_tmp = torch.nan_to_num(concept_weights_tmp) noise_guidance_edit_tmp = torch.index_select(noise_guidance_edit.to(device), 0, warmup_inds) noise_guidance_edit_tmp = torch.einsum( "cb,cbijk->bijk", concept_weights_tmp, noise_guidance_edit_tmp ) noise_guidance = noise_guidance + noise_guidance_edit_tmp self.sem_guidance[i] = noise_guidance_edit_tmp.detach().cpu() del noise_guidance_edit_tmp del concept_weights_tmp concept_weights = concept_weights.to(device) noise_guidance_edit = noise_guidance_edit.to(device) concept_weights = torch.where( concept_weights < 0, torch.zeros_like(concept_weights), concept_weights ) concept_weights = torch.nan_to_num(concept_weights) noise_guidance_edit = torch.einsum("cb,cbijk->bijk", concept_weights, noise_guidance_edit) noise_guidance_edit = noise_guidance_edit.to(edit_momentum.device) noise_guidance_edit = noise_guidance_edit + edit_momentum_scale * edit_momentum edit_momentum = edit_mom_beta * edit_momentum + (1 - edit_mom_beta) * noise_guidance_edit if warmup_inds.shape[0] == len(noise_pred_edit_concepts): noise_guidance = noise_guidance + noise_guidance_edit self.sem_guidance[i] = noise_guidance_edit.detach().cpu() if sem_guidance is not None: edit_guidance = sem_guidance[i].to(device) noise_guidance = noise_guidance + edit_guidance noise_pred = noise_pred_uncond + noise_guidance # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() # 8. 
Post-processing if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if not return_dict: return (image, has_nsfw_concept) return SemanticStableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/semantic_stable_diffusion/pipeline_semantic_stable_diffusion.py", "repo_id": "diffusers", "token_count": 18171 }
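The denoising loop in the pipeline above steers generation by masking each concept's guidance term with a per-sample, per-channel quantile threshold and then blending in a momentum buffer. Below is a minimal, standalone sketch of just that thresholding/momentum step, using randomly initialized stand-in tensors and the same parameter names (`edit_guidance_scale`, `edit_threshold`, `edit_momentum_scale`, `edit_mom_beta`); it illustrates the mechanism only and is not part of the pipeline's API.

```py
import torch

# Stand-in tensors: batch of 2 latents, 4 channels, 64x64, a single edit concept.
noise_pred_uncond = torch.randn(2, 4, 64, 64)
noise_pred_edit_concept = torch.randn(2, 4, 64, 64)

edit_guidance_scale = 5.0
edit_threshold = 0.9  # keep only the ~10% largest-magnitude guidance values
edit_momentum_scale = 0.1
edit_mom_beta = 0.4
edit_momentum = torch.zeros_like(noise_pred_uncond)

# Per-concept guidance direction, scaled as in the loop above.
guidance = (noise_pred_edit_concept - noise_pred_uncond) * edit_guidance_scale

# Per-sample, per-channel quantile of the absolute guidance values.
threshold = torch.quantile(guidance.abs().flatten(start_dim=2), edit_threshold, dim=2)

# Zero out everything below the threshold so only the strongest latent
# dimensions contribute to the semantic edit.
guidance = torch.where(
    guidance.abs() >= threshold[:, :, None, None], guidance, torch.zeros_like(guidance)
)

# Momentum: add the running buffer, then update it as an exponential moving average.
guidance = guidance + edit_momentum_scale * edit_momentum
edit_momentum = edit_mom_beta * edit_momentum + (1 - edit_mom_beta) * guidance
```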
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial from typing import Dict, List, Optional, Union import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from transformers import CLIPTokenizer, FlaxCLIPTextModel from diffusers.utils import logging from ...models import FlaxAutoencoderKL, FlaxUNet2DConditionModel from ...schedulers import ( FlaxDDIMScheduler, FlaxDPMSolverMultistepScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, ) from ..pipeline_flax_utils import FlaxDiffusionPipeline from .pipeline_output import FlaxStableDiffusionXLPipelineOutput logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Set to True to use python for loop instead of jax.fori_loop for easier debugging DEBUG = False class FlaxStableDiffusionXLPipeline(FlaxDiffusionPipeline): def __init__( self, text_encoder: FlaxCLIPTextModel, text_encoder_2: FlaxCLIPTextModel, vae: FlaxAutoencoderKL, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: FlaxUNet2DConditionModel, scheduler: Union[ FlaxDDIMScheduler, FlaxPNDMScheduler, FlaxLMSDiscreteScheduler, FlaxDPMSolverMultistepScheduler ], dtype: jnp.dtype = jnp.float32, ): super().__init__() self.dtype = dtype self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 def prepare_inputs(self, prompt: Union[str, List[str]]): if not isinstance(prompt, (str, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") # Assume we have the two encoders inputs = [] for tokenizer in [self.tokenizer, self.tokenizer_2]: text_inputs = tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="np", ) inputs.append(text_inputs.input_ids) inputs = jnp.stack(inputs, axis=1) return inputs def __call__( self, prompt_ids: jax.Array, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int = 50, guidance_scale: Union[float, jax.Array] = 7.5, height: Optional[int] = None, width: Optional[int] = None, latents: jnp.array = None, neg_prompt_ids: jnp.array = None, return_dict: bool = True, output_type: str = None, jit: bool = False, ): # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor if isinstance(guidance_scale, float) and jit: # Convert to a tensor so each device gets a copy. 
guidance_scale = jnp.array([guidance_scale] * prompt_ids.shape[0]) guidance_scale = guidance_scale[:, None] return_latents = output_type == "latent" if jit: images = _p_generate( self, prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents, ) else: images = self._generate( prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents, ) if not return_dict: return (images,) return FlaxStableDiffusionXLPipelineOutput(images=images) def get_embeddings(self, prompt_ids: jnp.array, params): # We assume we have the two encoders # bs, encoder_input, seq_length te_1_inputs = prompt_ids[:, 0, :] te_2_inputs = prompt_ids[:, 1, :] prompt_embeds = self.text_encoder(te_1_inputs, params=params["text_encoder"], output_hidden_states=True) prompt_embeds = prompt_embeds["hidden_states"][-2] prompt_embeds_2_out = self.text_encoder_2( te_2_inputs, params=params["text_encoder_2"], output_hidden_states=True ) prompt_embeds_2 = prompt_embeds_2_out["hidden_states"][-2] text_embeds = prompt_embeds_2_out["text_embeds"] prompt_embeds = jnp.concatenate([prompt_embeds, prompt_embeds_2], axis=-1) return prompt_embeds, text_embeds def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, bs, dtype): add_time_ids = list(original_size + crops_coords_top_left + target_size) add_time_ids = jnp.array([add_time_ids] * bs, dtype=dtype) return add_time_ids def _generate( self, prompt_ids: jnp.array, params: Union[Dict, FrozenDict], prng_seed: jax.Array, num_inference_steps: int, height: int, width: int, guidance_scale: float, latents: Optional[jnp.array] = None, neg_prompt_ids: Optional[jnp.array] = None, return_latents=False, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") # Encode input prompt prompt_embeds, pooled_embeds = self.get_embeddings(prompt_ids, params) # Get unconditional embeddings batch_size = prompt_embeds.shape[0] if neg_prompt_ids is None: neg_prompt_embeds = jnp.zeros_like(prompt_embeds) negative_pooled_embeds = jnp.zeros_like(pooled_embeds) else: neg_prompt_embeds, negative_pooled_embeds = self.get_embeddings(neg_prompt_ids, params) add_time_ids = self._get_add_time_ids( (height, width), (0, 0), (height, width), prompt_embeds.shape[0], dtype=prompt_embeds.dtype ) prompt_embeds = jnp.concatenate([neg_prompt_embeds, prompt_embeds], axis=0) # (2, 77, 2048) add_text_embeds = jnp.concatenate([negative_pooled_embeds, pooled_embeds], axis=0) add_time_ids = jnp.concatenate([add_time_ids, add_time_ids], axis=0) # Ensure model output will be `float32` before going into the scheduler guidance_scale = jnp.array([guidance_scale], dtype=jnp.float32) # Create random latents latents_shape = ( batch_size, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor, ) if latents is None: latents = jax.random.normal(prng_seed, shape=latents_shape, dtype=jnp.float32) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") # Prepare scheduler state scheduler_state = self.scheduler.set_timesteps( params["scheduler"], num_inference_steps=num_inference_steps, shape=latents.shape ) # scale the initial noise by the standard deviation required by the scheduler latents = latents * scheduler_state.init_noise_sigma added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": 
add_time_ids} # Denoising loop def loop_body(step, args): latents, scheduler_state = args # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes latents_input = jnp.concatenate([latents] * 2) t = jnp.array(scheduler_state.timesteps, dtype=jnp.int32)[step] timestep = jnp.broadcast_to(t, latents_input.shape[0]) latents_input = self.scheduler.scale_model_input(scheduler_state, latents_input, t) # predict the noise residual noise_pred = self.unet.apply( {"params": params["unet"]}, jnp.array(latents_input), jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=prompt_embeds, added_cond_kwargs=added_cond_kwargs, ).sample # perform guidance noise_pred_uncond, noise_prediction_text = jnp.split(noise_pred, 2, axis=0) noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents, scheduler_state = self.scheduler.step(scheduler_state, noise_pred, t, latents).to_tuple() return latents, scheduler_state if DEBUG: # run with python for loop for i in range(num_inference_steps): latents, scheduler_state = loop_body(i, (latents, scheduler_state)) else: latents, _ = jax.lax.fori_loop(0, num_inference_steps, loop_body, (latents, scheduler_state)) if return_latents: return latents # Decode latents latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.apply({"params": params["vae"]}, latents, method=self.vae.decode).sample image = (image / 2 + 0.5).clip(0, 1).transpose(0, 2, 3, 1) return image # Static argnums are pipe, num_inference_steps, height, width, return_latents. A change would trigger recompilation. # Non-static args are (sharded) input tensors mapped over their first dimension (hence, `0`). @partial( jax.pmap, in_axes=(None, 0, 0, 0, None, None, None, 0, 0, 0, None), static_broadcasted_argnums=(0, 4, 5, 6, 10), ) def _p_generate( pipe, prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents, ): return pipe._generate( prompt_ids, params, prng_seed, num_inference_steps, height, width, guidance_scale, latents, neg_prompt_ids, return_latents, )
diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/stable_diffusion_xl/pipeline_flax_stable_diffusion_xl.py", "repo_id": "diffusers", "token_count": 5263 }
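To show how the `jit=True` code path above (which dispatches to the pmapped `_p_generate`) is intended to be driven, here is a hedged usage sketch: tokenize with `prepare_inputs`, shard the prompt ids, replicate the params, and split one PRNG key per device. The checkpoint id is an assumption chosen for illustration; Flax-format SDXL weights and a multi-device host (e.g. a TPU) are assumed.

```py
import jax
import jax.numpy as jnp
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard

from diffusers import FlaxStableDiffusionXLPipeline

# Checkpoint id is an assumption for illustration; Flax weights are required.
pipeline, params = FlaxStableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", dtype=jnp.bfloat16
)

num_devices = jax.device_count()
prompt = ["a photo of an astronaut riding a horse on mars"] * num_devices

# Tokenize with both tokenizers (see `prepare_inputs` above), then shard the ids,
# replicate the params, and split one PRNG key per device.
prompt_ids = shard(pipeline.prepare_inputs(prompt))
p_params = replicate(params)
rng = jax.random.split(jax.random.PRNGKey(0), num_devices)

# `jit=True` routes through the pmapped `_p_generate`; steps, height and width are static.
images = pipeline(prompt_ids, p_params, rng, num_inference_steps=25, jit=True).images

# Collapse the (devices, per-device batch) leading dims and convert to PIL.
images = np.asarray(images.reshape((-1,) + images.shape[-3:]))
pil_images = pipeline.numpy_to_pil(images)
```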
import copy import inspect from dataclasses import dataclass from typing import Callable, List, Optional, Union import numpy as np import PIL.Image import torch import torch.nn.functional as F from torch.nn.functional import grid_sample from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from ...image_processor import VaeImageProcessor from ...loaders import FromSingleFileMixin, StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin from ...models import AutoencoderKL, UNet2DConditionModel from ...models.lora import adjust_lora_scale_text_encoder from ...schedulers import KarrasDiffusionSchedulers from ...utils import ( USE_PEFT_BACKEND, BaseOutput, is_torch_xla_available, logging, scale_lora_layers, unscale_lora_layers, ) from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, StableDiffusionMixin from ..stable_diffusion import StableDiffusionSafetyChecker if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name def rearrange_0(tensor, f): F, C, H, W = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor def rearrange_1(tensor): B, C, F, H, W = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) def rearrange_3(tensor, f): F, D, C = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) def rearrange_4(tensor): B, F, D, C = tensor.size() return torch.reshape(tensor, (B * F, D, C)) class CrossFrameAttnProcessor: """ Cross frame attention processor. Each frame attends the first frame. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = key.size()[0] // self.batch_size first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class CrossFrameAttnProcessor2_0: """ Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = max(1, key.size()[0] // self.batch_size) first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoPipelineOutput(BaseOutput): r""" Output class for zero-shot text-to-video pipeline. Args: images (`[List[PIL.Image.Image]`, `np.ndarray`]): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`[List[bool]]`): List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. 
""" images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents class TextToVideoZeroPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin, FromSingleFileMixin, ): r""" Pipeline for zero-shot text-to-video generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. 
unet ([`UNet2DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`CLIPImageProcessor`]): A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8 self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def forward_loop(self, x_t0, t0, t1, generator): """ Perform DDPM forward process from time t0 to t1. This is the same as adding noise with corresponding variance. Args: x_t0: Latent code at time t0. t0: Timestep at t0. t1: Timestamp at t1. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. Returns: x_t1: Forward process applied to x_t0 from time t0 to t1. """ eps = randn_tensor(x_t0.size(), generator=generator, dtype=x_t0.dtype, device=x_t0.device) alpha_vec = torch.prod(self.scheduler.alphas[t0:t1]) x_t1 = torch.sqrt(alpha_vec) * x_t0 + torch.sqrt(1 - alpha_vec) * eps return x_t1 def backward_loop( self, latents, timesteps, prompt_embeds, guidance_scale, callback, callback_steps, num_warmup_steps, extra_step_kwargs, cross_attention_kwargs=None, ): """ Perform backward process given list of time steps. Args: latents: Latents at time timesteps[0]. timesteps: Time steps along which to perform backward process. prompt_embeds: Pre-generated text embeddings. guidance_scale: A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. 
The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. extra_step_kwargs: Extra_step_kwargs. cross_attention_kwargs: A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). num_warmup_steps: number of warmup steps. Returns: latents: Latents of backward process output at time timesteps[-1]. """ do_classifier_free_guidance = guidance_scale > 1.0 num_steps = (len(timesteps) - num_warmup_steps) // self.scheduler.order with self.progress_bar(total=num_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, ).sample # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() return latents.clone().detach() # Copied from diffusers.pipelines.stable_diffusion_k_diffusion.pipeline_stable_diffusion_k_diffusion.StableDiffusionKDiffusionPipeline.check_inputs def check_inputs( self, prompt, height, width, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." 
) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = ( batch_size, num_channels_latents, int(height) // self.vae_scale_factor, int(width) // self.vae_scale_factor, ) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int] = 8, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, motion_field_strength_x: float = 12, motion_field_strength_y: float = 12, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.Tensor], None]] = None, callback_steps: Optional[int] = 1, t0: int = 44, t1: int = 47, frame_ids: Optional[List[int]] = None, ): """ The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. video_length (`int`, *optional*, defaults to 8): The number of generated video frames. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. 
negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in video generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). num_videos_per_prompt (`int`, *optional*, defaults to 1): The number of videos to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"np"`): The output format of the generated video. Choose between `"latent"` and `"np"`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that calls every `callback_steps` steps during inference. The function is called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function is called. If not specified, the callback is called at every step. motion_field_strength_x (`float`, *optional*, defaults to 12): Strength of motion in generated video along x-axis. See the [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. motion_field_strength_y (`float`, *optional*, defaults to 12): Strength of motion in generated video along y-axis. See the [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. t0 (`int`, *optional*, defaults to 44): Timestep t0. Should be in the range [0, num_inference_steps - 1]. See the [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. t1 (`int`, *optional*, defaults to 47): Timestep t0. Should be in the range [t0 + 1, num_inference_steps - 1]. See the [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. frame_ids (`List[int]`, *optional*): Indexes of the frames that are being generated. This is used when generating longer videos chunk-by-chunk. Returns: [`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`]: The output contains a `ndarray` of the generated video, when `output_type` != `"latent"`, otherwise a latent code of generated videos and a list of `bool`s indicating whether the corresponding generated video contains "not-safe-for-work" (nsfw) content.. 
""" assert video_length > 0 if frame_ids is None: frame_ids = list(range(video_length)) assert len(frame_ids) == video_length assert num_videos_per_prompt == 1 # set the processor original_attn_proc = self.unet.attn_processors processor = ( CrossFrameAttnProcessor2_0(batch_size=2) if hasattr(F, "scaled_dot_product_attention") else CrossFrameAttnProcessor(batch_size=2) ) self.unet.set_attn_processor(processor) if isinstance(prompt, str): prompt = [prompt] if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters batch_size = 1 if isinstance(prompt, str) else len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt_embeds_tuple = self.encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # Prepare extra step kwargs. 
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order # Perform the first backward process up to time T_1 x_1_t1 = self.backward_loop( timesteps=timesteps[: -t1 - 1], prompt_embeds=prompt_embeds, latents=latents, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=num_warmup_steps, ) scheduler_copy = copy.deepcopy(self.scheduler) # Perform the second backward process up to time T_0 x_1_t0 = self.backward_loop( timesteps=timesteps[-t1 - 1 : -t0 - 1], prompt_embeds=prompt_embeds, latents=x_1_t1, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=0, ) # Propagate first frame latents at time T_0 to remaining frames x_2k_t0 = x_1_t0.repeat(video_length - 1, 1, 1, 1) # Add motion in latents at time T_0 x_2k_t0 = create_motion_field_and_warp_latents( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, latents=x_2k_t0, frame_ids=frame_ids[1:], ) # Perform forward process up to time T_1 x_2k_t1 = self.forward_loop( x_t0=x_2k_t0, t0=timesteps[-t0 - 1].item(), t1=timesteps[-t1 - 1].item(), generator=generator, ) # Perform backward process from time T_1 to 0 x_1k_t1 = torch.cat([x_1_t1, x_2k_t1]) b, l, d = prompt_embeds.size() prompt_embeds = prompt_embeds[:, None].repeat(1, video_length, 1, 1).reshape(b * video_length, l, d) self.scheduler = scheduler_copy x_1k_0 = self.backward_loop( timesteps=timesteps[-t1 - 1 :], prompt_embeds=prompt_embeds, latents=x_1k_t1, guidance_scale=guidance_scale, callback=callback, callback_steps=callback_steps, extra_step_kwargs=extra_step_kwargs, num_warmup_steps=0, ) latents = x_1k_0 # manually for max memory savings if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.unet.to("cpu") torch.cuda.empty_cache() if output_type == "latent": image = latents has_nsfw_concept = None else: image = self.decode_latents(latents) # Run safety checker image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) # Offload all models self.maybe_free_model_hooks() # make sure to set the original attention processors back self.unet.set_attn_processor(original_attn_proc) if not return_dict: return (image, has_nsfw_concept) return TextToVideoPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.Tensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.Tensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if self.text_encoder is not None: if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds def decode_latents(self, latents): latents = 1 / self.vae.config.scaling_factor * latents image = self.vae.decode(latents, return_dict=False)[0] image = (image / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 image = image.cpu().permute(0, 2, 3, 1).float().numpy() return image
diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_zero.py", "repo_id": "diffusers", "token_count": 20293 }
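The file above implements the zero-shot text-to-video pipeline: the first frame is denoised in two backward stages, the remaining frames are created by warping its latents at T_0, re-noising them to T_1, and denoising all frames jointly from T_1. A minimal sketch of how the pipeline is typically driven end to end; the checkpoint id and the `imageio` export step are illustrative assumptions, not part of the file above.

```python
import imageio
import torch
from diffusers import TextToVideoZeroPipeline

# Any Stable Diffusion 1.x checkpoint should work here; this id is an illustrative assumption.
pipe = TextToVideoZeroPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

prompt = "A panda is playing guitar on times square"
# `video_length` frames are produced following the backward/forward loops shown above.
frames = pipe(prompt=prompt, video_length=8).images

# `images` holds HxWx3 float frames in [0, 1]; convert to uint8 and write a short clip.
frames_uint8 = [(f * 255).astype("uint8") for f in frames]
imageio.mimsave("video.mp4", frames_uint8, fps=4)
```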
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import CLIPTextModel, CLIPTokenizer from ...schedulers import DDPMWuerstchenScheduler from ...utils import deprecate, is_torch_xla_available, logging, replace_example_docstring from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput from .modeling_paella_vq_model import PaellaVQModel from .modeling_wuerstchen_diffnext import WuerstchenDiffNeXt if is_torch_xla_available(): import torch_xla.core.xla_model as xm XLA_AVAILABLE = True else: XLA_AVAILABLE = False logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import WuerstchenPriorPipeline, WuerstchenDecoderPipeline >>> prior_pipe = WuerstchenPriorPipeline.from_pretrained( ... "warp-ai/wuerstchen-prior", torch_dtype=torch.float16 ... ).to("cuda") >>> gen_pipe = WuerstchenDecoderPipeline.from_pretrain("warp-ai/wuerstchen", torch_dtype=torch.float16).to( ... "cuda" ... ) >>> prompt = "an image of a shiba inu, donning a spacesuit and helmet" >>> prior_output = pipe(prompt) >>> images = gen_pipe(prior_output.image_embeddings, prompt=prompt) ``` """ class WuerstchenDecoderPipeline(DiffusionPipeline): """ Pipeline for generating images from the Wuerstchen model. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: tokenizer (`CLIPTokenizer`): The CLIP tokenizer. text_encoder (`CLIPTextModel`): The CLIP text encoder. decoder ([`WuerstchenDiffNeXt`]): The WuerstchenDiffNeXt unet decoder. vqgan ([`PaellaVQModel`]): The VQGAN model. scheduler ([`DDPMWuerstchenScheduler`]): A scheduler to be used in combination with `prior` to generate image embedding. latent_dim_scale (float, `optional`, defaults to 10.67): Multiplier to determine the VQ latent space size from the image embeddings. If the image embeddings are height=24 and width=24, the VQ latent shape needs to be height=int(24*10.67)=256 and width=int(24*10.67)=256 in order to match the training conditions. 
""" model_cpu_offload_seq = "text_encoder->decoder->vqgan" _callback_tensor_inputs = [ "latents", "text_encoder_hidden_states", "negative_prompt_embeds", "image_embeddings", ] def __init__( self, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModel, decoder: WuerstchenDiffNeXt, scheduler: DDPMWuerstchenScheduler, vqgan: PaellaVQModel, latent_dim_scale: float = 10.67, ) -> None: super().__init__() self.register_modules( tokenizer=tokenizer, text_encoder=text_encoder, decoder=decoder, scheduler=scheduler, vqgan=vqgan, ) self.register_to_config(latent_dim_scale=latent_dim_scale) # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, ): batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids attention_mask = text_inputs.attention_mask untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] attention_mask = attention_mask[:, : self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask.to(device)) text_encoder_hidden_states = text_encoder_output.last_hidden_state text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) uncond_text_encoder_hidden_states = None if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds_text_encoder_output = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=uncond_input.attention_mask.to(device) ) uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes return text_encoder_hidden_states, uncond_text_encoder_hidden_states @property def guidance_scale(self): return self._guidance_scale @property def do_classifier_free_guidance(self): return self._guidance_scale > 1 @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, image_embeddings: Union[torch.Tensor, List[torch.Tensor]], prompt: Union[str, List[str]] = None, num_inference_steps: int = 12, timesteps: Optional[List[float]] = None, guidance_scale: float = 0.0, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], **kwargs, ): """ Function invoked when calling the pipeline for generation. Args: image_embedding (`torch.Tensor` or `List[torch.Tensor]`): Image Embeddings either extracted from an image or generated by a Prior Model. prompt (`str` or `List[str]`): The prompt or prompts to guide the image generation. num_inference_steps (`int`, *optional*, defaults to 12): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. timesteps (`List[int]`, *optional*): Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` timesteps are used. Must be in descending order. guidance_scale (`float`, *optional*, defaults to 0.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `decoder_guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `decoder_guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored if `decoder_guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. 
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.Tensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` (`np.array`) or `"pt"` (`torch.Tensor`). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. callback_on_step_end (`Callable`, *optional*): A function that calls at the end of each denoising steps during the inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. Examples: Returns: [`~pipelines.ImagePipelineOutput`] or `tuple` [`~pipelines.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated image embeddings. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider use `callback_on_step_end`", ) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) # 0. Define commonly used variables device = self._execution_device dtype = self.decoder.dtype self._guidance_scale = guidance_scale # 1. Check inputs. Raise error if not correct if not isinstance(prompt, list): if isinstance(prompt, str): prompt = [prompt] else: raise TypeError(f"'prompt' must be of type 'list' or 'str', but got {type(prompt)}.") if self.do_classifier_free_guidance: if negative_prompt is not None and not isinstance(negative_prompt, list): if isinstance(negative_prompt, str): negative_prompt = [negative_prompt] else: raise TypeError( f"'negative_prompt' must be of type 'list' or 'str', but got {type(negative_prompt)}." 
) if isinstance(image_embeddings, list): image_embeddings = torch.cat(image_embeddings, dim=0) if isinstance(image_embeddings, np.ndarray): image_embeddings = torch.Tensor(image_embeddings, device=device).to(dtype=dtype) if not isinstance(image_embeddings, torch.Tensor): raise TypeError( f"'image_embeddings' must be of type 'torch.Tensor' or 'np.array', but got {type(image_embeddings)}." ) if not isinstance(num_inference_steps, int): raise TypeError( f"'num_inference_steps' must be of type 'int', but got {type(num_inference_steps)}\ In Case you want to provide explicit timesteps, please use the 'timesteps' argument." ) # 2. Encode caption prompt_embeds, negative_prompt_embeds = self.encode_prompt( prompt, device, image_embeddings.size(0) * num_images_per_prompt, self.do_classifier_free_guidance, negative_prompt, ) text_encoder_hidden_states = ( torch.cat([prompt_embeds, negative_prompt_embeds]) if negative_prompt_embeds is not None else prompt_embeds ) effnet = ( torch.cat([image_embeddings, torch.zeros_like(image_embeddings)]) if self.do_classifier_free_guidance else image_embeddings ) # 3. Determine latent shape of latents latent_height = int(image_embeddings.size(2) * self.config.latent_dim_scale) latent_width = int(image_embeddings.size(3) * self.config.latent_dim_scale) latent_features_shape = (image_embeddings.size(0) * num_images_per_prompt, 4, latent_height, latent_width) # 4. Prepare and set timesteps if timesteps is not None: self.scheduler.set_timesteps(timesteps=timesteps, device=device) timesteps = self.scheduler.timesteps num_inference_steps = len(timesteps) else: self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latents latents = self.prepare_latents(latent_features_shape, dtype, device, generator, latents, self.scheduler) # 6. Run denoising loop self._num_timesteps = len(timesteps[:-1]) for i, t in enumerate(self.progress_bar(timesteps[:-1])): ratio = t.expand(latents.size(0)).to(dtype) # 7. Denoise latents predicted_latents = self.decoder( torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents, r=torch.cat([ratio] * 2) if self.do_classifier_free_guidance else ratio, effnet=effnet, clip=text_encoder_hidden_states, ) # 8. Check for classifier free guidance and apply it if self.do_classifier_free_guidance: predicted_latents_text, predicted_latents_uncond = predicted_latents.chunk(2) predicted_latents = torch.lerp(predicted_latents_uncond, predicted_latents_text, self.guidance_scale) # 9. Renoise latents to next timestep latents = self.scheduler.step( model_output=predicted_latents, timestep=ratio, sample=latents, generator=generator, ).prev_sample if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) latents = callback_outputs.pop("latents", latents) image_embeddings = callback_outputs.pop("image_embeddings", image_embeddings) text_encoder_hidden_states = callback_outputs.pop( "text_encoder_hidden_states", text_encoder_hidden_states ) if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) if XLA_AVAILABLE: xm.mark_step() if output_type not in ["pt", "np", "pil", "latent"]: raise ValueError( f"Only the output types `pt`, `np`, `pil` and `latent` are supported not output_type={output_type}" ) if not output_type == "latent": # 10. 
Scale and decode the image latents with vq-vae latents = self.vqgan.config.scale_factor * latents images = self.vqgan.decode(latents).sample.clamp(0, 1) if output_type == "np": images = images.permute(0, 2, 3, 1).cpu().float().numpy() elif output_type == "pil": images = images.permute(0, 2, 3, 1).cpu().float().numpy() images = self.numpy_to_pil(images) else: images = latents # Offload all models self.maybe_free_model_hooks() if not return_dict: return images return ImagePipelineOutput(images)
diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py/0
{ "file_path": "diffusers/src/diffusers/pipelines/wuerstchen/pipeline_wuerstchen.py", "repo_id": "diffusers", "token_count": 9338 }
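The decoder above consumes image embeddings produced by the Wuerstchen prior, so it normally runs as the second stage of a two-pipeline chain. A minimal sketch of that chain under the hub ids referenced in the docstring above, written out with the full `from_pretrained` spelling and an explicit `prior_pipe(...)` call; resolution and guidance settings are illustrative.

```python
import torch
from diffusers import WuerstchenDecoderPipeline, WuerstchenPriorPipeline

device = "cuda"
dtype = torch.float16

# Hub ids follow the ones referenced in the docstring above.
prior_pipe = WuerstchenPriorPipeline.from_pretrained("warp-ai/wuerstchen-prior", torch_dtype=dtype).to(device)
decoder_pipe = WuerstchenDecoderPipeline.from_pretrained("warp-ai/wuerstchen", torch_dtype=dtype).to(device)

prompt = "an image of a shiba inu, donning a spacesuit and helmet"

# Stage 1: the prior turns the text prompt into image embeddings.
prior_output = prior_pipe(prompt=prompt, height=1024, width=1024, guidance_scale=4.0)

# Stage 2: the decoder above turns those embeddings into pixels; decoder guidance is
# usually left at its 0.0 default, so only the prompt and the embeddings matter here.
images = decoder_pipe(
    image_embeddings=prior_output.image_embeddings,
    prompt=prompt,
    output_type="pil",
).images
images[0].save("shiba.png")
```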
# Copyright 2024 ParaDiGMS authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from ..utils.torch_utils import randn_tensor from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput class DDPMParallelSchedulerOutput(BaseOutput): """ Output class for the scheduler's `step` function output. Args: prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.Tensor pred_original_sample: Optional[torch.Tensor] = None # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr def rescale_zero_terminal_snr(betas): """ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) Args: betas (`torch.Tensor`): the betas that the scheduler is being initialized with. 
Returns: `torch.Tensor`: rescaled betas with zero terminal SNR """ # Convert betas to alphas_bar_sqrt alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() # Store old values. alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() # Shift so the last timestep is zero. alphas_bar_sqrt -= alphas_bar_sqrt_T # Scale so the first timestep is back to the old value. alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) # Convert alphas_bar_sqrt to betas alphas_bar = alphas_bar_sqrt**2 # Revert sqrt alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class DDPMParallelScheduler(SchedulerMixin, ConfigMixin): """ Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and Langevin dynamics sampling. [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. For more details, see the original paper: https://arxiv.org/abs/2006.11239 Args: num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`): the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, `squaredcos_cap_v2` or `sigmoid`. trained_betas (`np.ndarray`, optional): option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. variance_type (`str`): options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`, `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. clip_sample (`bool`, default `True`): option to clip predicted sample for numerical stability. clip_sample_range (`float`, default `1.0`): the maximum magnitude for sample clipping. Valid only when `clip_sample=True`. prediction_type (`str`, default `epsilon`, optional): prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 https://imagen.research.google/video/paper.pdf) thresholding (`bool`, default `False`): whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487). Note that the thresholding method is unsuitable for latent-space diffusion models (such as stable-diffusion). dynamic_thresholding_ratio (`float`, default `0.995`): the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen (https://arxiv.org/abs/2205.11487). Valid only when `thresholding=True`. sample_max_value (`float`, default `1.0`): the threshold value for dynamic thresholding. Valid only when `thresholding=True`. timestep_spacing (`str`, default `"leading"`): The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information. 
steps_offset (`int`, default `0`): An offset added to the inference steps, as required by some model families. rescale_betas_zero_snr (`bool`, defaults to `False`): Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and dark samples instead of limiting it to samples with medium brightness. Loosely related to [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). """ _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 _is_ode_scheduler = False @register_to_config # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.__init__ def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, clip_sample_range: float = 1.0, sample_max_value: float = 1.0, timestep_spacing: str = "leading", steps_offset: int = 0, rescale_betas_zero_snr: bool = False, ): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) elif beta_schedule == "sigmoid": # GeoDiff sigmoid schedule betas = torch.linspace(-6, 6, num_train_timesteps) self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start else: raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") # Rescale for zero SNR if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.one = torch.tensor(1.0) # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # setable values self.custom_timesteps = False self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) self.variance_type = variance_type # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.scale_model_input def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.Tensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.Tensor`: A scaled input sample. """ return sample # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.set_timesteps def set_timesteps( self, num_inference_steps: Optional[int] = None, device: Union[str, torch.device] = None, timesteps: Optional[List[int]] = None, ): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` must be `None`. 
device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. timesteps (`List[int]`, *optional*): Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default timestep spacing strategy of equal spacing between timesteps is used. If `timesteps` is passed, `num_inference_steps` must be `None`. """ if num_inference_steps is not None and timesteps is not None: raise ValueError("Can only pass one of `num_inference_steps` or `custom_timesteps`.") if timesteps is not None: for i in range(1, len(timesteps)): if timesteps[i] >= timesteps[i - 1]: raise ValueError("`custom_timesteps` must be in descending order.") if timesteps[0] >= self.config.num_train_timesteps: raise ValueError( f"`timesteps` must start before `self.config.train_timesteps`:" f" {self.config.num_train_timesteps}." ) timesteps = np.array(timesteps, dtype=np.int64) self.custom_timesteps = True else: if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" f" maximal {self.config.num_train_timesteps} timesteps." ) self.num_inference_steps = num_inference_steps self.custom_timesteps = False # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": timesteps = ( np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps) .round()[::-1] .copy() .astype(np.int64) ) elif self.config.timestep_spacing == "leading": step_ratio = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": step_ratio = self.config.num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64) timesteps -= 1 else: raise ValueError( f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
) self.timesteps = torch.from_numpy(timesteps).to(device) # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._get_variance def _get_variance(self, t, predicted_variance=None, variance_type=None): prev_t = self.previous_timestep(t) alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one current_beta_t = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t # we always take the log of variance, so clamp it to ensure it's not 0 variance = torch.clamp(variance, min=1e-20) if variance_type is None: variance_type = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": variance = variance # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": variance = torch.log(variance) variance = torch.exp(0.5 * variance) elif variance_type == "fixed_large": variance = current_beta_t elif variance_type == "fixed_large_log": # Glide max_log variance = torch.log(current_beta_t) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": min_log = torch.log(variance) max_log = torch.log(current_beta_t) frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log return variance # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample def step( self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor, generator=None, return_dict: bool = True, ) -> Union[DDPMParallelSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. 
Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.Tensor`): current instance of sample being created by diffusion process. generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDPMParallelSchedulerOutput class Returns: [`~schedulers.scheduling_utils.DDPMParallelSchedulerOutput`] or `tuple`: [`~schedulers.scheduling_utils.DDPMParallelSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ t = timestep prev_t = self.previous_timestep(t) if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) else: predicted_variance = None # 1. compute alphas, betas alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev current_alpha_t = alpha_prod_t / alpha_prod_t_prev current_beta_t = 1 - current_alpha_t # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) elif self.config.prediction_type == "sample": pred_original_sample = model_output elif self.config.prediction_type == "v_prediction": pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" " `v_prediction` for the DDPMScheduler." ) # 3. Clip or threshold "predicted x_0" if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise variance = 0 if t > 0: device = model_output.device variance_noise = randn_tensor( model_output.shape, generator=generator, device=device, dtype=model_output.dtype ) if self.variance_type == "fixed_small_log": variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise elif self.variance_type == "learned_range": variance = self._get_variance(t, predicted_variance=predicted_variance) variance = torch.exp(0.5 * variance) * variance_noise else: variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * variance_noise pred_prev_sample = pred_prev_sample + variance if not return_dict: return ( pred_prev_sample, pred_original_sample, ) return DDPMParallelSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) def batch_step_no_noise( self, model_output: torch.Tensor, timesteps: List[int], sample: torch.Tensor, ) -> torch.Tensor: """ Batched version of the `step` function, to be able to reverse the SDE for multiple samples/timesteps at once. Also, does not add any noise to the predicted sample, which is necessary for parallel sampling where the noise is pre-sampled by the pipeline. Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.Tensor`): direct output from learned diffusion model. timesteps (`List[int]`): current discrete timesteps in the diffusion chain. This is now a list of integers. sample (`torch.Tensor`): current instance of sample being created by diffusion process. Returns: `torch.Tensor`: sample tensor at previous timestep. """ t = timesteps num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps prev_t = t - self.config.num_train_timesteps // num_inference_steps t = t.view(-1, *([1] * (model_output.ndim - 1))) prev_t = prev_t.view(-1, *([1] * (model_output.ndim - 1))) if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) else: pass # 1. compute alphas, betas self.alphas_cumprod = self.alphas_cumprod.to(model_output.device) alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)] alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev current_alpha_t = alpha_prod_t / alpha_prod_t_prev current_beta_t = 1 - current_alpha_t # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) elif self.config.prediction_type == "sample": pred_original_sample = model_output elif self.config.prediction_type == "v_prediction": pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" " `v_prediction` for the DDPMParallelScheduler." ) # 3. 
Clip or threshold "predicted x_0" if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample return pred_prev_sample # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise def add_noise( self, original_samples: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor, ) -> torch.Tensor: # Make sure alphas_cumprod and timestep have same device and dtype as original_samples # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement # for the subsequent add_noise calls self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device) alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor: # Make sure alphas_cumprod and timestep have same device and dtype as sample self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device) alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.previous_timestep def previous_timestep(self, timestep): if self.custom_timesteps or self.num_inference_steps: index = (self.timesteps == timestep).nonzero(as_tuple=True)[0][0] if index == self.timesteps.shape[0] - 1: prev_t = torch.tensor(-1) else: prev_t = self.timesteps[index + 1] else: prev_t = timestep - 1 return prev_t
diffusers/src/diffusers/schedulers/scheduling_ddpm_parallel.py/0
{ "file_path": "diffusers/src/diffusers/schedulers/scheduling_ddpm_parallel.py", "repo_id": "diffusers", "token_count": 13226 }
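Besides the standard `step`, the scheduler above exposes `batch_step_no_noise`, which reverses the SDE for several (sample, timestep) pairs at once and leaves noise injection to the caller. A small sketch with random tensors standing in for real model outputs:

```python
import torch
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)

# Random tensors stand in for a latent batch and an epsilon-prediction model output.
sample = torch.randn(2, 4, 8, 8)
model_output = torch.randn(2, 4, 8, 8)

# Sequential step: predicts x_{t-1} and adds the DDPM variance noise internally.
t = scheduler.timesteps[0]
prev_sample = scheduler.step(model_output, t, sample, generator=torch.manual_seed(0)).prev_sample

# Parallel step: two samples at two different timesteps in one call, with no noise added.
two_timesteps = scheduler.timesteps[:2]
prev_batch = scheduler.batch_step_no_noise(
    torch.randn(2, 4, 8, 8),  # one model output per (sample, timestep) pair
    two_timesteps,
    torch.randn(2, 4, 8, 8),
)
```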
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class LMSDiscreteScheduler(metaclass=DummyObject): _backends = ["torch", "scipy"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch", "scipy"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch", "scipy"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch", "scipy"])
diffusers/src/diffusers/utils/dummy_torch_and_scipy_objects.py/0
{ "file_path": "diffusers/src/diffusers/utils/dummy_torch_and_scipy_objects.py", "repo_id": "diffusers", "token_count": 220 }
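These dummies are what `diffusers` exposes when the `torch`/`scipy` backends are missing, so users get an actionable error at instantiation time instead of a failure at import time. A tiny sketch of the observable behaviour; with both backends installed the construction simply succeeds and nothing is printed.

```python
from diffusers.utils import dummy_torch_and_scipy_objects

try:
    # Raises ImportError with installation instructions when torch or scipy is unavailable.
    dummy_torch_and_scipy_objects.LMSDiscreteScheduler()
except ImportError as err:
    print(err)
```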
from typing import List import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): PIL_INTERPOLATION = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: PIL_INTERPOLATION = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } def pt_to_pil(images): """ Convert a torch image to a PIL image. """ images = (images / 2 + 0.5).clamp(0, 1) images = images.cpu().permute(0, 2, 3, 1).float().numpy() images = numpy_to_pil(images) return images def numpy_to_pil(images): """ Convert a numpy image or a batch of images to a PIL image. """ if images.ndim == 3: images = images[None, ...] images = (images * 255).round().astype("uint8") if images.shape[-1] == 1: # special case for grayscale (single channel) images pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] else: pil_images = [Image.fromarray(image) for image in images] return pil_images def make_image_grid(images: List[PIL.Image.Image], rows: int, cols: int, resize: int = None) -> PIL.Image.Image: """ Prepares a single grid of images. Useful for visualization purposes. """ assert len(images) == rows * cols if resize is not None: images = [img.resize((resize, resize)) for img in images] w, h = images[0].size grid = Image.new("RGB", size=(cols * w, rows * h)) for i, img in enumerate(images): grid.paste(img, box=(i % cols * w, i // cols * h)) return grid
diffusers/src/diffusers/utils/pil_utils.py/0
{ "file_path": "diffusers/src/diffusers/utils/pil_utils.py", "repo_id": "diffusers", "token_count": 849 }
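The helpers above cover the two tensor layouts the library produces: `pt_to_pil` for channels-first torch tensors in [-1, 1] and `numpy_to_pil` for channels-last float arrays in [0, 1], plus `make_image_grid` for quick visual inspection. A short sketch with random data:

```python
import numpy as np
import torch
from diffusers.utils.pil_utils import make_image_grid, numpy_to_pil, pt_to_pil

# Four fake RGB images in [0, 1], channels-last, for the numpy helper.
np_images = np.random.rand(4, 64, 64, 3)
pil_images = numpy_to_pil(np_images)

# The torch helper expects channels-first tensors in [-1, 1] and rescales internally.
pt_images = torch.rand(4, 3, 64, 64) * 2 - 1
pil_from_pt = pt_to_pil(pt_images)

# 2x2 grid of the PIL images, optionally resized to a common edge length.
grid = make_image_grid(pil_images, rows=2, cols=2, resize=64)
grid.save("grid.png")
```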
# Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import ( AutoencoderKLLTXVideo, FlowMatchEulerDiscreteScheduler, LTXPipeline, LTXVideoTransformer3DModel, ) from diffusers.utils.testing_utils import floats_tensor, require_peft_backend sys.path.append(".") from utils import PeftLoraLoaderMixinTests # noqa: E402 @require_peft_backend class LTXVideoLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): pipeline_class = LTXPipeline scheduler_cls = FlowMatchEulerDiscreteScheduler scheduler_classes = [FlowMatchEulerDiscreteScheduler] scheduler_kwargs = {} transformer_kwargs = { "in_channels": 8, "out_channels": 8, "patch_size": 1, "patch_size_t": 1, "num_attention_heads": 4, "attention_head_dim": 8, "cross_attention_dim": 32, "num_layers": 1, "caption_channels": 32, } transformer_cls = LTXVideoTransformer3DModel vae_kwargs = { "in_channels": 3, "out_channels": 3, "latent_channels": 8, "block_out_channels": (8, 8, 8, 8), "decoder_block_out_channels": (8, 8, 8, 8), "layers_per_block": (1, 1, 1, 1, 1), "decoder_layers_per_block": (1, 1, 1, 1, 1), "spatio_temporal_scaling": (True, True, False, False), "decoder_spatio_temporal_scaling": (True, True, False, False), "decoder_inject_noise": (False, False, False, False, False), "upsample_residual": (False, False, False, False), "upsample_factor": (1, 1, 1, 1), "timestep_conditioning": False, "patch_size": 1, "patch_size_t": 1, "encoder_causal": True, "decoder_causal": False, } vae_cls = AutoencoderKLLTXVideo tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" text_encoder_target_modules = ["q", "k", "v", "o"] @property def output_shape(self): return (1, 9, 32, 32, 3) def get_dummy_inputs(self, with_generator=True): batch_size = 1 sequence_length = 16 num_channels = 8 num_frames = 9 num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1 latent_height = 8 latent_width = 8 generator = torch.manual_seed(0) noise = floats_tensor((batch_size, num_latent_frames, num_channels, latent_height, latent_width)) input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) pipeline_inputs = { "prompt": "dance monkey", "num_frames": num_frames, "num_inference_steps": 4, "guidance_scale": 6.0, "height": 32, "width": 32, "max_sequence_length": sequence_length, "output_type": "np", } if with_generator: pipeline_inputs.update({"generator": generator}) return noise, input_ids, pipeline_inputs def test_simple_inference_with_text_lora_denoiser_fused_multi(self): super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) def test_simple_inference_with_text_denoiser_lora_unfused(self): super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) @unittest.skip("Not supported in LTXVideo.") def 
test_simple_inference_with_text_denoiser_block_scale(self): pass @unittest.skip("Not supported in LTXVideo.") def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): pass @unittest.skip("Not supported in LTXVideo.") def test_modify_padding_mode(self): pass @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_partial_text_lora(self): pass @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_text_lora(self): pass @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_text_lora_and_scale(self): pass @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_text_lora_fused(self): pass @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") def test_simple_inference_with_text_lora_save_load(self): pass
diffusers/tests/lora/test_lora_layers_ltx_video.py/0
{ "file_path": "diffusers/tests/lora/test_lora_layers_ltx_video.py", "repo_id": "diffusers", "token_count": 2180 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from datasets import load_dataset from parameterized import parameterized from diffusers import AutoencoderOobleck from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, slow, torch_all_close, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class AutoencoderOobleckTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = AutoencoderOobleck main_input_name = "sample" base_precision = 1e-2 def get_autoencoder_oobleck_config(self, block_out_channels=None): init_dict = { "encoder_hidden_size": 12, "decoder_channels": 12, "decoder_input_channels": 6, "audio_channels": 2, "downsampling_ratios": [2, 4], "channel_multiples": [1, 2], } return init_dict @property def dummy_input(self): batch_size = 4 num_channels = 2 seq_len = 24 waveform = floats_tensor((batch_size, num_channels, seq_len)).to(torch_device) return {"sample": waveform, "sample_posterior": False} @property def input_shape(self): return (2, 24) @property def output_shape(self): return (2, 24) def prepare_init_args_and_inputs_for_common(self): init_dict = self.get_autoencoder_oobleck_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def test_enable_disable_slicing(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() torch.manual_seed(0) model = self.model_class(**init_dict).to(torch_device) inputs_dict.update({"return_dict": False}) torch.manual_seed(0) output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] torch.manual_seed(0) model.enable_slicing() output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertLess( (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), 0.5, "VAE slicing should not affect the inference results", ) torch.manual_seed(0) model.disable_slicing() output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] self.assertEqual( output_without_slicing.detach().cpu().numpy().all(), output_without_slicing_2.detach().cpu().numpy().all(), "Without slicing outputs should match with the outputs when slicing is manually disabled.", ) @unittest.skip("Test unsupported.") def test_forward_with_norm_groups(self): pass @unittest.skip("No attention module used in this model") def test_set_attn_processor_for_determinism(self): return @unittest.skip( "The convolution layers of AutoencoderOobleck are wrapped with torch.nn.utils.weight_norm. This causes the hook's pre_forward to not " "cast the module weights to compute_dtype (as required by forward pass). As a result, forward pass errors out. To fix:\n" "1. Make sure `nn::Module::to` works with `torch.nn.utils.weight_norm` wrapped convolution layer.\n" "2. Unskip this test." 
) def test_layerwise_casting_inference(self): pass @unittest.skip( "The convolution layers of AutoencoderOobleck are wrapped with torch.nn.utils.weight_norm. This causes the hook's pre_forward to not " "cast the module weights to compute_dtype (as required by forward pass). As a result, forward pass errors out. To fix:\n" "1. Make sure `nn::Module::to` works with `torch.nn.utils.weight_norm` wrapped convolution layer.\n" "2. Unskip this test." ) def test_layerwise_casting_memory(self): pass @slow class AutoencoderOobleckIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def _load_datasamples(self, num_samples): ds = load_dataset( "hf-internal-testing/librispeech_asr_dummy", "clean", split="validation", trust_remote_code=True ) # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return torch.nn.utils.rnn.pad_sequence( [torch.from_numpy(x["array"]) for x in speech_samples], batch_first=True ) def get_audio(self, audio_sample_size=2097152, fp16=False): dtype = torch.float16 if fp16 else torch.float32 audio = self._load_datasamples(2).to(torch_device).to(dtype) # pad / crop to audio_sample_size audio = torch.nn.functional.pad(audio[:, :audio_sample_size], pad=(0, audio_sample_size - audio.shape[-1])) # todo channel audio = audio.unsqueeze(1).repeat(1, 2, 1).to(torch_device) return audio def get_oobleck_vae_model(self, model_id="stabilityai/stable-audio-open-1.0", fp16=False): torch_dtype = torch.float16 if fp16 else torch.float32 model = AutoencoderOobleck.from_pretrained( model_id, subfolder="vae", torch_dtype=torch_dtype, ) model.to(torch_device) return model def get_generator(self, seed=0): generator_device = "cpu" if not torch_device.startswith("cuda") else "cuda" if torch_device != "mps": return torch.Generator(device=generator_device).manual_seed(seed) return torch.manual_seed(seed) @parameterized.expand( [ # fmt: off [33, [1.193e-4, 6.56e-05, 1.314e-4, 3.80e-05, -4.01e-06], 0.001192], [44, [2.77e-05, -2.65e-05, 1.18e-05, -6.94e-05, -9.57e-05], 0.001196], # fmt: on ] ) def test_stable_diffusion(self, seed, expected_slice, expected_mean_absolute_diff): model = self.get_oobleck_vae_model() audio = self.get_audio() generator = self.get_generator(seed) with torch.no_grad(): sample = model(audio, generator=generator, sample_posterior=True).sample assert sample.shape == audio.shape assert ((sample - audio).abs().mean() - expected_mean_absolute_diff).abs() <= 1e-6 output_slice = sample[-1, 1, 5:10].cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-5) def test_stable_diffusion_mode(self): model = self.get_oobleck_vae_model() audio = self.get_audio() with torch.no_grad(): sample = model(audio, sample_posterior=False).sample assert sample.shape == audio.shape @parameterized.expand( [ # fmt: off [33, [1.193e-4, 6.56e-05, 1.314e-4, 3.80e-05, -4.01e-06], 0.001192], [44, [2.77e-05, -2.65e-05, 1.18e-05, -6.94e-05, -9.57e-05], 0.001196], # fmt: on ] ) def test_stable_diffusion_encode_decode(self, seed, expected_slice, expected_mean_absolute_diff): model = self.get_oobleck_vae_model() audio = self.get_audio() generator = self.get_generator(seed) with torch.no_grad(): x = audio posterior = model.encode(x).latent_dist z = posterior.sample(generator=generator) sample = model.decode(z).sample # (batch_size, latent_dim, sequence_length) assert 
posterior.mean.shape == (audio.shape[0], model.config.decoder_input_channels, 1024) assert sample.shape == audio.shape assert ((sample - audio).abs().mean() - expected_mean_absolute_diff).abs() <= 1e-6 output_slice = sample[-1, 1, 5:10].cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-5)
diffusers/tests/models/autoencoders/test_models_autoencoder_oobleck.py/0
{ "file_path": "diffusers/tests/models/autoencoders/test_models_autoencoder_oobleck.py", "repo_id": "diffusers", "token_count": 3771 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import AuraFlowTransformer2DModel from diffusers.utils.testing_utils import enable_full_determinism, torch_device from ..test_modeling_common import ModelTesterMixin enable_full_determinism() class AuraFlowTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = AuraFlowTransformer2DModel main_input_name = "hidden_states" # We override the items here because the transformer under consideration is small. model_split_percents = [0.7, 0.6, 0.6] @property def dummy_input(self): batch_size = 2 num_channels = 4 height = width = embedding_dim = 32 sequence_length = 256 hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "timestep": timestep, } @property def input_shape(self): return (4, 32, 32) @property def output_shape(self): return (4, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "sample_size": 32, "patch_size": 2, "in_channels": 4, "num_mmdit_layers": 1, "num_single_dit_layers": 1, "attention_head_dim": 8, "num_attention_heads": 4, "caption_projection_dim": 32, "joint_attention_dim": 32, "out_channels": 4, "pos_embed_max_size": 256, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_gradient_checkpointing_is_applied(self): expected_set = {"AuraFlowTransformer2DModel"} super().test_gradient_checkpointing_is_applied(expected_set=expected_set) @unittest.skip("AuraFlowTransformer2DModel uses its own dedicated attention processor. This test does not apply") def test_set_attn_processor_for_determinism(self): pass
diffusers/tests/models/transformers/test_models_transformer_aura_flow.py/0
{ "file_path": "diffusers/tests/models/transformers/test_models_transformer_aura_flow.py", "repo_id": "diffusers", "token_count": 1118 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import math import unittest import torch from diffusers import UNet2DModel from diffusers.utils import logging from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, require_torch_accelerator, slow, torch_all_close, torch_device, ) from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin logger = logging.get_logger(__name__) enable_full_determinism() class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet2DModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) return {"sample": noise, "timestep": time_step} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (4, 8), "norm_num_groups": 2, "down_block_types": ("DownBlock2D", "AttnDownBlock2D"), "up_block_types": ("AttnUpBlock2D", "UpBlock2D"), "attention_head_dim": 3, "out_channels": 3, "in_channels": 3, "layers_per_block": 2, "sample_size": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_mid_block_attn_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["add_attention"] = True init_dict["attn_norm_num_groups"] = 4 model = self.model_class(**init_dict) model.to(torch_device) model.eval() self.assertIsNotNone( model.mid_block.attentions[0].group_norm, "Mid block Attention group norm should exist but does not." 
) self.assertEqual( model.mid_block.attentions[0].group_norm.num_groups, init_dict["attn_norm_num_groups"], "Mid block Attention group norm does not have the expected number of groups.", ) with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_mid_block_none(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() mid_none_init_dict, mid_none_inputs_dict = self.prepare_init_args_and_inputs_for_common() mid_none_init_dict["mid_block_type"] = None model = self.model_class(**init_dict) model.to(torch_device) model.eval() mid_none_model = self.model_class(**mid_none_init_dict) mid_none_model.to(torch_device) mid_none_model.eval() self.assertIsNone(mid_none_model.mid_block, "Mid block should not exist.") with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] with torch.no_grad(): mid_none_output = mid_none_model(**mid_none_inputs_dict) if isinstance(mid_none_output, dict): mid_none_output = mid_none_output.to_tuple()[0] self.assertFalse(torch.allclose(output, mid_none_output, rtol=1e-3), "outputs should be different.") def test_gradient_checkpointing_is_applied(self): expected_set = { "AttnUpBlock2D", "AttnDownBlock2D", "UNetMidBlock2D", "UpBlock2D", "DownBlock2D", } # NOTE: unlike UNet2DConditionModel, UNet2DModel does not currently support tuples for `attention_head_dim` attention_head_dim = 8 block_out_channels = (16, 32) super().test_gradient_checkpointing_is_applied( expected_set=expected_set, attention_head_dim=attention_head_dim, block_out_channels=block_out_channels ) class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet2DModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_channels = 4 sizes = (32, 32) noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) return {"sample": noise, "timestep": time_step} @property def input_shape(self): return (4, 32, 32) @property def output_shape(self): return (4, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "sample_size": 32, "in_channels": 4, "out_channels": 4, "layers_per_block": 2, "block_out_channels": (32, 64), "attention_head_dim": 32, "down_block_types": ("DownBlock2D", "DownBlock2D"), "up_block_types": ("UpBlock2D", "UpBlock2D"), } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_pretrained_hub(self): model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) image = model(**self.dummy_input).sample assert image is not None, "Make sure output is not None" @require_torch_accelerator def test_from_pretrained_accelerate(self): model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True) model.to(torch_device) image = model(**self.dummy_input).sample assert image is not None, "Make sure output is not None" @require_torch_accelerator def test_from_pretrained_accelerate_wont_change_results(self): # by default model loading will use accelerate as `low_cpu_mem_usage=True` model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", 
output_loading_info=True) model_accelerate.to(torch_device) model_accelerate.eval() noise = torch.randn( 1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0), ) noise = noise.to(torch_device) time_step = torch.tensor([10] * noise.shape[0]).to(torch_device) arr_accelerate = model_accelerate(noise, time_step)["sample"] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() model_normal_load, _ = UNet2DModel.from_pretrained( "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False ) model_normal_load.to(torch_device) model_normal_load.eval() arr_normal_load = model_normal_load(noise, time_step)["sample"] assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3) def test_output_pretrained(self): model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update") model.eval() model.to(torch_device) noise = torch.randn( 1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), ) noise = noise.to(torch_device) time_step = torch.tensor([10] * noise.shape[0]).to(torch_device) with torch.no_grad(): output = model(noise, time_step).sample output_slice = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800]) # fmt: on self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3)) def test_gradient_checkpointing_is_applied(self): expected_set = {"DownBlock2D", "UNetMidBlock2D", "UpBlock2D"} # NOTE: unlike UNet2DConditionModel, UNet2DModel does not currently support tuples for `attention_head_dim` attention_head_dim = 32 block_out_channels = (32, 64) super().test_gradient_checkpointing_is_applied( expected_set=expected_set, attention_head_dim=attention_head_dim, block_out_channels=block_out_channels ) class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet2DModel main_input_name = "sample" @property def dummy_input(self, sizes=(32, 32)): batch_size = 4 num_channels = 3 noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device) return {"sample": noise, "timestep": time_step} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": [32, 64, 64, 64], "in_channels": 3, "layers_per_block": 1, "out_channels": 3, "time_embedding_type": "fourier", "norm_eps": 1e-6, "mid_block_scale_factor": math.sqrt(2.0), "norm_num_groups": None, "down_block_types": [ "SkipDownBlock2D", "AttnSkipDownBlock2D", "SkipDownBlock2D", "SkipDownBlock2D", ], "up_block_types": [ "SkipUpBlock2D", "SkipUpBlock2D", "AttnSkipUpBlock2D", "SkipUpBlock2D", ], } inputs_dict = self.dummy_input return init_dict, inputs_dict @slow def test_from_pretrained_hub(self): model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) inputs = self.dummy_input noise = floats_tensor((4, 3) + (256, 256)).to(torch_device) inputs["sample"] = noise image = model(**inputs) assert image is not None, "Make sure output is not None" @slow def 
test_output_pretrained_ve_mid(self): model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256") model.to(torch_device) batch_size = 4 num_channels = 3 sizes = (256, 256) noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) with torch.no_grad(): output = model(noise, time_step).sample output_slice = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off expected_output_slice = torch.tensor([-4836.2178, -6487.1470, -3816.8196, -7964.9302, -10966.3037, -20043.5957, 8137.0513, 2340.3328, 544.6056]) # fmt: on self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) def test_output_pretrained_ve_large(self): model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update") model.to(torch_device) batch_size = 4 num_channels = 3 sizes = (32, 32) noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) with torch.no_grad(): output = model(noise, time_step).sample output_slice = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256]) # fmt: on self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) @unittest.skip("Test not supported.") def test_forward_with_norm_groups(self): # not required for this model pass def test_gradient_checkpointing_is_applied(self): expected_set = { "UNetMidBlock2D", } block_out_channels = (32, 64, 64, 64) super().test_gradient_checkpointing_is_applied( expected_set=expected_set, block_out_channels=block_out_channels ) def test_effective_gradient_checkpointing(self): super().test_effective_gradient_checkpointing(skip={"time_proj.weight"}) @unittest.skip( "To make layerwise casting work with this model, we will have to update the implementation. Due to potentially low usage, we don't support it here." ) def test_layerwise_casting_inference(self): pass @unittest.skip( "To make layerwise casting work with this model, we will have to update the implementation. Due to potentially low usage, we don't support it here." ) def test_layerwise_casting_memory(self): pass
diffusers/tests/models/unets/test_models_unet_2d.py/0
{ "file_path": "diffusers/tests/models/unets/test_models_unet_2d.py", "repo_id": "diffusers", "token_count": 6472 }
import pickle as pkl import unittest from dataclasses import dataclass from typing import List, Union import numpy as np import PIL.Image from diffusers.utils.outputs import BaseOutput from diffusers.utils.testing_utils import require_torch @dataclass class CustomOutput(BaseOutput): images: Union[List[PIL.Image.Image], np.ndarray] class ConfigTester(unittest.TestCase): def test_outputs_single_attribute(self): outputs = CustomOutput(images=np.random.rand(1, 3, 4, 4)) # check every way of getting the attribute assert isinstance(outputs.images, np.ndarray) assert outputs.images.shape == (1, 3, 4, 4) assert isinstance(outputs["images"], np.ndarray) assert outputs["images"].shape == (1, 3, 4, 4) assert isinstance(outputs[0], np.ndarray) assert outputs[0].shape == (1, 3, 4, 4) # test with a non-tensor attribute outputs = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))]) # check every way of getting the attribute assert isinstance(outputs.images, list) assert isinstance(outputs.images[0], PIL.Image.Image) assert isinstance(outputs["images"], list) assert isinstance(outputs["images"][0], PIL.Image.Image) assert isinstance(outputs[0], list) assert isinstance(outputs[0][0], PIL.Image.Image) def test_outputs_dict_init(self): # test output reinitialization with a `dict` for compatibility with `accelerate` outputs = CustomOutput({"images": np.random.rand(1, 3, 4, 4)}) # check every way of getting the attribute assert isinstance(outputs.images, np.ndarray) assert outputs.images.shape == (1, 3, 4, 4) assert isinstance(outputs["images"], np.ndarray) assert outputs["images"].shape == (1, 3, 4, 4) assert isinstance(outputs[0], np.ndarray) assert outputs[0].shape == (1, 3, 4, 4) # test with a non-tensor attribute outputs = CustomOutput({"images": [PIL.Image.new("RGB", (4, 4))]}) # check every way of getting the attribute assert isinstance(outputs.images, list) assert isinstance(outputs.images[0], PIL.Image.Image) assert isinstance(outputs["images"], list) assert isinstance(outputs["images"][0], PIL.Image.Image) assert isinstance(outputs[0], list) assert isinstance(outputs[0][0], PIL.Image.Image) def test_outputs_serialization(self): outputs_orig = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))]) serialized = pkl.dumps(outputs_orig) outputs_copy = pkl.loads(serialized) # Check original and copy are equal assert dir(outputs_orig) == dir(outputs_copy) assert dict(outputs_orig) == dict(outputs_copy) assert vars(outputs_orig) == vars(outputs_copy) @require_torch def test_torch_pytree(self): # ensure torch.utils._pytree treats ModelOutput subclasses as nodes (and not leaves) # this is important for DistributedDataParallel gradient synchronization with static_graph=True import torch import torch.utils._pytree data = np.random.rand(1, 3, 4, 4) x = CustomOutput(images=data) self.assertFalse(torch.utils._pytree._is_leaf(x)) expected_flat_outs = [data] expected_tree_spec = torch.utils._pytree.TreeSpec(CustomOutput, ["images"], [torch.utils._pytree.LeafSpec()]) actual_flat_outs, actual_tree_spec = torch.utils._pytree.tree_flatten(x) self.assertEqual(expected_flat_outs, actual_flat_outs) self.assertEqual(expected_tree_spec, actual_tree_spec) unflattened_x = torch.utils._pytree.tree_unflatten(actual_flat_outs, actual_tree_spec) self.assertEqual(x, unflattened_x)
diffusers/tests/others/test_outputs.py/0
{ "file_path": "diffusers/tests/others/test_outputs.py", "repo_id": "diffusers", "token_count": 1506 }
import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AnimateDiffVideoToVideoPipeline, AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LCMScheduler, MotionAdapter, StableDiffusionPipeline, UNet2DConditionModel, UNetMotionModel, ) from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import is_xformers_available, logging from diffusers.utils.testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class AnimateDiffVideoToVideoPipelineFastTests( IPAdapterTesterMixin, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase ): pipeline_class = AnimateDiffVideoToVideoPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = VIDEO_TO_VIDEO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) def get_dummy_components(self): cross_attention_dim = 8 block_out_channels = (8, 8) torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=block_out_channels, layers_per_block=2, sample_size=8, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=cross_attention_dim, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", clip_sample=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=block_out_channels, in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=cross_attention_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) motion_adapter = MotionAdapter( block_out_channels=block_out_channels, motion_layers_per_block=2, motion_norm_num_groups=2, motion_num_attention_heads=4, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "motion_adapter": motion_adapter, "text_encoder": text_encoder, "tokenizer": tokenizer, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0, num_frames: int = 2): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) video_height = 32 video_width = 32 video = [Image.new("RGB", (video_width, video_height))] * num_frames inputs = { "video": video, "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "pt", } return inputs def test_from_pipe_consistent_config(self): assert self.original_pipeline_class == StableDiffusionPipeline original_repo = "hf-internal-testing/tinier-stable-diffusion-pipe" original_kwargs = 
{"requires_safety_checker": False} # create original_pipeline_class(sd) pipe_original = self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs) # original_pipeline_class(sd) -> pipeline_class pipe_components = self.get_dummy_components() pipe_additional_components = {} for name, component in pipe_components.items(): if name not in pipe_original.components: pipe_additional_components[name] = component pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components) # pipeline_class -> original_pipeline_class(sd) original_pipe_additional_components = {} for name, component in pipe_original.components.items(): if name not in pipe.components or not isinstance(component, pipe.components[name].__class__): original_pipe_additional_components[name] = component pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components) # compare the config original_config = {k: v for k, v in pipe_original.config.items() if not k.startswith("_")} original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")} assert original_config_2 == original_config def test_motion_unet_loading(self): components = self.get_dummy_components() pipe = AnimateDiffVideoToVideoPipeline(**components) assert isinstance(pipe.unet, UNetMotionModel) @unittest.skip("Attention slicing is not enabled in this pipeline") def test_attention_slicing_forward_pass(self): pass def test_ip_adapter(self): expected_pipe_slice = None if torch_device == "cpu": expected_pipe_slice = np.array( [ 0.5569, 0.6250, 0.4145, 0.5613, 0.5563, 0.5213, 0.5092, 0.4950, 0.4950, 0.5685, 0.3858, 0.4864, 0.6458, 0.4312, 0.5518, 0.5608, 0.4418, 0.5378, ] ) return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) def test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") # pipeline creates a new motion 
UNet under the hood. So we need to check the device from pipe.components model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to(torch_device) model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == torch_device for device in model_devices)) output_device = pipe(**self.get_dummy_inputs(torch_device))[0] self.assertTrue(np.isnan(to_np(output_device)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # pipeline creates a new motion UNet under the hood. So we need to check the dtype from pipe.components model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(dtype=torch.float16) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_prompt_embeds(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) inputs.pop("prompt") inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device) pipe(**inputs) def test_latent_inputs(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) sample_size = pipe.unet.config.sample_size inputs["latents"] = torch.randn((1, 4, 1, sample_size, sample_size), device=torch_device) inputs.pop("video") pipe(**inputs) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs).frames[0] output_without_offload = ( output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload ) pipe.enable_xformers_memory_efficient_attention() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs).frames[0] output_with_offload = ( output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload ) max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results") def test_free_init(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs_normal = self.get_dummy_inputs(torch_device) frames_normal = pipe(**inputs_normal).frames[0] pipe.enable_free_init( num_iters=2, 
use_fast_sampling=True, method="butterworth", order=4, spatial_stop_frequency=0.25, temporal_stop_frequency=0.25, ) inputs_enable_free_init = self.get_dummy_inputs(torch_device) frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0] pipe.disable_free_init() inputs_disable_free_init = self.get_dummy_inputs(torch_device) frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0] sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max() self.assertGreater( sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results" ) self.assertLess( max_diff_disabled, 1e-4, "Disabling of FreeInit should lead to results similar to the default pipeline results", ) def test_free_init_with_schedulers(self): components = self.get_dummy_components() pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs_normal = self.get_dummy_inputs(torch_device) frames_normal = pipe(**inputs_normal).frames[0] schedulers_to_test = [ DPMSolverMultistepScheduler.from_config( components["scheduler"].config, timestep_spacing="linspace", beta_schedule="linear", algorithm_type="dpmsolver++", steps_offset=1, clip_sample=False, ), LCMScheduler.from_config( components["scheduler"].config, timestep_spacing="linspace", beta_schedule="linear", steps_offset=1, clip_sample=False, ), ] components.pop("scheduler") for scheduler in schedulers_to_test: components["scheduler"] = scheduler pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) pipe.enable_free_init(num_iters=2, use_fast_sampling=False) inputs = self.get_dummy_inputs(torch_device) frames_enable_free_init = pipe(**inputs).frames[0] sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() self.assertGreater( sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results", ) def test_free_noise_blocks(self): components = self.get_dummy_components() pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) pipe.enable_free_noise() for block in pipe.unet.down_blocks: for motion_module in block.motion_modules: for transformer_block in motion_module.transformer_blocks: self.assertTrue( isinstance(transformer_block, FreeNoiseTransformerBlock), "Motion module transformer blocks must be an instance of `FreeNoiseTransformerBlock` after enabling FreeNoise.", ) pipe.disable_free_noise() for block in pipe.unet.down_blocks: for motion_module in block.motion_modules: for transformer_block in motion_module.transformer_blocks: self.assertFalse( isinstance(transformer_block, FreeNoiseTransformerBlock), "Motion module transformer blocks must not be an instance of `FreeNoiseTransformerBlock` after disabling FreeNoise.", ) def test_free_noise(self): components = self.get_dummy_components() pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs_normal = self.get_dummy_inputs(torch_device, num_frames=16) inputs_normal["num_inference_steps"] = 2 inputs_normal["strength"] = 0.5 frames_normal = pipe(**inputs_normal).frames[0] for context_length in [8, 9]: for context_stride in [4, 6]: 
pipe.enable_free_noise(context_length, context_stride) inputs_enable_free_noise = self.get_dummy_inputs(torch_device, num_frames=16) inputs_enable_free_noise["num_inference_steps"] = 2 inputs_enable_free_noise["strength"] = 0.5 frames_enable_free_noise = pipe(**inputs_enable_free_noise).frames[0] pipe.disable_free_noise() inputs_disable_free_noise = self.get_dummy_inputs(torch_device, num_frames=16) inputs_disable_free_noise["num_inference_steps"] = 2 inputs_disable_free_noise["strength"] = 0.5 frames_disable_free_noise = pipe(**inputs_disable_free_noise).frames[0] sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_noise)).sum() max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_noise)).max() self.assertGreater( sum_enabled, 1e1, "Enabling of FreeNoise should lead to results different from the default pipeline results", ) self.assertLess( max_diff_disabled, 1e-4, "Disabling of FreeNoise should lead to results similar to the default pipeline results", ) def test_free_noise_split_inference(self): components = self.get_dummy_components() pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) pipe.enable_free_noise(8, 4) inputs_normal = self.get_dummy_inputs(torch_device, num_frames=16) inputs_normal["num_inference_steps"] = 2 inputs_normal["strength"] = 0.5 frames_normal = pipe(**inputs_normal).frames[0] # Test FreeNoise with split inference memory-optimization pipe.enable_free_noise_split_inference(spatial_split_size=16, temporal_split_size=4) inputs_enable_split_inference = self.get_dummy_inputs(torch_device, num_frames=16) inputs_enable_split_inference["num_inference_steps"] = 2 inputs_enable_split_inference["strength"] = 0.5 frames_enable_split_inference = pipe(**inputs_enable_split_inference).frames[0] sum_split_inference = np.abs(to_np(frames_normal) - to_np(frames_enable_split_inference)).sum() self.assertLess( sum_split_inference, 1e-4, "Enabling FreeNoise Split Inference memory-optimizations should lead to results similar to the default pipeline results", ) def test_free_noise_multi_prompt(self): components = self.get_dummy_components() pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) context_length = 8 context_stride = 4 pipe.enable_free_noise(context_length, context_stride) # Make sure that pipeline works when prompt indices are within num_frames bounds inputs = self.get_dummy_inputs(torch_device, num_frames=16) inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf"} inputs["num_inference_steps"] = 2 inputs["strength"] = 0.5 pipe(**inputs).frames[0] with self.assertRaises(ValueError): # Ensure that prompt indices are within bounds inputs = self.get_dummy_inputs(torch_device, num_frames=16) inputs["num_inference_steps"] = 2 inputs["strength"] = 0.5 inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf", 42: "Error on a leaf"} pipe(**inputs).frames[0]
diffusers/tests/pipelines/animatediff/test_animatediff_video2video.py/0
{ "file_path": "diffusers/tests/pipelines/animatediff/test_animatediff_video2video.py", "repo_id": "diffusers", "token_count": 10229 }
# Copyright 2024 The HuggingFace Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import inspect import unittest import numpy as np import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import AutoencoderKL, CogVideoXDDIMScheduler, CogView3PlusPipeline, CogView3PlusTransformer2DModel from diffusers.utils.testing_utils import ( enable_full_determinism, numpy_cosine_similarity_distance, require_torch_accelerator, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineTesterMixin, to_np, ) enable_full_determinism() class CogView3PlusPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = CogView3PlusPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) test_xformers_attention = False test_layerwise_casting = True def get_dummy_components(self): torch.manual_seed(0) transformer = CogView3PlusTransformer2DModel( patch_size=2, in_channels=4, num_layers=1, attention_head_dim=4, num_attention_heads=2, out_channels=4, text_embed_dim=32, # Must match with tiny-random-t5 time_embed_dim=8, condition_dim=2, pos_embed_max_size=8, sample_size=8, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) scheduler = CogVideoXDDIMScheduler() text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "transformer": transformer, "vae": vae, "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "dance monkey", "negative_prompt": "", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "height": 16, "width": 16, "max_sequence_length": 16, "output_type": "pt", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs)[0] generated_image = image[0] self.assertEqual(generated_image.shape, (3, 16, 16)) expected_image = torch.randn(3, 16, 16) max_diff = np.abs(generated_image - expected_image).max() self.assertLessEqual(max_diff, 1e10) def 
test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters has_callback_step_end = "callback_on_step_end" in sig.parameters if not (has_callback_tensor_inputs and has_callback_step_end): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_subset(pipe, i, t, callback_kwargs): # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs def callback_inputs_all(pipe, i, t, callback_kwargs): for tensor_name in pipe._callback_tensor_inputs: assert tensor_name in callback_kwargs # iterate over callback args for tensor_name, tensor_value in callback_kwargs.items(): # check that we're only passing in allowed tensor inputs assert tensor_name in pipe._callback_tensor_inputs return callback_kwargs inputs = self.get_dummy_inputs(torch_device) # Test passing in a subset inputs["callback_on_step_end"] = callback_inputs_subset inputs["callback_on_step_end_tensor_inputs"] = ["latents"] output = pipe(**inputs)[0] # Test passing in a everything inputs["callback_on_step_end"] = callback_inputs_all inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): is_last = i == (pipe.num_timesteps - 1) if is_last: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs["callback_on_step_end"] = callback_inputs_change_tensor inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs output = pipe(**inputs)[0] assert output.abs().sum() < 1e10 def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) def test_attention_slicing_forward_pass( self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 ): if not self.test_attention_slicing: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) output_without_slicing = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=1) inputs = self.get_dummy_inputs(generator_device) output_with_slicing1 = pipe(**inputs)[0] pipe.enable_attention_slicing(slice_size=2) inputs = self.get_dummy_inputs(generator_device) output_with_slicing2 = pipe(**inputs)[0] if test_max_difference: max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() self.assertLess( max(max_diff1, max_diff2), expected_max_diff, "Attention slicing should not affect the inference results", ) @slow @require_torch_accelerator class CogView3PlusPipelineIntegrationTests(unittest.TestCase): prompt = "A painting of a squirrel eating a burger." 
def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_cogview3plus(self): generator = torch.Generator("cpu").manual_seed(0) pipe = CogView3PlusPipeline.from_pretrained("THUDM/CogView3Plus-3b", torch_dtype=torch.float16) pipe.enable_model_cpu_offload(device=torch_device) prompt = self.prompt images = pipe( prompt=prompt, height=1024, width=1024, generator=generator, num_inference_steps=2, output_type="np", )[0] image = images[0] expected_image = torch.randn(1, 1024, 1024, 3).numpy() max_diff = numpy_cosine_similarity_distance(image, expected_image) assert max_diff < 1e-3, f"Max diff is too high. got {image}"
diffusers/tests/pipelines/cogview3/test_cogview3plus.py/0
{ "file_path": "diffusers/tests/pipelines/cogview3/test_cogview3plus.py", "repo_id": "diffusers", "token_count": 4394 }
import unittest import numpy as np import torch from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxControlNetImg2ImgPipeline, FluxControlNetModel, FluxTransformer2DModel, ) from diffusers.utils.testing_utils import ( torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..test_pipelines_common import ( PipelineTesterMixin, check_qkv_fusion_matches_attn_procs_length, check_qkv_fusion_processors_exist, ) class FluxControlNetImg2ImgPipelineFastTests(unittest.TestCase, PipelineTesterMixin): pipeline_class = FluxControlNetImg2ImgPipeline params = frozenset( [ "prompt", "image", "control_image", "height", "width", "strength", "guidance_scale", "controlnet_conditioning_scale", "prompt_embeds", "pooled_prompt_embeds", ] ) batch_params = frozenset(["prompt", "image", "control_image"]) test_xformers_attention = False def get_dummy_components(self): torch.manual_seed(0) transformer = FluxTransformer2DModel( patch_size=1, in_channels=4, num_layers=1, num_single_layers=1, attention_head_dim=16, num_attention_heads=2, joint_attention_dim=32, pooled_projection_dim=32, axes_dims_rope=[4, 4, 8], ) clip_text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) torch.manual_seed(0) text_encoder = CLIPTextModel(clip_text_encoder_config) torch.manual_seed(0) text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) vae = AutoencoderKL( sample_size=32, in_channels=3, out_channels=3, block_out_channels=(4,), layers_per_block=1, latent_channels=1, norm_num_groups=1, use_quant_conv=False, use_post_quant_conv=False, shift_factor=0.0609, scaling_factor=1.5035, ) torch.manual_seed(0) controlnet = FluxControlNetModel( in_channels=4, num_layers=1, num_single_layers=1, attention_head_dim=16, num_attention_heads=2, joint_attention_dim=32, pooled_projection_dim=32, axes_dims_rope=[4, 4, 8], ) scheduler = FlowMatchEulerDiscreteScheduler() return { "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "transformer": transformer, "vae": vae, "controlnet": controlnet, } def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) image = torch.randn(1, 3, 32, 32).to(device) control_image = torch.randn(1, 3, 32, 32).to(device) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "control_image": control_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "controlnet_conditioning_scale": 1.0, "strength": 0.8, "height": 32, "width": 32, "max_sequence_length": 48, "output_type": "np", } return inputs def test_flux_controlnet_different_prompts(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) output_same_prompt = pipe(**inputs).images[0] inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "a different prompt" output_different_prompts = 
pipe(**inputs).images[0] max_diff = np.abs(output_same_prompt - output_different_prompts).max() assert max_diff > 1e-6 def test_flux_controlnet_prompt_embeds(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) output_with_prompt = pipe(**inputs).images[0] inputs = self.get_dummy_inputs(torch_device) prompt = inputs.pop("prompt") (prompt_embeds, pooled_prompt_embeds, text_ids) = pipe.encode_prompt( prompt, prompt_2=None, device=torch_device, max_sequence_length=inputs["max_sequence_length"], ) output_with_embeds = pipe( prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, **inputs, ).images[0] max_diff = np.abs(output_with_prompt - output_with_embeds).max() assert max_diff < 1e-4 def test_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images original_image_slice = image[0, -3:, -3:, -1] pipe.transformer.fuse_qkv_projections() assert check_qkv_fusion_processors_exist( pipe.transformer ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_fused = image[0, -3:, -3:, -1] pipe.transformer.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] assert np.allclose( original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 ), "Fusion of QKV projections shouldn't affect the outputs." assert np.allclose( image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." assert np.allclose( original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 ), "Original outputs should match when fused QKV projections are disabled." def test_flux_image_output_shape(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) height_width_pairs = [(32, 32), (72, 56)] for height, width in height_width_pairs: expected_height = height - height % (pipe.vae_scale_factor * 2) expected_width = width - width % (pipe.vae_scale_factor * 2) inputs.update( { "control_image": randn_tensor( (1, 3, height, width), device=torch_device, dtype=torch.float16, ), "image": randn_tensor( (1, 3, height, width), device=torch_device, dtype=torch.float16, ), "height": height, "width": width, } ) image = pipe(**inputs).images[0] output_height, output_width, _ = image.shape assert (output_height, output_width) == (expected_height, expected_width)
diffusers/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py/0
{ "file_path": "diffusers/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py", "repo_id": "diffusers", "token_count": 4397 }
import tempfile import numpy as np import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import DDPMScheduler, UNet2DConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np # WARN: the hf-internal-testing/tiny-random-t5 text encoder has some non-determinism in the `save_load` tests. class IFPipelineTesterMixin: def _get_dummy_components(self): torch.manual_seed(0) text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) unet = UNet2DConditionModel( sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) scheduler = DDPMScheduler( num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", ) torch.manual_seed(0) watermarker = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def _get_superresolution_dummy_components(self): torch.manual_seed(0) text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) unet = UNet2DConditionModel( sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[ "ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D", ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32, ) unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests torch.manual_seed(0) scheduler = DDPMScheduler( num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", ) torch.manual_seed(0) image_noising_scheduler = DDPMScheduler( num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, ) torch.manual_seed(0) watermarker = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": 
image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } # this test is modified from the base class because if pipelines set the text encoder # as optional with the intention that the user is allowed to encode the prompt once # and then pass the embeddings directly to the pipeline. The base class test uses # the unmodified arguments from `self.get_dummy_inputs` which will pass the unencoded # prompt to the pipeline when the text encoder is set to None, throwing an error. # So we make the test reflect the intended usage of setting the text encoder to None. def _test_save_load_optional_components(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] if "image" in inputs: image = inputs["image"] else: image = None if "mask_image" in inputs: mask_image = inputs["mask_image"] else: mask_image = None if "original_image" in inputs: original_image = inputs["original_image"] else: original_image = None prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: inputs["image"] = image if mask_image is not None: inputs["mask_image"] = mask_image if original_image is not None: inputs["original_image"] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "negative_prompt_embeds": negative_prompt_embeds, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, } if image is not None: inputs["image"] = image if mask_image is not None: inputs["mask_image"] = mask_image if original_image is not None: inputs["original_image"] = original_image output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4) # Modified from `PipelineTesterMixin` to set the attn processor as it's not serialized. # This should be handled in the base test and then this method can be removed. 
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
diffusers/tests/pipelines/deepfloyd_if/__init__.py/0
{ "file_path": "diffusers/tests/pipelines/deepfloyd_if/__init__.py", "repo_id": "diffusers", "token_count": 4583 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, slow, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 
64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Pipeline params = [ "image_embeds", "negative_image_embeds", ] batch_params = ["image_embeds", "negative_image_embeds"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] callback_cfg_params = ["image_embds"] test_xformers_attention = False def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) @slow @require_torch_gpu class KandinskyV22PipelineIntegrationTests(unittest.TestCase): def setUp(self): # clean up the VRAM before each test super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_text2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" ) pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.enable_model_cpu_offload() pipeline = KandinskyV22Pipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ) pipeline.enable_model_cpu_offload() pipeline.set_progress_bar_config(disable=None) prompt = "red cat, 4k photo" generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=3, negative_prompt="", ).to_tuple() generator = torch.Generator(device="cpu").manual_seed(0) output = pipeline( image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=3, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) assert max_diff < 1e-4
diffusers/tests/pipelines/kandinsky2_2/test_kandinsky.py/0
{ "file_path": "diffusers/tests/pipelines/kandinsky2_2/test_kandinsky.py", "repo_id": "diffusers", "token_count": 4070 }
import inspect import unittest import numpy as np import torch from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, SD3Transformer2DModel, StableDiffusion3PAGPipeline, StableDiffusion3Pipeline, ) from diffusers.utils.testing_utils import ( torch_device, ) from ..test_pipelines_common import ( PipelineTesterMixin, check_qkv_fusion_matches_attn_procs_length, check_qkv_fusion_processors_exist, ) class StableDiffusion3PAGPipelineFastTests(unittest.TestCase, PipelineTesterMixin): pipeline_class = StableDiffusion3PAGPipeline params = frozenset( [ "prompt", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) batch_params = frozenset(["prompt", "negative_prompt"]) test_xformers_attention = False def get_dummy_components(self): torch.manual_seed(0) transformer = SD3Transformer2DModel( sample_size=32, patch_size=1, in_channels=4, num_layers=2, attention_head_dim=8, num_attention_heads=4, caption_projection_dim=32, joint_attention_dim=32, pooled_projection_dim=64, out_channels=4, ) clip_text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) torch.manual_seed(0) text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config) torch.manual_seed(0) text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config) text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") torch.manual_seed(0) vae = AutoencoderKL( sample_size=32, in_channels=3, out_channels=3, block_out_channels=(4,), layers_per_block=1, latent_channels=4, norm_num_groups=1, use_quant_conv=False, use_post_quant_conv=False, shift_factor=0.0609, scaling_factor=1.5035, ) scheduler = FlowMatchEulerDiscreteScheduler() return { "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "text_encoder_3": text_encoder_3, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "tokenizer_3": tokenizer_3, "transformer": transformer, "vae": vae, } def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device="cpu").manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", "pag_scale": 0.0, } return inputs def test_stable_diffusion_3_different_prompts(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) output_same_prompt = pipe(**inputs).images[0] inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "a different prompt" inputs["prompt_3"] = "another different prompt" output_different_prompts = pipe(**inputs).images[0] max_diff = np.abs(output_same_prompt - output_different_prompts).max() # Outputs should be different here assert max_diff > 1e-2 def test_stable_diffusion_3_different_negative_prompts(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) 
inputs = self.get_dummy_inputs(torch_device) output_same_prompt = pipe(**inputs).images[0] inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt_2"] = "deformed" inputs["negative_prompt_3"] = "blurry" output_different_prompts = pipe(**inputs).images[0] max_diff = np.abs(output_same_prompt - output_different_prompts).max() # Outputs should be different here assert max_diff > 1e-2 def test_stable_diffusion_3_prompt_embeds(self): pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) inputs = self.get_dummy_inputs(torch_device) output_with_prompt = pipe(**inputs).images[0] inputs = self.get_dummy_inputs(torch_device) prompt = inputs.pop("prompt") do_classifier_free_guidance = inputs["guidance_scale"] > 1 ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = pipe.encode_prompt( prompt, prompt_2=None, prompt_3=None, do_classifier_free_guidance=do_classifier_free_guidance, device=torch_device, ) output_with_embeds = pipe( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, **inputs, ).images[0] max_diff = np.abs(output_with_prompt - output_with_embeds).max() assert max_diff < 1e-4 def test_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images original_image_slice = image[0, -3:, -3:, -1] # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added # to the pipeline level. pipe.transformer.fuse_qkv_projections() assert check_qkv_fusion_processors_exist( pipe.transformer ), "Something wrong with the fused attention processors. Expected all the attention processors to be fused." assert check_qkv_fusion_matches_attn_procs_length( pipe.transformer, pipe.transformer.original_attn_processors ), "Something wrong with the attention processors concerning the fused QKV projections." inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_fused = image[0, -3:, -3:, -1] pipe.transformer.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] assert np.allclose( original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3 ), "Fusion of QKV projections shouldn't affect the outputs." assert np.allclose( image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3 ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." assert np.allclose( original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 ), "Original outputs should match when fused QKV projections are disabled." def test_pag_disable_enable(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() # base pipeline (expect same output when pag is disabled) pipe_sd = StableDiffusion3Pipeline(**components) pipe_sd = pipe_sd.to(device) pipe_sd.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["pag_scale"] assert ( "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
        out = pipe_sd(**inputs).images[0, -3:, -3:, -1]

        components = self.get_dummy_components()

        # pag disabled with pag_scale=0.0
        pipe_pag = self.pipeline_class(**components)
        pipe_pag = pipe_pag.to(device)
        pipe_pag.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["pag_scale"] = 0.0
        out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1]

        assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3

    def test_pag_applied_layers(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()

        # base pipeline
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        all_self_attn_layers = [k for k in pipe.transformer.attn_processors.keys() if "attn" in k]
        original_attn_procs = pipe.transformer.attn_processors

        pag_layers = ["blocks.0", "blocks.1"]
        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
        assert set(pipe.pag_attn_processors) == set(all_self_attn_layers)

        # blocks.0
        block_0_self_attn = ["transformer_blocks.0.attn.processor"]
        pipe.transformer.set_attn_processor(original_attn_procs.copy())
        pag_layers = ["blocks.0"]
        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
        assert set(pipe.pag_attn_processors) == set(block_0_self_attn)

        pipe.transformer.set_attn_processor(original_attn_procs.copy())
        pag_layers = ["blocks.0.attn"]
        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
        assert set(pipe.pag_attn_processors) == set(block_0_self_attn)

        pipe.transformer.set_attn_processor(original_attn_procs.copy())
        pag_layers = ["blocks.(0|1)"]
        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
        assert (len(pipe.pag_attn_processors)) == 2

        pipe.transformer.set_attn_processor(original_attn_procs.copy())
        pag_layers = ["blocks.0", r"blocks\.1"]
        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
        assert len(pipe.pag_attn_processors) == 2
diffusers/tests/pipelines/pag/test_pag_sd3.py/0
{ "file_path": "diffusers/tests/pipelines/pag/test_pag_sd3.py", "repo_id": "diffusers", "token_count": 5329 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def get_dummy_inputs(self, seed=0): generator = np.random.RandomState(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_pipeline_default_ddim(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_pndm(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_lms(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_euler(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53755, 
0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_euler_ancestral(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_dpm_multistep(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_prompt_embeds(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs() prompt = 3 * [inputs.pop("prompt")] text_inputs = pipe.tokenizer( prompt, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np", ) text_inputs = text_inputs["input_ids"] prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0] inputs["prompt_embeds"] = prompt_embeds # forward output = pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_negative_prompt_embeds(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs() prompt = 3 * [inputs.pop("prompt")] embeds = [] for p in [prompt, negative_prompt]: text_inputs = pipe.tokenizer( p, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np", ) text_inputs = text_inputs["input_ids"] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds # forward output = pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): 
options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference_default_pndm(self): # using the PNDM scheduler by default sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" np.random.seed(0) output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_ddim(self): ddim_scheduler = DDIMScheduler.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" ) sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", revision="onnx", scheduler=ddim_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) sd_pipe.set_progress_bar_config(disable=None) prompt = "open neural network exchange" generator = np.random.RandomState(0) output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_k_lms(self): lms_scheduler = LMSDiscreteScheduler.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" ) sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) sd_pipe.set_progress_bar_config(disable=None) prompt = "open neural network exchange" generator = np.random.RandomState(0) output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_intermediate_state(self): number_of_steps = 0 def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None: test_callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 test_callback_fn.has_been_called = False pipe = OnnxStableDiffusionPipeline.from_pretrained( 
"stable-diffusion-v1-5/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "Andromeda galaxy in a bottle" generator = np.random.RandomState(0) pipe( prompt=prompt, num_inference_steps=5, guidance_scale=7.5, generator=generator, callback=test_callback_fn, callback_steps=1, ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def test_stable_diffusion_no_safety_checker(self): pipe = OnnxStableDiffusionPipeline.from_pretrained( "stable-diffusion-v1-5/stable-diffusion-v1-5", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) assert isinstance(pipe, OnnxStableDiffusionPipeline) assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname) # sanity check that the pipeline still works assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None
diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py", "repo_id": "diffusers", "token_count": 6756 }
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def check_same_shape(tensor_list): shapes = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:]) class StableDiffusionLatentUpscalePipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionLatentUpscalePipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "height", "width", "cross_attention_kwargs", "negative_prompt_embeds", "prompt_embeds", } required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) @property def dummy_image(self): batch_size = 1 num_channels = 4 sizes = (16, 16) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image def get_dummy_components(self): torch.manual_seed(0) model = UNet2DConditionModel( act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=( "KDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), ) vae = AutoencoderKL( block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) scheduler = EulerDiscreteScheduler(prediction_type="sample") text_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, 
num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": model.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": self.dummy_image.cpu(), "generator": generator, "num_inference_steps": 2, "output_type": "np", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 256, 256, 3)) expected_slice = np.array( [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_stable_diffusion_latent_upscaler_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionLatentUpscalePipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) expected_slice = np.array( [0.43865365, 0.404124, 0.42618454, 0.44333526, 0.40564927, 0.43818694, 0.4411913, 0.43404633, 0.46392226] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_latent_upscaler_multiple_init_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionLatentUpscalePipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * 2 inputs["image"] = inputs["image"].repeat(2, 1, 1, 1) image = sd_pipe(**inputs).images image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 256, 256, 3) expected_slice = np.array( [0.38730142, 0.35695046, 0.40646142, 0.40967226, 0.3981609, 0.4195988, 0.4248805, 0.430259, 0.45694894] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=7e-3) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=3e-3) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=7e-3) def test_pt_np_pil_outputs_equivalent(self): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=3e-3) def 
test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=3e-3) def test_karras_schedulers_shape(self): skip_schedulers = [ "DDIMScheduler", "DDPMScheduler", "PNDMScheduler", "HeunDiscreteScheduler", "EulerAncestralDiscreteScheduler", "KDPM2DiscreteScheduler", "KDPM2AncestralDiscreteScheduler", "DPMSolverSDEScheduler", "EDMEulerScheduler", ] components = self.get_dummy_components() pipe = self.pipeline_class(**components) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=True) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 2 outputs = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue scheduler_cls = getattr(diffusers, scheduler_enum.name) pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config) output = pipe(**inputs)[0] outputs.append(output) assert check_same_shape(outputs) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) @require_torch_gpu @slow class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase): def setUp(self): super().setUp() gc.collect() torch.cuda.empty_cache() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_latent_upscaler_fp16(self): generator = torch.manual_seed(33) pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe.to("cuda") upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 ) upscaler.to("cuda") prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic" low_res_latents = pipe(prompt, generator=generator, output_type="latent").images image = upscaler( prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" ) assert np.abs((expected_image - image).mean()) < 5e-2 def test_latent_upscaler_fp16_image(self): generator = torch.manual_seed(33) upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 ) upscaler.to("cuda") prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas" low_res_img = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" ) image = upscaler( prompt=prompt, image=low_res_img, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" ) assert np.abs((expected_image - image).max()) < 5e-2
diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py", "repo_id": "diffusers", "token_count": 5875 }
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest from typing import List import numpy as np from transformers import AutoTokenizer, CLIPTextModel, CLIPTokenizer, T5EncoderModel from diffusers import ( AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxPipeline, FluxTransformer2DModel, TorchAoConfig, ) from diffusers.models.attention_processor import Attention from diffusers.utils.testing_utils import ( enable_full_determinism, is_torch_available, is_torchao_available, nightly, require_torch, require_torch_gpu, require_torchao_version_greater_or_equal, slow, torch_device, ) enable_full_determinism() if is_torch_available(): import torch import torch.nn as nn class LoRALayer(nn.Module): """Wraps a linear layer with LoRA-like adapter - Used for testing purposes only Taken from https://github.com/huggingface/transformers/blob/566302686a71de14125717dea9a6a45b24d42b37/tests/quantization/bnb/test_4bit.py#L62C5-L78C77 """ def __init__(self, module: nn.Module, rank: int): super().__init__() self.module = module self.adapter = nn.Sequential( nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), ) small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight, std=small_std) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def forward(self, input, *args, **kwargs): return self.module(input, *args, **kwargs) + self.adapter(input) if is_torchao_available(): from torchao.dtypes import AffineQuantizedTensor from torchao.quantization.linear_activation_quantized_tensor import LinearActivationQuantizedTensor from torchao.utils import get_model_size_in_bytes @require_torch @require_torch_gpu @require_torchao_version_greater_or_equal("0.7.0") class TorchAoConfigTest(unittest.TestCase): def test_to_dict(self): """ Makes sure the config format is properly set """ quantization_config = TorchAoConfig("int4_weight_only") torchao_orig_config = quantization_config.to_dict() for key in torchao_orig_config: self.assertEqual(getattr(quantization_config, key), torchao_orig_config[key]) def test_post_init_check(self): """ Test kwargs validations in TorchAoConfig """ _ = TorchAoConfig("int4_weight_only") with self.assertRaisesRegex(ValueError, "is not supported yet"): _ = TorchAoConfig("uint8") with self.assertRaisesRegex(ValueError, "does not support the following keyword arguments"): _ = TorchAoConfig("int4_weight_only", group_size1=32) def test_repr(self): """ Check that there is no error in the repr """ quantization_config = TorchAoConfig("int4_weight_only", modules_to_not_convert=["conv"], group_size=8) expected_repr = """TorchAoConfig { "modules_to_not_convert": [ "conv" ], "quant_method": "torchao", "quant_type": "int4_weight_only", "quant_type_kwargs": { "group_size": 8 } }""".replace(" ", "").replace("\n", "") quantization_repr = repr(quantization_config).replace(" ", "").replace("\n", "") 
self.assertEqual(quantization_repr, expected_repr) # Slices for these tests have been obtained on our aws-g6e-xlarge-plus runners @require_torch @require_torch_gpu @require_torchao_version_greater_or_equal("0.7.0") class TorchAoTest(unittest.TestCase): def tearDown(self): gc.collect() torch.cuda.empty_cache() def get_dummy_components( self, quantization_config: TorchAoConfig, model_id: str = "hf-internal-testing/tiny-flux-pipe" ): transformer = FluxTransformer2DModel.from_pretrained( model_id, subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, ) text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder", torch_dtype=torch.bfloat16) text_encoder_2 = T5EncoderModel.from_pretrained( model_id, subfolder="text_encoder_2", torch_dtype=torch.bfloat16 ) tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer") tokenizer_2 = AutoTokenizer.from_pretrained(model_id, subfolder="tokenizer_2") vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.bfloat16) scheduler = FlowMatchEulerDiscreteScheduler() return { "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "transformer": transformer, "vae": vae, } def get_dummy_inputs(self, device: torch.device, seed: int = 0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator().manual_seed(seed) inputs = { "prompt": "an astronaut riding a horse in space", "height": 32, "width": 32, "num_inference_steps": 2, "output_type": "np", "generator": generator, } return inputs def get_dummy_tensor_inputs(self, device=None, seed: int = 0): batch_size = 1 num_latent_channels = 4 num_image_channels = 3 height = width = 4 sequence_length = 48 embedding_dim = 32 torch.manual_seed(seed) hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(device, dtype=torch.bfloat16) torch.manual_seed(seed) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to( device, dtype=torch.bfloat16 ) torch.manual_seed(seed) pooled_prompt_embeds = torch.randn((batch_size, embedding_dim)).to(device, dtype=torch.bfloat16) torch.manual_seed(seed) text_ids = torch.randn((sequence_length, num_image_channels)).to(device, dtype=torch.bfloat16) torch.manual_seed(seed) image_ids = torch.randn((height * width, num_image_channels)).to(device, dtype=torch.bfloat16) timestep = torch.tensor([1.0]).to(device, dtype=torch.bfloat16).expand(batch_size) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "pooled_projections": pooled_prompt_embeds, "txt_ids": text_ids, "img_ids": image_ids, "timestep": timestep, } def _test_quant_type(self, quantization_config: TorchAoConfig, expected_slice: List[float], model_id: str): components = self.get_dummy_components(quantization_config, model_id) pipe = FluxPipeline(**components) pipe.to(device=torch_device) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] output_slice = output[-1, -1, -3:, -3:].flatten() self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-3, rtol=1e-3)) def test_quantization(self): for model_id in ["hf-internal-testing/tiny-flux-pipe", "hf-internal-testing/tiny-flux-sharded"]: # fmt: off QUANTIZATION_TYPES_TO_TEST = [ ("int4wo", np.array([0.4648, 0.5234, 0.5547, 0.4219, 0.4414, 0.6445, 0.4336, 0.4531, 0.5625])), ("int4dq", np.array([0.4688, 0.5195, 0.5547, 0.418, 0.4414, 0.6406, 0.4336, 
0.4531, 0.5625])), ("int8wo", np.array([0.4648, 0.5195, 0.5547, 0.4199, 0.4414, 0.6445, 0.4316, 0.4531, 0.5625])), ("int8dq", np.array([0.4648, 0.5195, 0.5547, 0.4199, 0.4414, 0.6445, 0.4316, 0.4531, 0.5625])), ("uint4wo", np.array([0.4609, 0.5234, 0.5508, 0.4199, 0.4336, 0.6406, 0.4316, 0.4531, 0.5625])), ("uint7wo", np.array([0.4648, 0.5195, 0.5547, 0.4219, 0.4414, 0.6445, 0.4316, 0.4531, 0.5625])), ] if TorchAoConfig._is_cuda_capability_atleast_8_9(): QUANTIZATION_TYPES_TO_TEST.extend([ ("float8wo_e5m2", np.array([0.4590, 0.5273, 0.5547, 0.4219, 0.4375, 0.6406, 0.4316, 0.4512, 0.5625])), ("float8wo_e4m3", np.array([0.4648, 0.5234, 0.5547, 0.4219, 0.4414, 0.6406, 0.4316, 0.4531, 0.5625])), # ===== # The following lead to an internal torch error: # RuntimeError: mat2 shape (32x4 must be divisible by 16 # Skip these for now; TODO(aryan): investigate later # ("float8dq_e4m3", np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])), # ("float8dq_e4m3_tensor", np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])), # ===== # Cutlass fails to initialize for below # ("float8dq_e4m3_row", np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])), # ===== ("fp4", np.array([0.4668, 0.5195, 0.5547, 0.4199, 0.4434, 0.6445, 0.4316, 0.4531, 0.5625])), ("fp6", np.array([0.4668, 0.5195, 0.5547, 0.4199, 0.4434, 0.6445, 0.4316, 0.4531, 0.5625])), ]) # fmt: on for quantization_name, expected_slice in QUANTIZATION_TYPES_TO_TEST: quant_kwargs = {} if quantization_name in ["uint4wo", "uint7wo"]: # The dummy flux model that we use has smaller dimensions. This imposes some restrictions on group_size here quant_kwargs.update({"group_size": 16}) quantization_config = TorchAoConfig( quant_type=quantization_name, modules_to_not_convert=["x_embedder"], **quant_kwargs ) self._test_quant_type(quantization_config, expected_slice, model_id) def test_int4wo_quant_bfloat16_conversion(self): """ Tests whether the dtype of model will be modified to bfloat16 for int4 weight-only quantization. """ quantization_config = TorchAoConfig("int4_weight_only", group_size=64) quantized_model = FluxTransformer2DModel.from_pretrained( "hf-internal-testing/tiny-flux-pipe", subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, ) weight = quantized_model.transformer_blocks[0].ff.net[2].weight self.assertTrue(isinstance(weight, AffineQuantizedTensor)) self.assertEqual(weight.quant_min, 0) self.assertEqual(weight.quant_max, 15) def test_device_map(self): # Note: We were not checking if the weight tensor's were AffineQuantizedTensor's before. If we did # it would have errored out. Now, we do. So, device_map basically never worked with or without # sharded checkpoints. This will need to be supported in the future (TODO(aryan)) """ Test if the quantized model int4 weight-only is working properly with "auto" and custom device maps. The custom device map performs cpu/disk offloading as well. Also verifies that the device map is correctly set (in the `hf_device_map` attribute of the model). 
""" custom_device_map_dict = { "time_text_embed": torch_device, "context_embedder": torch_device, "x_embedder": torch_device, "transformer_blocks.0": "cpu", "single_transformer_blocks.0": "disk", "norm_out": torch_device, "proj_out": "cpu", } device_maps = ["auto", custom_device_map_dict] # inputs = self.get_dummy_tensor_inputs(torch_device) # expected_slice = np.array([0.3457, -0.0366, 0.0105, -0.2275, -0.4941, 0.4395, -0.166, -0.6641, 0.4375]) for device_map in device_maps: # device_map_to_compare = {"": 0} if device_map == "auto" else device_map # Test non-sharded model - should work with self.assertRaises(NotImplementedError): with tempfile.TemporaryDirectory() as offload_folder: quantization_config = TorchAoConfig("int4_weight_only", group_size=64) _ = FluxTransformer2DModel.from_pretrained( "hf-internal-testing/tiny-flux-pipe", subfolder="transformer", quantization_config=quantization_config, device_map=device_map, torch_dtype=torch.bfloat16, offload_folder=offload_folder, ) # weight = quantized_model.transformer_blocks[0].ff.net[2].weight # self.assertTrue(quantized_model.hf_device_map == device_map_to_compare) # self.assertTrue(isinstance(weight, AffineQuantizedTensor)) # output = quantized_model(**inputs)[0] # output_slice = output.flatten()[-9:].detach().float().cpu().numpy() # self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-3, rtol=1e-3)) # Test sharded model - should not work with self.assertRaises(NotImplementedError): with tempfile.TemporaryDirectory() as offload_folder: quantization_config = TorchAoConfig("int4_weight_only", group_size=64) _ = FluxTransformer2DModel.from_pretrained( "hf-internal-testing/tiny-flux-sharded", subfolder="transformer", quantization_config=quantization_config, device_map=device_map, torch_dtype=torch.bfloat16, offload_folder=offload_folder, ) # weight = quantized_model.transformer_blocks[0].ff.net[2].weight # self.assertTrue(quantized_model.hf_device_map == device_map_to_compare) # self.assertTrue(isinstance(weight, AffineQuantizedTensor)) # output = quantized_model(**inputs)[0] # output_slice = output.flatten()[-9:].detach().float().cpu().numpy() # self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-3, rtol=1e-3)) def test_modules_to_not_convert(self): quantization_config = TorchAoConfig("int8_weight_only", modules_to_not_convert=["transformer_blocks.0"]) quantized_model_with_not_convert = FluxTransformer2DModel.from_pretrained( "hf-internal-testing/tiny-flux-pipe", subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, ) unquantized_layer = quantized_model_with_not_convert.transformer_blocks[0].ff.net[2] self.assertTrue(isinstance(unquantized_layer, torch.nn.Linear)) self.assertFalse(isinstance(unquantized_layer.weight, AffineQuantizedTensor)) self.assertEqual(unquantized_layer.weight.dtype, torch.bfloat16) quantized_layer = quantized_model_with_not_convert.proj_out self.assertTrue(isinstance(quantized_layer.weight, AffineQuantizedTensor)) quantization_config = TorchAoConfig("int8_weight_only") quantized_model = FluxTransformer2DModel.from_pretrained( "hf-internal-testing/tiny-flux-pipe", subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, ) size_quantized_with_not_convert = get_model_size_in_bytes(quantized_model_with_not_convert) size_quantized = get_model_size_in_bytes(quantized_model) self.assertTrue(size_quantized < size_quantized_with_not_convert) def test_training(self): quantization_config = TorchAoConfig("int8_weight_only") 
quantized_model = FluxTransformer2DModel.from_pretrained( "hf-internal-testing/tiny-flux-pipe", subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, ).to(torch_device) for param in quantized_model.parameters(): # freeze the model as only adapter layers will be trained param.requires_grad = False if param.ndim == 1: param.data = param.data.to(torch.float32) for _, module in quantized_model.named_modules(): if isinstance(module, Attention): module.to_q = LoRALayer(module.to_q, rank=4) module.to_k = LoRALayer(module.to_k, rank=4) module.to_v = LoRALayer(module.to_v, rank=4) with torch.amp.autocast(str(torch_device), dtype=torch.bfloat16): inputs = self.get_dummy_tensor_inputs(torch_device) output = quantized_model(**inputs)[0] output.norm().backward() for module in quantized_model.modules(): if isinstance(module, LoRALayer): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) @nightly def test_torch_compile(self): r"""Test that verifies if torch.compile works with torchao quantization.""" for model_id in ["hf-internal-testing/tiny-flux-pipe", "hf-internal-testing/tiny-flux-sharded"]: quantization_config = TorchAoConfig("int8_weight_only") components = self.get_dummy_components(quantization_config, model_id=model_id) pipe = FluxPipeline(**components) pipe.to(device=torch_device) inputs = self.get_dummy_inputs(torch_device) normal_output = pipe(**inputs)[0].flatten()[-32:] pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True, dynamic=False) inputs = self.get_dummy_inputs(torch_device) compile_output = pipe(**inputs)[0].flatten()[-32:] # Note: Seems to require higher tolerance self.assertTrue(np.allclose(normal_output, compile_output, atol=1e-2, rtol=1e-3)) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ for model_id in ["hf-internal-testing/tiny-flux-pipe", "hf-internal-testing/tiny-flux-sharded"]: transformer_int4wo = self.get_dummy_components(TorchAoConfig("int4wo"), model_id=model_id)["transformer"] transformer_int4wo_gs32 = self.get_dummy_components( TorchAoConfig("int4wo", group_size=32), model_id=model_id )["transformer"] transformer_int8wo = self.get_dummy_components(TorchAoConfig("int8wo"), model_id=model_id)["transformer"] transformer_bf16 = self.get_dummy_components(None, model_id=model_id)["transformer"] # Will not quantized all the layers by default due to the model weights shapes not being divisible by group_size=64 for block in transformer_int4wo.transformer_blocks: self.assertTrue(isinstance(block.ff.net[2].weight, AffineQuantizedTensor)) self.assertTrue(isinstance(block.ff_context.net[2].weight, AffineQuantizedTensor)) # Will quantize all the linear layers except x_embedder for name, module in transformer_int4wo_gs32.named_modules(): if isinstance(module, nn.Linear) and name not in ["x_embedder"]: self.assertTrue(isinstance(module.weight, AffineQuantizedTensor)) # Will quantize all the linear layers for module in transformer_int8wo.modules(): if isinstance(module, nn.Linear): self.assertTrue(isinstance(module.weight, AffineQuantizedTensor)) total_int4wo = get_model_size_in_bytes(transformer_int4wo) total_int4wo_gs32 = get_model_size_in_bytes(transformer_int4wo_gs32) total_int8wo = get_model_size_in_bytes(transformer_int8wo) total_bf16 = 
get_model_size_in_bytes(transformer_bf16) # TODO: refactor to align with other quantization tests # Latter has smaller group size, so more groups -> more scales and zero points self.assertTrue(total_int4wo < total_int4wo_gs32) # int8 quantizes more layers compare to int4 with default group size self.assertTrue(total_int8wo < total_int4wo) # int4wo does not quantize too many layers because of default group size, but for the layers it does # there is additional overhead of scales and zero points self.assertTrue(total_bf16 < total_int4wo) def test_wrong_config(self): with self.assertRaises(ValueError): self.get_dummy_components(TorchAoConfig("int42")) def test_sequential_cpu_offload(self): r""" A test that checks if inference runs as expected when sequential cpu offloading is enabled. """ quantization_config = TorchAoConfig("int8wo") components = self.get_dummy_components(quantization_config) pipe = FluxPipeline(**components) pipe.enable_sequential_cpu_offload() inputs = self.get_dummy_inputs(torch_device) _ = pipe(**inputs) # Slices for these tests have been obtained on our aws-g6e-xlarge-plus runners @require_torch @require_torch_gpu @require_torchao_version_greater_or_equal("0.7.0") class TorchAoSerializationTest(unittest.TestCase): model_name = "hf-internal-testing/tiny-flux-pipe" def tearDown(self): gc.collect() torch.cuda.empty_cache() def get_dummy_model(self, quant_method, quant_method_kwargs, device=None): quantization_config = TorchAoConfig(quant_method, **quant_method_kwargs) quantized_model = FluxTransformer2DModel.from_pretrained( self.model_name, subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, ) return quantized_model.to(device) def get_dummy_tensor_inputs(self, device=None, seed: int = 0): batch_size = 1 num_latent_channels = 4 num_image_channels = 3 height = width = 4 sequence_length = 48 embedding_dim = 32 torch.manual_seed(seed) hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(device, dtype=torch.bfloat16) encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to( device, dtype=torch.bfloat16 ) pooled_prompt_embeds = torch.randn((batch_size, embedding_dim)).to(device, dtype=torch.bfloat16) text_ids = torch.randn((sequence_length, num_image_channels)).to(device, dtype=torch.bfloat16) image_ids = torch.randn((height * width, num_image_channels)).to(device, dtype=torch.bfloat16) timestep = torch.tensor([1.0]).to(device, dtype=torch.bfloat16).expand(batch_size) return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "pooled_projections": pooled_prompt_embeds, "txt_ids": text_ids, "img_ids": image_ids, "timestep": timestep, } def _test_original_model_expected_slice(self, quant_method, quant_method_kwargs, expected_slice): quantized_model = self.get_dummy_model(quant_method, quant_method_kwargs, torch_device) inputs = self.get_dummy_tensor_inputs(torch_device) output = quantized_model(**inputs)[0] output_slice = output.flatten()[-9:].detach().float().cpu().numpy() weight = quantized_model.transformer_blocks[0].ff.net[2].weight self.assertTrue(isinstance(weight, (AffineQuantizedTensor, LinearActivationQuantizedTensor))) self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-3, rtol=1e-3)) def _check_serialization_expected_slice(self, quant_method, quant_method_kwargs, expected_slice, device): quantized_model = self.get_dummy_model(quant_method, quant_method_kwargs, device) with tempfile.TemporaryDirectory() as tmp_dir: 
quantized_model.save_pretrained(tmp_dir, safe_serialization=False) loaded_quantized_model = FluxTransformer2DModel.from_pretrained( tmp_dir, torch_dtype=torch.bfloat16, use_safetensors=False ).to(device=torch_device) inputs = self.get_dummy_tensor_inputs(torch_device) output = loaded_quantized_model(**inputs)[0] output_slice = output.flatten()[-9:].detach().float().cpu().numpy() self.assertTrue( isinstance( loaded_quantized_model.proj_out.weight, (AffineQuantizedTensor, LinearActivationQuantizedTensor) ) ) self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-3, rtol=1e-3)) def test_int_a8w8_cuda(self): quant_method, quant_method_kwargs = "int8_dynamic_activation_int8_weight", {} expected_slice = np.array([0.3633, -0.1357, -0.0188, -0.249, -0.4688, 0.5078, -0.1289, -0.6914, 0.4551]) device = "cuda" self._test_original_model_expected_slice(quant_method, quant_method_kwargs, expected_slice) self._check_serialization_expected_slice(quant_method, quant_method_kwargs, expected_slice, device) def test_int_a16w8_cuda(self): quant_method, quant_method_kwargs = "int8_weight_only", {} expected_slice = np.array([0.3613, -0.127, -0.0223, -0.2539, -0.459, 0.4961, -0.1357, -0.6992, 0.4551]) device = "cuda" self._test_original_model_expected_slice(quant_method, quant_method_kwargs, expected_slice) self._check_serialization_expected_slice(quant_method, quant_method_kwargs, expected_slice, device) def test_int_a8w8_cpu(self): quant_method, quant_method_kwargs = "int8_dynamic_activation_int8_weight", {} expected_slice = np.array([0.3633, -0.1357, -0.0188, -0.249, -0.4688, 0.5078, -0.1289, -0.6914, 0.4551]) device = "cpu" self._test_original_model_expected_slice(quant_method, quant_method_kwargs, expected_slice) self._check_serialization_expected_slice(quant_method, quant_method_kwargs, expected_slice, device) def test_int_a16w8_cpu(self): quant_method, quant_method_kwargs = "int8_weight_only", {} expected_slice = np.array([0.3613, -0.127, -0.0223, -0.2539, -0.459, 0.4961, -0.1357, -0.6992, 0.4551]) device = "cpu" self._test_original_model_expected_slice(quant_method, quant_method_kwargs, expected_slice) self._check_serialization_expected_slice(quant_method, quant_method_kwargs, expected_slice, device) # Slices for these tests have been obtained on our aws-g6e-xlarge-plus runners @require_torch @require_torch_gpu @require_torchao_version_greater_or_equal("0.7.0") @slow @nightly class SlowTorchAoTests(unittest.TestCase): def tearDown(self): gc.collect() torch.cuda.empty_cache() def get_dummy_components(self, quantization_config: TorchAoConfig): # This is just for convenience, so that we can modify it at one place for custom environments and locally testing cache_dir = None model_id = "black-forest-labs/FLUX.1-dev" transformer = FluxTransformer2DModel.from_pretrained( model_id, subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, cache_dir=cache_dir, ) text_encoder = CLIPTextModel.from_pretrained( model_id, subfolder="text_encoder", torch_dtype=torch.bfloat16, cache_dir=cache_dir ) text_encoder_2 = T5EncoderModel.from_pretrained( model_id, subfolder="text_encoder_2", torch_dtype=torch.bfloat16, cache_dir=cache_dir ) tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer", cache_dir=cache_dir) tokenizer_2 = AutoTokenizer.from_pretrained(model_id, subfolder="tokenizer_2", cache_dir=cache_dir) vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.bfloat16, cache_dir=cache_dir) scheduler = 
FlowMatchEulerDiscreteScheduler() return { "scheduler": scheduler, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "transformer": transformer, "vae": vae, } def get_dummy_inputs(self, device: torch.device, seed: int = 0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator().manual_seed(seed) inputs = { "prompt": "an astronaut riding a horse in space", "height": 512, "width": 512, "num_inference_steps": 20, "output_type": "np", "generator": generator, } return inputs def _test_quant_type(self, quantization_config, expected_slice): components = self.get_dummy_components(quantization_config) pipe = FluxPipeline(**components) pipe.enable_model_cpu_offload() weight = pipe.transformer.transformer_blocks[0].ff.net[2].weight self.assertTrue(isinstance(weight, (AffineQuantizedTensor, LinearActivationQuantizedTensor))) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0].flatten() output_slice = np.concatenate((output[:16], output[-16:])) self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-3, rtol=1e-3)) def test_quantization(self): # fmt: off QUANTIZATION_TYPES_TO_TEST = [ ("int8wo", np.array([0.0505, 0.0742, 0.1367, 0.0429, 0.0585, 0.1386, 0.0585, 0.0703, 0.1367, 0.0566, 0.0703, 0.1464, 0.0546, 0.0703, 0.1425, 0.0546, 0.3535, 0.7578, 0.5000, 0.4062, 0.7656, 0.5117, 0.4121, 0.7656, 0.5117, 0.3984, 0.7578, 0.5234, 0.4023, 0.7382, 0.5390, 0.4570])), ("int8dq", np.array([0.0546, 0.0761, 0.1386, 0.0488, 0.0644, 0.1425, 0.0605, 0.0742, 0.1406, 0.0625, 0.0722, 0.1523, 0.0625, 0.0742, 0.1503, 0.0605, 0.3886, 0.7968, 0.5507, 0.4492, 0.7890, 0.5351, 0.4316, 0.8007, 0.5390, 0.4179, 0.8281, 0.5820, 0.4531, 0.7812, 0.5703, 0.4921])), ] if TorchAoConfig._is_cuda_capability_atleast_8_9(): QUANTIZATION_TYPES_TO_TEST.extend([ ("float8wo_e4m3", np.array([0.0546, 0.0722, 0.1328, 0.0468, 0.0585, 0.1367, 0.0605, 0.0703, 0.1328, 0.0625, 0.0703, 0.1445, 0.0585, 0.0703, 0.1406, 0.0605, 0.3496, 0.7109, 0.4843, 0.4042, 0.7226, 0.5000, 0.4160, 0.7031, 0.4824, 0.3886, 0.6757, 0.4667, 0.3710, 0.6679, 0.4902, 0.4238])), ("fp5_e3m1", np.array([0.0527, 0.0762, 0.1309, 0.0449, 0.0645, 0.1328, 0.0566, 0.0723, 0.125, 0.0566, 0.0703, 0.1328, 0.0566, 0.0742, 0.1348, 0.0566, 0.3633, 0.7617, 0.5273, 0.4277, 0.7891, 0.5469, 0.4375, 0.8008, 0.5586, 0.4336, 0.7383, 0.5156, 0.3906, 0.6992, 0.5156, 0.4375])), ]) # fmt: on for quantization_name, expected_slice in QUANTIZATION_TYPES_TO_TEST: quantization_config = TorchAoConfig(quant_type=quantization_name, modules_to_not_convert=["x_embedder"]) self._test_quant_type(quantization_config, expected_slice) gc.collect() torch.cuda.empty_cache() torch.cuda.synchronize() def test_serialization_int8wo(self): quantization_config = TorchAoConfig("int8wo") components = self.get_dummy_components(quantization_config) pipe = FluxPipeline(**components) pipe.enable_model_cpu_offload() weight = pipe.transformer.x_embedder.weight self.assertTrue(isinstance(weight, AffineQuantizedTensor)) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0].flatten()[:128] with tempfile.TemporaryDirectory() as tmp_dir: pipe.transformer.save_pretrained(tmp_dir, safe_serialization=False) pipe.remove_all_hooks() del pipe.transformer gc.collect() torch.cuda.empty_cache() torch.cuda.synchronize() transformer = FluxTransformer2DModel.from_pretrained( tmp_dir, torch_dtype=torch.bfloat16, use_safetensors=False ) pipe.transformer = transformer 
pipe.enable_model_cpu_offload() weight = transformer.x_embedder.weight self.assertTrue(isinstance(weight, AffineQuantizedTensor)) loaded_output = pipe(**inputs)[0].flatten()[:128] # Seems to require higher tolerance depending on which machine it is being run. # A difference of 0.06 in normalized pixel space (-1 to 1), corresponds to a difference of # 0.06 / 2 * 255 = 7.65 in pixel space (0 to 255). On our CI runners, the difference is about 0.04, # on DGX it is 0.06, and on audace it is 0.037. So, we are using a tolerance of 0.06 here. self.assertTrue(np.allclose(output, loaded_output, atol=0.06)) def test_memory_footprint_int4wo(self): # The original checkpoints are in bf16 and about 24 GB expected_memory_in_gb = 6.0 quantization_config = TorchAoConfig("int4wo") cache_dir = None transformer = FluxTransformer2DModel.from_pretrained( "black-forest-labs/FLUX.1-dev", subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, cache_dir=cache_dir, ) int4wo_memory_in_gb = get_model_size_in_bytes(transformer) / 1024**3 self.assertTrue(int4wo_memory_in_gb < expected_memory_in_gb) def test_memory_footprint_int8wo(self): # The original checkpoints are in bf16 and about 24 GB expected_memory_in_gb = 12.0 quantization_config = TorchAoConfig("int8wo") cache_dir = None transformer = FluxTransformer2DModel.from_pretrained( "black-forest-labs/FLUX.1-dev", subfolder="transformer", quantization_config=quantization_config, torch_dtype=torch.bfloat16, cache_dir=cache_dir, ) int8wo_memory_in_gb = get_model_size_in_bytes(transformer) / 1024**3 self.assertTrue(int8wo_memory_in_gb < expected_memory_in_gb) @require_torch @require_torch_gpu @require_torchao_version_greater_or_equal("0.7.0") @slow @nightly class SlowTorchAoPreserializedModelTests(unittest.TestCase): def tearDown(self): gc.collect() torch.cuda.empty_cache() def get_dummy_inputs(self, device: torch.device, seed: int = 0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator().manual_seed(seed) inputs = { "prompt": "an astronaut riding a horse in space", "height": 512, "width": 512, "num_inference_steps": 20, "output_type": "np", "generator": generator, } return inputs def test_transformer_int8wo(self): # fmt: off expected_slice = np.array([0.0566, 0.0781, 0.1426, 0.0488, 0.0684, 0.1504, 0.0625, 0.0781, 0.1445, 0.0625, 0.0781, 0.1562, 0.0547, 0.0723, 0.1484, 0.0566, 0.5703, 0.8867, 0.7266, 0.5742, 0.875, 0.7148, 0.5586, 0.875, 0.7148, 0.5547, 0.8633, 0.7109, 0.5469, 0.8398, 0.6992, 0.5703]) # fmt: on # This is just for convenience, so that we can modify it at one place for custom environments and locally testing cache_dir = None transformer = FluxTransformer2DModel.from_pretrained( "hf-internal-testing/FLUX.1-Dev-TorchAO-int8wo-transformer", torch_dtype=torch.bfloat16, use_safetensors=False, cache_dir=cache_dir, ) pipe = FluxPipeline.from_pretrained( "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16, cache_dir=cache_dir ) pipe.enable_model_cpu_offload() # Verify that all linear layer weights are quantized for name, module in pipe.transformer.named_modules(): if isinstance(module, nn.Linear): self.assertTrue(isinstance(module.weight, AffineQuantizedTensor)) # Verify outputs match expected slice inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0].flatten() output_slice = np.concatenate((output[:16], output[-16:])) self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-3, rtol=1e-3))
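For quick reference, the core pattern these tests exercise can be reduced to the following sketch; it simply repeats the loading call used in the tests above (same tiny test checkpoint and quant type), so treat it as an illustration rather than an official recipe.

```python
import torch

from diffusers import FluxTransformer2DModel, TorchAoConfig

# Minimal sketch of the quantized-loading pattern exercised by the tests above.
# The checkpoint and quant type are the same ones used in the tests; swap in
# your own model id / torchao quant type as needed.
quantization_config = TorchAoConfig("int8_weight_only")
transformer = FluxTransformer2DModel.from_pretrained(
    "hf-internal-testing/tiny-flux-pipe",
    subfolder="transformer",
    quantization_config=quantization_config,
    torch_dtype=torch.bfloat16,
)
# After loading, linear weights are torchao AffineQuantizedTensor instances,
# which is what the assertions in the tests above check.
```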
diffusers/tests/quantization/torchao/test_torchao.py/0
{ "file_path": "diffusers/tests/quantization/torchao/test_torchao.py", "repo_id": "diffusers", "token_count": 17641 }
import torch from diffusers import EulerAncestralDiscreteScheduler from diffusers.utils.testing_utils import torch_device from .test_schedulers import SchedulerCommonTest class EulerAncestralDiscreteSchedulerTest(SchedulerCommonTest): scheduler_classes = (EulerAncestralDiscreteScheduler,) num_inference_steps = 10 def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1100, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=schedule) def test_prediction_type(self): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) def test_rescale_betas_zero_snr(self): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 152.3192) < 1e-2 assert abs(result_mean.item() - 0.1983) < 1e-3 def test_full_loop_with_v_prediction(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma sample = sample.to(torch_device) for i, t in enumerate(scheduler.timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 108.4439) < 1e-2 assert abs(result_mean.item() - 0.1412) < 1e-3 def test_full_loop_device(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) scheduler.set_timesteps(self.num_inference_steps, device=torch_device) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() sample = sample.to(torch_device) for t in scheduler.timesteps: sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 
152.3192) < 1e-2 assert abs(result_mean.item() - 0.1983) < 1e-3 def test_full_loop_with_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) t_start = self.num_inference_steps - 2 scheduler.set_timesteps(self.num_inference_steps) generator = torch.manual_seed(0) model = self.dummy_model() sample = self.dummy_sample_deter * scheduler.init_noise_sigma # add noise noise = self.dummy_noise_deter noise = noise.to(sample.device) timesteps = scheduler.timesteps[t_start * scheduler.order :] sample = scheduler.add_noise(sample, noise, timesteps[:1]) for i, t in enumerate(timesteps): sample = scheduler.scale_model_input(sample, t) model_output = model(sample, t) output = scheduler.step(model_output, t, sample, generator=generator) sample = output.prev_sample result_sum = torch.sum(torch.abs(sample)) result_mean = torch.mean(torch.abs(sample)) assert abs(result_sum.item() - 56163.0508) < 1e-2, f" expected result sum 56163.0508, but get {result_sum}" assert abs(result_mean.item() - 73.1290) < 1e-3, f" expected result mean 73.1290, but get {result_mean}"
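To see the same scheduler API outside the test harness, here is a small, self-contained sketch of the denoising loop these tests run; a random tensor stands in for a real denoising model, so the numbers are meaningless and only the call sequence matters.

```python
import torch

from diffusers import EulerAncestralDiscreteScheduler

# Illustrative denoising loop mirroring the tests above; a random tensor
# replaces the model output, so only the API sequence is meaningful.
scheduler = EulerAncestralDiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    fake_model_output = torch.randn_like(model_input)  # stand-in for a denoiser
    sample = scheduler.step(fake_model_output, t, sample, generator=generator).prev_sample
```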
diffusers/tests/schedulers/test_scheduler_euler_ancestral.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_euler_ancestral.py", "repo_id": "diffusers", "token_count": 2492 }
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import requests # Configuration GITHUB_REPO = "huggingface/diffusers" GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID") SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL") def main(args): action_url = f"https://github.com/{GITHUB_REPO}/actions/runs/{GITHUB_RUN_ID}" if args.status == "success": hub_path = "https://huggingface.co/datasets/diffusers/benchmarks/blob/main/collated_results.csv" message = ( "✅ New benchmark workflow successfully run.\n" f"🕸️ GitHub Action URL: {action_url}.\n" f"🤗 Check out the benchmarks here: {hub_path}." ) else: message = ( "❌ Something wrong happened in the benchmarking workflow.\n" f"Check out the GitHub Action to know more: {action_url}." ) payload = {"text": message} response = requests.post(SLACK_WEBHOOK_URL, json=payload) if response.status_code == 200: print("Notification sent to Slack successfully.") else: print("Failed to send notification to Slack.") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--status", type=str, default="success", choices=["success", "failure"]) args = parser.parse_args() main(args)
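The script is meant to run inside the GitHub Actions workflow with `GITHUB_RUN_ID` and `SLACK_WEBHOOK_URL` already set; a sketch of a local dry run could look like the following (the run id and webhook URL are placeholders, not real values).

```python
import os
import subprocess

# Placeholder values for a local dry run; in CI these are provided by the
# workflow environment and repository secrets.
os.environ["GITHUB_RUN_ID"] = "1234567890"
os.environ["SLACK_WEBHOOK_URL"] = "https://hooks.slack.com/services/XXX/YYY/ZZZ"

# Invoke the script exactly as the workflow would (path relative to the
# diffusers repository root).
subprocess.run(
    ["python", "utils/notify_benchmarking_status.py", "--status", "failure"],
    check=True,
)
```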
diffusers/utils/notify_benchmarking_status.py/0
{ "file_path": "diffusers/utils/notify_benchmarking_status.py", "repo_id": "diffusers", "token_count": 699 }
This tutorial explains how to use [Moss v1](https://github.com/jess-moss/moss-robot-arms) with LeRobot. ## Source the parts Follow this [README](https://github.com/jess-moss/moss-robot-arms). It contains the bill of materials, with link to source the parts, as well as the instructions to 3D print the parts, and advices if it's your first time printing or if you don't own a 3D printer already. **Important**: Before assembling, you will first need to configure your motors. To this end, we provide a nice script, so let's first install LeRobot. After configuration, we will also guide you through assembly. ## Install LeRobot On your computer: 1. [Install Miniconda](https://docs.anaconda.com/miniconda/#quick-command-line-install): ```bash mkdir -p ~/miniconda3 wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda3/miniconda.sh bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3 rm ~/miniconda3/miniconda.sh ~/miniconda3/bin/conda init bash ``` 2. Restart shell or `source ~/.bashrc` 3. Create and activate a fresh conda environment for lerobot ```bash conda create -y -n lerobot python=3.10 && conda activate lerobot ``` 4. Clone LeRobot: ```bash git clone https://github.com/huggingface/lerobot.git ~/lerobot ``` 5. Install LeRobot with dependencies for the feetech motors: ```bash cd ~/lerobot && pip install -e ".[feetech]" ``` For Linux only (not Mac), install extra dependencies for recording datasets: ```bash conda install -y -c conda-forge ffmpeg pip uninstall -y opencv-python conda install -y -c conda-forge "opencv>=4.10.0" ``` ## Configure the motors Follow steps 1 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the use of our scripts below. **Find USB ports associated to your arms** To find the correct ports for each arm, run the utility script twice: ```bash python lerobot/scripts/find_motors_bus_port.py ``` Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux): ``` Finding all available ports for the MotorBus. ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] Remove the usb cable from your DynamixelMotorsBus and press Enter when done. [...Disconnect leader arm and press Enter...] The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751 Reconnect the usb cable. ``` Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux): ``` Finding all available ports for the MotorBus. ['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751'] Remove the usb cable from your DynamixelMotorsBus and press Enter when done. [...Disconnect follower arm and press Enter...] The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081 Reconnect the usb cable. ``` Troubleshooting: On Linux, you might need to give access to the USB ports by running: ```bash sudo chmod 666 /dev/ttyACM0 sudo chmod 666 /dev/ttyACM1 ``` #### Update config file IMPORTANTLY: Now that you have your ports, update the **port** default values of [`MossRobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like: ```python @RobotConfig.register_subclass("moss") @dataclass class MossRobotConfig(ManipulatorRobotConfig): calibration_dir: str = ".cache/calibration/moss" # `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes. 
# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as # the number of motors in your follower arms. max_relative_target: int | None = None leader_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "main": FeetechMotorsBusConfig( port="/dev/tty.usbmodem58760431091", <-- UPDATE HERE motors={ # name: (index, model) "shoulder_pan": [1, "sts3215"], "shoulder_lift": [2, "sts3215"], "elbow_flex": [3, "sts3215"], "wrist_flex": [4, "sts3215"], "wrist_roll": [5, "sts3215"], "gripper": [6, "sts3215"], }, ), } ) follower_arms: dict[str, MotorsBusConfig] = field( default_factory=lambda: { "main": FeetechMotorsBusConfig( port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE motors={ # name: (index, model) "shoulder_pan": [1, "sts3215"], "shoulder_lift": [2, "sts3215"], "elbow_flex": [3, "sts3215"], "wrist_flex": [4, "sts3215"], "wrist_roll": [5, "sts3215"], "gripper": [6, "sts3215"], }, ), } ) ``` **Configure your motors** Plug your first motor and run this script to set its ID to 1. It will also set its present position to 2048, so expect your motor to rotate: ```bash python lerobot/scripts/configure_motor.py \ --port /dev/tty.usbmodem58760432961 \ --brand feetech \ --model sts3215 \ --baudrate 1000000 \ --ID 1 ``` Note: These motors are currently limited. They can only take values between 0 and 4096, which corresponds to one full turn; they can't turn more than that. 2048 is the middle of this range, so we can take -2048 steps (180 degrees anticlockwise) to reach the lower end of the range, or +2048 steps (180 degrees clockwise) to reach the upper end. The configuration step also sets the homing offset to 0, so that if you misassembled the arm, you can always update the homing offset to account for a shift of up to ± 2048 steps (± 180 degrees). Then unplug your motor, plug in the second motor, and set its ID to 2. ```bash python lerobot/scripts/configure_motor.py \ --port /dev/tty.usbmodem58760432961 \ --brand feetech \ --model sts3215 \ --baudrate 1000000 \ --ID 2 ``` Redo the process for all your motors until ID 6. Do the same for the 6 motors of the leader arm. **Remove the gears of the 6 leader motors** Follow step 2 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). You need to remove the gears from the leader arm's motors. As a result, you will only use the motors' position encoding, and the reduced friction makes the leader arm easier to operate. **Add motor horn to the motors** Follow step 3 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). For Moss v1, you need to align the holes on the motor horn with the motor spline at approximately 3, 6, 9 and 12 o'clock. Try to avoid rotating the motor while doing so, to keep position 2048 set during configuration. This is especially tricky for the leader motors, as they are more sensitive without the gears, but it's OK if the horn ends up slightly rotated. ## Assemble the arms Follow step 4 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic). The first arm should take a bit more than an hour to assemble, but once you get used to it, you can assemble the second arm in under an hour. ## Calibrate Next, you'll need to calibrate your Moss v1 robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Moss v1 robot to work on another.
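To build intuition for the raw position values used during configuration and calibration (4096 steps per full turn, centred at 2048), here is a purely illustrative conversion helper; it is not part of the LeRobot scripts.

```python
# Illustrative only: convert a raw STS3215 position (0-4096, one full turn,
# centred at 2048 during configuration) to degrees and back.
FULL_TURN_STEPS = 4096
CENTER = 2048


def steps_to_degrees(position: int) -> float:
    return (position - CENTER) * 360.0 / FULL_TURN_STEPS


def degrees_to_steps(degrees: float) -> int:
    return round(CENTER + degrees * FULL_TURN_STEPS / 360.0)


assert steps_to_degrees(2048) == 0.0     # centred position
assert steps_to_degrees(4096) == 180.0   # +2048 steps = 180 degrees clockwise
assert steps_to_degrees(0) == -180.0     # -2048 steps = 180 degrees anticlockwise
```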
**Manual calibration of follower arm** /!\ Contrarily to step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the auto calibration, we will actually do manual calibration of follower for now. You will need to move the follower arm to these positions sequentially: | 1. Zero position | 2. Rotated position | 3. Rest position | |---|---|---| | <img src="../media/moss/follower_zero.webp?raw=true" alt="Moss v1 follower arm zero position" title="Moss v1 follower arm zero position" style="width:100%;"> | <img src="../media/moss/follower_rotated.webp?raw=true" alt="Moss v1 follower arm rotated position" title="Moss v1 follower arm rotated position" style="width:100%;"> | <img src="../media/moss/follower_rest.webp?raw=true" alt="Moss v1 follower arm rest position" title="Moss v1 follower arm rest position" style="width:100%;"> | Make sure both arms are connected and run this script to launch manual calibration: ```bash python lerobot/scripts/control_robot.py \ --robot.type=moss \ --robot.cameras='{}' \ --control.type=calibrate \ --control.arms='["main_follower"]' ``` **Manual calibration of leader arm** Follow step 6 of the [assembly video](https://www.youtube.com/watch?v=DA91NJOtMic) which illustrates the manual calibration. You will need to move the leader arm to these positions sequentially: | 1. Zero position | 2. Rotated position | 3. Rest position | |---|---|---| | <img src="../media/moss/leader_zero.webp?raw=true" alt="Moss v1 leader arm zero position" title="Moss v1 leader arm zero position" style="width:100%;"> | <img src="../media/moss/leader_rotated.webp?raw=true" alt="Moss v1 leader arm rotated position" title="Moss v1 leader arm rotated position" style="width:100%;"> | <img src="../media/moss/leader_rest.webp?raw=true" alt="Moss v1 leader arm rest position" title="Moss v1 leader arm rest position" style="width:100%;"> | Run this script to launch manual calibration: ```bash python lerobot/scripts/control_robot.py \ --robot.type=moss \ --robot.cameras='{}' \ --control.type=calibrate \ --control.arms='["main_leader"]' ``` ## Teleoperate **Simple teleop** Then you are ready to teleoperate your robot! Run this simple script (it won't connect and display the cameras): ```bash python lerobot/scripts/control_robot.py \ --robot.type=moss \ --robot.cameras='{}' \ --control.type=teleoperate ``` **Teleop with displaying cameras** Follow [this guide to setup your cameras](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#c-add-your-cameras-with-opencvcamera). Then you will be able to display the cameras on your computer while you are teleoperating by running the following code. This is useful to prepare your setup before recording your first dataset. ```bash python lerobot/scripts/control_robot.py \ --robot.type=moss \ --control.type=teleoperate ``` ## Record a dataset Once you're familiar with teleoperation, you can record your first dataset with Moss v1. 
If you want to use the Hugging Face hub features for uploading your dataset and you haven't done so previously, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens): ```bash huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential ``` Store your Hugging Face repository name in a variable to run these commands: ```bash HF_USER=$(huggingface-cli whoami | head -n 1) echo $HF_USER ``` Record 2 episodes and upload your dataset to the hub: ```bash python lerobot/scripts/control_robot.py \ --robot.type=moss \ --control.type=record \ --control.fps=30 \ --control.single_task="Grasp a lego block and put it in the bin." \ --control.repo_id=${HF_USER}/moss_test \ --control.tags='["moss","tutorial"]' \ --control.warmup_time_s=5 \ --control.episode_time_s=30 \ --control.reset_time_s=30 \ --control.num_episodes=2 \ --control.push_to_hub=true ``` Note: You can resume recording by adding `--control.resume=true`. Also, if you didn't push your dataset yet, add `--control.local_files_only=true`. ## Visualize a dataset If you uploaded your dataset to the hub with `--control.push_to_hub=true`, you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id, given by: ```bash echo ${HF_USER}/moss_test ``` If you didn't upload it (i.e. you used `--control.push_to_hub=false`), you can also visualize it locally with: ```bash python lerobot/scripts/visualize_dataset_html.py \ --repo-id ${HF_USER}/moss_test \ --local-files-only 1 ``` ## Replay an episode Now try to replay the first episode on your robot: ```bash python lerobot/scripts/control_robot.py \ --robot.type=moss \ --control.type=replay \ --control.fps=30 \ --control.repo_id=${HF_USER}/moss_test \ --control.episode=0 ``` Note: If you didn't push your dataset yet, add `--control.local_files_only=true`. ## Train a policy To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command: ```bash python lerobot/scripts/train.py \ --dataset.repo_id=${HF_USER}/moss_test \ --policy.type=act \ --output_dir=outputs/train/act_moss_test \ --job_name=act_moss_test \ --device=cuda \ --wandb.enable=true ``` Note: If you didn't push your dataset yet, add `--dataset.local_files_only=true`. Let's explain the command: 1. We provided the dataset as an argument with `--dataset.repo_id=${HF_USER}/moss_test`. 2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset. 3. We provided `device=cuda` since we are training on an Nvidia GPU, but you could use `device=mps` to train on Apple silicon. 4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`. Training should take several hours. You will find checkpoints in `outputs/train/act_moss_test/checkpoints`. ## Evaluate your policy You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input.
For instance, run this command to record 10 evaluation episodes: ```bash python lerobot/scripts/control_robot.py \ --robot.type=moss \ --control.type=record \ --control.fps=30 \ --control.single_task="Grasp a lego block and put it in the bin." \ --control.repo_id=${HF_USER}/eval_act_moss_test \ --control.tags='["tutorial"]' \ --control.warmup_time_s=5 \ --control.episode_time_s=30 \ --control.reset_time_s=30 \ --control.num_episodes=10 \ --control.push_to_hub=true \ --control.policy.path=outputs/train/act_moss_test/checkpoints/last/pretrained_model ``` As you can see, it's almost the same command previously used to record your training dataset, with two changes: 1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/act_moss_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_moss_test`). 2. The dataset name begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_moss_test`). ## More Follow this [previous tutorial](https://github.com/huggingface/lerobot/blob/main/examples/7_get_started_with_real_robot.md#4-train-a-policy-on-your-data) for a more in-depth walkthrough of controlling real robots with LeRobot. If you have any questions or need help, please reach out on Discord in the channel [`#moss-arm`](https://discord.com/channels/1216765309076115607/1275374638985252925).
lerobot/examples/11_use_moss.md/0
{ "file_path": "lerobot/examples/11_use_moss.md", "repo_id": "lerobot", "token_count": 5335 }
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from pprint import pformat import torch from lerobot.common.datasets.lerobot_dataset import ( LeRobotDataset, LeRobotDatasetMetadata, MultiLeRobotDataset, ) from lerobot.common.datasets.transforms import ImageTransforms from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.train import TrainPipelineConfig IMAGENET_STATS = { "mean": [[[0.485]], [[0.456]], [[0.406]]], # (c,1,1) "std": [[[0.229]], [[0.224]], [[0.225]]], # (c,1,1) } def resolve_delta_timestamps( cfg: PreTrainedConfig, ds_meta: LeRobotDatasetMetadata ) -> dict[str, list] | None: """Resolves delta_timestamps by reading from the 'delta_indices' properties of the PreTrainedConfig. Args: cfg (PreTrainedConfig): The PreTrainedConfig to read delta_indices from. ds_meta (LeRobotDatasetMetadata): The dataset from which features and fps are used to build delta_timestamps against. Returns: dict[str, list] | None: A dictionary of delta_timestamps, e.g.: { "observation.state": [-0.04, -0.02, 0] "observation.action": [-0.02, 0, 0.02] } returns `None` if the the resulting dict is empty. """ delta_timestamps = {} for key in ds_meta.features: if key == "next.reward" and cfg.reward_delta_indices is not None: delta_timestamps[key] = [i / ds_meta.fps for i in cfg.reward_delta_indices] if key == "action" and cfg.action_delta_indices is not None: delta_timestamps[key] = [i / ds_meta.fps for i in cfg.action_delta_indices] if key.startswith("observation.") and cfg.observation_delta_indices is not None: delta_timestamps[key] = [i / ds_meta.fps for i in cfg.observation_delta_indices] if len(delta_timestamps) == 0: delta_timestamps = None return delta_timestamps def make_dataset(cfg: TrainPipelineConfig) -> LeRobotDataset | MultiLeRobotDataset: """Handles the logic of setting up delta timestamps and image transforms before creating a dataset. Args: cfg (TrainPipelineConfig): A TrainPipelineConfig config which contains a DatasetConfig and a PreTrainedConfig. Raises: NotImplementedError: The MultiLeRobotDataset is currently deactivated. 
Returns: LeRobotDataset | MultiLeRobotDataset """ image_transforms = ( ImageTransforms(cfg.dataset.image_transforms) if cfg.dataset.image_transforms.enable else None ) if isinstance(cfg.dataset.repo_id, str): ds_meta = LeRobotDatasetMetadata(cfg.dataset.repo_id, local_files_only=cfg.dataset.local_files_only) delta_timestamps = resolve_delta_timestamps(cfg.policy, ds_meta) dataset = LeRobotDataset( cfg.dataset.repo_id, episodes=cfg.dataset.episodes, delta_timestamps=delta_timestamps, image_transforms=image_transforms, video_backend=cfg.dataset.video_backend, local_files_only=cfg.dataset.local_files_only, ) else: raise NotImplementedError("The MultiLeRobotDataset isn't supported for now.") dataset = MultiLeRobotDataset( cfg.dataset.repo_id, # TODO(aliberts): add proper support for multi dataset # delta_timestamps=delta_timestamps, image_transforms=image_transforms, video_backend=cfg.dataset.video_backend, ) logging.info( "Multiple datasets were provided. Applied the following index mapping to the provided datasets: " f"{pformat(dataset.repo_id_to_index , indent=2)}" ) if cfg.dataset.use_imagenet_stats: for key in dataset.meta.camera_keys: for stats_type, stats in IMAGENET_STATS.items(): dataset.meta.stats[key][stats_type] = torch.tensor(stats, dtype=torch.float32) return dataset
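As a quick illustration of what `resolve_delta_timestamps` produces, the mapping is simply `index / fps` for each configured delta index; the values below (50 fps, action deltas `[-1, 0, 1]`) are hypothetical.

```python
# Illustrative only: the core arithmetic of resolve_delta_timestamps().
fps = 50                           # hypothetical dataset fps
action_delta_indices = [-1, 0, 1]  # hypothetical cfg.action_delta_indices

delta_timestamps = {"action": [i / fps for i in action_delta_indices]}
print(delta_timestamps)  # {'action': [-0.02, 0.0, 0.02]}
```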
lerobot/lerobot/common/datasets/factory.py/0
{ "file_path": "lerobot/lerobot/common/datasets/factory.py", "repo_id": "lerobot", "token_count": 1906 }
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Use this script to batch encode lerobot dataset from their raw format to LeRobotDataset and push their updated version to the hub. Under the hood, this script reuses 'push_dataset_to_hub.py'. It assumes that you already downloaded raw datasets, which you can do with the related '_download_raw.py' script. For instance, for codebase_version = 'v1.6', the following command was run, assuming raw datasets from lerobot-raw were downloaded in 'raw/datasets/directory': ```bash python lerobot/common/datasets/push_dataset_to_hub/_encode_datasets.py \ --raw-dir raw/datasets/directory \ --raw-repo-ids lerobot-raw \ --local-dir push/datasets/directory \ --tests-data-dir tests/data \ --push-repo lerobot \ --vcodec libsvtav1 \ --pix-fmt yuv420p \ --g 2 \ --crf 30 ``` """ import argparse from pathlib import Path from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION from lerobot.common.datasets.push_dataset_to_hub._download_raw import AVAILABLE_RAW_REPO_IDS from lerobot.common.datasets.push_dataset_to_hub.utils import check_repo_id from lerobot.scripts.push_dataset_to_hub import push_dataset_to_hub def get_push_repo_id_from_raw(raw_repo_id: str, push_repo: str) -> str: dataset_id_raw = raw_repo_id.split("/")[1] dataset_id = dataset_id_raw.removesuffix("_raw") return f"{push_repo}/{dataset_id}" def encode_datasets( raw_dir: Path, raw_repo_ids: list[str], push_repo: str, vcodec: str, pix_fmt: str, g: int, crf: int, local_dir: Path | None = None, tests_data_dir: Path | None = None, raw_format: str | None = None, dry_run: bool = False, ) -> None: if len(raw_repo_ids) == 1 and raw_repo_ids[0].lower() == "lerobot-raw": raw_repo_ids_format = AVAILABLE_RAW_REPO_IDS else: if raw_format is None: raise ValueError(raw_format) raw_repo_ids_format = {id_: raw_format for id_ in raw_repo_ids} for raw_repo_id, repo_raw_format in raw_repo_ids_format.items(): check_repo_id(raw_repo_id) dataset_repo_id_push = get_push_repo_id_from_raw(raw_repo_id, push_repo) dataset_raw_dir = raw_dir / raw_repo_id dataset_dir = local_dir / dataset_repo_id_push if local_dir is not None else None encoding = { "vcodec": vcodec, "pix_fmt": pix_fmt, "g": g, "crf": crf, } if not (dataset_raw_dir).is_dir(): raise NotADirectoryError(dataset_raw_dir) if not dry_run: push_dataset_to_hub( dataset_raw_dir, raw_format=repo_raw_format, repo_id=dataset_repo_id_push, local_dir=dataset_dir, resume=True, encoding=encoding, tests_data_dir=tests_data_dir, ) else: print( f"DRY RUN: {dataset_raw_dir} --> {dataset_dir} --> {dataset_repo_id_push}@{CODEBASE_VERSION}" ) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--raw-dir", type=Path, default=Path("data"), help="Directory where raw datasets are located.", ) parser.add_argument( "--raw-repo-ids", type=str, nargs="*", default=["lerobot-raw"], help="""Raw dataset repo ids. 
if 'lerobot-raw', the keys from `AVAILABLE_RAW_REPO_IDS` will be used and raw datasets will be fetched from the 'lerobot-raw/' repo and pushed with their associated format. It is assumed that each dataset is located at `raw_dir / raw_repo_id` """, ) parser.add_argument( "--raw-format", type=str, default=None, help="""Raw format to use for the raw repo-ids. Must be specified if --raw-repo-ids is not 'lerobot-raw'""", ) parser.add_argument( "--local-dir", type=Path, default=None, help="""When provided, writes the dataset converted to LeRobotDataset format in this directory (e.g. `data/lerobot/aloha_mobile_chair`).""", ) parser.add_argument( "--push-repo", type=str, default="lerobot", help="Repo to upload datasets to", ) parser.add_argument( "--vcodec", type=str, default="libsvtav1", help="Codec to use for encoding videos", ) parser.add_argument( "--pix-fmt", type=str, default="yuv420p", help="Pixel formats (chroma subsampling) to be used for encoding", ) parser.add_argument( "--g", type=int, default=2, help="Group of pictures sizes to be used for encoding.", ) parser.add_argument( "--crf", type=int, default=30, help="Constant rate factors to be used for encoding.", ) parser.add_argument( "--tests-data-dir", type=Path, default=None, help=( "When provided, save tests artifacts into the given directory " "(e.g. `--tests-data-dir tests/data` will save to tests/data/{--repo-id})." ), ) parser.add_argument( "--dry-run", type=int, default=0, help="If not set to 0, this script won't download or upload anything.", ) args = parser.parse_args() encode_datasets(**vars(args)) if __name__ == "__main__": main()
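As a small worked example of the repo-id mapping performed by `get_push_repo_id_from_raw` above (the dataset name is hypothetical; the `_raw` suffix handling is the point):

```python
# Illustrative only: reproduces the mapping logic of get_push_repo_id_from_raw().
raw_repo_id = "lerobot-raw/aloha_mobile_chair_raw"  # hypothetical raw dataset id
push_repo = "lerobot"

dataset_id = raw_repo_id.split("/")[1].removesuffix("_raw")
print(f"{push_repo}/{dataset_id}")  # lerobot/aloha_mobile_chair
```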
lerobot/lerobot/common/datasets/push_dataset_to_hub/_encode_datasets.py/0
{ "file_path": "lerobot/lerobot/common/datasets/push_dataset_to_hub/_encode_datasets.py", "repo_id": "lerobot", "token_count": 2733 }
from dataclasses import dataclass, field from lerobot.common.optim.optimizers import AdamWConfig from lerobot.common.optim.schedulers import ( CosineDecayWithWarmupSchedulerConfig, ) from lerobot.configs.policies import PreTrainedConfig from lerobot.configs.types import FeatureType, NormalizationMode, PolicyFeature @PreTrainedConfig.register_subclass("pi0") @dataclass class PI0Config(PreTrainedConfig): # Input / output structure. n_obs_steps: int = 1 chunk_size: int = 50 n_action_steps: int = 50 normalization_mapping: dict[str, NormalizationMode] = field( default_factory=lambda: { "VISUAL": NormalizationMode.IDENTITY, "STATE": NormalizationMode.MEAN_STD, "ACTION": NormalizationMode.MEAN_STD, } ) # Shorter state and action vectors will be padded max_state_dim: int = 32 max_action_dim: int = 32 # Image preprocessing resize_imgs_with_padding: tuple[int, int] = (224, 224) # Add empty images. Used by pi0_aloha_sim which adds the empty # left and right wrist cameras in addition to the top camera. empty_cameras: int = 0 # Converts the joint and gripper values from the standard Aloha space to # the space used by the pi internal runtime which was used to train the base model. adapt_to_pi_aloha: bool = False # Converts joint dimensions to deltas with respect to the current state before passing to the model. # Gripper dimensions will remain in absolute values. use_delta_joint_actions_aloha: bool = False # Tokenizer tokenizer_max_length: int = 48 # Projector proj_width: int = 1024 # Decoding num_steps: int = 10 # Attention utils use_cache: bool = True attention_implementation: str = "eager" # or fa2, flex # Finetuning settings freeze_vision_encoder: bool = True train_expert_only: bool = False train_state_proj: bool = True # Training presets optimizer_lr: float = 2.5e-5 optimizer_betas: tuple[float, float] = (0.9, 0.95) optimizer_eps: float = 1e-8 optimizer_weight_decay: float = 1e-10 scheduler_warmup_steps: int = 1_000 scheduler_decay_steps: int = 30_000 scheduler_decay_lr: float = 2.5e-6 # TODO: Add EMA def __post_init__(self): super().__post_init__() """Input validation (not exhaustive).""" if self.n_action_steps > self.chunk_size: raise ValueError( f"The chunk size is the upper bound for the number of action steps per model invocation. Got " f"{self.n_action_steps} for `n_action_steps` and {self.chunk_size} for `chunk_size`." ) if self.n_obs_steps != 1: raise ValueError( f"Multiple observation steps not handled yet. Got `nobs_steps={self.n_obs_steps}`" ) if self.use_delta_joint_actions_aloha: raise NotImplementedError( "`use_delta_joint_actions_aloha` is used by pi0 for aloha real models. It is not ported yet in LeRobot." 
) def validate_features(self) -> None: # TODO: implement value error # if not self.image_features and not self.env_state_feature: # raise ValueError("You must provide at least one image or the environment state among the inputs.") for i in range(self.empty_cameras): key = f"observation.images.empty_camera_{i}" empty_camera = PolicyFeature( type=FeatureType.VISUAL, shape=(3, 480, 640), ) self.input_features[key] = empty_camera def get_optimizer_preset(self) -> AdamWConfig: return AdamWConfig( lr=self.optimizer_lr, betas=self.optimizer_betas, eps=self.optimizer_eps, weight_decay=self.optimizer_weight_decay, ) def get_scheduler_preset(self): return CosineDecayWithWarmupSchedulerConfig( peak_lr=self.optimizer_lr, decay_lr=self.scheduler_decay_lr, num_warmup_steps=self.scheduler_warmup_steps, num_decay_steps=self.scheduler_decay_steps, ) @property def observation_delta_indices(self) -> None: return None @property def action_delta_indices(self) -> list: return list(range(self.chunk_size)) @property def reward_delta_indices(self) -> None: return None
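A minimal sketch of instantiating this config with its defaults and reading the derived presets, assuming the `lerobot` package is installed and imports cleanly; the values in the comments are just the defaults defined above.

```python
# Illustrative only: instantiate the config with its defaults and inspect a
# few of the derived presets defined above.
from lerobot.common.policies.pi0.configuration_pi0 import PI0Config

cfg = PI0Config()
print(cfg.chunk_size, cfg.n_action_steps)   # 50 50
print(cfg.action_delta_indices[:3])         # [0, 1, 2]

optimizer_cfg = cfg.get_optimizer_preset()  # AdamWConfig(lr=2.5e-5, ...)
scheduler_cfg = cfg.get_scheduler_preset()  # CosineDecayWithWarmupSchedulerConfig(...)
```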
lerobot/lerobot/common/policies/pi0/configuration_pi0.py/0
{ "file_path": "lerobot/lerobot/common/policies/pi0/configuration_pi0.py", "repo_id": "lerobot", "token_count": 1875 }
""" This file contains utilities for recording frames from Intel Realsense cameras. """ import argparse import concurrent.futures import logging import math import shutil import threading import time import traceback from collections import Counter from pathlib import Path from threading import Thread import numpy as np from PIL import Image from lerobot.common.robot_devices.cameras.configs import IntelRealSenseCameraConfig from lerobot.common.robot_devices.utils import ( RobotDeviceAlreadyConnectedError, RobotDeviceNotConnectedError, busy_wait, ) from lerobot.common.utils.utils import capture_timestamp_utc SERIAL_NUMBER_INDEX = 1 def find_cameras(raise_when_empty=True, mock=False) -> list[dict]: """ Find the names and the serial numbers of the Intel RealSense cameras connected to the computer. """ if mock: import tests.mock_pyrealsense2 as rs else: import pyrealsense2 as rs cameras = [] for device in rs.context().query_devices(): serial_number = int(device.get_info(rs.camera_info(SERIAL_NUMBER_INDEX))) name = device.get_info(rs.camera_info.name) cameras.append( { "serial_number": serial_number, "name": name, } ) if raise_when_empty and len(cameras) == 0: raise OSError( "Not a single camera was detected. Try re-plugging, or re-installing `librealsense` and its python wrapper `pyrealsense2`, or updating the firmware." ) return cameras def save_image(img_array, serial_number, frame_index, images_dir): try: img = Image.fromarray(img_array) path = images_dir / f"camera_{serial_number}_frame_{frame_index:06d}.png" path.parent.mkdir(parents=True, exist_ok=True) img.save(str(path), quality=100) logging.info(f"Saved image: {path}") except Exception as e: logging.error(f"Failed to save image for camera {serial_number} frame {frame_index}: {e}") def save_images_from_cameras( images_dir: Path, serial_numbers: list[int] | None = None, fps=None, width=None, height=None, record_time_s=2, mock=False, ): """ Initializes all the cameras and saves images to the directory. Useful to visually identify the camera associated to a given serial number. """ if serial_numbers is None or len(serial_numbers) == 0: camera_infos = find_cameras(mock=mock) serial_numbers = [cam["serial_number"] for cam in camera_infos] if mock: import tests.mock_cv2 as cv2 else: import cv2 print("Connecting cameras") cameras = [] for cam_sn in serial_numbers: print(f"{cam_sn=}") config = IntelRealSenseCameraConfig( serial_number=cam_sn, fps=fps, width=width, height=height, mock=mock ) camera = IntelRealSenseCamera(config) camera.connect() print( f"IntelRealSenseCamera({camera.serial_number}, fps={camera.fps}, width={camera.width}, height={camera.height}, color_mode={camera.color_mode})" ) cameras.append(camera) images_dir = Path(images_dir) if images_dir.exists(): shutil.rmtree( images_dir, ) images_dir.mkdir(parents=True, exist_ok=True) print(f"Saving images to {images_dir}") frame_index = 0 start_time = time.perf_counter() try: with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: while True: now = time.perf_counter() for camera in cameras: # If we use async_read when fps is None, the loop will go full speed, and we will end up # saving the same images from the cameras multiple times until the RAM/disk is full. 
image = camera.read() if fps is None else camera.async_read() if image is None: print("No Frame") bgr_converted_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) executor.submit( save_image, bgr_converted_image, camera.serial_number, frame_index, images_dir, ) if fps is not None: dt_s = time.perf_counter() - now busy_wait(1 / fps - dt_s) if time.perf_counter() - start_time > record_time_s: break print(f"Frame: {frame_index:04d}\tLatency (ms): {(time.perf_counter() - now) * 1000:.2f}") frame_index += 1 finally: print(f"Images have been saved to {images_dir}") for camera in cameras: camera.disconnect() class IntelRealSenseCamera: """ The IntelRealSenseCamera class is similar to OpenCVCamera class but adds additional features for Intel Real Sense cameras: - is instantiated with the serial number of the camera - won't randomly change as it can be the case of OpenCVCamera for Linux, - can also be instantiated with the camera's name — if it's unique — using IntelRealSenseCamera.init_from_name(), - depth map can be returned. To find the camera indices of your cameras, you can run our utility script that will save a few frames for each camera: ```bash python lerobot/common/robot_devices/cameras/intelrealsense.py --images-dir outputs/images_from_intelrealsense_cameras ``` When an IntelRealSenseCamera is instantiated, if no specific config is provided, the default fps, width, height and color_mode of the given camera will be used. Example of instantiating with a serial number: ```python from lerobot.common.robot_devices.cameras.configs import IntelRealSenseCameraConfig config = IntelRealSenseCameraConfig(serial_number=128422271347) camera = IntelRealSenseCamera(config) camera.connect() color_image = camera.read() # when done using the camera, consider disconnecting camera.disconnect() ``` Example of instantiating with a name if it's unique: ``` config = IntelRealSenseCameraConfig(name="Intel RealSense D405") ``` Example of changing default fps, width, height and color_mode: ```python config = IntelRealSenseCameraConfig(serial_number=128422271347, fps=30, width=1280, height=720) config = IntelRealSenseCameraConfig(serial_number=128422271347, fps=90, width=640, height=480) config = IntelRealSenseCameraConfig(serial_number=128422271347, fps=90, width=640, height=480, color_mode="bgr") # Note: might error out upon `camera.connect()` if these settings are not compatible with the camera ``` Example of returning depth: ```python config = IntelRealSenseCameraConfig(serial_number=128422271347, use_depth=True) camera = IntelRealSenseCamera(config) camera.connect() color_image, depth_map = camera.read() ``` """ def __init__( self, config: IntelRealSenseCameraConfig, ): self.config = config if config.name is not None: self.serial_number = self.find_serial_number_from_name(config.name) else: self.serial_number = config.serial_number self.fps = config.fps self.width = config.width self.height = config.height self.channels = config.channels self.color_mode = config.color_mode self.use_depth = config.use_depth self.force_hardware_reset = config.force_hardware_reset self.mock = config.mock self.camera = None self.is_connected = False self.thread = None self.stop_event = None self.color_image = None self.depth_map = None self.logs = {} if self.mock: import tests.mock_cv2 as cv2 else: import cv2 # TODO(alibets): Do we keep original width/height or do we define them after rotation? 
self.rotation = None if config.rotation == -90: self.rotation = cv2.ROTATE_90_COUNTERCLOCKWISE elif config.rotation == 90: self.rotation = cv2.ROTATE_90_CLOCKWISE elif config.rotation == 180: self.rotation = cv2.ROTATE_180 def find_serial_number_from_name(self, name): camera_infos = find_cameras() camera_names = [cam["name"] for cam in camera_infos] this_name_count = Counter(camera_names)[name] if this_name_count > 1: # TODO(aliberts): Test this with multiple identical cameras (Aloha) raise ValueError( f"Multiple {name} cameras have been detected. Please use their serial number to instantiate them." ) name_to_serial_dict = {cam["name"]: cam["serial_number"] for cam in camera_infos} cam_sn = name_to_serial_dict[name] return cam_sn def connect(self): if self.is_connected: raise RobotDeviceAlreadyConnectedError( f"IntelRealSenseCamera({self.serial_number}) is already connected." ) if self.mock: import tests.mock_pyrealsense2 as rs else: import pyrealsense2 as rs config = rs.config() config.enable_device(str(self.serial_number)) if self.fps and self.width and self.height: # TODO(rcadene): can we set rgb8 directly? config.enable_stream(rs.stream.color, self.width, self.height, rs.format.rgb8, self.fps) else: config.enable_stream(rs.stream.color) if self.use_depth: if self.fps and self.width and self.height: config.enable_stream(rs.stream.depth, self.width, self.height, rs.format.z16, self.fps) else: config.enable_stream(rs.stream.depth) self.camera = rs.pipeline() try: profile = self.camera.start(config) is_camera_open = True except RuntimeError: is_camera_open = False traceback.print_exc() # If the camera doesn't work, display the camera indices corresponding to # valid cameras. if not is_camera_open: # Verify that the provided `serial_number` is valid before printing the traceback camera_infos = find_cameras() serial_numbers = [cam["serial_number"] for cam in camera_infos] if self.serial_number not in serial_numbers: raise ValueError( f"`serial_number` is expected to be one of these available cameras {serial_numbers}, but {self.serial_number} is provided instead. " "To find the serial number you should use, run `python lerobot/common/robot_devices/cameras/intelrealsense.py`." ) raise OSError(f"Can't access IntelRealSenseCamera({self.serial_number}).") color_stream = profile.get_stream(rs.stream.color) color_profile = color_stream.as_video_stream_profile() actual_fps = color_profile.fps() actual_width = color_profile.width() actual_height = color_profile.height() # Using `math.isclose` since actual fps can be a float (e.g. 29.9 instead of 30) if self.fps is not None and not math.isclose(self.fps, actual_fps, rel_tol=1e-3): # Using `OSError` since it's a broad that encompasses issues related to device communication raise OSError( f"Can't set {self.fps=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_fps}." ) if self.width is not None and self.width != actual_width: raise OSError( f"Can't set {self.width=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_width}." ) if self.height is not None and self.height != actual_height: raise OSError( f"Can't set {self.height=} for IntelRealSenseCamera({self.serial_number}). Actual value is {actual_height}." 
            )

        self.fps = round(actual_fps)
        self.width = round(actual_width)
        self.height = round(actual_height)

        self.is_connected = True

    def read(self, temporary_color: str | None = None) -> np.ndarray | tuple[np.ndarray, np.ndarray]:
        """Read a frame from the camera returned in the format height x width x channels (e.g. 480 x 640 x 3)
        of type `np.uint8`, unlike the PyTorch format, which is float and channel-first.

        When `use_depth=True`, returns a tuple `(color_image, depth_map)` with a depth map in the format
        height x width (e.g. 480 x 640) of type np.uint16.

        Note: Reading a frame is blocking and runs at the camera frame rate (`camera.fps` frames per second).
        If you are reading data from other sensors, we advise using `camera.async_read()`, which is the
        non-blocking version of `camera.read()`.
        """
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
            )

        if self.mock:
            import tests.mock_cv2 as cv2
        else:
            import cv2

        start_time = time.perf_counter()

        frame = self.camera.wait_for_frames(timeout_ms=5000)

        color_frame = frame.get_color_frame()

        if not color_frame:
            raise OSError(f"Can't capture color image from IntelRealSenseCamera({self.serial_number}).")

        color_image = np.asanyarray(color_frame.get_data())

        requested_color_mode = self.color_mode if temporary_color is None else temporary_color
        if requested_color_mode not in ["rgb", "bgr"]:
            raise ValueError(
                f"Expected color values are 'rgb' or 'bgr', but {requested_color_mode} is provided."
            )

        # IntelRealSense uses RGB format as default (red, green, blue).
        if requested_color_mode == "bgr":
            color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)

        h, w, _ = color_image.shape
        if h != self.height or w != self.width:
            raise OSError(
                f"Can't capture color image with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
            )

        if self.rotation is not None:
            color_image = cv2.rotate(color_image, self.rotation)

        # log the number of seconds it took to read the image
        self.logs["delta_timestamp_s"] = time.perf_counter() - start_time

        # log the utc time at which the image was received
        self.logs["timestamp_utc"] = capture_timestamp_utc()

        if self.use_depth:
            depth_frame = frame.get_depth_frame()
            if not depth_frame:
                raise OSError(f"Can't capture depth image from IntelRealSenseCamera({self.serial_number}).")

            depth_map = np.asanyarray(depth_frame.get_data())

            h, w = depth_map.shape
            if h != self.height or w != self.width:
                raise OSError(
                    f"Can't capture depth map with expected height and width ({self.height} x {self.width}). ({h} x {w}) returned instead."
                )

            if self.rotation is not None:
                depth_map = cv2.rotate(depth_map, self.rotation)

            return color_image, depth_map
        else:
            return color_image

    def read_loop(self):
        while not self.stop_event.is_set():
            if self.use_depth:
                self.color_image, self.depth_map = self.read()
            else:
                self.color_image = self.read()

    def async_read(self):
        """Access the latest color image"""
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
            )

        if self.thread is None:
            self.stop_event = threading.Event()
            self.thread = Thread(target=self.read_loop, args=())
            self.thread.daemon = True
            self.thread.start()

        num_tries = 0
        while self.color_image is None:
            # TODO(rcadene, aliberts): intelrealsense has diverged compared to opencv over here
            num_tries += 1
            time.sleep(1 / self.fps)
            if num_tries > self.fps and (self.thread.ident is None or not self.thread.is_alive()):
                raise Exception(
                    "The thread responsible for `self.async_read()` took too much time to start. There might be an issue. Verify that `self.thread.start()` has been called."
                )

        if self.use_depth:
            return self.color_image, self.depth_map
        else:
            return self.color_image

    def disconnect(self):
        if not self.is_connected:
            raise RobotDeviceNotConnectedError(
                f"IntelRealSenseCamera({self.serial_number}) is not connected. Try running `camera.connect()` first."
            )

        if self.thread is not None and self.thread.is_alive():
            # wait for the thread to finish
            self.stop_event.set()
            self.thread.join()
            self.thread = None
            self.stop_event = None

        self.camera.stop()
        self.camera = None

        self.is_connected = False

    def __del__(self):
        if getattr(self, "is_connected", False):
            self.disconnect()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Save a few frames using `IntelRealSenseCamera` for all cameras connected to the computer, or a selected subset."
    )
    parser.add_argument(
        "--serial-numbers",
        type=int,
        nargs="*",
        default=None,
        help="List of serial numbers used to instantiate the `IntelRealSenseCamera`. If not provided, find and use all available cameras.",
    )
    parser.add_argument(
        "--fps",
        type=int,
        default=30,
        help="Set the number of frames recorded per second for all cameras. If not provided, use the default fps of each camera.",
    )
    parser.add_argument(
        "--width",
        type=int,
        default=640,
        help="Set the width for all cameras. If not provided, use the default width of each camera.",
    )
    parser.add_argument(
        "--height",
        type=int,
        default=480,
        help="Set the height for all cameras. If not provided, use the default height of each camera.",
    )
    parser.add_argument(
        "--images-dir",
        type=Path,
        default="outputs/images_from_intelrealsense_cameras",
        help="Set directory to save a few frames for each camera.",
    )
    parser.add_argument(
        "--record-time-s",
        type=float,
        default=2.0,
        help="Set the number of seconds used to record the frames. By default, 2 seconds.",
    )
    args = parser.parse_args()
    save_images_from_cameras(**vars(args))
lerobot/lerobot/common/robot_devices/cameras/intelrealsense.py/0
{ "file_path": "lerobot/lerobot/common/robot_devices/cameras/intelrealsense.py", "repo_id": "lerobot", "token_count": 8222 }
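The `async_read()` method above implements a common "latest frame" pattern: a background thread keeps calling the blocking `read()`, and callers always get the most recent image without waiting a full frame period. The sketch below shows that pattern in isolation, with a stand-in `blocking_read()` function instead of a real RealSense pipeline so it runs without `pyrealsense2`; the `LatestFrameReader` name is illustrative and not part of the lerobot API.

```python
import threading
import time

import numpy as np


def blocking_read(fps: int = 30) -> np.ndarray:
    """Stand-in for a blocking camera read (e.g. waiting for the next RealSense frame)."""
    time.sleep(1 / fps)  # simulate waiting for the next frame
    return np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)


class LatestFrameReader:
    """Keeps the most recent frame available without blocking the caller."""

    def __init__(self, fps: int = 30):
        self.fps = fps
        self.frame = None
        self.stop_event = threading.Event()
        self.thread = threading.Thread(target=self._loop, daemon=True)
        self.thread.start()

    def _loop(self):
        # Continuously overwrite the latest frame, mirroring read_loop() above
        while not self.stop_event.is_set():
            self.frame = blocking_read(self.fps)

    def async_read(self) -> np.ndarray:
        # Block only until the first frame has arrived, then return the latest one
        while self.frame is None:
            time.sleep(1 / self.fps)
        return self.frame

    def stop(self):
        self.stop_event.set()
        self.thread.join()


if __name__ == "__main__":
    reader = LatestFrameReader(fps=30)
    image = reader.async_read()
    print(image.shape)  # (480, 640, 3)
    reader.stop()
```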
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import threading import time from contextlib import ContextDecorator class TimeBenchmark(ContextDecorator): """ Measures execution time using a context manager or decorator. This class supports both context manager and decorator usage, and is thread-safe for multithreaded environments. Args: print: If True, prints the elapsed time upon exiting the context or completing the function. Defaults to False. Examples: Using as a context manager: >>> benchmark = TimeBenchmark() >>> with benchmark: ... time.sleep(1) >>> print(f"Block took {benchmark.result:.4f} seconds") Block took approximately 1.0000 seconds Using with multithreading: ```python import threading benchmark = TimeBenchmark() def context_manager_example(): with benchmark: time.sleep(0.01) print(f"Block took {benchmark.result_ms:.2f} milliseconds") threads = [] for _ in range(3): t1 = threading.Thread(target=context_manager_example) threads.append(t1) for t in threads: t.start() for t in threads: t.join() ``` Expected output: Block took approximately 10.00 milliseconds Block took approximately 10.00 milliseconds Block took approximately 10.00 milliseconds """ def __init__(self, print=False): self.local = threading.local() self.print_time = print def __enter__(self): self.local.start_time = time.perf_counter() return self def __exit__(self, *exc): self.local.end_time = time.perf_counter() self.local.elapsed_time = self.local.end_time - self.local.start_time if self.print_time: print(f"Elapsed time: {self.local.elapsed_time:.4f} seconds") return False @property def result(self): return getattr(self.local, "elapsed_time", None) @property def result_ms(self): return self.result * 1e3
lerobot/lerobot/common/utils/benchmark.py/0
{ "file_path": "lerobot/lerobot/common/utils/benchmark.py", "repo_id": "lerobot", "token_count": 1033 }
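Because `TimeBenchmark` subclasses `contextlib.ContextDecorator`, the same object can also be applied as a function decorator, not only inside a `with` block. The snippet below is a self-contained sketch of that usage built on a minimal timer so it runs without importing lerobot; the `Timer` class is a simplified stand-in (no thread-local storage), not the actual `TimeBenchmark` implementation.

```python
import time
from contextlib import ContextDecorator


class Timer(ContextDecorator):
    """Minimal stand-in for TimeBenchmark: stores the elapsed time of the last block."""

    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, *exc):
        self.elapsed = time.perf_counter() - self.start
        return False  # do not suppress exceptions


timer = Timer()


@timer  # ContextDecorator makes the context manager usable as a decorator
def slow_function():
    time.sleep(0.05)


slow_function()
print(f"slow_function took {timer.elapsed * 1000:.1f} ms")

with timer:  # and it still works as a plain context manager
    time.sleep(0.01)
print(f"block took {timer.elapsed * 1000:.1f} ms")
```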
import os import time from pathlib import Path from serial.tools import list_ports # Part of pyserial library def find_available_ports(): if os.name == "nt": # Windows # List COM ports using pyserial ports = [port.device for port in list_ports.comports()] else: # Linux/macOS # List /dev/tty* ports for Unix-based systems ports = [str(path) for path in Path("/dev").glob("tty*")] return ports def find_port(): print("Finding all available ports for the MotorsBus.") ports_before = find_available_ports() print("Ports before disconnecting:", ports_before) print("Remove the USB cable from your MotorsBus and press Enter when done.") input() # Wait for user to disconnect the device time.sleep(0.5) # Allow some time for port to be released ports_after = find_available_ports() ports_diff = list(set(ports_before) - set(ports_after)) if len(ports_diff) == 1: port = ports_diff[0] print(f"The port of this MotorsBus is '{port}'") print("Reconnect the USB cable.") elif len(ports_diff) == 0: raise OSError(f"Could not detect the port. No difference was found ({ports_diff}).") else: raise OSError(f"Could not detect the port. More than one port was found ({ports_diff}).") if __name__ == "__main__": # Helper to find the USB port associated with your MotorsBus. find_port()
lerobot/lerobot/scripts/find_motors_bus_port.py/0
{ "file_path": "lerobot/lerobot/scripts/find_motors_bus_port.py", "repo_id": "lerobot", "token_count": 508 }
#!/usr/bin/env python # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from huggingface_hub import DatasetCard from lerobot.common.datasets.utils import create_lerobot_dataset_card def test_default_parameters(): card = create_lerobot_dataset_card() assert isinstance(card, DatasetCard) assert card.data.tags == ["LeRobot"] assert card.data.task_categories == ["robotics"] assert card.data.configs == [ { "config_name": "default", "data_files": "data/*/*.parquet", } ] def test_with_tags(): tags = ["tag1", "tag2"] card = create_lerobot_dataset_card(tags=tags) assert card.data.tags == ["LeRobot", "tag1", "tag2"]
lerobot/tests/lerobot/common/datasets/test_utils.py/0
{ "file_path": "lerobot/tests/lerobot/common/datasets/test_utils.py", "repo_id": "lerobot", "token_count": 435 }
import queue import time from multiprocessing import queues from unittest.mock import MagicMock, patch import numpy as np import pytest from PIL import Image from lerobot.common.datasets.image_writer import ( AsyncImageWriter, image_array_to_image, safe_stop_image_writer, write_image, ) DUMMY_IMAGE = "test_image.png" def test_init_threading(): writer = AsyncImageWriter(num_processes=0, num_threads=2) try: assert writer.num_processes == 0 assert writer.num_threads == 2 assert isinstance(writer.queue, queue.Queue) assert len(writer.threads) == 2 assert len(writer.processes) == 0 assert all(t.is_alive() for t in writer.threads) finally: writer.stop() def test_init_multiprocessing(): writer = AsyncImageWriter(num_processes=2, num_threads=2) try: assert writer.num_processes == 2 assert writer.num_threads == 2 assert isinstance(writer.queue, queues.JoinableQueue) assert len(writer.threads) == 0 assert len(writer.processes) == 2 assert all(p.is_alive() for p in writer.processes) finally: writer.stop() def test_zero_threads(): with pytest.raises(ValueError): AsyncImageWriter(num_processes=0, num_threads=0) def test_image_array_to_image_rgb(img_array_factory): img_array = img_array_factory(100, 100) result_image = image_array_to_image(img_array) assert isinstance(result_image, Image.Image) assert result_image.size == (100, 100) assert result_image.mode == "RGB" def test_image_array_to_image_pytorch_format(img_array_factory): img_array = img_array_factory(100, 100).transpose(2, 0, 1) result_image = image_array_to_image(img_array) assert isinstance(result_image, Image.Image) assert result_image.size == (100, 100) assert result_image.mode == "RGB" @pytest.mark.skip("TODO: implement") def test_image_array_to_image_single_channel(img_array_factory): img_array = img_array_factory(channels=1) result_image = image_array_to_image(img_array) assert isinstance(result_image, Image.Image) assert result_image.size == (100, 100) assert result_image.mode == "L" def test_image_array_to_image_float_array(img_array_factory): img_array = img_array_factory(dtype=np.float32) result_image = image_array_to_image(img_array) assert isinstance(result_image, Image.Image) assert result_image.size == (100, 100) assert result_image.mode == "RGB" assert np.array(result_image).dtype == np.uint8 def test_image_array_to_image_out_of_bounds_float(): # Float array with values out of [0, 1] img_array = np.random.uniform(-1, 2, size=(100, 100, 3)).astype(np.float32) result_image = image_array_to_image(img_array) assert isinstance(result_image, Image.Image) assert result_image.size == (100, 100) assert result_image.mode == "RGB" assert np.array(result_image).dtype == np.uint8 assert np.array(result_image).min() >= 0 and np.array(result_image).max() <= 255 def test_write_image_numpy(tmp_path, img_array_factory): image_array = img_array_factory() fpath = tmp_path / DUMMY_IMAGE write_image(image_array, fpath) assert fpath.exists() saved_image = np.array(Image.open(fpath)) assert np.array_equal(image_array, saved_image) def test_write_image_image(tmp_path, img_factory): image_pil = img_factory() fpath = tmp_path / DUMMY_IMAGE write_image(image_pil, fpath) assert fpath.exists() saved_image = Image.open(fpath) assert list(saved_image.getdata()) == list(image_pil.getdata()) assert np.array_equal(image_pil, saved_image) def test_write_image_exception(tmp_path): image_array = "invalid data" fpath = tmp_path / DUMMY_IMAGE with patch("builtins.print") as mock_print: write_image(image_array, fpath) mock_print.assert_called() assert not 
fpath.exists() def test_save_image_numpy(tmp_path, img_array_factory): writer = AsyncImageWriter() try: image_array = img_array_factory() fpath = tmp_path / DUMMY_IMAGE fpath.parent.mkdir(parents=True, exist_ok=True) writer.save_image(image_array, fpath) writer.wait_until_done() assert fpath.exists() saved_image = np.array(Image.open(fpath)) assert np.array_equal(image_array, saved_image) finally: writer.stop() def test_save_image_numpy_multiprocessing(tmp_path, img_array_factory): writer = AsyncImageWriter(num_processes=2, num_threads=2) try: image_array = img_array_factory() fpath = tmp_path / DUMMY_IMAGE writer.save_image(image_array, fpath) writer.wait_until_done() assert fpath.exists() saved_image = np.array(Image.open(fpath)) assert np.array_equal(image_array, saved_image) finally: writer.stop() def test_save_image_torch(tmp_path, img_tensor_factory): writer = AsyncImageWriter() try: image_tensor = img_tensor_factory() fpath = tmp_path / DUMMY_IMAGE fpath.parent.mkdir(parents=True, exist_ok=True) writer.save_image(image_tensor, fpath) writer.wait_until_done() assert fpath.exists() saved_image = np.array(Image.open(fpath)) expected_image = (image_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8) assert np.array_equal(expected_image, saved_image) finally: writer.stop() def test_save_image_torch_multiprocessing(tmp_path, img_tensor_factory): writer = AsyncImageWriter(num_processes=2, num_threads=2) try: image_tensor = img_tensor_factory() fpath = tmp_path / DUMMY_IMAGE writer.save_image(image_tensor, fpath) writer.wait_until_done() assert fpath.exists() saved_image = np.array(Image.open(fpath)) expected_image = (image_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8) assert np.array_equal(expected_image, saved_image) finally: writer.stop() def test_save_image_pil(tmp_path, img_factory): writer = AsyncImageWriter() try: image_pil = img_factory() fpath = tmp_path / DUMMY_IMAGE fpath.parent.mkdir(parents=True, exist_ok=True) writer.save_image(image_pil, fpath) writer.wait_until_done() assert fpath.exists() saved_image = Image.open(fpath) assert list(saved_image.getdata()) == list(image_pil.getdata()) finally: writer.stop() def test_save_image_pil_multiprocessing(tmp_path, img_factory): writer = AsyncImageWriter(num_processes=2, num_threads=2) try: image_pil = img_factory() fpath = tmp_path / DUMMY_IMAGE writer.save_image(image_pil, fpath) writer.wait_until_done() assert fpath.exists() saved_image = Image.open(fpath) assert list(saved_image.getdata()) == list(image_pil.getdata()) finally: writer.stop() def test_save_image_invalid_data(tmp_path): writer = AsyncImageWriter() try: image_array = "invalid data" fpath = tmp_path / DUMMY_IMAGE fpath.parent.mkdir(parents=True, exist_ok=True) with patch("builtins.print") as mock_print: writer.save_image(image_array, fpath) writer.wait_until_done() mock_print.assert_called() assert not fpath.exists() finally: writer.stop() def test_save_image_after_stop(tmp_path, img_array_factory): writer = AsyncImageWriter() writer.stop() image_array = img_array_factory() fpath = tmp_path / DUMMY_IMAGE writer.save_image(image_array, fpath) time.sleep(1) assert not fpath.exists() def test_stop(): writer = AsyncImageWriter(num_processes=0, num_threads=2) writer.stop() assert not any(t.is_alive() for t in writer.threads) def test_stop_multiprocessing(): writer = AsyncImageWriter(num_processes=2, num_threads=2) writer.stop() assert not any(p.is_alive() for p in writer.processes) def test_multiple_stops(): writer = AsyncImageWriter() 
writer.stop() writer.stop() # Should not raise an exception assert not any(t.is_alive() for t in writer.threads) def test_multiple_stops_multiprocessing(): writer = AsyncImageWriter(num_processes=2, num_threads=2) writer.stop() writer.stop() # Should not raise an exception assert not any(t.is_alive() for t in writer.threads) def test_wait_until_done(tmp_path, img_array_factory): writer = AsyncImageWriter(num_processes=0, num_threads=4) try: num_images = 100 image_arrays = [img_array_factory(height=500, width=500) for _ in range(num_images)] fpaths = [tmp_path / f"frame_{i:06d}.png" for i in range(num_images)] for image_array, fpath in zip(image_arrays, fpaths, strict=True): fpath.parent.mkdir(parents=True, exist_ok=True) writer.save_image(image_array, fpath) writer.wait_until_done() for i, fpath in enumerate(fpaths): assert fpath.exists() saved_image = np.array(Image.open(fpath)) assert np.array_equal(saved_image, image_arrays[i]) finally: writer.stop() def test_wait_until_done_multiprocessing(tmp_path, img_array_factory): writer = AsyncImageWriter(num_processes=2, num_threads=2) try: num_images = 100 image_arrays = [img_array_factory() for _ in range(num_images)] fpaths = [tmp_path / f"frame_{i:06d}.png" for i in range(num_images)] for image_array, fpath in zip(image_arrays, fpaths, strict=True): fpath.parent.mkdir(parents=True, exist_ok=True) writer.save_image(image_array, fpath) writer.wait_until_done() for i, fpath in enumerate(fpaths): assert fpath.exists() saved_image = np.array(Image.open(fpath)) assert np.array_equal(saved_image, image_arrays[i]) finally: writer.stop() def test_exception_handling(tmp_path, img_array_factory): writer = AsyncImageWriter() try: image_array = img_array_factory() with ( patch.object(writer.queue, "put", side_effect=queue.Full("Queue is full")), pytest.raises(queue.Full) as exc_info, ): writer.save_image(image_array, tmp_path / "test.png") assert str(exc_info.value) == "Queue is full" finally: writer.stop() def test_with_different_image_formats(tmp_path, img_array_factory): writer = AsyncImageWriter() try: image_array = img_array_factory() formats = ["png", "jpeg", "bmp"] for fmt in formats: fpath = tmp_path / f"test_image.{fmt}" write_image(image_array, fpath) assert fpath.exists() finally: writer.stop() def test_safe_stop_image_writer_decorator(): class MockDataset: def __init__(self): self.image_writer = MagicMock(spec=AsyncImageWriter) @safe_stop_image_writer def function_that_raises_exception(dataset=None): raise Exception("Test exception") dataset = MockDataset() with pytest.raises(Exception) as exc_info: function_that_raises_exception(dataset=dataset) assert str(exc_info.value) == "Test exception" dataset.image_writer.stop.assert_called_once() def test_main_process_time(tmp_path, img_tensor_factory): writer = AsyncImageWriter() try: image_tensor = img_tensor_factory() fpath = tmp_path / DUMMY_IMAGE start_time = time.perf_counter() writer.save_image(image_tensor, fpath) end_time = time.perf_counter() time_spent = end_time - start_time # Might need to adjust this threshold depending on hardware assert time_spent < 0.01, f"Main process time exceeded threshold: {time_spent}s" writer.wait_until_done() assert fpath.exists() finally: writer.stop()
lerobot/tests/test_image_writer.py/0
{ "file_path": "lerobot/tests/test_image_writer.py", "repo_id": "lerobot", "token_count": 5192 }
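The tests above exercise an `AsyncImageWriter` that pushes images onto a queue and lets worker threads do the slow PNG encoding, so the main loop is not blocked by disk I/O. The sketch below shows that queue-plus-workers pattern in a self-contained form; it is a simplified illustration, not the lerobot implementation (no multiprocessing mode, no exception swallowing).

```python
import queue
import threading
from pathlib import Path

import numpy as np
from PIL import Image


class ThreadedImageWriter:
    """Minimal queue + worker-thread image writer."""

    def __init__(self, num_threads: int = 2):
        self.queue: queue.Queue = queue.Queue()
        self.threads = []
        for _ in range(num_threads):
            t = threading.Thread(target=self._worker, daemon=True)
            t.start()
            self.threads.append(t)

    def _worker(self):
        while True:
            item = self.queue.get()
            if item is None:  # sentinel pushed by stop()
                self.queue.task_done()
                break
            image_array, fpath = item
            Image.fromarray(image_array).save(fpath)
            self.queue.task_done()

    def save_image(self, image_array: np.ndarray, fpath: Path):
        # Returns immediately; encoding and writing happen in a worker thread
        self.queue.put((image_array, fpath))

    def wait_until_done(self):
        self.queue.join()

    def stop(self):
        for _ in self.threads:
            self.queue.put(None)
        for t in self.threads:
            t.join()


if __name__ == "__main__":
    writer = ThreadedImageWriter(num_threads=2)
    img = np.zeros((64, 64, 3), dtype=np.uint8)
    writer.save_image(img, Path("black.png"))
    writer.wait_until_done()
    writer.stop()
```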
# Model arguments
model_name_or_path: Qwen/Qwen2.5-Math-7B
model_revision: main
torch_dtype: bfloat16
attn_implementation: flash_attention_2

# Data training arguments
dataset_name: DigitalLearningGmbH/MATH-lighteval
dataset_configs:
- train
# Num processes is one less than the number of GPUs because vLLM reserves 1 GPU for generation
num_processes: 7

# GRPO trainer config
bf16: true
use_vllm: true
vllm_device: auto
vllm_gpu_memory_utilization: 0.7
do_eval: true
eval_strategy: steps
eval_steps: 100
gradient_accumulation_steps: 8
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
hub_model_id: Qwen-2.5-7B-Simple-RL
hub_strategy: every_save
learning_rate: 3.0e-06
log_level: info
logging_steps: 5
logging_strategy: steps
lr_scheduler_type: cosine
max_prompt_length: 512
max_completion_length: 1024
max_steps: -1
num_generations: 7
num_train_epochs: 1
output_dir: data/Qwen-2.5-7B-Simple-RL
overwrite_output_dir: true
per_device_eval_batch_size: 2
per_device_train_batch_size: 2
push_to_hub: true
report_to:
- wandb
save_strategy: "no"
seed: 42
warmup_ratio: 0.1
open-r1/recipes/Qwen2.5-Math-7B/grpo/config_simple_rl.yaml/0
{ "file_path": "open-r1/recipes/Qwen2.5-Math-7B/grpo/config_simple_rl.yaml", "repo_id": "open-r1", "token_count": 420 }
"""Reward functions for GRPO training.""" import math import re from latex2sympy2_extended import NormalizationConfig from math_verify import LatexExtractionConfig, parse, verify def accuracy_reward(completions, solution, **kwargs): """Reward function that checks if the completion is the same as the ground truth.""" contents = [completion[0]["content"] for completion in completions] rewards = [] for content, sol in zip(contents, solution): gold_parsed = parse( sol, extraction_mode="first_match", extraction_config=[LatexExtractionConfig()], ) if len(gold_parsed) != 0: # We require the answer to be provided in correct latex (no malformed operators) answer_parsed = parse( content, extraction_config=[ LatexExtractionConfig( normalization_config=NormalizationConfig( nits=False, malformed_operators=False, basic_latex=True, equations=True, boxed="all", units=True, ), # Ensures that boxed is tried first boxed_match_priority=0, try_extract_without_anchor=False, ) ], extraction_mode="first_match", ) # Reward 1 if the content is the same as the ground truth, 0 otherwise reward = float(verify(answer_parsed, gold_parsed)) else: # If the gold solution is not parseable, we reward 1 to skip this example reward = 1.0 print("Failed to parse gold solution: ", sol) rewards.append(reward) return rewards def format_reward(completions, **kwargs): """Reward function that checks if the completion has a specific format.""" pattern = r"^<think>.*?</think>\s*<answer>.*?</answer>$" completion_contents = [completion[0]["content"] for completion in completions] matches = [re.match(pattern, content, re.DOTALL | re.MULTILINE) for content in completion_contents] return [1.0 if match else 0.0 for match in matches] def reasoning_steps_reward(completions, **kwargs): """Reward function that checks for clear step-by-step reasoning. Regex pattern: Step \d+: - matches "Step 1:", "Step 2:", etc. ^\d+\. - matches numbered lists like "1.", "2.", etc. at start of line \n- - matches bullet points with hyphens \n\* - matches bullet points with asterisks First,|Second,|Next,|Finally, - matches transition words """ pattern = r"(Step \d+:|^\d+\.|\n-|\n\*|First,|Second,|Next,|Finally,)" completion_contents = [completion[0]["content"] for completion in completions] matches = [len(re.findall(pattern, content)) for content in completion_contents] # Magic nubmer 3 to encourage 3 steps and more, otherwise partial reward return [min(1.0, count / 3) for count in matches] def get_cosine_scaled_reward( min_value_wrong: float = -1.0, max_value_wrong: float = -0.5, min_value_correct: float = 0.5, max_value_correct: float = 1.0, max_len: int = 1000, ): def cosine_scaled_reward(completions, solution, **kwargs): """Reward function that scales based on completion length using a cosine schedule. Shorter correct solutions are rewarded more than longer ones. Longer incorrect solutions are penalized less than shorter ones. 
Args: completions: List of model completions solution: List of ground truth solutions This function is parameterized by the following arguments: min_value_wrong: Minimum reward for wrong answers max_value_wrong: Maximum reward for wrong answers min_value_correct: Minimum reward for correct answers max_value_correct: Maximum reward for correct answers max_len: Maximum length for scaling """ contents = [completion[0]["content"] for completion in completions] rewards = [] for content, sol in zip(contents, solution): gold_parsed = parse(sol, extraction_mode="first_match", extraction_config=[LatexExtractionConfig()]) if len(gold_parsed) == 0: rewards.append(1.0) # Skip unparseable examples print("Failed to parse gold solution: ", sol) continue answer_parsed = parse( content, extraction_config=[ LatexExtractionConfig( normalization_config=NormalizationConfig( nits=False, malformed_operators=False, basic_latex=True, equations=True, boxed=True, units=True, ), boxed_match_priority=0, try_extract_without_anchor=False, ) ], extraction_mode="first_match", ) is_correct = verify(answer_parsed, gold_parsed) gen_len = len(content) # Apply cosine scaling based on length progress = gen_len / max_len cosine = math.cos(progress * math.pi) if is_correct: min_value = min_value_correct max_value = max_value_correct else: # Swap min/max for incorrect answers min_value = max_value_wrong max_value = min_value_wrong reward = min_value + 0.5 * (max_value - min_value) * (1.0 + cosine) rewards.append(float(reward)) return rewards return cosine_scaled_reward
open-r1/src/open_r1/rewards.py/0
{ "file_path": "open-r1/src/open_r1/rewards.py", "repo_id": "open-r1", "token_count": 2787 }
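All of the reward functions above receive `completions` as a list in which each element is itself a list of chat-style `{"role", "content"}` messages, and they only read `completions[i][0]["content"]`. The toy example below shows that expected shape with the same `<think>/<answer>` regex used by `format_reward`, and checks two endpoints of the cosine length schedule by hand; it is an illustration of the interfaces, not a test of the open-r1 package itself.

```python
import math
import re

# The shape these reward functions expect: a list of completions,
# each being a list of chat messages.
completions = [
    [{"role": "assistant", "content": "<think>2+2=4</think>\n<answer>4</answer>"}],
    [{"role": "assistant", "content": "The answer is 4."}],
]

pattern = r"^<think>.*?</think>\s*<answer>.*?</answer>$"
contents = [completion[0]["content"] for completion in completions]
format_rewards = [1.0 if re.match(pattern, c, re.DOTALL | re.MULTILINE) else 0.0 for c in contents]
print(format_rewards)  # [1.0, 0.0]

# Two points of the cosine length schedule for a *correct* answer:
# length 0 gets the maximum reward, length max_len gets the minimum.
min_value, max_value, max_len = 0.5, 1.0, 1000
for gen_len in (0, max_len):
    cosine = math.cos(gen_len / max_len * math.pi)
    reward = min_value + 0.5 * (max_value - min_value) * (1.0 + cosine)
    print(gen_len, reward)  # 0 -> 1.0, 1000 -> 0.5
```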
# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SOURCEDIR = source BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
peft/docs/Makefile/0
{ "file_path": "peft/docs/Makefile", "repo_id": "peft", "token_count": 237 }
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Model merging

Training a model for each task can be costly, take up storage space, and the models aren't able to learn new information to improve their performance. Multitask learning can overcome some of these limitations by training a model to learn several tasks, but it is expensive to train and designing a dataset for it is challenging. *Model merging* offers a solution to these challenges by combining multiple pretrained models into one model, giving it the combined abilities of each individual model without any additional training.

PEFT provides several methods for merging models, such as a linear or SVD combination. This guide focuses on two methods that are more efficient for merging LoRA adapters by eliminating redundant parameters:

* [TIES](https://hf.co/papers/2306.01708) - TrIm, Elect, and Merge (TIES) is a three-step method for merging models. First, redundant parameters are trimmed, then conflicting signs are resolved into an aggregated vector, and finally the parameters whose signs are the same as the aggregate sign are averaged. This method takes into account that some values (redundant and sign disagreement) can degrade performance in the merged model.
* [DARE](https://hf.co/papers/2311.03099) - Drop And REscale is a method that can be used to prepare for other model merging methods like TIES. It works by randomly dropping parameters according to a drop rate and rescaling the remaining parameters. This helps to reduce the number of redundant and potentially interfering parameters among multiple models.

Models are merged with the [`~LoraModel.add_weighted_adapter`] method, and the specific model merging method is specified in the `combination_type` parameter.

## Merge method

With TIES and DARE, merging is enabled by setting `combination_type` to the desired method and `density` to the fraction of weights to keep from the individual models. For example, let's merge three finetuned [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T) models: [tinyllama_lora_norobots](https://huggingface.co/smangrul/tinyllama_lora_norobots), [tinyllama_lora_sql](https://huggingface.co/smangrul/tinyllama_lora_sql), and [tinyllama_lora_adcopy](https://huggingface.co/smangrul/tinyllama_lora_adcopy).

<Tip warning={true}>

When you're attempting to merge fully trained models with TIES, you should be aware of any special tokens each model may have added to the embedding layer which are not a part of the original checkpoint's vocabulary. This may cause an issue because each model may have added a special token to the same embedding position. If this is the case, you should use the [`~transformers.PreTrainedModel.resize_token_embeddings`] method to avoid merging the special tokens at the same embedding index.
<br> This shouldn't be an issue if you're only merging LoRA adapters trained from the same base model. </Tip> Load a base model and can use the [`~PeftModel.load_adapter`] method to load and assign each adapter a name: ```py from peft import PeftConfig, PeftModel from transformers import AutoModelForCausalLM, AutoTokenizer import torch config = PeftConfig.from_pretrained("smangrul/tinyllama_lora_norobots") model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, load_in_4bit=True, device_map="auto").eval() tokenizer = AutoTokenizer.from_pretrained("smangrul/tinyllama_lora_norobots") model.config.vocab_size = 32005 model.resize_token_embeddings(32005) model = PeftModel.from_pretrained(model, "smangrul/tinyllama_lora_norobots", adapter_name="norobots") _ = model.load_adapter("smangrul/tinyllama_lora_sql", adapter_name="sql") _ = model.load_adapter("smangrul/tinyllama_lora_adcopy", adapter_name="adcopy") ``` Set the adapters, weights, `adapter_name`, `combination_type`, and `density` with the [`~LoraModel.add_weighted_adapter`] method. <hfoptions id="merge-method"> <hfoption id="TIES"> Weight values greater than `1.0` typically produce better results because they preserve the correct scale. A good default starting value for the weights is to set all values to `1.0`. ```py adapters = ["norobots", "adcopy", "sql"] weights = [2.0, 1.0, 1.0] adapter_name = "merge" density = 0.2 model.add_weighted_adapter(adapters, weights, adapter_name, combination_type="ties", density=density) ``` </hfoption> <hfoption id="DARE"> ```py adapters = ["norobots", "adcopy", "sql"] weights = [2.0, 0.3, 0.7] adapter_name = "merge" density = 0.2 model.add_weighted_adapter(adapters, weights, adapter_name, combination_type="dare_ties", density=density) ``` </hfoption> </hfoptions> Set the newly merged model as the active model with the [`~LoraModel.set_adapter`] method. ```py model.set_adapter("merge") ``` Now you can use the merged model as an instruction-tuned model to write ad copy or SQL queries! <hfoptions id="ties"> <hfoption id="instruct"> ```py messages = [ {"role": "user", "content": "Write an essay about Generative AI."}, ] text = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) inputs = tokenizer(text, return_tensors="pt") inputs = {k: v.to("cuda") for k, v in inputs.items()} outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True, top_p=0.95, temperature=0.2, repetition_penalty=1.2, eos_token_id=tokenizer.eos_token_id) print(tokenizer.decode(outputs[0])) ``` </hfoption> <hfoption id="ad copy"> ```py messages = [ {"role": "system", "content": "Create a text ad given the following product and description."}, {"role": "user", "content": "Product: Sony PS5 PlayStation Console\nDescription: The PS5 console unleashes new gaming possibilities that you never anticipated."}, ] text = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) inputs = tokenizer(text, return_tensors="pt") inputs = {k: v.to("cuda") for k, v in inputs.items()} outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, top_p=0.95, temperature=0.2, repetition_penalty=1.2, eos_token_id=tokenizer.eos_token_id) print(tokenizer.decode(outputs[0])) ``` </hfoption> <hfoption id="SQL"> ```py text = """Table: 2-11365528-2 Columns: ['Team', 'Head Coach', 'President', 'Home Ground', 'Location'] Natural Query: Who is the Head Coach of the team whose President is Mario Volarevic? 
SQL Query:""" inputs = tokenizer(text, return_tensors="pt") inputs = {k: v.to("cuda") for k, v in inputs.items()} outputs = model.generate(**inputs, max_new_tokens=64, repetition_penalty=1.1, eos_token_id=tokenizer("</s>").input_ids[-1]) print(tokenizer.decode(outputs[0])) ``` </hfoption> </hfoptions> ## Merging (IA)³ Models The (IA)³ models facilitate linear merging of adapters. To merge adapters in an (IA)³ model, utilize the `add_weighted_adapter` method from the `IA3Model` class. This method is analogous to the `add_weighted_adapter` method used in `LoraModel`, with the key difference being the absence of the `combination_type` parameter. For example, to merge three (IA)³ adapters into a PEFT model, you would proceed as follows: ```py adapters = ["adapter1", "adapter2", "adapter3"] weights = [0.4, 0.3, 0.3] adapter_name = "merge" model.add_weighted_adapter(adapters, weights, adapter_name) ``` It is recommended that the weights sum to 1.0 to preserve the scale of the model. The merged model can then be set as the active model using the `set_adapter` method: ```py model.set_adapter("merge") ```
peft/docs/source/developer_guides/model_merging.md/0
{ "file_path": "peft/docs/source/developer_guides/model_merging.md", "repo_id": "peft", "token_count": 2547 }
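The TIES procedure described in the guide above (trim, elect signs, disjoint merge) can be illustrated on plain arrays. The sketch below is a conceptual NumPy version under simplifying assumptions (per-tensor trimming, equal task weights); it is not the code path PEFT runs inside `add_weighted_adapter`, just a way to see what "trim, elect, merge" does.

```python
import numpy as np


def ties_merge(task_vectors: list[np.ndarray], density: float = 0.2) -> np.ndarray:
    """Conceptual TIES merge: trim, elect sign, disjoint mean."""
    trimmed = []
    for v in task_vectors:
        # 1) Trim: keep only the top-`density` fraction of parameters by magnitude.
        k = max(1, int(round(density * v.size)))
        threshold = np.sort(np.abs(v).ravel())[-k]
        trimmed.append(np.where(np.abs(v) >= threshold, v, 0.0))
    stacked = np.stack(trimmed)

    # 2) Elect: the aggregate sign of each parameter is the sign of the summed values.
    elected_sign = np.sign(stacked.sum(axis=0))

    # 3) Disjoint merge: average only the values whose sign agrees with the elected sign.
    agree = np.sign(stacked) == elected_sign
    counts = np.clip(agree.sum(axis=0), 1, None)  # avoid division by zero
    return (stacked * agree).sum(axis=0) / counts


if __name__ == "__main__":
    a = np.array([0.9, -0.1, 0.4, 0.0])
    b = np.array([-0.8, 0.05, 0.5, 0.3])
    print(ties_merge([a, b], density=0.5))  # conflicting first entry resolved by sign election
```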
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Tuners

A tuner (or adapter) is a module that can be plugged into a `torch.nn.Module`. [`BaseTuner`] is the base class for other tuners and provides shared methods and attributes for preparing an adapter configuration and replacing a target module with the adapter module. [`BaseTunerLayer`] is a base class for adapter layers. It offers methods and attributes for managing adapters such as activating and disabling adapters.

## BaseTuner

[[autodoc]] tuners.tuners_utils.BaseTuner

## BaseTunerLayer

[[autodoc]] tuners.tuners_utils.BaseTunerLayer
peft/docs/source/package_reference/tuners.md/0
{ "file_path": "peft/docs/source/package_reference/tuners.md", "repo_id": "peft", "token_count": 330 }
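As a rough intuition for what a tuner layer does, the sketch below wraps a frozen `nn.Linear` with a small trainable low-rank update that can be toggled on and off. This mirrors the "replace a target module" and "activate/disable adapters" roles described above, but it is a from-scratch toy, not PEFT's actual `BaseTuner`/`BaseTunerLayer` API.

```python
import torch
from torch import nn


class ToyAdapterLinear(nn.Module):
    """A frozen linear layer plus a trainable low-rank adapter that can be disabled."""

    def __init__(self, base: nn.Linear, r: int = 4):
        super().__init__()
        self.base = base
        for p in self.base.parameters():
            p.requires_grad = False  # the base weights stay frozen

        # Low-rank update, initialized so the adapter starts as a no-op
        self.down = nn.Linear(base.in_features, r, bias=False)
        self.up = nn.Linear(r, base.out_features, bias=False)
        nn.init.zeros_(self.up.weight)

        self.adapter_enabled = True

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.base(x)
        if self.adapter_enabled:
            out = out + self.up(self.down(x))
        return out


base = nn.Linear(16, 8)
layer = ToyAdapterLinear(base, r=4)
x = torch.randn(2, 16)

layer.adapter_enabled = False
print(torch.allclose(layer(x), base(x)))  # True: a disabled adapter is a no-op
```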
IDX=$1 PROMPT_IDX=$((IDX % 25)) CLASS_IDX=$((IDX % 30)) # Define the UNIQUE_TOKEN, CLASS_TOKENs, and SUBJECT_NAMES UNIQUE_TOKEN="qwe" SUBJECT_NAMES=( "backpack" "backpack_dog" "bear_plushie" "berry_bowl" "can" "candle" "cat" "cat2" "clock" "colorful_sneaker" "dog" "dog2" "dog3" "dog5" "dog6" "dog7" "dog8" "duck_toy" "fancy_boot" "grey_sloth_plushie" "monster_toy" "pink_sunglasses" "poop_emoji" "rc_car" "red_cartoon" "robot_toy" "shiny_sneaker" "teapot" "vase" "wolf_plushie" ) CLASS_TOKENs=( "backpack" "backpack" "stuffed animal" "bowl" "can" "candle" "cat" "cat" "clock" "sneaker" "dog" "dog" "dog" "dog" "dog" "dog" "dog" "toy" "boot" "stuffed animal" "toy" "glasses" "toy" "toy" "cartoon" "toy" "sneaker" "teapot" "vase" "stuffed animal" ) CLASS_TOKEN=${CLASS_TOKENs[$CLASS_IDX]} SELECTED_SUBJECT=${SUBJECT_NAMES[$CLASS_IDX]} if [[ $CLASS_IDX =~ ^(0|1|2|3|4|5|8|9|17|18|19|20|21|22|23|24|25|26|27|28|29)$ ]]; then PROMPT_LIST=( "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the jungle." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the snow." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on the beach." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on a cobblestone street." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of pink fabric." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a wooden floor." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a city in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a mountain in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a blue house in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a purple rug in a forest." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a wheat field in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a tree and autumn leaves in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with the Eiffel Tower in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} floating on top of water." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} floating in an ocean of milk." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of green grass with sunflowers around it." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a mirror." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of the sidewalk in a crowded street." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a dirt road." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a white rug." "a red ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a purple ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a shiny ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a wet ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a cube shaped ${UNIQUE_TOKEN} ${CLASS_TOKEN}." 
) prompt_test_list=( "a ${CLASS_TOKEN} in the jungle" "a ${CLASS_TOKEN} in the snow" "a ${CLASS_TOKEN} on the beach" "a ${CLASS_TOKEN} on a cobblestone street" "a ${CLASS_TOKEN} on top of pink fabric" "a ${CLASS_TOKEN} on top of a wooden floor" "a ${CLASS_TOKEN} with a city in the background" "a ${CLASS_TOKEN} with a mountain in the background" "a ${CLASS_TOKEN} with a blue house in the background" "a ${CLASS_TOKEN} on top of a purple rug in a forest" "a ${CLASS_TOKEN} with a wheat field in the background" "a ${CLASS_TOKEN} with a tree and autumn leaves in the background" "a ${CLASS_TOKEN} with the Eiffel Tower in the background" "a ${CLASS_TOKEN} floating on top of water" "a ${CLASS_TOKEN} floating in an ocean of milk" "a ${CLASS_TOKEN} on top of green grass with sunflowers around it" "a ${CLASS_TOKEN} on top of a mirror" "a ${CLASS_TOKEN} on top of the sidewalk in a crowded street" "a ${CLASS_TOKEN} on top of a dirt road" "a ${CLASS_TOKEN} on top of a white rug" "a red ${CLASS_TOKEN}" "a purple ${CLASS_TOKEN}" "a shiny ${CLASS_TOKEN}" "a wet ${CLASS_TOKEN}" "a cube shaped ${CLASS_TOKEN}" ) else PROMPT_LIST=( "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the jungle." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in the snow." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on the beach." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on a cobblestone street." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of pink fabric." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a wooden floor." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a city in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a mountain in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} with a blue house in the background." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} on top of a purple rug in a forest." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a red hat." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a santa hat." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a rainbow scarf." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a black top hat and a monocle." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a chef outfit." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a firefighter outfit." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a police outfit." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing pink glasses." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} wearing a yellow shirt." "a ${UNIQUE_TOKEN} ${CLASS_TOKEN} in a purple wizard outfit." "a red ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a purple ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a shiny ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a wet ${UNIQUE_TOKEN} ${CLASS_TOKEN}." "a cube shaped ${UNIQUE_TOKEN} ${CLASS_TOKEN}." 
) prompt_test_list=( "a ${CLASS_TOKEN} in the jungle" "a ${CLASS_TOKEN} in the snow" "a ${CLASS_TOKEN} on the beach" "a ${CLASS_TOKEN} on a cobblestone street" "a ${CLASS_TOKEN} on top of pink fabric" "a ${CLASS_TOKEN} on top of a wooden floor" "a ${CLASS_TOKEN} with a city in the background" "a ${CLASS_TOKEN} with a mountain in the background" "a ${CLASS_TOKEN} with a blue house in the background" "a ${CLASS_TOKEN} on top of a purple rug in a forest" "a ${CLASS_TOKEN} wearing a red hat" "a ${CLASS_TOKEN} wearing a santa hat" "a ${CLASS_TOKEN} wearing a rainbow scarf" "a ${CLASS_TOKEN} wearing a black top hat and a monocle" "a ${CLASS_TOKEN} in a chef outfit" "a ${CLASS_TOKEN} in a firefighter outfit" "a ${CLASS_TOKEN} in a police outfit" "a ${CLASS_TOKEN} wearing pink glasses" "a ${CLASS_TOKEN} wearing a yellow shirt" "a ${CLASS_TOKEN} in a purple wizard outfit" "a red ${CLASS_TOKEN}" "a purple ${CLASS_TOKEN}" "a shiny ${CLASS_TOKEN}" "a wet ${CLASS_TOKEN}" "a cube shaped ${CLASS_TOKEN}" ) fi VALIDATION_PROMPT=${PROMPT_LIST[@]} INSTANCE_PROMPT="a photo of ${UNIQUE_TOKEN} ${CLASS_TOKEN}" CLASS_PROMPT="a photo of ${CLASS_TOKEN}" export MODEL_NAME="stabilityai/stable-diffusion-2-1" # export MODEL_NAME="runwayml/stable-diffusion-v1-5" PEFT_TYPE="boft" BLOCK_NUM=8 BLOCK_SIZE=0 N_BUTTERFLY_FACTOR=1 export PROJECT_NAME="dreambooth_${PEFT_TYPE}" export RUN_NAME="${SELECTED_SUBJECT}_${PEFT_TYPE}_${BLOCK_NUM}${BLOCK_SIZE}${N_BUTTERFLY_FACTOR}" export INSTANCE_DIR="./data/dreambooth/dataset/${SELECTED_SUBJECT}" export CLASS_DIR="./data/class_data/${CLASS_TOKEN}" export OUTPUT_DIR="./data/output/${PEFT_TYPE}" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --class_data_dir="$CLASS_DIR" \ --output_dir=$OUTPUT_DIR \ --wandb_project_name=$PROJECT_NAME \ --wandb_run_name=$RUN_NAME \ --with_prior_preservation --prior_loss_weight=1.0 \ --instance_prompt="$INSTANCE_PROMPT" \ --validation_prompt="$VALIDATION_PROMPT" \ --class_prompt="$CLASS_PROMPT" \ --resolution=512 \ --train_batch_size=1 \ --num_dataloader_workers=2 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --num_class_images=200 \ --use_boft \ --boft_block_num=$BLOCK_NUM \ --boft_block_size=$BLOCK_SIZE \ --boft_n_butterfly_factor=$N_BUTTERFLY_FACTOR \ --boft_dropout=0.1 \ --boft_bias="boft_only" \ --learning_rate=3e-5 \ --max_train_steps=1010 \ --checkpointing_steps=200 \ --validation_steps=200 \ --enable_xformers_memory_efficient_attention \ --report_to="wandb" \
peft/examples/boft_dreambooth/train_dreambooth.sh/0
{ "file_path": "peft/examples/boft_dreambooth/train_dreambooth.sh", "repo_id": "peft", "token_count": 3412 }
<jupyter_start><jupyter_code>from datasets import load_dataset from transformers import set_seed, AutoModelForSeq2SeqLM, AutoTokenizer from peft import get_peft_model, MultitaskPromptTuningConfig, TaskType, MultitaskPromptTuningInit set_seed(42) model_name = "google/flan-t5-base" peft_config = MultitaskPromptTuningConfig( tokenizer_name_or_path=model_name, num_tasks=2, task_type=TaskType.SEQ_2_SEQ_LM, prompt_tuning_init=MultitaskPromptTuningInit.TEXT, num_virtual_tokens=50, num_transformer_submodules=1, prompt_tuning_init_text="classify the following into either positive or negative, or entailment, neutral or contradiction:", ) tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) model = get_peft_model(model, peft_config) model = model.cuda() def send_to_device(batch): for i in batch: batch[i] = batch[i].cuda() return batch def get_sst2(split: str): examples = load_dataset("sst2")[split] result_examples = [] for example in examples: result_examples.append({}) result_examples[-1]["input"] = example["sentence"].strip() + "</s>" result_examples[-1]["output"] = ( f"positive{tokenizer.eos_token}" if example["label"] == 1 else f"negative{tokenizer.eos_token}" ) result_examples[-1]["task_id"] = 0 return result_examples def get_mnli(split: str): examples = load_dataset("multi_nli")[split] result_examples = [] for example in examples: result_examples.append({}) result_examples[-1]["input"] = example["premise"].strip() + " " + example["hypothesis"].strip() + "</s>" if example["label"] == 0: result_examples[-1]["output"] = f"entailment{tokenizer.eos_token}" elif example["label"] == 1: result_examples[-1]["output"] = f"neutral{tokenizer.eos_token}" else: result_examples[-1]["output"] = f"contradiction{tokenizer.eos_token}" result_examples[-1]["task_id"] = 1 return result_examples from typing import Tuple from torch.utils.data import Dataset, DataLoader import torch class MyDataset(Dataset): def __init__(self, split: str, mode: str = "source") -> None: super().__init__() if split == "train": if mode == "source": self.examples = get_sst2(split) + get_mnli(split) elif mode == "target": self.examples = get_sst2(split) if split == "val": self.examples = get_sst2("validation") if split == "test": self.examples = get_sst2("validation") def __getitem__(self, index) -> dict: return self.examples[index] def __len__(self) -> int: return len(self.examples) def __getitem__(self, index) -> dict: return self.examples[index] def __len__(self) -> int: return len(self.examples) def collate_fn(batch: dict) -> Tuple[torch.Tensor, torch.Tensor]: input = [i["input"] for i in batch] input = tokenizer(input, add_special_tokens=False, return_tensors="pt", padding=True) output = [i["output"] for i in batch] output = tokenizer(output, add_special_tokens=False, return_tensors="pt", padding=True).input_ids output[output == tokenizer.pad_token_id] = -100 task_ids = [i["task_id"] for i in batch] task_ids = torch.tensor(task_ids) return { "input_ids": input.input_ids, "attention_mask": input.attention_mask, "labels": output, "task_ids": task_ids, } train = DataLoader(MyDataset("train"), shuffle=True, batch_size=8, collate_fn=collate_fn) val = DataLoader(MyDataset("val"), shuffle=False, batch_size=8, collate_fn=collate_fn) test = DataLoader(MyDataset("test"), shuffle=False, batch_size=8, collate_fn=collate_fn)<jupyter_output><empty_output><jupyter_text>source training<jupyter_code>from torch.optim.adamw import AdamW from transformers import get_cosine_schedule_with_warmup 
from tqdm import tqdm from sklearn.metrics import f1_score POSITIVE_TOKEN_ID = tokenizer(" positive", add_special_tokens=False)["input_ids"][0] NEGATIVE_TOKEN_ID = tokenizer(" negative", add_special_tokens=False)["input_ids"][0] def classify(batch): batch = send_to_device(batch) # we pass labels here since we need to generate and peft doesn't support generation yet. # No clue how to get around this scores = model(**batch).logits preds = [] for i in range(scores.shape[0]): if scores[i, 0, POSITIVE_TOKEN_ID] > scores[i, 0, NEGATIVE_TOKEN_ID]: preds.append(POSITIVE_TOKEN_ID) else: preds.append(NEGATIVE_TOKEN_ID) return preds @torch.inference_mode() def evaluate(model, data): loss = 0 preds = [] golds = [] for batch in tqdm(data): batch = send_to_device(batch) loss += model(**batch).loss golds.extend(batch["labels"][:, 0].tolist()) preds.extend(classify(batch)) return loss / len(val), f1_score(golds, preds, pos_label=POSITIVE_TOKEN_ID) optimizer = AdamW(model.parameters(), lr=1e-4) scheduler = get_cosine_schedule_with_warmup(optimizer, 200, len(train)) n = 1000 step = 0 train_ = tqdm(train) val_loss, f1 = evaluate(model, val) print( f""" before source training val loss = {val_loss} f1 = {f1}""" ) for batch in train_: if step % n == 0: val_loss, f1 = evaluate(model, val) print( f""" step = {step} val loss = {val_loss} f1 = {f1}""" ) model.save_pretrained(f"checkpoints_source/{step}") step += 1 batch = send_to_device(batch) loss = model(**batch).loss loss.backward() optimizer.step() scheduler.step() train_.set_postfix(train_loss=loss)<jupyter_output><empty_output><jupyter_text>target training<jupyter_code>train = DataLoader(MyDataset("train", "target"), shuffle=True, batch_size=8, collate_fn=collate_fn) val = DataLoader(MyDataset("val", "target"), shuffle=False, batch_size=8, collate_fn=collate_fn) test = DataLoader(MyDataset("test", "target"), shuffle=False, batch_size=8, collate_fn=collate_fn)<jupyter_output><empty_output><jupyter_text>create a fresh model<jupyter_code>peft_config = MultitaskPromptTuningConfig( tokenizer_name_or_path=model_name, num_tasks=1, task_type=TaskType.SEQ_2_SEQ_LM, prompt_tuning_init=MultitaskPromptTuningInit.EXACT_SOURCE_TASK, prompt_tuning_init_state_dict_path="checkpoints_source/50000/adapter_model.bin", num_virtual_tokens=50, num_transformer_submodules=1, ) tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) model = get_peft_model(model, peft_config) model = model.cuda() optimizer = AdamW(model.parameters(), lr=1e-4) scheduler = get_cosine_schedule_with_warmup(optimizer, 200, len(train)) n = 1000 step = 0 train_ = tqdm(train) val_loss, f1 = evaluate(model, val) print( f""" before target training val loss = {val_loss} f1 = {f1}""" ) for batch in train_: if step % n == 0: val_loss, f1 = evaluate(model, val) print( f""" step = {step} val loss = {val_loss} f1 = {f1}""" ) model.save_pretrained(f"checkpoints_target/{step}") step += 1 batch = send_to_device(batch) loss = model(**batch).loss loss.backward() optimizer.step() scheduler.step() train_.set_postfix(train_loss=loss) # load last checkpoint for now from peft import set_peft_model_state_dict sd_6000 = torch.load("checkpoints_target/6000/adapter_model.bin") set_peft_model_state_dict(model, sd_6000) # evaluate val val_loss, f1 = evaluate(model, val) print( f""" final val loss = {val_loss} f1 = {f1}""" ) # evaluate test test_loss, f1 = evaluate(model, test) print( f""" final test loss = {test_loss} f1 = {f1}""" )<jupyter_output><empty_output>
peft/examples/conditional_generation/multitask_prompt_tuning.ipynb/0
{ "file_path": "peft/examples/conditional_generation/multitask_prompt_tuning.ipynb", "repo_id": "peft", "token_count": 3341 }
<jupyter_start><jupyter_text>CPT Training and InferenceThis notebook demonstrates the training and evaluation process of Context-Aware Prompt Tuning (CPT) using the Hugging Face Trainer. For more details, refer to the [Paper](https://huggingface.co/papers/2410.17222). Sections Overview:1. **Setup**: Import libraries and configure the environment.2. **Data Preparation**: Load and preprocess the dataset.3. **Model Training**: Configure and train the model.4. **Evaluation**: Test the model's performance and visualize results. Setup--- Installation<jupyter_code>!pip install datasets !pip install git+https://github.com/huggingface/peft<jupyter_output>Requirement already satisfied: datasets in /usr/local/lib/python3.10/dist-packages (3.1.0) Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from datasets) (3.16.1) Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from datasets) (1.26.4) Requirement already satisfied: pyarrow>=15.0.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (17.0.0) Requirement already satisfied: dill<0.3.9,>=0.3.0 in /usr/local/lib/python3.10/dist-packages (from datasets) (0.3.8) Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets) (2.2.2) Requirement already satisfied: requests>=2.32.2 in /usr/local/lib/python3.10/dist-packages (from datasets) (2.32.3) Requirement already satisfied: tqdm>=4.66.3 in /usr/local/lib/python3.10/dist-packages (from datasets) (4.66.6) Requirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from datasets) (3.5.0) Requirement already [...]<jupyter_text>Imports<jupyter_code>from typing import Any, Dict, List, Union import numpy as np import torch from datasets import load_dataset from torch.utils.data import Dataset from tqdm import tqdm from transformers import ( AutoModelForCausalLM, AutoTokenizer, DataCollatorForLanguageModeling, Trainer, TrainingArguments, ) from peft import CPTConfig, get_peft_model MAX_INPUT_LENGTH = 1024 MAX_ICL_SAMPLES = 10 NUM_TRAINING_SAMPLES = 100 model_id = 'bigscience/bloom-1b7'<jupyter_output><empty_output><jupyter_text>Data Preparation---<jupyter_code># Initialize the tokenizer tokenizer = AutoTokenizer.from_pretrained( model_id, # The name or path of the pre-trained tokenizer (e.g., "bert-base-uncased"). cache_dir='.', # Directory to cache the tokenizer files locally. padding_side='right', # Specifies that padding should be added to the right side of sequences. trust_remote_code=True # Allows loading tokenizer implementations from external sources. ) # Load the SST-2 dataset from the GLUE benchmark dataset = load_dataset('glue', 'sst2') def add_string_labels(example): """ Converts numerical labels into human-readable string labels. Args: example (dict): A single example from the dataset with a numerical 'label'. Returns: dict: The example augmented with a 'label_text' field. """ # Map numerical label to string label example['label_text'] = "positive" if example['label'] == 1 else "negative" return example # Subset and process the training dataset context_dataset = dataset['train'].select(range(MAX_ICL_SAMPLES)).map(add_string_labels) train_dataset = dataset['train'].select(range(MAX_ICL_SAMPLES, NUM_TRAINING_SAMPLES + MAX_ICL_SAMPLES)).map(add_string_labels)<jupyter_output><empty_output><jupyter_text>**Note:** This notebook uses small subsets of the dataset to ensure quick execution. 
For proper testing and evaluation, it is recommended to use the entire dataset by setting quick_review to False.<jupyter_code>quick_review = True # set to False for a comprehensive evaluation num_of_test_examples = 100 if quick_review else len(dataset['validation']) # Subset and process the validation dataset test_dataset = dataset['validation'].select(range(num_of_test_examples)).map(add_string_labels) class CPTDataset(Dataset): def __init__(self, samples, tokenizer, template, max_length=MAX_INPUT_LENGTH): """ Initialize the CPTDataset with samples, a tokenizer, and a template. Args: samples (list): List of samples containing input sentences and labels. tokenizer: Tokenizer instance for encoding text. template (dict): Dictionary defining input/output templates and separators. max_length (int): Maximum input length for truncation. """ self.template = template self.tokenizer = tokenizer self.max_length = max_length # Storage for tokenized inputs and masks self.attention_mask = [] self.input_ids = [] self.input_type_mask = [] self.inter_seperator_ids = self._get_input_ids(template['inter_seperator']) # Tokenize each sample and prepare inputs for sample_i in tqdm(samples): input_text, label = sample_i['sentence'], sample_i['label_text'] input_ids, attention_mask, input_type_mask = self.preprocess_sentence(input_text, label) self.input_ids.append(input_ids) self.attention_mask.append(attention_mask) self.input_type_mask.append(input_type_mask) def _get_input_ids(self, text): """ Tokenize the given text into input IDs. Args: text (str): The text to tokenize. Returns: list: Tokenized input IDs. """ return self.tokenizer(text, add_special_tokens=False)["input_ids"] def preprocess_sentence(self, input_text, label): """ Preprocess a sentence and its corresponding label using templates. Args: input_text (str): The input sentence. label (str): The label text (e.g., "positive", "negative"). 
Returns: tuple: (input_ids, attention_mask, input_type_mask) """ # Split input template into parts input_template_part_1_text, input_template_part_2_text = self.template['input'].split('{}') input_template_tokenized_part1 = self._get_input_ids(input_template_part_1_text) input_tokenized = self._get_input_ids(input_text) input_template_tokenized_part2 = self._get_input_ids(input_template_part_2_text) # Separator token sep_tokenized = self._get_input_ids(self.template['intra_seperator']) # Process the label using the template label_template_part_1, label_template_part_2 = self.template['output'].split('{}') label_template_part1_tokenized = self._get_input_ids(label_template_part_1) label_tokenized = self._get_input_ids(label) label_template_part2_tokenized = self._get_input_ids(label_template_part_2) # End-of-sequence token eos = [self.tokenizer.eos_token_id] if self.tokenizer.eos_token_id is not None else [] # Concatenate all tokenized parts input_ids = input_template_tokenized_part1 + input_tokenized + input_template_tokenized_part2 + sep_tokenized + label_template_part1_tokenized + label_tokenized + label_template_part2_tokenized + eos # Generate attention and type masks attention_mask = [1] * len(input_ids) input_type_mask = [1] * len(input_template_tokenized_part1) + [2] * len(input_tokenized) + [1] * len( input_template_tokenized_part2) + [0] * len(sep_tokenized) + \ [3] * len(label_template_part1_tokenized) + [4] * len(label_tokenized) + [3] * len( \ label_template_part2_tokenized) + [0] * len(eos) # Ensure all masks and inputs are the same length assert len(input_type_mask) == len(input_ids) == len(attention_mask) return input_ids, attention_mask, input_type_mask def __len__(self): """ Get the number of examples in the dataset. Returns: int: Number of examples. """ return len(self.input_ids) def __getitem__(self, idx): """ Get the tokenized representation for the given index. Args: idx (int): Index of the example. Returns: dict: Tokenized inputs with attention and type masks. """ return { "input_ids": self.input_ids[idx], "attention_mask": self.attention_mask[idx], "input_type_mask": self.input_type_mask[idx] } # Define templates for tokenization templates = { 'input': 'input: {}', # Input template with placeholder 'intra_seperator': ' ', # Separator between input and output 'output': 'output: {}', # Output template with placeholder 'inter_seperator': '\n' # Separator between examples } # Initialize the dataset cpt_train_dataset = CPTDataset(train_dataset, tokenizer, templates) # - `templates`: Define how inputs and outputs should be formatted and separated. # - `CPTDataset`: Converts the raw dataset into tokenized input IDs and masks. 
# Initialize storage for context-level information context_ids = [] # Concatenated input IDs for all samples context_attention_mask = [] # Concatenated attention masks context_input_type_mask = [] # Concatenated input type masks first_type_mask = 0 # Initial offset for input type mask cpt_context_dataset = CPTDataset(context_dataset, tokenizer, templates) # Iterate through the CPT training dataset for i in range(len(context_dataset)): # Add input IDs to the context context_ids += cpt_context_dataset[i]['input_ids'] # Add attention mask to the context context_attention_mask += cpt_context_dataset[i]['attention_mask'] # Adjust and add the input type mask to the context context_input_type_mask += [ i + first_type_mask if i > 0 else 0 # Increment type indices dynamically for i in cpt_context_dataset[i]['input_type_mask'] ] # Increment the type mask offset after processing the sample first_type_mask += 4<jupyter_output>100%|██████████| 10/10 [00:00<00:00, 1133.50it/s]<jupyter_text>Model Training--- Load model<jupyter_code># Load a pre-trained causal language model base_model = AutoModelForCausalLM.from_pretrained( model_id, cache_dir='.', torch_dtype=torch.float16, device_map='auto' ) # Initialize the CPT configuration config = CPTConfig( cpt_token_ids=context_ids, cpt_mask=context_attention_mask, cpt_tokens_type_mask=context_input_type_mask, opt_weighted_loss_type='decay', opt_loss_decay_factor=0.95, # we choose the exponential decay factor applied to the loss opt_projection_epsilon=0.2, # we choose the projection over the input tokens opt_projection_format_epsilon=0.1, # we choose the projection over input and output templates tokenizer_name_or_path=model_id, ) # Initialize the CPT model with PEFT model = get_peft_model(base_model, config)<jupyter_output><empty_output><jupyter_text>Setting Collate Function<jupyter_code>class CPTDataCollatorForLanguageModeling(DataCollatorForLanguageModeling): def __init__(self, tokenizer, training=True, mlm=False): """ Custom collator for CPT-style language modeling. Args: tokenizer: The tokenizer to handle tokenization and special tokens. training (bool): If True, operates in training mode; otherwise in evaluation mode. mlm (bool): If True, enables masked language modeling. """ super().__init__(tokenizer, mlm=mlm) # Initialize the parent class self.training = training # Add a special padding token if not already defined self.tokenizer.add_special_tokens({"pad_token": "[PAD]"}) def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]: """ Process a batch of examples for language modeling. Args: examples (List): A batch of examples with tokenized inputs and optional sample masks. Returns: Dict: A dictionary containing padded and tensor-converted inputs, attention masks, input type masks, and optional sample masks and labels. 
""" # Initialize a list to collect sample masks if provided list_sample_mask = [] for i in range(len(examples)): if "sample_mask" in examples[i].keys(): list_sample_mask.append(examples[i].pop("sample_mask")) # Define a helper function for padding sequences to the maximum length max_len = max(len(ex["input_ids"]) for ex in examples) # Define a helper function for padding sequences to the maximum length def pad_sequence(sequence, max_len, pad_value=0): return sequence + [pad_value] * (max_len - len(sequence)) # Pad and convert `input_ids`, `attention_mask`, and `input_type_mask` to tensors input_ids = torch.tensor([pad_sequence(ex["input_ids"], max_len) for ex in examples]) attention_mask = torch.tensor([pad_sequence(ex["attention_mask"], max_len) for ex in examples]) input_type_mask = torch.tensor([pad_sequence(ex["input_type_mask"], max_len) for ex in examples]) # Create the initial batch dictionary batch = {"input_ids": input_ids, "attention_mask": attention_mask, "input_type_mask": input_type_mask} # Create a tensor to store sample masks tensor_sample_mask = batch["input_ids"].clone().long() tensor_sample_mask[:, :] = 0 # Initialize with zeros # Populate the tensor with the provided sample masks for i in range(len(list_sample_mask)): tensor_sample_mask[i, : len(list_sample_mask[i])] = list_sample_mask[i] # Copy `input_ids` to use as `labels` batch["labels"] = batch["input_ids"].clone() # If in evaluation mode, include the `sample_mask` in the batch if not self.training: batch["sample_mask"] = tensor_sample_mask return batch<jupyter_output><empty_output><jupyter_text>Training<jupyter_code>training_args = TrainingArguments( output_dir='../.', use_cpu=False, auto_find_batch_size=False, learning_rate=1e-4, logging_steps=100, per_device_train_batch_size=1, save_total_limit=1, remove_unused_columns=False, num_train_epochs=5, fp16=True, save_strategy='no', logging_dir="logs", report_to="none" ) trainer = Trainer( model=model, args=training_args, train_dataset=cpt_train_dataset, # Custom CPT training dataset. 
data_collator=CPTDataCollatorForLanguageModeling(tokenizer, training=True, mlm=False) ) trainer.train()<jupyter_output><empty_output><jupyter_text>Model Evaluation---<jupyter_code>model.eval() # Select relevant columns from the test dataset test_dataset = test_dataset.select_columns(['sentence', 'label_text']) # Convert the test dataset to a CPT-compatible format cpt_test_dataset = CPTDataset(test_dataset, tokenizer, templates) # Get the device where the model is loaded (CPU or GPU) device = model.device list_bool_predictions = [] for i in range(len(test_dataset)): input_ids, input_type_mask = cpt_test_dataset[i]['input_ids'], cpt_test_dataset[i]['input_type_mask'] # Pass the inputs through the model outputs = model( input_ids=torch.Tensor(input_ids).long().to(device=device).view(1, -1), labels=torch.Tensor(input_ids).long().to(device=device).view(1, -1), input_type_mask=torch.Tensor(input_type_mask).long().to(device=device).view(1, -1) ) # Shift logits to exclude the last token and match the labels shifted_logits = outputs.logits[..., :-1, :].contiguous().to(model.dtype)[0, -len(input_ids) + 1:] shift_labels = torch.Tensor(input_ids).long().to(device=device).view(1, -1)[0, 1:].contiguous().to(device) shifted_input_type_mask = torch.Tensor(input_type_mask).long().to(device=device).view(1, -1)[..., 1:].contiguous().to(device) # Create a mask for the type `4` tokens (label tokens) mask = torch.Tensor(shifted_input_type_mask).long().to(device=device).view(-1,) == 4 # Extract logits and labels corresponding to the mask logit = shifted_logits[mask] label = shift_labels[mask] # All possible label tokens for `negative` and `positive` all_labels = torch.Tensor([tokenizer(i, add_special_tokens=False)["input_ids"] for i in ['negative', 'positive']]).long().to(device).view(-1,) # Compare logits with label tokens and infer prediction prediction = logit[0, torch.Tensor([tokenizer(i, add_special_tokens=False)["input_ids"] for i in ['negative', 'positive']]).long().to(device).view(-1,)].argmax() prediction_text = 'negative' if prediction == 0 else 'positive' print(f"Sentence: {tokenizer.decode(input_ids)} \n \t The prediction is: {prediction_text}\n \t The GT is {tokenizer.decode(label)}") list_bool_predictions.append(prediction_text == tokenizer.decode(label)) print(f'The model Acc is {100 * np.mean(list_bool_predictions)}%')<jupyter_output>100%|██████████| 100/100 [00:00<00:00, 1972.82it/s]
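A note on the `input_type_mask` built above: it is what lets CPT distinguish template tokens, raw input tokens, and label tokens. The following is a minimal, self-contained sketch of that convention — it is not part of the original notebook and uses words instead of real token IDs purely for illustration:

```python
# Toy illustration of the CPTDataset type-mask convention used above:
# 1 = input-template tokens, 2 = raw input tokens, 3 = output-template tokens,
# 4 = label tokens, 0 = separators / EOS.
def toy_preprocess(sentence, label, templates):
    in_part1, in_part2 = templates["input"].split("{}")
    out_part1, out_part2 = templates["output"].split("{}")
    pieces = [
        (in_part1.split(), 1),
        (sentence.split(), 2),
        (in_part2.split(), 1),
        (templates["intra_seperator"].split(), 0),
        (out_part1.split(), 3),
        (label.split(), 4),
        (out_part2.split(), 3),
    ]
    tokens, type_mask = [], []
    for toks, type_id in pieces:
        tokens += toks
        type_mask += [type_id] * len(toks)
    return tokens, type_mask


templates = {"input": "input: {}", "intra_seperator": " ", "output": "output: {}", "inter_seperator": "\n"}
print(toy_preprocess("a charming movie", "positive", templates))
# (['input:', 'a', 'charming', 'movie', 'output:', 'positive'], [1, 2, 2, 2, 3, 4])
```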
peft/examples/cpt_finetuning/cpt_train_and_inference.ipynb/0
{ "file_path": "peft/examples/cpt_finetuning/cpt_train_and_inference.ipynb", "repo_id": "peft", "token_count": 6570 }
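As a possible follow-up to the CPT notebook above (not part of the original), the trained adapter can be persisted and reloaded with the generic PEFT save/load APIs. The path below is hypothetical and `model` refers to the `PeftModel` produced by `get_peft_model`:

```python
# Hypothetical follow-up: save the trained CPT adapter and reload it onto a fresh base model.
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

model.save_pretrained("cpt-sst2-adapter")  # writes the adapter weights + adapter_config.json

base = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7", torch_dtype=torch.float16, device_map="auto"
)
restored = PeftModel.from_pretrained(base, "cpt-sst2-adapter")
restored.eval()
```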
<jupyter_start><jupyter_text>Finetuning Whisper-large-V2 on Colab using PEFT-Lora + BNB INT8 training In this Colab, we present a step-by-step guide on how to fine-tune Whisper for any multilingual ASR dataset using Hugging Face 🤗 Transformers and 🤗 PEFT. Using 🤗 PEFT and `bitsandbytes`, you can train the `whisper-large-v2` seamlessly on a colab with T4 GPU (16 GB VRAM). In this notebook, with most parts from [fine_tune_whisper.ipynb](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/fine_tune_whisper.ipynbscrollTo=BRdrdFIeU78w) is adapted to train using PEFT LoRA+BNB INT8.For more details on model, datasets and metrics, refer blog [Fine-Tune Whisper For Multilingual ASR with 🤗 Transformers](https://huggingface.co/blog/fine-tune-whisper) Inital Setup<jupyter_code>!add-apt-repository -y ppa:jonathonf/ffmpeg-4 !apt update !apt install -y ffmpeg !pip install datasets>=2.6.1 !pip install git+https://github.com/huggingface/transformers !pip install librosa !pip install evaluate>=0.30 !pip install jiwer !pip install gradio !pip install -q bitsandbytes datasets accelerate !pip install -q git+https://github.com/huggingface/transformers.git@main git+https://github.com/huggingface/peft.git@main<jupyter_output><empty_output><jupyter_text>Linking the notebook to the Hub is straightforward - it simply requires entering your Hub authentication token when prompted. Find your Hub authentication token [here](https://huggingface.co/settings/tokens):<jupyter_code>from huggingface_hub import notebook_login notebook_login() # Select CUDA device index import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" model_name_or_path = "openai/whisper-large-v2" language = "Marathi" language_abbr = "mr" task = "transcribe" dataset_name = "mozilla-foundation/common_voice_11_0"<jupyter_output><empty_output><jupyter_text>Load Dataset<jupyter_code>from datasets import load_dataset, DatasetDict common_voice = DatasetDict() common_voice["train"] = load_dataset(dataset_name, language_abbr, split="train+validation", use_auth_token=True) common_voice["test"] = load_dataset(dataset_name, language_abbr, split="test", use_auth_token=True) print(common_voice) common_voice = common_voice.remove_columns( ["accent", "age", "client_id", "down_votes", "gender", "locale", "path", "segment", "up_votes"] ) print(common_voice)<jupyter_output>DatasetDict({ train: Dataset({ features: ['audio', 'sentence'], num_rows: 3927 }) test: Dataset({ features: ['audio', 'sentence'], num_rows: 1816 }) })<jupyter_text>Prepare Feature Extractor, Tokenizer and Data<jupyter_code>from transformers import WhisperFeatureExtractor feature_extractor = WhisperFeatureExtractor.from_pretrained(model_name_or_path) from transformers import WhisperTokenizer tokenizer = WhisperTokenizer.from_pretrained(model_name_or_path, language=language, task=task) from transformers import WhisperProcessor processor = WhisperProcessor.from_pretrained(model_name_or_path, language=language, task=task)<jupyter_output><empty_output><jupyter_text>Prepare Data<jupyter_code>print(common_voice["train"][0])<jupyter_output>{'audio': {'path': '/root/.cache/huggingface/datasets/downloads/extracted/f7e1ef6a2d14f20194999aad5040c5d4bb3ead1377de3e1bbc6e9dba34d18a8a/common_voice_mr_30585613.mp3', 'array': array([-1.3727526e-15, -1.2400461e-13, -1.5159097e-13, ..., 4.7928120e-06, 3.5631349e-06, 1.6352631e-06], dtype=float32), 'sampling_rate': 48000}, 'sentence': 'आईचे आजारपण वाढत चालले, तसतशी मथीही नीट खातपीतनाशी झाली.'}<jupyter_text>Since our input audio is sampled at 48kHz, we 
need to _downsample_ it to 16kHz prior to passing it to the Whisper feature extractor, 16kHz being the sampling rate expected by the Whisper model. We'll set the audio inputs to the correct sampling rate using dataset's [`cast_column`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=cast_columndatasets.DatasetDict.cast_column)method. This operation does not change the audio in-place, but rather signals to `datasets` to resample audio samples _on the fly_ the first time that they are loaded:<jupyter_code>from datasets import Audio common_voice = common_voice.cast_column("audio", Audio(sampling_rate=16000))<jupyter_output><empty_output><jupyter_text>Re-loading the first audio sample in the Common Voice dataset will resample it to the desired sampling rate:<jupyter_code>print(common_voice["train"][0])<jupyter_output>{'audio': {'path': '/root/.cache/huggingface/datasets/downloads/extracted/f7e1ef6a2d14f20194999aad5040c5d4bb3ead1377de3e1bbc6e9dba34d18a8a/common_voice_mr_30585613.mp3', 'array': array([-4.4097186e-14, -9.4153831e-14, 3.4645775e-13, ..., -7.6018655e-06, -1.8617659e-06, 4.4520480e-06], dtype=float32), 'sampling_rate': 16000}, 'sentence': 'आईचे आजारपण वाढत चालले, तसतशी मथीही नीट खातपीतनाशी झाली.'}<jupyter_text>Now we can write a function to prepare our data ready for the model:1. We load and resample the audio data by calling `batch["audio"]`. As explained above, 🤗 Datasets performs any necessary resampling operations on the fly.2. We use the feature extractor to compute the log-Mel spectrogram input features from our 1-dimensional audio array.3. We encode the transcriptions to label ids through the use of the tokenizer.<jupyter_code>def prepare_dataset(batch): # load and resample audio data from 48 to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = feature_extractor(audio["array"], sampling_rate=audio["sampling_rate"]).input_features[0] # encode target text to label ids batch["labels"] = tokenizer(batch["sentence"]).input_ids return batch<jupyter_output><empty_output><jupyter_text>We can apply the data preparation function to all of our training examples using dataset's `.map` method. The argument `num_proc` specifies how many CPU cores to use. Setting `num_proc` > 1 will enable multiprocessing. 
If the `.map` method hangs with multiprocessing, set `num_proc=1` and process the dataset sequentially.<jupyter_code>common_voice = common_voice.map(prepare_dataset, remove_columns=common_voice.column_names["train"], num_proc=2) common_voice["train"]<jupyter_output><empty_output><jupyter_text>Training and Evaluation Define a Data Collator<jupyter_code>import torch from dataclasses import dataclass from typing import Any, Dict, List, Union @dataclass class DataCollatorSpeechSeq2SeqWithPadding: processor: Any def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need different padding methods # first treat the audio inputs by simply returning torch tensors input_features = [{"input_features": feature["input_features"]} for feature in features] batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt") # get the tokenized label sequences label_features = [{"input_ids": feature["labels"]} for feature in features] # pad the labels to max length labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt") # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's append later anyways if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item(): labels = labels[:, 1:] batch["labels"] = labels return batch<jupyter_output><empty_output><jupyter_text>Let's initialise the data collator we've just defined:<jupyter_code>data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)<jupyter_output><empty_output><jupyter_text>Evaluation Metrics We'll use the word error rate (WER) metric, the 'de-facto' metric for assessing ASR systems. For more information, refer to the WER [docs](https://huggingface.co/metrics/wer). We'll load the WER metric from 🤗 Evaluate:<jupyter_code>import evaluate metric = evaluate.load("wer")<jupyter_output><empty_output><jupyter_text>We then simply have to define a function that takes our model predictions and returns the WER metric. This function, called`compute_metrics`, first replaces `-100` with the `pad_token_id`in the `label_ids` (undoing the step we applied in the data collator to ignore padded tokens correctly in the loss).It then decodes the predicted and label ids to strings. Finally,it computes the WER between the predictions and reference labels:<jupyter_code>def compute_metrics(pred): pred_ids = pred.predictions label_ids = pred.label_ids # replace -100 with the pad_token_id label_ids[label_ids == -100] = tokenizer.pad_token_id # we do not want to group tokens when computing the metrics pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) label_str = tokenizer.batch_decode(label_ids, skip_special_tokens=True) wer = 100 * metric.compute(predictions=pred_str, references=label_str) return {"wer": wer}<jupyter_output><empty_output><jupyter_text>Load a Pre-Trained Checkpoint Now let's load the pre-trained Whisper `small` checkpoint. 
Again, this is trivial through use of 🤗 Transformers!<jupyter_code>from transformers import WhisperForConditionalGeneration, BitsAndBytesConfig model = WhisperForConditionalGeneration.from_pretrained(model_name_or_path, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) # model.hf_device_map - this should be {" ": 0}<jupyter_output><empty_output><jupyter_text>Override generation arguments - no tokens are forced as decoder outputs (see [`forced_decoder_ids`](https://huggingface.co/docs/transformers/main_classes/text_generationtransformers.generation_utils.GenerationMixin.generate.forced_decoder_ids)), no tokens are suppressed during generation (see [`suppress_tokens`](https://huggingface.co/docs/transformers/main_classes/text_generationtransformers.generation_utils.GenerationMixin.generate.suppress_tokens)):<jupyter_code>model.config.forced_decoder_ids = None model.config.suppress_tokens = []<jupyter_output><empty_output><jupyter_text>Post-processing on the modelFinally, we need to apply some post-processing on the 8-bit model to enable training, let's freeze all our layers, and cast all non `int8` layers in `float32` for stability.<jupyter_code>from peft import prepare_model_for_kbit_training model = prepare_model_for_kbit_training(model)<jupyter_output><empty_output><jupyter_text>Apply LoRAHere comes the magic with `peft`! Let's load a `PeftModel` and specify that we are going to use low-rank adapters (LoRA) using `get_peft_model` utility function from `peft`.<jupyter_code>from peft import LoraConfig, PeftModel, LoraModel, LoraConfig, get_peft_model config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none") model = get_peft_model(model, config) model.print_trainable_parameters()<jupyter_output>trainable params: 15728640 || all params: 1559033600 || trainable%: 1.0088711365810203<jupyter_text>We are ONLY using **1%** of the total trainable parameters, thereby performing **Parameter-Efficient Fine-Tuning** Define the Training Configuration In the final step, we define all the parameters related to training. For more detail on the training arguments, refer to the Seq2SeqTrainingArguments [docs](https://huggingface.co/docs/transformers/main_classes/trainertransformers.Seq2SeqTrainingArguments).<jupyter_code>from transformers import Seq2SeqTrainingArguments training_args = Seq2SeqTrainingArguments( output_dir="temp", # change to a repo name of your choice per_device_train_batch_size=8, gradient_accumulation_steps=1, # increase by 2x for every 2x decrease in batch size learning_rate=1e-3, warmup_steps=50, num_train_epochs=3, eval_strategy="epoch", fp16=True, per_device_eval_batch_size=8, generation_max_length=128, logging_steps=25, remove_unused_columns=False, # required as the PeftModel forward doesn't have the signature of the wrapped model's forward label_names=["labels"], # same reason as above )<jupyter_output><empty_output><jupyter_text>**Few Important Notes:**1. `remove_unused_columns=False` and `label_names=["labels"]` are required as the PeftModel's forward doesn't have the signature of the base model's forward.2. INT8 training required autocasting. `predict_with_generate` can't be passed to Trainer because it internally calls transformer's `generate` without autocasting leading to errors. 3. Because of point 2, `compute_metrics` shouldn't be passed to `Seq2SeqTrainer` as seen below. 
(commented out)<jupyter_code>from transformers import Seq2SeqTrainer, TrainerCallback, TrainingArguments, TrainerState, TrainerControl from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR class SavePeftModelCallback(TrainerCallback): def on_save( self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs, ): checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") peft_model_path = os.path.join(checkpoint_folder, "adapter_model") kwargs["model"].save_pretrained(peft_model_path) pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin") if os.path.exists(pytorch_model_path): os.remove(pytorch_model_path) return control trainer = Seq2SeqTrainer( args=training_args, model=model, train_dataset=common_voice["train"], eval_dataset=common_voice["test"], data_collator=data_collator, # compute_metrics=compute_metrics, tokenizer=processor.feature_extractor, callbacks=[SavePeftModelCallback], ) model.config.use_cache = False # silence the warnings. Please re-enable for inference! trainer.train() model_name_or_path = "openai/whisper-large-v2" peft_model_id = "smangrul/" + f"{model_name_or_path}-{model.peft_config.peft_type}-colab".replace("/", "-") model.push_to_hub(peft_model_id) print(peft_model_id)<jupyter_output>Uploading the following files to smangrul/openai-whisper-large-v2-LORA-colab: adapter_model.bin,adapter_config.json<jupyter_text>Evaluation and Inference **Important points to note while inferencing**:1. As `predict_with_generate` can't be used, we will write the eval loop with `torch.cuda.amp.autocast()` as shown below. 2. As the base model is frozen, the PEFT model sometimes fails to recognise the language while decoding. Hence, we force the starting tokens to mention the language we are transcribing. This is done via `forced_decoder_ids = processor.get_decoder_prompt_ids(language="Marathi", task="transcribe")` and passing that to the `model.generate` call.3. Please note that the [AutoEvaluate Leaderboard](https://huggingface.co/spaces/autoevaluate/leaderboards?dataset=mozilla-foundation%2Fcommon_voice_11_0&only_verified=0&task=automatic-speech-recognition&config=mr&split=test&metric=wer) for the `mr` language on `common_voice_11_0` has a bug wherein openai's `BasicTextNormalizer` is used during evaluation, leading to degraded output text; an example is shown below (a short reproduction sketch also follows this notebook):```without normalizer: 'स्विच्चान नरुवित्तीची पद्दत मोठ्या प्रमाणात आमलात आणल्या बसोन या दुपन्याने अनेक राथ प्रवेश केला आहे.'with normalizer: 'स व च च न नर व त त च पद दत म ठ य प रम ण त आमल त आणल य बस न य द पन य न अन क र थ प रव श क ल आह'```After fixing this bug, we report the two metrics for the top model of the leaderboard and the PEFT model:1. `wer`: `wer` without using the `BasicTextNormalizer`, as it doesn't cater to most Indic languages. This is what we consider the true performance metric.2. `normalized_wer`: `wer` using the `BasicTextNormalizer` to be comparable to the leaderboard metrics.Below are the results:| Model | DrishtiSharma/whisper-large-v2-marathi | smangrul/openai-whisper-large-v2-LORA-colab ||----------------|----------------------------------------|---------------------------------------------|| wer | 35.6457 | 36.1356 || normalized_wer | 13.6440 | 14.0165 |We see that the PEFT model's performance is comparable to the fully fine-tuned model at the top of the leaderboard.
At the same time, we are able to train the large model in a Colab notebook with limited GPU memory, with the added advantage that the resulting checkpoint is just `63` MB.<jupyter_code>from peft import PeftModel, PeftConfig from transformers import WhisperForConditionalGeneration, Seq2SeqTrainer peft_model_id = "smangrul/openai-whisper-large-v2-LORA-colab" peft_config = PeftConfig.from_pretrained(peft_model_id) model = WhisperForConditionalGeneration.from_pretrained( peft_config.base_model_name_or_path, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto" ) model = PeftModel.from_pretrained(model, peft_model_id) from torch.utils.data import DataLoader from tqdm import tqdm import numpy as np import gc eval_dataloader = DataLoader(common_voice["test"], batch_size=8, collate_fn=data_collator) model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): with torch.cuda.amp.autocast(): with torch.no_grad(): generated_tokens = ( model.generate( input_features=batch["input_features"].to("cuda"), decoder_input_ids=batch["labels"][:, :4].to("cuda"), max_new_tokens=255, ) .cpu() .numpy() ) labels = batch["labels"].cpu().numpy() labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) metric.add_batch( predictions=decoded_preds, references=decoded_labels, ) del generated_tokens, labels, batch gc.collect() wer = 100 * metric.compute() print(f"{wer=}")<jupyter_output><empty_output><jupyter_text>Using AutomaticSpeechRecognitionPipeline **Few important notes:**1. `pipe()` should be in the autocast context manager `with torch.cuda.amp.autocast():`2. `forced_decoder_ids` specifying the `language` being transcribed should be provided in the `generate_kwargs` dict.3. You will get a warning along the lines below, which is **safe to ignore**.```The model 'PeftModel' is not supported for .
Supported models are ['SpeechEncoderDecoderModel', 'Speech2TextForConditionalGeneration', 'SpeechT5ForSpeechToText', 'WhisperForConditionalGeneration', 'Data2VecAudioForCTC', 'HubertForCTC', 'MCTCTForCTC', 'SEWForCTC', 'SEWDForCTC', 'UniSpeechForCTC', 'UniSpeechSatForCTC', 'Wav2Vec2ForCTC', 'Wav2Vec2ConformerForCTC', 'WavLMForCTC'].```<jupyter_code>import torch import gradio as gr from transformers import ( AutomaticSpeechRecognitionPipeline, WhisperForConditionalGeneration, WhisperTokenizer, WhisperProcessor, ) from peft import PeftModel, PeftConfig peft_model_id = "smangrul/openai-whisper-large-v2-LORA-colab" language = "Marathi" task = "transcribe" peft_config = PeftConfig.from_pretrained(peft_model_id) model = WhisperForConditionalGeneration.from_pretrained( peft_config.base_model_name_or_path, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto" ) model = PeftModel.from_pretrained(model, peft_model_id) tokenizer = WhisperTokenizer.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task) processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task) feature_extractor = processor.feature_extractor forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task=task) pipe = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) def transcribe(audio): with torch.cuda.amp.autocast(): text = pipe(audio, generate_kwargs={"forced_decoder_ids": forced_decoder_ids}, max_new_tokens=255)["text"] return text iface = gr.Interface( fn=transcribe, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text", title="PEFT LoRA + INT8 Whisper Large V2 Marathi", description="Realtime demo for Marathi speech recognition using `PEFT-LoRA+INT8` fine-tuned Whisper Large V2 model.", ) iface.launch(share=True)<jupyter_output><empty_output>
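Since the evaluation above leans entirely on word error rate, here is a tiny, self-contained illustration of the metric (not from the notebook); it uses only the `evaluate` package that the notebook already installs:

```python
# WER = (substitutions + deletions + insertions) / number of reference words.
import evaluate

wer_metric = evaluate.load("wer")
references = ["the cat sat on the mat"]
predictions = ["the cat sit on mat"]  # one substitution ("sit") and one deletion ("the")
print(wer_metric.compute(predictions=predictions, references=references))  # 2 / 6 ≈ 0.333
```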
peft/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb/0
{ "file_path": "peft/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb", "repo_id": "peft", "token_count": 7713 }
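The normalization issue called out in the Whisper notebook above can be reproduced directly. This is an illustrative sketch (not from the notebook); the import path is the one used by recent `transformers` releases and the exact output depends on the installed version:

```python
# BasicTextNormalizer strips characters it treats as diacritics, which breaks Devanagari matras
# apart and makes normalized WER misleading for Marathi and other Indic languages.
from transformers.models.whisper.english_normalizer import BasicTextNormalizer

normalizer = BasicTextNormalizer()
text = "आईचे आजारपण वाढत चालले, तसतशी मथीही नीट खातपीतनाशी झाली."
print(normalizer(text))
```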
<jupyter_start><jupyter_text>Using PEFT with custom models `peft` allows us to fine-tune models efficiently with LoRA. In this short notebook, we will demonstrate how to train a simple multilayer perceptron (MLP) using `peft`. Imports Make sure that you have the latest version of `peft` installed. To ensure that, run this in your Python environment: python -m pip install --upgrade peft<jupyter_code>import copy import os # ignore bnb warnings os.environ["BITSANDBYTES_NOWELCOME"] = "1" import peft import torch from torch import nn import torch.nn.functional as F torch.manual_seed(0)<jupyter_output><empty_output><jupyter_text>Data We will create a toy dataset consisting of random data for a classification task. There is a little bit of signal in the data, so we should expect that the loss of the model can improve during training.<jupyter_code>X = torch.rand((1000, 20)) y = (X.sum(1) > 10).long() n_train = 800 batch_size = 64 train_dataloader = torch.utils.data.DataLoader( torch.utils.data.TensorDataset(X[:n_train], y[:n_train]), batch_size=batch_size, shuffle=True, ) eval_dataloader = torch.utils.data.DataLoader( torch.utils.data.TensorDataset(X[n_train:], y[n_train:]), batch_size=batch_size, )<jupyter_output><empty_output><jupyter_text>Model As a model, we use a simple multilayer perceptron (MLP). For demonstration purposes, we use a very large number of hidden units. This is totally overkill for this task but it helps to demonstrate the advantages of `peft`. In more realistic settings, models will also be quite large on average, so this is not far-fetched.<jupyter_code>class MLP(nn.Module): def __init__(self, num_units_hidden=2000): super().__init__() self.seq = nn.Sequential( nn.Linear(20, num_units_hidden), nn.ReLU(), nn.Linear(num_units_hidden, num_units_hidden), nn.ReLU(), nn.Linear(num_units_hidden, 2), nn.LogSoftmax(dim=-1), ) def forward(self, X): return self.seq(X)<jupyter_output><empty_output><jupyter_text>Training Here are just a few training hyper-parameters and a simple function that performs the training and evaluation loop.<jupyter_code>lr = 0.002 batch_size = 64 max_epochs = 30 device = "cpu" if not torch.cuda.is_available() else "cuda" def train(model, optimizer, criterion, train_dataloader, eval_dataloader, epochs): for epoch in range(epochs): model.train() train_loss = 0 for xb, yb in train_dataloader: xb = xb.to(device) yb = yb.to(device) outputs = model(xb) loss = criterion(outputs, yb) train_loss += loss.detach().float() loss.backward() optimizer.step() optimizer.zero_grad() model.eval() eval_loss = 0 for xb, yb in eval_dataloader: xb = xb.to(device) yb = yb.to(device) with torch.no_grad(): outputs = model(xb) loss = criterion(outputs, yb) eval_loss += loss.detach().float() eval_loss_total = (eval_loss / len(eval_dataloader)).item() train_loss_total = (train_loss / len(train_dataloader)).item() print(f"{epoch=:<2} {train_loss_total=:.4f} {eval_loss_total=:.4f}")<jupyter_output><empty_output><jupyter_text>Training without peft Let's start without using `peft` to see what we can expect from the model training.<jupyter_code>module = MLP().to(device) optimizer = torch.optim.Adam(module.parameters(), lr=lr) criterion = nn.CrossEntropyLoss() %time train(module, optimizer, criterion, train_dataloader, eval_dataloader, epochs=max_epochs)<jupyter_output>epoch=0 train_loss_total=0.7970 eval_loss_total=0.6472 epoch=1 train_loss_total=0.5597 eval_loss_total=0.4898 epoch=2 train_loss_total=0.3696 eval_loss_total=0.3323 epoch=3 train_loss_total=0.2364 eval_loss_total=0.5454 epoch=4 
train_loss_total=0.2428 eval_loss_total=0.2843 epoch=5 train_loss_total=0.1251 eval_loss_total=0.2514 epoch=6 train_loss_total=0.0952 eval_loss_total=0.2068 epoch=7 train_loss_total=0.0831 eval_loss_total=0.2395 epoch=8 train_loss_total=0.0655 eval_loss_total=0.2524 epoch=9 train_loss_total=0.0380 eval_loss_total=0.3650 epoch=10 train_loss_total=0.0363 eval_loss_total=0.3495 epoch=11 train_loss_total=0.0231 eval_loss_total=0.2360 epoch=12 train_loss_total=0.0162 eval_loss_total=0.2276 epoch=13 train_loss_total=0.0094 eval_loss_total=0.2716 epoch=14 train_loss_total=0.0065 eval_loss_total=0.2237 epoch=15 train_loss_total=0.0054 eval_loss_total=0.2366 epoch=16 train_loss_total=0.0035 eval_loss_total=0.2673 epoch=17 trai[...]<jupyter_text>Okay, so we got an eval loss of ~0.26, which is much better than random. Training with peft Now let's train with `peft`. First we check the names of the modules, so that we can configure `peft` to fine-tune the right modules.<jupyter_code>[(n, type(m)) for n, m in MLP().named_modules()]<jupyter_output><empty_output><jupyter_text>Next we can define the LoRA config. There is nothing special going on here. We set the LoRA rank to 8 and select the layers `seq.0` and `seq.2` to be used for LoRA fine-tuning. As for `seq.4`, which is the output layer, we set it as `module_to_save`, which means it is also trained but no LoRA is applied. *Note: Not all layers types can be fine-tuned with LoRA. At the moment, linear layers, embeddings, `Conv2D` and `transformers.pytorch_utils.Conv1D` are supported.<jupyter_code>config = peft.LoraConfig( r=8, target_modules=["seq.0", "seq.2"], modules_to_save=["seq.4"], )<jupyter_output><empty_output><jupyter_text>Now let's create the `peft` model by passing our initial MLP, as well as the config we just defined, to `get_peft_model`.<jupyter_code>module = MLP().to(device) module_copy = copy.deepcopy(module) # we keep a copy of the original model for later peft_model = peft.get_peft_model(module, config) optimizer = torch.optim.Adam(peft_model.parameters(), lr=lr) criterion = nn.CrossEntropyLoss() peft_model.print_trainable_parameters()<jupyter_output>trainable params: 56,164 || all params: 4,100,164 || trainable%: 1.369798866581922<jupyter_text>Checking the numbers, we see that only ~1% of parameters are actually trained, which is what we like to see.Now let's start the training:<jupyter_code>%time train(peft_model, optimizer, criterion, train_dataloader, eval_dataloader, epochs=max_epochs)<jupyter_output>epoch=0 train_loss_total=0.6918 eval_loss_total=0.6518 epoch=1 train_loss_total=0.5975 eval_loss_total=0.6125 epoch=2 train_loss_total=0.5402 eval_loss_total=0.4929 epoch=3 train_loss_total=0.3886 eval_loss_total=0.3476 epoch=4 train_loss_total=0.2677 eval_loss_total=0.3185 epoch=5 train_loss_total=0.1938 eval_loss_total=0.2294 epoch=6 train_loss_total=0.1712 eval_loss_total=0.2653 epoch=7 train_loss_total=0.1555 eval_loss_total=0.2764 epoch=8 train_loss_total=0.1218 eval_loss_total=0.2104 epoch=9 train_loss_total=0.0846 eval_loss_total=0.1756 epoch=10 train_loss_total=0.0710 eval_loss_total=0.1873 epoch=11 train_loss_total=0.0372 eval_loss_total=0.1539 epoch=12 train_loss_total=0.0350 eval_loss_total=0.2348 epoch=13 train_loss_total=0.0298 eval_loss_total=0.4605 epoch=14 train_loss_total=0.0355 eval_loss_total=0.2208 epoch=15 train_loss_total=0.0099 eval_loss_total=0.1583 epoch=16 train_loss_total=0.0051 eval_loss_total=0.2042 epoch=17 trai[...]<jupyter_text>In the end, we see that the eval loss is very similar to the one we saw 
earlier when we trained without `peft`. This is quite nice to see, given that we are training a much smaller number of parameters. Check which parameters were updated Finally, just to check that LoRA was applied as expected, we check which original weights were updated and which stayed the same.<jupyter_code>for name, param in peft_model.base_model.named_parameters(): if "lora" not in name: continue print(f"New parameter {name:<13} | {param.numel():>5} parameters | updated") params_before = dict(module_copy.named_parameters()) for name, param in peft_model.base_model.named_parameters(): if "lora" in name: continue name_before = ( name.partition(".")[-1].replace("original_", "").replace("module.", "").replace("modules_to_save.default.", "") ) param_before = params_before[name_before] if torch.allclose(param, param_before): print(f"Parameter {name_before:<13} | {param.numel():>7} parameters | not updated") else: print(f"Parameter {name_before:<13} | {param.numel():>7} parameters | updated")<jupyter_output>Parameter seq.0.weight | 40000 parameters | not updated Parameter seq.0.bias | 2000 parameters | not updated Parameter seq.2.weight | 4000000 parameters | not updated Parameter seq.2.bias | 2000 parameters | not updated Parameter seq.4.weight | 4000 parameters | not updated Parameter seq.4.bias | 2 parameters | not updated Parameter seq.4.weight | 4000 parameters | updated Parameter seq.4.bias | 2 parameters | updated<jupyter_text>So we can see that apart from the new LoRA weights that were added, only the last layer was updated. Since the LoRA weights and the last layer have comparatively few parameters, this gives us a big boost in efficiency. Sharing the model through Hugging Face Hub Pushing the model to HF Hub With the `peft` model, it is also very easy to push a model to the Hugging Face Hub. Below, we demonstrate how it works. It is assumed that you have a valid Hugging Face account and are logged in:<jupyter_code>user = "BenjaminB" # put your user name here model_name = "peft-lora-with-custom-model" model_id = f"{user}/{model_name}" peft_model.push_to_hub(model_id);<jupyter_output><empty_output><jupyter_text>As we can see, the adapter size is only 211 kB. Loading the model from HF Hub Now, it only takes one step to load the model from HF Hub. To do this, we can use `PeftModel.from_pretrained`, passing our base model and the model ID:<jupyter_code>loaded = peft.PeftModel.from_pretrained(module_copy, model_id) type(loaded)<jupyter_output><empty_output><jupyter_text>Let's check that the two models produce the same output:<jupyter_code>y_peft = peft_model(X.to(device)) y_loaded = loaded(X.to(device)) torch.allclose(y_peft, y_loaded)<jupyter_output><empty_output><jupyter_text>Clean up Finally, as a cleanup step, you may want to delete the repo.<jupyter_code>from huggingface_hub import delete_repo delete_repo(model_id)<jupyter_output><empty_output>
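To make the parameter savings concrete, here is a quick back-of-the-envelope check (not part of the notebook; the number printed by `print_trainable_parameters` also counts biases and the `modules_to_save` bookkeeping, so it differs slightly):

```python
# A rank-r LoRA adapter on an (in_features, out_features) Linear layer adds r * (in + out) weights.
def lora_params(in_features, out_features, r=8):
    return r * (in_features + out_features)

full = 20 * 2000 + 2000 * 2000 + 2000 * 2                    # weight counts of seq.0, seq.2, seq.4
adapters = lora_params(20, 2000) + lora_params(2000, 2000)   # LoRA on seq.0 and seq.2
head = 2000 * 2 + 2                                          # seq.4 is trained in full via modules_to_save
print(full, adapters + head, (adapters + head) / full)       # ~4.04M base vs ~52k trainable (~1.3%)
```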
peft/examples/multilayer_perceptron/multilayer_perceptron_lora.ipynb/0
{ "file_path": "peft/examples/multilayer_perceptron/multilayer_perceptron_lora.ipynb", "repo_id": "peft", "token_count": 4094 }
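A common follow-up to the notebook above (not shown there) is to fold the LoRA weights into the base model once training is done, so that inference no longer needs the PEFT wrapper. A sketch, assuming the notebook's `peft_model`, `X` and `device` variables are still in scope:

```python
# merge_and_unload() writes W + (B @ A) * scaling back into the wrapped Linear layers and
# returns the plain base model (modules_to_save layers are replaced by their trained copies).
import torch

y_before = peft_model(X.to(device))
merged = peft_model.merge_and_unload()
y_after = merged(X.to(device))
print(torch.allclose(y_before, y_after, atol=1e-6))
```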
accelerate launch --config_file "configs/fsdp_config.yaml" train.py \ --seed 100 \ --model_name_or_path "meta-llama/Llama-2-70b-hf" \ --dataset_name "smangrul/ultrachat-10k-chatml" \ --chat_template_format "chatml" \ --add_special_tokens False \ --append_concat_token False \ --splits "train,test" \ --max_seq_len 2048 \ --num_train_epochs 1 \ --logging_steps 5 \ --log_level "info" \ --logging_strategy "steps" \ --eval_strategy "epoch" \ --save_strategy "epoch" \ --push_to_hub \ --hub_private_repo True \ --hub_strategy "every_save" \ --bf16 True \ --packing True \ --learning_rate 1e-4 \ --lr_scheduler_type "cosine" \ --weight_decay 1e-4 \ --warmup_ratio 0.0 \ --max_grad_norm 1.0 \ --output_dir "mistral-sft-lora-fsdp" \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --gradient_accumulation_steps 4 \ --gradient_checkpointing True \ --use_reentrant False \ --dataset_text_field "content" \ --use_flash_attn True \ --use_peft_lora True \ --lora_r 8 \ --lora_alpha 16 \ --lora_dropout 0.1 \ --lora_target_modules "all-linear" \ --use_4bit_quantization False
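For reference, the LoRA-related flags in the launch command above roughly correspond to a `LoraConfig` like the following. This is an assumption about how the training script wires those flags, not a verbatim excerpt from it:

```python
from peft import LoraConfig

# Mirrors --lora_r 8 --lora_alpha 16 --lora_dropout 0.1 --lora_target_modules "all-linear"
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules="all-linear",  # PEFT shorthand for every linear layer except the output head
    task_type="CAUSAL_LM",
)
```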
peft/examples/sft/run_peft_fsdp.sh/0
{ "file_path": "peft/examples/sft/run_peft_fsdp.sh", "repo_id": "peft", "token_count": 453 }
""" Utility to clean cache files that exceed a specific time in days according to their last access time recorded in the cache. Exit code: - 1 if no candidates are found - 0 if candidates are found Deletion can be enabled by passing `-d` parameter, otherwise it will only list the candidates. """ import sys from datetime import datetime as dt from huggingface_hub import scan_cache_dir def find_old_revisions(scan_results, max_age_days=30): """Find commit hashes of objects in the cache. These objects need a last access time that is above the passed `max_age_days` parameter. Returns an empty list if no objects are found. Time measurement is based of the current time and the recorded last access tiem in the cache. """ now = dt.now() revisions = [(i.revisions, i.last_accessed) for i in scan_results.repos] revisions_ages = [(rev, (now - dt.fromtimestamp(ts_access)).days) for rev, ts_access in revisions] delete_candidates = [rev for rev, age in revisions_ages if age > max_age_days] hashes = [n.commit_hash for rev in delete_candidates for n in rev] return hashes def delete_old_revisions(scan_results, delete_candidates, do_delete=False): delete_operation = scan_results.delete_revisions(*delete_candidates) print(f"Would free {delete_operation.expected_freed_size_str}") print(f"Candidates: {delete_candidates}") if do_delete: print("Deleting now.") delete_operation.execute() else: print("Not deleting, pass the -d flag.") if __name__ == "__main__": from argparse import ArgumentParser parser = ArgumentParser() parser.add_argument("-a", "--max-age", type=int, default=30, help="Max. age in days items in the cache may have.") parser.add_argument( "-d", "--delete", action="store_true", help=( "Delete mode; Really delete items if there are candidates. Exit code = 0 when we found something to delete, 1 " "otherwise." ), ) args = parser.parse_args() scan_results = scan_cache_dir() delete_candidates = find_old_revisions(scan_results, args.max_age) if not delete_candidates: print("No delete candidates found, not deleting anything.") sys.exit(1) delete_old_revisions(scan_results, delete_candidates, do_delete=args.delete)
peft/scripts/ci_clean_cache.py/0
{ "file_path": "peft/scripts/ci_clean_cache.py", "repo_id": "peft", "token_count": 807 }
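The helpers in the cache-cleaning script above can also be exercised from a Python session. A small illustrative dry run (no deletion):

```python
# Assumes find_old_revisions / delete_old_revisions from the script above are importable or pasted in.
from huggingface_hub import scan_cache_dir

scan = scan_cache_dir()
candidates = find_old_revisions(scan, max_age_days=60)
if candidates:
    delete_old_revisions(scan, candidates, do_delete=False)  # only prints what would be freed
else:
    print("Nothing in the cache has been idle for more than 60 days.")
```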
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from itertools import chain from typing import Dict, Type, Union import torch from torch import nn from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner from .layer import Conv2d, Linear, LoHaLayer class LoHaModel(LycorisTuner): """ Creates Low-Rank Hadamard Product model from a pretrained model. The method is partially described in https://arxiv.org/abs/2108.06098 Current implementation heavily borrows from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py Args: model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. config ([`LoHaConfig`]): The configuration of the LoHa model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading process. Returns: `torch.nn.Module`: The LoHa model. Example: ```py >>> from diffusers import StableDiffusionPipeline >>> from peft import LoHaModel, LoHaConfig >>> config_te = LoHaConfig( ... r=8, ... lora_alpha=32, ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], ... rank_dropout=0.0, ... module_dropout=0.0, ... init_weights=True, ... ) >>> config_unet = LoHaConfig( ... r=8, ... lora_alpha=32, ... target_modules=[ ... "proj_in", ... "proj_out", ... "to_k", ... "to_q", ... "to_v", ... "to_out.0", ... "ff.net.0.proj", ... "ff.net.2", ... ], ... rank_dropout=0.0, ... module_dropout=0.0, ... init_weights=True, ... use_effective_conv2d=True, ... ) >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") >>> model.text_encoder = LoHaModel(model.text_encoder, config_te, "default") >>> model.unet = LoHaModel(model.unet, config_unet, "default") ``` **Attributes**: - **model** ([`~torch.nn.Module`]) -- The model to be adapted. - **peft_config** ([`LoHaConfig`]): The configuration of the LoHa model. """ prefix: str = "hada_" layers_mapping: Dict[Type[torch.nn.Module], Type[LoHaLayer]] = { torch.nn.Conv2d: Conv2d, torch.nn.Linear: Linear, } def _create_and_replace( self, config: LycorisConfig, adapter_name: str, target: Union[LoHaLayer, nn.Module], target_name: str, parent: nn.Module, current_key: str, ) -> None: """ A private method to create and replace the target module with the adapter module. 
""" # Regexp matching - Find key which matches current target_name in patterns provided pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys())) target_name_key = next(filter(lambda key: re.match(rf"(.*\.)?{key}$", current_key), pattern_keys), target_name) kwargs = config.to_dict() kwargs["r"] = config.rank_pattern.get(target_name_key, config.r) kwargs["alpha"] = config.alpha_pattern.get(target_name_key, config.alpha) if isinstance(target, LoHaLayer): target.update_layer(adapter_name, **kwargs) else: new_module = self._create_new_module(config, adapter_name, target, **kwargs) self._replace_module(parent, target_name, new_module, target)
peft/src/peft/tuners/loha/model.py/0
{ "file_path": "peft/src/peft/tuners/loha/model.py", "repo_id": "peft", "token_count": 1880 }
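The `rank_pattern` / `alpha_pattern` regexp matching in `_create_and_replace` above is typically driven from the config. A sketch of such a config (field names taken from the code above; the exact `LoHaConfig` signature may differ between PEFT versions):

```python
from peft import LoHaConfig

config = LoHaConfig(
    r=8,
    alpha=16,
    target_modules=["to_q", "to_k", "to_v"],
    rank_pattern={"to_v": 16},   # modules whose key matches r"(.*\.)?to_v$" get r=16
    alpha_pattern={"to_v": 32},  # ... and alpha=32; everything else keeps the defaults
)
```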
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import math import warnings from typing import Any, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from accelerate.utils.imports import is_xpu_available from torch import svd_lowrank from transformers.pytorch_utils import Conv1D from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils.integrations import ( dequantize_module_weight, gather_params_ctx, get_bnb_param_type, skip_init_on_device, ) from peft.utils.other import transpose from .config import LoraConfig from .dora import DoraConv2dLayer, DoraConv3dLayer, DoraEmbeddingLayer, DoraLinearLayer, _DoraConvNdLayer class LoraLayer(BaseTunerLayer): # All names of layers that may contain (trainable) adapter weights adapter_layer_names = ("lora_A", "lora_B", "lora_embedding_A", "lora_embedding_B") # All names of other parameters that may contain adapter-related parameters other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout") def __init__(self, base_layer: nn.Module, ephemeral_gpu_offload: bool = False, **kwargs) -> None: self.base_layer = base_layer self.r = {} self.lora_alpha = {} self.scaling = {} self.lora_dropout = nn.ModuleDict({}) self.lora_A = nn.ModuleDict({}) self.lora_B = nn.ModuleDict({}) # For Embedding layer self.lora_embedding_A = nn.ParameterDict({}) self.lora_embedding_B = nn.ParameterDict({}) # Mark the weight as unmerged self._disable_adapters = False self.merged_adapters = [] self.use_dora: dict[str, bool] = {} self.lora_bias: dict[str, bool] = {} self.lora_magnitude_vector = torch.nn.ModuleDict() # for DoRA self._caches: dict[str, Any] = {} self.ephemeral_gpu_offload: bool = ephemeral_gpu_offload # flag to enable/disable casting of input to weight dtype during forward call self.cast_input_dtype_enabled: bool = True self.kwargs = kwargs base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): in_features, out_features = base_layer.in_features, base_layer.out_features elif isinstance(base_layer, nn.Conv1d): in_features, out_features = base_layer.in_channels, base_layer.out_channels elif isinstance(base_layer, nn.Conv2d): in_features, out_features = base_layer.in_channels, base_layer.out_channels elif isinstance(base_layer, nn.Conv3d): in_features, out_features = base_layer.in_channels, base_layer.out_channels elif isinstance(base_layer, nn.Embedding): in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim elif isinstance(base_layer, Conv1D): in_features, out_features = ( base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape ) elif isinstance(base_layer, nn.MultiheadAttention): if not base_layer._qkv_same_embed_dim: raise ValueError(f"Only same dim for query/key/value is supported as of now for {self.__class__}.") in_features, out_features = base_layer.embed_dim, 3 * base_layer.embed_dim elif hasattr(base_layer, "infeatures") and 
hasattr(base_layer, "outfeatures"): # QuantLinear in_features, out_features = base_layer.infeatures, base_layer.outfeatures elif hasattr(base_layer, "input_size") and hasattr(base_layer, "output_size"): # Megatron ColumnParallelLinear,RowParallelLinear in_features, out_features = base_layer.input_size, base_layer.output_size elif hasattr(base_layer, "codebooks") and base_layer.__class__.__name__ == "QuantizedLinear": # AQLM QuantLinear in_features, out_features = base_layer.in_features, base_layer.out_features elif hasattr(base_layer, "w_bit") and base_layer.__class__.__name__ == "WQLinear_GEMM": # Awq layers in_features, out_features = base_layer.in_features, base_layer.out_features elif base_layer.__class__.__name__ == "EetqLinear": # Eetq layers in_features, out_features = base_layer.in_features, base_layer.out_features elif hasattr(base_layer, "W_q") and base_layer.__class__.__name__ == "HQQLinear": # HQQ layers in_features, out_features = base_layer.in_features, base_layer.out_features else: # possibly support user provided custom layer types using dynamic dispatch if hasattr(base_layer, "in_features") and hasattr(base_layer, "out_features"): in_features, out_features = base_layer.in_features, base_layer.out_features else: in_features, out_features = None, None warnings.warn( f"Unsupported layer type '{type(base_layer)}' encountered, proceed at your own risk.", UserWarning ) self.in_features = in_features self.out_features = out_features def update_layer( self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora: bool = False, lora_bias: bool = False, ): # This code works for linear layers, override for other layer types if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer})) # Actual trainable parameters self.lora_A[adapter_name] = nn.Linear(self.in_features, r, bias=False) self.lora_B[adapter_name] = nn.Linear(r, self.out_features, bias=lora_bias) self.lora_bias[adapter_name] = lora_bias if use_rslora: self.scaling[adapter_name] = lora_alpha / math.sqrt(r) else: self.scaling[adapter_name] = lora_alpha / r # for inits that require access to the base weight, use gather_param_ctx so that the weight is gathered when using DeepSpeed if isinstance(init_lora_weights, str) and init_lora_weights.startswith("pissa"): with gather_params_ctx(self.get_base_layer().weight): self.pissa_init(adapter_name, init_lora_weights) elif isinstance(init_lora_weights, str) and init_lora_weights.startswith("corda"): with gather_params_ctx(self.get_base_layer().weight): self.corda_init(adapter_name, init_lora_weights) elif isinstance(init_lora_weights, str) and init_lora_weights.lower() == "olora": with gather_params_ctx(self.get_base_layer().weight): self.olora_init(adapter_name) elif init_lora_weights == "loftq": with gather_params_ctx(self.get_base_layer().weight): self.loftq_init(adapter_name) elif init_lora_weights == "eva": nn.init.zeros_(self.lora_B[adapter_name].weight) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) # call this before dora_init self._move_adapter_to_device_of_base_layer(adapter_name) if use_dora: self.dora_init(adapter_name) self.use_dora[adapter_name] = True else: self.use_dora[adapter_name] = False 
self.set_adapter(self.active_adapters) def reset_lora_parameters(self, adapter_name, init_lora_weights): if init_lora_weights is False: return if adapter_name in self.lora_A.keys(): if init_lora_weights is True: # initialize A the same way as the default for nn.Linear and B to zero # https://github.com/microsoft/LoRA/blob/a0a92e0f26c067cf94747bdbf1ce73793fa44d19/loralib/layers.py#L124 nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5)) elif init_lora_weights.lower() == "gaussian": nn.init.normal_(self.lora_A[adapter_name].weight, std=1 / self.r[adapter_name]) else: raise ValueError(f"Unknown initialization {init_lora_weights=}") nn.init.zeros_(self.lora_B[adapter_name].weight) if self.lora_bias[adapter_name]: nn.init.zeros_(self.lora_B[adapter_name].bias) if adapter_name in self.lora_embedding_A.keys(): # Initialize A to zeros and B the same way as the default for nn.Embedding, see: # https://github.com/microsoft/LoRA/blob/4c0333854cb905966f8cc4e9a74068c1e507c7b7/loralib/layers.py#L59-L60 nn.init.zeros_(self.lora_embedding_A[adapter_name]) nn.init.normal_(self.lora_embedding_B[adapter_name]) if self.lora_bias[adapter_name]: # embeddings are not supported at the moment, but still adding this for consistency nn.init.zeros_(self.lora_embedding_B[adapter_name].bias) def olora_init(self, adapter_name): base_layer = self.get_base_layer() orig_weight = base_layer.weight bnb_param_type = get_bnb_param_type(orig_weight) dtype = orig_weight.dtype if bnb_param_type: # check without importing bitsandbytes and robust to bnb_4bit_quant_storage=float* weight_tensor = dequantize_module_weight(base_layer) elif dtype in [torch.float32, torch.float16, torch.bfloat16]: weight_tensor = orig_weight else: raise TypeError(f"Unsupported data type for the base layer. Got {dtype}.") scale_factor = self.scaling[adapter_name] r = self.r[adapter_name] weight_tensor = weight_tensor.to(torch.float32) Q, R = torch.linalg.qr(weight_tensor.data) Qr, Rr = Q[:, :r], R[:r] self.lora_A[adapter_name].weight.data = Rr.contiguous() self.lora_B[adapter_name].weight.data = Qr.contiguous() weight_tensor.data -= scale_factor * self.lora_B[adapter_name].weight @ self.lora_A[adapter_name].weight if bnb_param_type == "4bit": weight_tensor = orig_weight.__class__( weight_tensor, quant_type=orig_weight.quant_type, quant_storage=orig_weight.quant_storage, compress_statistics=orig_weight.compress_statistics, module=orig_weight.module, ).to(orig_weight.device) base_layer.weight = weight_tensor elif bnb_param_type == "8bit": weight_tensor = orig_weight.__class__( weight_tensor, requires_grad=orig_weight.requires_grad, has_fp16_weights=orig_weight.has_fp16_weights, ).to(orig_weight.device) base_layer.weight = weight_tensor else: weight_tensor = weight_tensor.to(dtype) base_layer.weight.data = weight_tensor def pissa_init(self, adapter_name, init_lora_weights): weight = self.get_base_layer().weight dtype = weight.dtype if dtype not in [torch.float32, torch.float16, torch.bfloat16]: raise TypeError( "Please initialize PiSSA under float32, float16, or bfloat16. " "Subsequently, re-quantize the residual model to help minimize quantization errors." 
) weight = transpose(weight.to(torch.float32), self.fan_in_fan_out) if init_lora_weights == "pissa": # USV^T = W <-> VSU^T = W^T, where W^T = weight.data in R^{out_channel, in_channel}, V, S, Uh = torch.linalg.svd(weight.data, full_matrices=False) Vr = V[:, : self.r[adapter_name]] Sr = S[: self.r[adapter_name]] Sr /= self.scaling[adapter_name] Uhr = Uh[: self.r[adapter_name]] elif len(init_lora_weights.split("_niter_")) == 2: Vr, Sr, Ur = svd_lowrank( weight.data, self.r[adapter_name], niter=int(init_lora_weights.split("_niter_")[-1]) ) Sr /= self.scaling[adapter_name] Uhr = Ur.t() else: raise ValueError( f"init_lora_weights should be 'pissa' or 'pissa_niter_[number of iters]', got {init_lora_weights} instead." ) lora_A = torch.diag(torch.sqrt(Sr)) @ Uhr lora_B = Vr @ torch.diag(torch.sqrt(Sr)) self.lora_A[adapter_name].weight.data = lora_A self.lora_B[adapter_name].weight.data = lora_B weight = weight.data - self.scaling[adapter_name] * lora_B @ lora_A weight = transpose(weight.to(dtype), self.fan_in_fan_out) self.get_base_layer().weight.data = weight def corda_init(self, adapter_name, init_lora_weights): linear = self.get_base_layer() weight = linear.weight dtype = weight.dtype if dtype not in [torch.float32, torch.float16, torch.bfloat16]: raise TypeError( "Please initialize CorDA under float32, float16, or bfloat16. " "Subsequently, re-quantize the residual model to help minimize quantization errors." ) weight = weight.to(torch.float32) out_dim = weight.data.size(0) in_dim = weight.data.size(1) # Calculate WC from covariance matrix if not hasattr(linear, "eigens"): raise ValueError( "`eigens` attribute not found for layer, please run `preprocess_corda` first. " "More information can be found at examples/corda_finetuning/README.md." ) eigens = linear.eigens U = eigens.U_WC S = eigens.S_WC V = eigens.V_WC r = self.r[adapter_name] # nan or inf check if torch.isnan(S).any() or torch.isinf(S).any(): raise ValueError( "Invalid value found in matrix S. Please file an issue at https://github.com/huggingface/peft/issues." ) if torch.isnan(U).any() or torch.isinf(U).any(): raise ValueError( "Invalid value found in matrix U. Please file an issue at https://github.com/huggingface/peft/issues." ) if torch.isnan(V).any() or torch.isinf(V).any(): raise ValueError( "Invalid value found in matrix V. Please file an issue at https://github.com/huggingface/peft/issues." ) # Sanity check if U.size(0) != out_dim or U.size(1) != r: raise ValueError( f"Matrix U size mismatch: {U.size()} vs. ({out_dim}, {r}). Please make sure the `lora_config` and " "`model` argument of `preprocess_corda` is consistent with `get_peft_model`. If you're using cache " "in `preprocess_corda`, please make sure the cache is built with the same model and LoRA rank." ) if S.size(0) != r: raise ValueError( f"Matrix S size mismatch: {S.size()} vs. ({r},). Please make sure the `lora_config` and `model` argument " "of `preprocess_corda` is consistent with `get_peft_model`. If you're using cache in `preprocess_corda`, " "please make sure the cache is built with the same model and LoRA rank." ) if V.size(0) != in_dim or V.size(1) != r: raise ValueError( f"Matrix V size mismatch: {V.size()} vs. ({in_dim}, {r}). Please make sure the `lora_config` and " "`model` argument of `preprocess_corda` is consistent with `get_peft_model`. If you're using cache " "in `preprocess_corda`, please make sure the cache is built with the same model and LoRA rank." 
) # Apply alpha S /= self.scaling[adapter_name] # Init lora_A and lora_B weights lora_A = V.t().mul(S.sqrt().view(-1, 1)).contiguous() lora_B = U.mul(S.sqrt()).contiguous() self.lora_A[adapter_name].weight.data = lora_A self.lora_B[adapter_name].weight.data = lora_B weight = weight.data - self.scaling[adapter_name] * lora_B @ lora_A weight = weight.to(dtype) self.get_base_layer().weight.data = weight # Remove redundant fields del linear.eigens def loftq_init(self, adapter_name): from peft.utils.loftq_utils import loftq_init weight = self.get_base_layer().weight kwargs = { "num_bits": self.kwargs.get("loftq_bits", 4), "reduced_rank": self.r[adapter_name], "num_iter": self.kwargs.get("loftq_iter", 1), } qweight, lora_A, lora_B = loftq_init(weight, **kwargs) if adapter_name in self.lora_A.keys(): # initialize A the same way as the default for nn.Linear and B to zero self.lora_A[adapter_name].weight.data = lora_A self.lora_B[adapter_name].weight.data = lora_B if adapter_name in self.lora_embedding_A.keys(): # initialize a the same way as the default for nn.linear and b to zero self.lora_embedding_A[adapter_name].weight.data = lora_A self.lora_embedding_B[adapter_name].weight.data = lora_B self.get_base_layer().weight.data = qweight def dora_init(self, adapter_name: str) -> None: if not self.lora_magnitude_vector: # first dora layer being added, add lora_magnitude_vector to the list of learnable parameters self.adapter_layer_names = self.adapter_layer_names[:] + ("lora_magnitude_vector",) dora_layer = DoraLinearLayer(fan_in_fan_out=getattr(self, "fan_in_fan_out", False)) lora_A = self.lora_A[adapter_name].weight lora_B = self.lora_B[adapter_name].weight place_on_cpu = self.ephemeral_gpu_offload and (lora_A.device.type == "cpu" or lora_B.device.type == "cpu") if self.ephemeral_gpu_offload: if lora_A.device.type in ["cuda", "xpu"]: lora_B = lora_B.to(lora_A.device) else: if lora_B.device.type not in ["cuda", "xpu"]: if is_xpu_available(): lora_B = lora_B.to("xpu") else: lora_B = lora_B.to("cuda") lora_A = lora_A.to(lora_B.device) scaling = self.scaling[adapter_name] dora_layer.update_layer( base_layer=self.get_base_layer(), lora_A=lora_A, lora_B=lora_B, scaling=scaling, place_on_cpu=place_on_cpu ) self.lora_magnitude_vector[adapter_name] = dora_layer def _cache_store(self, key: str, value: Any) -> None: self._caches[key] = value def _cache_pop(self, key: str) -> Any: value = self._caches.pop(key) return value def set_scale(self, adapter, scale): if adapter not in self.scaling: # Ignore the case where the adapter is not in the layer return self.scaling[adapter] = scale * self.lora_alpha[adapter] / self.r[adapter] def scale_layer(self, scale: float) -> None: if scale == 1: return for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue self.scaling[active_adapter] *= scale def unscale_layer(self, scale=None) -> None: for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue if scale is None: self.scaling[active_adapter] = self.lora_alpha[active_adapter] / self.r[active_adapter] else: self.scaling[active_adapter] /= scale def _check_forward_args(self, x, *args, **kwargs): """Check if the arguments are compatible with the configs and state of the model""" adapter_names = kwargs.get("adapter_names", None) if adapter_names is None: return if len(x) != len(adapter_names): msg = ( "Length of `adapter_names` should be the same as the number of inputs, but got " f"{len(adapter_names)} and {len(x)} respectively." 
) raise ValueError(msg) if self.merged: # It is unclear what would be the right thing to do if users pass adapter_names and there are merged # adapters. Therefore, it is better to raise an error in this case. msg = "Cannot pass `adapter_names` when there are merged adapters, please call `unmerge_adapter` first." raise ValueError(msg) # DoRA is not supported (yet), check that it's not being used. Don't check "__base__", as this is the # placeholder for the base model. unique_adapters = {name for name in adapter_names if name != "__base__"} for adapter_name in unique_adapters: if self.use_dora.get(adapter_name, False): msg = "Cannot pass `adapter_names` when DoRA is enabled." raise ValueError(msg) def _mixed_batch_forward( self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any ) -> torch.Tensor: # This is a special method that handles the case when users pass the argument `adapter_names`. This is an # extra argument that allows mixing different adapters in the same batch at inference time. result = self.base_layer(x, *args, **kwargs) torch_result_dtype = result.dtype unique_adapters = set(adapter_names) sub_batch_indices_list = [] for adapter in unique_adapters: sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter]) for i, active_adapter in enumerate(unique_adapters): if active_adapter == "__base__": continue if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear # layer output sub_batch = x[sub_batch_indices_list[i]].to(lora_A.weight.dtype) lora_output = lora_B(lora_A(dropout(sub_batch))) * scaling result[sub_batch_indices_list[i]] += lora_output.to(torch_result_dtype) return result def _cast_input_dtype(self, x: torch.Tensor, dtype: torch.dtype) -> torch.Tensor: """ Whether to cast the dtype of the input to the forward method. Usually, we want to enable this to align the input dtype with the dtype of the weight, but by setting layer.cast_input_dtype=False, this can be disabled if necessary. Enabling or disabling can be managed via the peft.helpers.disable_lora_input_dtype_casting context manager. """ if (not self.cast_input_dtype_enabled) or (x.dtype == dtype): return x return x.to(dtype=dtype) # Below code is based on https://github.com/microsoft/LoRA/blob/main/loralib/layers.py # and modified to work with PyTorch FSDP # ------------------------------------------------------------------------------------------ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. 
# ------------------------------------------------------------------------------------------ class Linear(nn.Module, LoraLayer): # Lora implemented in a dense layer def __init__( self, base_layer, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_target_conv_1d_layer: bool = False, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, lora_bias: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer, **kwargs) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, lora_bias=lora_bias, ) self.is_target_conv_1d_layer = is_target_conv_1d_layer def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.lora_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: orig_weights += delta_weight else: # handle dora # since delta_weight already includes scaling, set it to 1 here weight_norm = ( self.lora_magnitude_vector[active_adapter] .get_weight_norm(orig_weights, transpose(delta_weight, self.fan_in_fan_out), scaling=1) .detach() ) # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm dora_factor = transpose(dora_factor.view(-1, 1), self.fan_in_fan_out) orig_weights = dora_factor * (orig_weights + delta_weight) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights if self.lora_bias[active_adapter]: new_bias = base_layer.bias + self.lora_B[active_adapter].bias if not torch.isfinite(new_bias).all(): raise ValueError( f"NaNs detected in the merged weights. 
The adapter {active_adapter} seems to be broken" ) base_layer.bias.data = new_bias else: delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: base_layer.weight.data += delta_weight else: # handle dora # since delta_weight already includes scaling, set it to 1 here weight_norm = ( self.lora_magnitude_vector[active_adapter] .get_weight_norm( base_layer.weight, transpose(delta_weight, self.fan_in_fan_out), scaling=1 ) .detach() ) # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm dora_factor = transpose(dora_factor.view(-1, 1), self.fan_in_fan_out) new_weight = dora_factor * (base_layer.weight.data + delta_weight) base_layer.weight.data = new_weight if self.lora_bias[active_adapter]: base_layer.bias.data += self.lora_B[active_adapter].bias self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): weight = self.get_base_layer().weight delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: weight.data -= delta_weight else: weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm weight_orig = weight.data / dora_factor.view(-1, 1) - delta_weight weight.data = weight_orig if self.lora_bias[active_adapter]: self.get_base_layer().bias.data -= self.lora_B[active_adapter].bias def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_B[adapter].weight.device dtype = self.lora_B[adapter].weight.dtype # In case users wants to merge the adapter weights that are in # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # (b)float16 because some CPUs have slow bf16/fp16 matmuls. 
cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16) weight_A = self.lora_A[adapter].weight weight_B = self.lora_B[adapter].weight if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter] if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_A[adapter].weight.data = weight_A.to(dtype) self.lora_B[adapter].weight.data = weight_B.to(dtype) return output_tensor def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: self._check_forward_args(x, *args, **kwargs) adapter_names = kwargs.pop("adapter_names", None) if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif adapter_names is not None: result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) torch_result_dtype = result.dtype for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] x = self._cast_input_dtype(x, lora_A.weight.dtype) if not self.use_dora[active_adapter]: result = result + lora_B(lora_A(dropout(x))) * scaling else: if isinstance(dropout, nn.Identity) or not self.training: base_result = result else: x = dropout(x) base_result = None result = result + self.lora_magnitude_vector[active_adapter]( x, lora_A=lora_A, lora_B=lora_B, scaling=scaling, base_layer=self.get_base_layer(), base_result=base_result, ) result = result.to(torch_result_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "lora." 
+ rep class Embedding(nn.Module, LoraLayer): # LoRA implemented in a Embedding layer def __init__( self, base_layer: nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, lora_bias: bool = False, **kwargs, ) -> None: if lora_bias: # lora_bias=True is not supported (yet) for embedding layers, as they use nn.Parameter raise ValueError(f"lora_bias={lora_bias} is not supported for {self.__class__.__name__}.") super().__init__() LoraLayer.__init__(self, base_layer) self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, lora_bias=lora_bias, ) def update_layer( self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora, lora_bias ): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout[adapter_name] = lora_dropout_layer # Actual trainable parameters weight_A = torch.randn((r, self.in_features)) weight_B = torch.randn((self.out_features, r)) self.lora_embedding_A[adapter_name] = nn.Parameter(weight_A) self.lora_embedding_B[adapter_name] = nn.Parameter(weight_B) self.lora_bias[adapter_name] = lora_bias if use_rslora: self.scaling[adapter_name] = lora_alpha / math.sqrt(r) else: self.scaling[adapter_name] = lora_alpha / r if init_lora_weights == "loftq": self.loftq_init(adapter_name) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) # call this before dora_init self._move_adapter_to_device_of_base_layer(adapter_name) if use_dora: self.dora_init(adapter_name) self.use_dora[adapter_name] = True else: self.use_dora[adapter_name] = False self.set_adapter(self.active_adapters) def dora_init(self, adapter_name: str) -> None: if self.lora_magnitude_vector is None: # first dora layer being added, add lora_magnitude_vector to the list of learnable parameters self.adapter_layer_names = self.adapter_layer_names[:] + ("lora_magnitude_vector",) dora_layer = DoraEmbeddingLayer(fan_in_fan_out=True) lora_embedding_A = self.lora_embedding_A[adapter_name] lora_embedding_B = self.lora_embedding_B[adapter_name] scaling = self.scaling[adapter_name] dora_layer.update_layer( base_layer=self.get_base_layer(), lora_A=lora_embedding_A, lora_B=lora_embedding_B, scaling=scaling ) self.lora_magnitude_vector[adapter_name] = dora_layer def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. 
""" adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.lora_embedding_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() orig_weights += self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data += self.get_delta_weight(active_adapter) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_embedding_A.keys(): self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_embedding_B[adapter].device dtype = self.lora_embedding_A[adapter].dtype # In case users wants to merge the adapter weights that are in # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # (b)float16 because some CPUs have slow bf16/fp16 matmuls. cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16) weight_A = self.lora_embedding_A[adapter] weight_B = self.lora_embedding_B[adapter] if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() output_tensor = transpose(weight_B @ weight_A, True) * self.scaling[adapter] if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_embedding_A[adapter] = weight_A.to(dtype) self.lora_embedding_B[adapter] = weight_B.to(dtype) return output_tensor def _mixed_batch_forward( self, x: torch.Tensor, *args: Any, adapter_names: list[str], **kwargs: Any ) -> torch.Tensor: # This is a special method that handles the case when users pass the argument `adapter_names`. This is an # extra argument that allows mixing different adapters in the same batch at inference time. 
result = self.base_layer(x, *args, **kwargs) unique_adapters = set(adapter_names) sub_batch_indices_list = [] for adapter in unique_adapters: sub_batch_indices_list.append([index for index, item in enumerate(adapter_names) if item == adapter]) for i, active_adapter in enumerate(unique_adapters): if active_adapter == "__base__": continue if active_adapter not in self.lora_embedding_A.keys(): continue embedding_A = self.lora_embedding_A[active_adapter].T embedding_B = self.lora_embedding_B[active_adapter].T scaling = self.scaling[active_adapter] # getting the sub-batch, passing it to LoRA layers and updating the corresponding indices of the linear # layer output sub_batch = x[sub_batch_indices_list[i]] after_A = self._embed(sub_batch, embedding_A) result[sub_batch_indices_list[i]] += (after_A @ embedding_B) * scaling return result def _embed(self, input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor: base_layer = self.get_base_layer() return F.embedding( input, weight, padding_idx=base_layer.padding_idx, max_norm=base_layer.max_norm, norm_type=base_layer.norm_type, scale_grad_by_freq=base_layer.scale_grad_by_freq, sparse=base_layer.sparse, ) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: # TODO: no dtype conversion here, unlike in Linear, is that correct? self._check_forward_args(x, *args, **kwargs) adapter_names = kwargs.pop("adapter_names", None) if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif adapter_names is not None: result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) torch_result_dtype = result.dtype for active_adapter in self.active_adapters: if active_adapter not in self.lora_embedding_A: continue embedding_A = self.lora_embedding_A[active_adapter].T embedding_B = self.lora_embedding_B[active_adapter].T scaling = self.scaling[active_adapter] if not self.use_dora[active_adapter]: after_A = self._embed(x, embedding_A) result = result + (after_A @ embedding_B) * scaling else: mag_norm_scale, dora_result = self.lora_magnitude_vector[active_adapter]( x, lora_A=embedding_A, lora_B=embedding_B, scaling=scaling, base_layer=self.get_base_layer(), embed_fn=self._embed, ) result = mag_norm_scale * result + dora_result result = result.to(torch_result_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "lora." 
+ rep class _ConvNd(nn.Module, LoraLayer): # Lora implemented in a conv(2,3)d layer def __init__( self, base_layer: nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, lora_bias: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) self._active_adapter = adapter_name self._kernel_dim = base_layer.weight.dim() self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, lora_bias=lora_bias, ) def update_layer( self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora, lora_bias ): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout[adapter_name] = lora_dropout_layer # Actual trainable parameters base_layer = self.get_base_layer() kernel_size = base_layer.kernel_size stride = base_layer.stride padding = base_layer.padding conv_layer = type(base_layer) out_kernel = out_stride = (1,) * (self._kernel_dim - 2) self.lora_A[adapter_name] = conv_layer(self.in_features, r, kernel_size, stride, padding, bias=False) self.lora_B[adapter_name] = conv_layer(r, self.out_features, out_kernel, out_stride, bias=lora_bias) self.lora_bias[adapter_name] = lora_bias if use_rslora: self.scaling[adapter_name] = lora_alpha / math.sqrt(r) else: self.scaling[adapter_name] = lora_alpha / r if init_lora_weights == "loftq": self.loftq_init(adapter_name) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) # call this before dora_init self._move_adapter_to_device_of_base_layer(adapter_name) if use_dora: self.dora_init(adapter_name) self.use_dora[adapter_name] = True else: self.use_dora[adapter_name] = False self.set_adapter(self.active_adapters) def _get_dora_factor_view(self): return (-1,) + (1,) * (self._kernel_dim - 1) def dora_init(self, adapter_name: str) -> None: if self.lora_magnitude_vector is None: # first dora layer being added, add lora_magnitude_vector to the list of learnable parameters self.adapter_layer_names = self.adapter_layer_names[:] + ("lora_magnitude_vector",) dora_layer_class = self._get_dora_layer_class() dora_layer = dora_layer_class(fan_in_fan_out=False) lora_A = self.lora_A[adapter_name].weight lora_B = self.lora_B[adapter_name].weight scaling = self.scaling[adapter_name] dora_layer.update_layer(base_layer=self.get_base_layer(), lora_A=lora_A, lora_B=lora_B, scaling=scaling) self.lora_magnitude_vector[adapter_name] = dora_layer def _get_dora_layer_class(self) -> type[_DoraConvNdLayer]: # Subclasses should override this method to return the appropriate DoraLayer class raise NotImplementedError def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights inside the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. 
If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.lora_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: orig_weights += delta_weight else: # handle dora # since delta_weight already includes scaling, set it to 1 here weight_norm = ( self.lora_magnitude_vector[active_adapter] .get_weight_norm(orig_weights, delta_weight, scaling=1) .detach() ) # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm orig_weights = dora_factor.view(*self._get_dora_factor_view()) * (orig_weights + delta_weight) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights if self.lora_bias[active_adapter]: new_bias = base_layer.bias + self.lora_B[active_adapter].bias if not torch.isfinite(new_bias).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.bias.data = new_bias else: delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: base_layer.weight.data += delta_weight else: # handle dora # since delta_weight already includes scaling, set it to 1 here weight_norm = ( self.lora_magnitude_vector[active_adapter] .get_weight_norm(base_layer.weight, delta_weight, scaling=1) .detach() ) # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm new_weight = dora_factor.view(*self._get_dora_factor_view()) * ( base_layer.weight.data + delta_weight ) base_layer.weight.data = new_weight if self.lora_bias[active_adapter]: base_layer.bias.data += self.lora_B[active_adapter].bias self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. 
Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): weight = self.get_base_layer().weight delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: weight.data -= delta_weight else: weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") dora_factor = self.lora_magnitude_vector[active_adapter].weight / weight_norm weight_orig = weight.data / dora_factor.view(*self._get_dora_factor_view()) - delta_weight weight.data = weight_orig if self.lora_bias[active_adapter]: self.get_base_layer().bias.data -= self.lora_B[active_adapter].bias def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_B[adapter].weight.device dtype = self.lora_A[adapter].weight.dtype # In case users wants to merge the adapter weights that are in # (b)float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # (b)float16 because some CPUs have slow bf16/fp16 matmuls. cast_to_fp32 = device.type == "cpu" and (dtype == torch.float16 or dtype == torch.bfloat16) weight_A = self.lora_A[adapter].weight weight_B = self.lora_B[adapter].weight if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() # https://github.com/bmaltais/kohya_ss/blob/feb6728762a8f463d15ba936d189d4c3abfaa1ab/networks/lora.py#L117 if self.get_base_layer().weight.size()[2:4] == (1, 1): # conv2d 1x1 output_tensor = (weight_B.squeeze(3).squeeze(2) @ weight_A.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze( 3 ) * self.scaling[adapter] else: output_tensor = ( self.conv_fn( weight_A.transpose(0, 1), weight_B, ).transpose(0, 1) * self.scaling[adapter] ) if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_A[adapter].weight.data = weight_A.to(dtype) self.lora_B[adapter].weight.data = weight_B.to(dtype) return output_tensor def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: self._check_forward_args(x, *args, **kwargs) adapter_names = kwargs.pop("adapter_names", None) if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif adapter_names is not None: result = self._mixed_batch_forward(x, *args, adapter_names=adapter_names, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) torch_result_dtype = result.dtype for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] x = self._cast_input_dtype(x, lora_A.weight.dtype) if not self.use_dora[active_adapter]: result = result + lora_B(lora_A(dropout(x))) * scaling else: x = dropout(x) result = result + self.lora_magnitude_vector[active_adapter]( x, lora_A=lora_A, lora_B=lora_B, scaling=scaling, base_layer=self.get_base_layer(), ) result = result.to(torch_result_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "lora." 
+ rep class Conv2d(_ConvNd): # Lora implemented in a conv2d layer def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if not self._kernel_dim == 4: raise ValueError(f"Conv2d layer kernel must have 4 dimensions, not {self._kernel_dim}") self.conv_fn = F.conv2d def _get_dora_layer_class(self): return DoraConv2dLayer class Conv1d(_ConvNd): # Lora implemented in a conv1d layer def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if not self._kernel_dim == 3: raise ValueError(f"Conv1d layer kernel must have 3 dimensions, not {self._kernel_dim}") self.conv_fn = F.conv1d def _get_dora_layer_class(self): raise NotImplementedError class Conv3d(_ConvNd): # Lora implemented in a conv3d layer def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if not self._kernel_dim == 5: raise ValueError(f"Conv3d layer kernel must have 5 dimensions, not {self._kernel_dim}") self.conv_fn = F.conv3d def _get_dora_layer_class(self): return DoraConv3dLayer class MultiheadAttention(nn.Module, LoraLayer): """LoRA implemented in a multihead attention layer This is currently only implemented for the case of `_qkv_same_embed_dim = True`, i.e. query, key, and value having the same dimension. Note: LoRA is applied to both the in_proj (query/key/value) and out_proj. There is currently no way to specify only one of them. Don't try to apply LoRA to the out_proj of MultiheadAttention by targeting that layer specifically, since the forward method of that layer is not being used, hence the LoRA adapter would be ignored. This is a little bit hacky because of the way that MultiheadAttention is implemented in PyTorch: There are no `nn.Linear` layers which we can hook onto or, in case of output projection, `.forward` is not used. This implementation works around these problems by merging the weights before the forward call and unmerging them after the forward call. """ def __init__( self, base_layer, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ) -> None: # TODO work with separate weights if not getattr(base_layer, "_qkv_same_embed_dim", True): # default for this value appears to be True: # https://github.com/pytorch/pytorch/blob/701ba5203fe68d55d655bd4d6c008be94cf34ea5/torch/nn/modules/activation.py#L1128-L1130 raise ValueError( f"Only same embed for query/key/value is supported as of now for {self.__class__.__name__}." ) if use_dora: # TODO: probably not so hard to implement raise ValueError(f"{self.__class__.__name__} does not support DoRA (yet), please set use_dora to False") super().__init__() LoraLayer.__init__(self, base_layer, **kwargs) # Note: LoRA is applied to both in_proj and out_proj. There is currently no way to only specify one of them. 
if isinstance(base_layer.out_proj, nn.Linear): self.base_layer.out_proj = Linear( base_layer.out_proj, adapter_name, r=r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, **kwargs, ) else: raise ValueError(f"out_proj must be an instance of nn.Linear for {self.__class__.__name__}.") self._active_adapter = adapter_name self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora) @property def embed_dim(self) -> int: return self.get_base_layer().embed_dim @property def kdim(self) -> Optional[int]: return self.get_base_layer().kdim @property def vdim(self) -> Optional[int]: return self.get_base_layer().vdim @property def _qkv_same_embed_dim(self) -> bool: return self.get_base_layer()._qkv_same_embed_dim @property def num_heads(self) -> int: return self.get_base_layer().num_heads @property def dropout(self) -> float: return self.get_base_layer().dropout @property def batch_first(self) -> bool: return self.get_base_layer().batch_first @property def head_dim(self) -> int: return self.get_base_layer().head_dim @property def in_proj_weight(self) -> nn.Parameter: return self.get_base_layer().in_proj_weight @property def in_proj_bias(self) -> nn.Parameter: return self.get_base_layer().in_proj_bias @property def out_proj(self) -> nn.Module: return self.get_base_layer().out_proj.get_base_layer() @property def bias_k(self) -> Optional[nn.Parameter]: return self.get_base_layer().bias_k @property def bias_v(self) -> Optional[nn.Parameter]: return self.get_base_layer().bias_v def merge_masks(self, *args, **kwargs) -> tuple[Optional[torch.Tensor], Optional[int]]: return self.get_base_layer().merge_masks(*args, **kwargs) @property def add_zero_attn(self) -> bool: return self.get_base_layer().add_zero_attn def update_layer(self, *args, **kwargs) -> None: super().update_layer(*args, **kwargs) # Note: LoRA is applied to both in_proj and out_proj. There is currently no way to only specify one of them. self.base_layer.out_proj.update_layer(*args, **kwargs) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return # Implementation follows this: # https://github.com/Baijiong-Lin/LoRA-Torch/blob/4bfed6820b64fcf47064c30f30606a190a4f0d2e/loratorch/layers.py#L73-L79 # Notably, instead of mutating the weight, we delete the original weight and replace it by the merged weight # TODO: work with separate weights for active_adapter in adapter_names: if active_adapter in self.lora_A.keys(): base_layer = self.get_base_layer() if safe_merge: # TODO: work with separate weights # merging in_proj (nn.Parameter) orig_weights_in = base_layer.in_proj_weight.data.detach().clone() orig_weights_in += self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights_in).all(): raise ValueError( f"NaNs detected in the merged weights. 
The adapter {active_adapter} seems to be broken" ) # merging out_proj (subclass of nn.Linear) orig_weights_out = base_layer.out_proj.weight.data.detach().clone() orig_weights_out += base_layer.out_proj.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights_out).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) # unregister parameter implicitly and overwrite using merged weights; gradients are computed after # forward and, thus, after unmerging (see forward()), therefore this is safe to do. del base_layer.in_proj_weight base_layer.in_proj_weight = orig_weights_in del base_layer.out_proj.get_base_layer().weight base_layer.out_proj.get_base_layer().weight = orig_weights_out base_layer.out_proj.merge(adapter_names=[active_adapter]) else: # merging in_proj (nn.Parameter) # TODO: work with separate weights weight_merged = base_layer.in_proj_weight.data.detach() + self.get_delta_weight(active_adapter) # unregister parameter implicitly and overwrite using merged weights; gradients are computed after # forward and, thus, after unmerging (see forward()), therefore this is safe to do. del base_layer.in_proj_weight base_layer.in_proj_weight = weight_merged # merging out_proj (subclass of nn.Linear) weight_merged = base_layer.out_proj.weight.data.detach() + base_layer.out_proj.get_delta_weight( active_adapter ) del base_layer.out_proj.get_base_layer().weight base_layer.out_proj.get_base_layer().weight = weight_merged base_layer.out_proj.merge(adapter_names=[active_adapter]) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return # TODO work with separate weights base_layer = self.get_base_layer() while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): # Ensure that requires_grad=False for the base weights after unmerging. This may not matter since # requires_grad was False when the optimizer was initialized, but still let's try to be correct here. # in_proj old_weight = base_layer.in_proj_weight.data - self.get_delta_weight(active_adapter) del base_layer.in_proj_weight base_layer.register_parameter("in_proj_weight", nn.Parameter(old_weight, requires_grad=False)) # out_proj old_weight = base_layer.out_proj.base_layer.weight.data - base_layer.out_proj.get_delta_weight( active_adapter ) del base_layer.out_proj.base_layer.weight base_layer.out_proj.base_layer.register_parameter( "weight", nn.Parameter(old_weight, requires_grad=False) ) self.get_base_layer().out_proj.unmerge() def unload_and_optionally_merge_module( self, merge: bool, safe_merge: bool, adapter_names: Optional[list[str]] ) -> nn.MultiheadAttention: """ Merging and unloading of the MultiheadAttention module This requires an extra step for MultiheadAttention, which is why there is this special method instead of relying on the normal merge_and_unload code path. 
""" if merge: self.merge(safe_merge=safe_merge, adapter_names=adapter_names) base_layer = self.get_base_layer() # extra steps: re-register weights, take care of out_proj layer # in_proj weight = base_layer.in_proj_weight del base_layer.in_proj_weight base_layer.register_parameter("in_proj_weight", nn.Parameter(weight.data, requires_grad=weight.requires_grad)) # out_proj out_proj_layer = base_layer.out_proj.get_base_layer() weight = out_proj_layer.weight del out_proj_layer.weight out_proj_layer.register_parameter("weight", nn.Parameter(weight.data, requires_grad=weight.requires_grad)) base_layer.out_proj = out_proj_layer return base_layer def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_B[adapter].weight.device dtype = self.lora_B[adapter].weight.dtype # In case users wants to merge the adapter weights that are in # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 weight_A = self.lora_A[adapter].weight weight_B = self.lora_B[adapter].weight if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() output_tensor = (weight_B @ weight_A) * self.scaling[adapter] if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_A[adapter].weight.data = weight_A.to(dtype) self.lora_B[adapter].weight.data = weight_B.to(dtype) return output_tensor def _check_forward_args(self, x, *args, **kwargs): if "adapter_names" in kwargs: raise TypeError(f"lora.{self.__class__.__name__} does not support mixed adapter batches.") super()._check_forward_args(x, *args, **kwargs) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: previous_dtype = x.dtype self._check_forward_args(x, *args, **kwargs) if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: out_proj = self.get_base_layer().out_proj if out_proj.active_adapters != self.active_adapters: # We have a case that in_proj and out_proj have diverging merged adapters. We cannot # really deal with this correctly, thus it's better to raise than possibly create a hard to debug mess cls_name = self.get_base_layer().__class__.__name__ raise ValueError( f"The out_proj layer of {cls_name} has merged layers but {cls_name} itself doesn't; please ensure " "that either both or none have merged layers" ) # Merge all adapters that are active for this module, i.e. the LoRA weights for in_proj and out_proj. # in_proj uses nn.Parameters, therefore, there is no forward method to be used and we have to explicitly # merge for the LoRA weights to have an effect: # https://github.com/pytorch/pytorch/blob/6ebb26d572d5fcdc6ac0d1297bdf8d1eb5d20722/torch/nn/modules/activation.py#L1020 # For out_proj, we have an nn.Linear (or rather: NonDynamicallyQuantizableLinear), but its forward method # is not used: # https://github.com/pytorch/pytorch/blob/6ebb26d572d5fcdc6ac0d1297bdf8d1eb5d20722/torch/nn/modules/activation.py#L1267-L1271 # Therefore, its LoRA weights also need to be merged to have an effect. 
active_adapters = [a for a in self.active_adapters if a in self.lora_A] try: self.merge(adapter_names=active_adapters) result = self.base_layer(x, *args, **kwargs) finally: # it's safe to call unmerge(), which unmerges all adapters, because we checked that not self.merged, # i.e. there is was no merged layer before self.unmerge() result = (result[0].to(previous_dtype), result[1].to(previous_dtype) if result[1] is not None else result[1]) return result # The decorator is needed in case low_cpu_mem_usage=True is used, as we don't want the base layer weights to be # moved to meta device. This requires the use of PEFT's implementation of init_empty_weight instead of using the one # from accelerate. @skip_init_on_device def _restore_weights(self): # Restore the weights as registered parameters on the base layer. # This is necessary because the way that weights are merged/unmerged (which is necessary for forward to work # correctly), the Module "forgets" these attributes. Therefore, we need to call register_parameter explicitly. # We cannot call register_parameter for merging/unmerging because that cuts them off from the autograd graph. # Note that this is hacky, since we need to ensure that _restore_weights is called by each method that needs it. # in_proj # TODO work with separate weights base_layer = self.get_base_layer() weight = base_layer.in_proj_weight del base_layer.in_proj_weight base_layer.register_parameter("in_proj_weight", nn.Parameter(weight.data, requires_grad=weight.requires_grad)) # out_proj base_layer = base_layer.out_proj.get_base_layer() weight = base_layer.weight del base_layer.weight base_layer.register_parameter("weight", nn.Parameter(weight.data, requires_grad=weight.requires_grad)) def state_dict(self, *args, **kwargs): self._restore_weights() return super().state_dict(*args, **kwargs) def named_modules(self, *args, **kwargs): # Note: no need to also implement modules(), as modules() calls named_modules() under the hood self._restore_weights() return super().named_modules(*args, **kwargs) def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep def dispatch_default( target: torch.nn.Module, adapter_name: str, lora_config: LoraConfig, **kwargs, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if isinstance(target_base_layer, torch.nn.Embedding): embedding_kwargs = kwargs.copy() embedding_kwargs.pop("fan_in_fan_out", None) embedding_kwargs.update(lora_config.loftq_config) new_module = Embedding(target, adapter_name, **embedding_kwargs) elif isinstance(target_base_layer, torch.nn.Conv2d): kwargs.update(lora_config.loftq_config) new_module = Conv2d(target, adapter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.Conv3d): kwargs.update(lora_config.loftq_config) new_module = Conv3d(target, adapter_name, **kwargs) elif isinstance(target_base_layer, nn.Conv1d): kwargs.update(lora_config.loftq_config) new_module = Conv1d(target, adapter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.MultiheadAttention): kwargs.update(lora_config.loftq_config) new_module = MultiheadAttention(target, adapter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.Linear): if kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " "Setting fan_in_fan_out to False." 
) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False kwargs.update(lora_config.loftq_config) new_module = Linear(target, adapter_name, **kwargs) elif isinstance(target_base_layer, Conv1D): if not kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True kwargs.update(lora_config.loftq_config) new_module = Linear(target, adapter_name, is_target_conv_1d_layer=True, **kwargs) return new_module
peft/src/peft/tuners/lora/layer.py/0
{ "file_path": "peft/src/peft/tuners/lora/layer.py", "repo_id": "peft", "token_count": 37815 }
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Based on https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/modules/common/prompt_encoder.py # with some refactor import warnings import torch from .config import PromptEncoderConfig, PromptEncoderReparameterizationType class PromptEncoder(torch.nn.Module): """ The prompt encoder network that is used to generate the virtual token embeddings for p-tuning. Args: config ([`PromptEncoderConfig`]): The configuration of the prompt encoder. Example: ```py >>> from peft import PromptEncoder, PromptEncoderConfig >>> config = PromptEncoderConfig( ... peft_type="P_TUNING", ... task_type="SEQ_2_SEQ_LM", ... num_virtual_tokens=20, ... token_dim=768, ... num_transformer_submodules=1, ... num_attention_heads=12, ... num_layers=12, ... encoder_reparameterization_type="MLP", ... encoder_hidden_size=768, ... ) >>> prompt_encoder = PromptEncoder(config) ``` **Attributes**: - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder. - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`. - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and `encoder_reparameterization_type="LSTM"`. - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model. - **input_size** (`int`) -- The input size of the prompt encoder. - **output_size** (`int`) -- The output size of the prompt encoder. - **hidden_size** (`int`) -- The hidden size of the prompt encoder. - **total_virtual_tokens** (`int`): The total number of virtual tokens of the prompt encoder. - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]): The encoder type of the prompt encoder. 
Input shape: (`batch_size`, `total_virtual_tokens`) Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`) """ def __init__(self, config): super().__init__() self.token_dim = config.token_dim self.input_size = self.token_dim self.output_size = self.token_dim self.hidden_size = config.encoder_hidden_size self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules self.encoder_type = config.encoder_reparameterization_type # embedding self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim) if not config.inference_mode: if self.encoder_type == PromptEncoderReparameterizationType.LSTM: lstm_dropout = config.encoder_dropout num_layers = config.encoder_num_layers # LSTM self.lstm_head = torch.nn.LSTM( input_size=self.input_size, hidden_size=self.hidden_size, num_layers=num_layers, dropout=lstm_dropout, bidirectional=True, batch_first=True, ) self.mlp_head = torch.nn.Sequential( torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2), torch.nn.ReLU(), torch.nn.Linear(self.hidden_size * 2, self.output_size), ) elif self.encoder_type == PromptEncoderReparameterizationType.MLP: encoder_num_layers_default = PromptEncoderConfig.encoder_num_layers if config.encoder_num_layers != encoder_num_layers_default: warnings.warn( f"for {self.encoder_type.value}, the argument `encoder_num_layers` is ignored. " f"Exactly {encoder_num_layers_default} MLP layers are used." ) layers = [ torch.nn.Linear(self.input_size, self.hidden_size), torch.nn.ReLU(), torch.nn.Linear(self.hidden_size, self.hidden_size), torch.nn.ReLU(), torch.nn.Linear(self.hidden_size, self.output_size), ] self.mlp_head = torch.nn.Sequential(*layers) else: raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.") def forward(self, indices): input_embeds = self.embedding(indices) if self.encoder_type == PromptEncoderReparameterizationType.LSTM: output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0]) elif self.encoder_type == PromptEncoderReparameterizationType.MLP: output_embeds = self.mlp_head(input_embeds) else: raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.") return output_embeds
peft/src/peft/tuners/p_tuning/model.py/0
{ "file_path": "peft/src/peft/tuners/p_tuning/model.py", "repo_id": "peft", "token_count": 2476 }