import logging
from typing import List, Optional

from .logger import DistributedLogger

__all__ = ['get_dist_logger', 'DistributedLogger', 'disable_existing_loggers']


def get_dist_logger(name: str = 'colossalai') -> DistributedLogger:
    """Get logger instance based on name. The DistributedLogger will create singleton instances,
    which means that only one logger instance is created per name.

    Args:
        name (str): name of the logger, name must be unique

    Returns:
        :class:`colossalai.logging.DistributedLogger`: A distributed logger singleton instance.
    """
    return DistributedLogger.get_instance(name=name)


def disable_existing_loggers(include: Optional[List[str]] = None, exclude: List[str] = ['colossalai']) -> None:
    """Set the level of existing loggers to `WARNING`. By default, it will "disable" all existing loggers
    except the logger named "colossalai".

    Args:
        include (Optional[List[str]], optional): Loggers whose name in this list will be disabled.
            If set to `None`, `exclude` argument will be used. Defaults to None.
        exclude (List[str], optional): Loggers whose name not in this list will be disabled.
            This argument will be used only when `include` is None. Defaults to ['colossalai'].
    """
    if include is None:
        filter_func = lambda name: name not in exclude
    else:
        filter_func = lambda name: name in include

    for log_name in logging.Logger.manager.loggerDict.keys():
        if filter_func(log_name):
            logging.getLogger(log_name).setLevel(logging.WARNING)
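# Usage sketch for the two helpers above (a minimal illustration, assuming colossalai is installed
# and importable; nothing here is required by the library itself).
from colossalai.logging import get_dist_logger, disable_existing_loggers

# quiet noisy third-party loggers, keeping only the 'colossalai' logger verbose
disable_existing_loggers()

# loggers are singletons per name: both variables refer to the same instance
logger = get_dist_logger()
same_logger = get_dist_logger('colossalai')
assert logger is same_logger

logger.info('initialization finished', ranks=[0])    # only rank 0 prints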
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import inspect
import logging
from pathlib import Path
from typing import List, Union

import colossalai
from colossalai.context.parallel_mode import ParallelMode


class DistributedLogger:
    """This is a distributed event logger class essentially based on :class:`logging`.

    Args:
        name (str): The name of the logger.

    Note:
        The parallel_mode used in ``info``, ``warning``, ``debug`` and ``error``
        should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """

    __instances = dict()

    @staticmethod
    def get_instance(name: str):
        """Get the unique single logger instance based on name.

        Args:
            name (str): The name of the logger.

        Returns:
            DistributedLogger: A DistributedLogger object
        """
        if name in DistributedLogger.__instances:
            return DistributedLogger.__instances[name]
        else:
            logger = DistributedLogger(name=name)
            return logger

    def __init__(self, name):
        if name in DistributedLogger.__instances:
            raise Exception(
                'Logger with the same name has been created, you should use colossalai.logging.get_dist_logger')
        else:
            handler = None
            formatter = logging.Formatter('colossalai - %(name)s - %(levelname)s: %(message)s')
            try:
                from rich.logging import RichHandler
                handler = RichHandler(show_path=False, markup=True, rich_tracebacks=True)
                handler.setFormatter(formatter)
            except ImportError:
                handler = logging.StreamHandler()
                handler.setFormatter(formatter)

            self._name = name
            self._logger = logging.getLogger(name)
            self._logger.setLevel(logging.INFO)
            if handler is not None:
                self._logger.addHandler(handler)
            self._logger.propagate = False

            DistributedLogger.__instances[name] = self

    @staticmethod
    def __get_call_info():
        stack = inspect.stack()

        # stack[1] gives previous function ('info' in our case)
        # stack[2] gives before previous function and so on
        fn = stack[2][1]
        ln = stack[2][2]
        func = stack[2][3]

        return fn, ln, func

    @staticmethod
    def _check_valid_logging_level(level: str):
        assert level in ['INFO', 'DEBUG', 'WARNING', 'ERROR'], 'found invalid logging level'

    def set_level(self, level: str) -> None:
        """Set the logging level.

        Args:
            level (str): Can only be INFO, DEBUG, WARNING and ERROR.
        """
        self._check_valid_logging_level(level)
        self._logger.setLevel(getattr(logging, level))

    def log_to_file(self, path: Union[str, Path], mode: str = 'a', level: str = 'INFO', suffix: str = None) -> None:
        """Save the logs to file.

        Args:
            path (A string or pathlib.Path object): The file to save the log.
            mode (str): The mode to write log into the file.
            level (str): Can only be INFO, DEBUG, WARNING and ERROR.
            suffix (str): The suffix string of log's name.
        """
        assert isinstance(path, (str, Path)), \
            f'expected argument path to be type str or Path, but got {type(path)}'
        self._check_valid_logging_level(level)

        if isinstance(path, str):
            path = Path(path)

        # create log directory
        path.mkdir(parents=True, exist_ok=True)

        # set the default file name if path is a directory
        if not colossalai.core.global_context.is_initialized(ParallelMode.GLOBAL):
            rank = 0
        else:
            rank = colossalai.core.global_context.get_global_rank()

        if suffix is not None:
            log_file_name = f'rank_{rank}_{suffix}.log'
        else:
            log_file_name = f'rank_{rank}.log'
        path = path.joinpath(log_file_name)

        # add file handler
        file_handler = logging.FileHandler(path, mode)
        file_handler.setLevel(getattr(logging, level))
        formatter = logging.Formatter('colossalai - %(name)s - %(levelname)s: %(message)s')
        file_handler.setFormatter(formatter)
        self._logger.addHandler(file_handler)

    def _log(self,
             level,
             message: str,
             parallel_mode: ParallelMode = ParallelMode.GLOBAL,
             ranks: List[int] = None) -> None:
        if ranks is None:
            getattr(self._logger, level)(message)
        else:
            local_rank = colossalai.core.global_context.get_local_rank(parallel_mode)
            if local_rank in ranks:
                getattr(self._logger, level)(message)

    def info(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: List[int] = None) -> None:
        """Log an info message.

        Args:
            message (str): The message to be logged.
            parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`):
                The parallel mode used for logging. Defaults to ParallelMode.GLOBAL.
            ranks (List[int]): List of parallel ranks.
        """
        message_prefix = "{}:{} {}".format(*self.__get_call_info())
        self._log('info', message_prefix, parallel_mode, ranks)
        self._log('info', message, parallel_mode, ranks)

    def warning(self,
                message: str,
                parallel_mode: ParallelMode = ParallelMode.GLOBAL,
                ranks: List[int] = None) -> None:
        """Log a warning message.

        Args:
            message (str): The message to be logged.
            parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`):
                The parallel mode used for logging. Defaults to ParallelMode.GLOBAL.
            ranks (List[int]): List of parallel ranks.
        """
        message_prefix = "{}:{} {}".format(*self.__get_call_info())
        self._log('warning', message_prefix, parallel_mode, ranks)
        self._log('warning', message, parallel_mode, ranks)

    def debug(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: List[int] = None) -> None:
        """Log a debug message.

        Args:
            message (str): The message to be logged.
            parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`):
                The parallel mode used for logging. Defaults to ParallelMode.GLOBAL.
            ranks (List[int]): List of parallel ranks.
        """
        message_prefix = "{}:{} {}".format(*self.__get_call_info())
        self._log('debug', message_prefix, parallel_mode, ranks)
        self._log('debug', message, parallel_mode, ranks)

    def error(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: List[int] = None) -> None:
        """Log an error message.

        Args:
            message (str): The message to be logged.
            parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`):
                The parallel mode used for logging. Defaults to ParallelMode.GLOBAL.
            ranks (List[int]): List of parallel ranks.
        """
        message_prefix = "{}:{} {}".format(*self.__get_call_info())
        self._log('error', message_prefix, parallel_mode, ranks)
        self._log('error', message, parallel_mode, ranks)
from ._trainer import Trainer

__all__ = ['Trainer']
from typing import Union, List, Any

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from colossalai.engine import Engine
from colossalai.logging import DistributedLogger
from colossalai.utils import MultiTimer
from colossalai.utils import is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage
from colossalai.trainer.hooks import BaseHook


class Trainer:
    r"""This is a class for easy deployment of users' training and evaluation, so that they do not
    need to write their own training scripts. It is similar to ``ignite.engine`` and
    ``keras.engine``, but is called `Trainer`.

    Args:
        engine (:class:`Engine`): Engine responsible for the process function.
        timer (:class:`MultiTimer`, optional): Timer used to monitor the whole training.
        logger (:class:`colossalai.logging.DistributedLogger`, optional): Logger used to record the whole training log.

    Examples:
        >>> # define model, criterion, optimizer, lr_scheduler, train_dataloader for your training
        >>> model = ...
        >>> criterion = ...
        >>> optimizer = ...
        >>> train_dataloader = ...
        >>> # Initialize your engine, train_dataloader, test_dataloader, lr_scheduler
        >>> engine, train_dataloader, _, _ = colossalai.initialize(model, optimizer, criterion)
        >>> # Beginning training progress
        >>> timer = ...
        >>> logger = ...
        >>> trainer = Trainer(engine=engine, logger=logger, timer=timer)
        >>> # add hooks you would like to use here.
        >>> hook_list = []
        >>> trainer.fit(
        >>>    train_dataloader=train_dataloader,
        >>>    epochs=gpc.config.NUM_EPOCHS,
        >>>    test_interval=1,
        >>>    hooks=hook_list,
        >>>    display_progress=True,
        >>>    return_output_label=False
        >>> )

    More examples and details could be found in
    `Training with engine and trainer <https://www.colossalai.org/docs/basics/engine_trainer>`_
    and `ColossalAI-Examples <https://github.com/hpcaitech/ColossalAI-Examples/tree/main>`_.
    """

    def __init__(
        self,
        engine: Engine,
        timer: MultiTimer = None,
        logger: DistributedLogger = None,
    ):
        # training-related params
        self._engine = engine
        self._max_epochs = 0
        self._cur_epoch = 0
        self._max_steps = 0
        self._cur_step = 0
        self._steps_per_epoch = 0

        # misc params
        self._logger = logger
        self._verbose = logger is not None

        # hooks can store states in this dict, and could be consumed by other hooks
        self.states = dict()

        # build hooks
        self.hooks = list()

        # multi-timer for time benchmarking
        self._timer = timer

    @property
    def cur_epoch(self):
        """Returns the index of the current epoch."""
        return self._cur_epoch

    @cur_epoch.setter
    def cur_epoch(self, epoch: int):
        """Set how many epochs have been processed."""
        # allow setter for training resumption
        self._cur_epoch = epoch

    @property
    def cur_step(self):
        """Returns how many iteration steps have been processed."""
        return self._cur_step

    @property
    def max_epochs(self):
        return self._max_epochs

    @property
    def max_steps(self):
        return self._max_steps

    @property
    def steps_per_epoch(self):
        return self._steps_per_epoch

    @property
    def engine(self):
        return self._engine

    def _set_current_step(self, epoch: int):
        """Sets current step number.

        Args:
            epoch (int): Step number to be set.
        """
        self._cur_step = epoch * self._steps_per_epoch

    def _call_timer(self, action: str, item: str, *args, **kwargs) -> None:
        """Call timer function with a given timer name.

        Args:
            action (str): Function to be called on timer.
            item (str): Name of the timer.
            args (list): args used for action function.
            kwargs (dict): kwargs used for action function.
        """
        if self._timer is not None:
            getattr(self._timer, action)(item, *args, **kwargs)

    def _reset_states(self) -> None:
        """Clear trainer states"""
        self.states = dict()

    def _call_hooks(self, func, output=None):
        """Calls specific hooks in the current time point.

        Args:
            func (str): A string represents the time point.
            output (Any, optional): Output of the model after running an iteration or None in any other time points.
        """
        # Only after iter hook will receive output
        for hook in self.hooks:
            if output is None:
                getattr(hook, func)(self)
            else:
                getattr(hook, func)(self, *output)

    @staticmethod
    def _should_display_progress(display_progress: bool):
        """Only display progress on DP rank 0, TP rank 0 and PP last rank"""
        return (display_progress and is_dp_rank_0() and is_tp_rank_0() and is_no_pp_or_last_stage())

    def _train_epoch(
        self,
        train_dataloader: DataLoader,
        epoch: int = None,
        display_progress: bool = False,
        return_output_label: bool = True,
    ):
        # set training state
        self._engine.train()
        data_iter = iter(train_dataloader)
        progress = range(self._steps_per_epoch)
        if display_progress:
            if epoch is None:
                progress = tqdm(progress, desc="[Train]")
            else:
                progress = tqdm(progress, desc=f"[Epoch {epoch} / Train]")

        self._call_hooks("before_train_epoch")
        self._call_timer(action="start", item="Train-epoch")
        for i in progress:
            self._call_hooks("before_train_iter")
            self._call_timer(action="start", item="Train-step")

            # run 1 training step
            self.engine.zero_grad()
            logits, label, loss = self.engine.execute_schedule(
                data_iter,
                forward_only=False,
                return_loss=True,
                return_output_label=return_output_label,
            )
            self.engine.step()
            self._call_timer(action="stop", item="Train-step", keep_in_history=True)
            self._call_hooks("after_train_iter", output=(logits, label, loss))

            self._cur_step += 1

            if display_progress:
                if "step_metrics" in self.states:
                    progress.set_postfix(**self.states["step_metrics"])

            # stop when max iter is reached
            if self._exceed_max_step():
                break

        self._call_timer(action="stop", item="Train-epoch", keep_in_history=True)
        self._call_hooks("after_train_epoch")
        self._call_timer(action="reset", item="Train-epoch")

    def _eval(
        self,
        test_dataloader: DataLoader,
        epoch: int = None,
        display_progress: bool = False,
        return_output_label: bool = True,
    ):
        # switch engine status
        self._engine.eval()

        data_iter = iter(test_dataloader)
        num_steps = len(test_dataloader)

        self._call_hooks("before_test")
        # prepare progress bar
        progress = range(num_steps)
        if display_progress:
            desc = "Evaluation"
            if epoch is not None:
                desc = "[Epoch %d / Test]" % epoch
            progress = tqdm(progress, desc=desc)

        self._call_hooks("before_test_epoch")
        self._call_timer(action="start", item="Test-epoch")
        with torch.no_grad():
            for _ in progress:
                self._call_hooks("before_test_iter")
                self._call_timer(action="start", item="Test-step")
                logits, label, loss = self.engine.execute_schedule(
                    data_iter,
                    forward_only=True,
                    return_loss=True,
                    return_output_label=return_output_label,
                )
                self._call_timer(action="stop", item="Test-step", keep_in_history=True)
                self._call_hooks("after_test_iter", output=(logits, label, loss))

                if display_progress:
                    if "step_metrics" in self.states:
                        progress.set_postfix(**self.states["step_metrics"])

        self._call_timer(action="stop", item="Test-epoch", keep_in_history=True)
        self._call_hooks("after_test_epoch")
        self._call_hooks("after_test")
        self._call_timer(action="reset", item="Test-step")
        self._call_timer(action="reset", item="Test-epoch")

    def _exceed_max_step(self):
        return self._max_steps is not None and self._cur_step >= self._max_steps

    def fit(
        self,
        train_dataloader: DataLoader,
        epochs: int,
        max_steps: int = None,
        test_dataloader: DataLoader = None,
        test_interval: int = 1,
        hooks: List[BaseHook] = None,
        display_progress: bool = False,
        return_output_label: bool = True,
    ):
        r"""Trains the model to fit training data.

        Args:
            train_dataloader (:class:`torch.utils.data.DataLoader`): DataLoader for training.
            epochs (int): Maximum number of epochs.
            max_steps (int, optional): Maximum number of running iterations.
            test_dataloader (:class:`torch.utils.data.DataLoader`, optional): DataLoader for validation.
            test_interval (int, optional): Interval of validation.
            hooks (list[BaseHook], optional): A list of hooks used in training.
            display_progress (bool, optional): If True, a progress bar will be displayed.
        """

        # set epochs and steps, consider gradient accumulation
        self._steps_per_epoch = len(train_dataloader)
        self._max_steps = max_steps
        self._max_epochs = epochs

        # check if testing is required
        should_test = False
        if test_dataloader is not None:
            should_test = True

        display_progress = self._should_display_progress(display_progress)

        # reset hooks
        self._reset_states()
        if hooks is not None:
            assert isinstance(hooks, list), f"expected argument hooks to be a list, but got {type(hooks)}"
            for hook in hooks:
                assert isinstance(hook, BaseHook), \
                    f'expected the hook to be of type BaseHook, but got {type(hook)}'
        else:
            hooks = []
        self.hooks = hooks
        self.hooks.sort(key=lambda hook: hook.priority)
        if self._verbose:
            for hook in self.hooks:
                self._logger.info(
                    f"Using {hook.__class__.__name__} for training, priority = {hook.priority}",
                    ranks=[0],
                )
            self._logger.info("Lower value means higher priority for calling hook function", ranks=[0])
        self._call_hooks("after_hook_is_attached")

        self._engine.train()
        self._call_hooks("before_train")

        # recover step value if resuming training
        last_epoch = self._cur_epoch
        if self.cur_epoch != 0:
            self._set_current_step(last_epoch)

        for epoch in range(last_epoch, epochs):
            # train for one epoch
            self._train_epoch(
                train_dataloader=train_dataloader,
                epoch=epoch,
                display_progress=display_progress,
                return_output_label=return_output_label,
            )

            # start eval
            if should_test and epoch % test_interval == 0:
                self._eval(
                    test_dataloader=test_dataloader,
                    display_progress=display_progress,
                    epoch=epoch,
                    return_output_label=return_output_label,
                )

            self._cur_epoch += 1

            # check for termination
            if self._exceed_max_step():
                self._logger.info(
                    f"Max number of steps {max_steps} has been reached, training is stopped automatically",
                    ranks=[0],
                )
                break
        self._call_hooks("after_train")
        self._call_timer("reset", "Train-epoch")

    def evaluate(
        self,
        test_dataloader: DataLoader,
        hooks: List[BaseHook] = None,
        display_progress: bool = False,
        return_output_label: bool = True,
    ):
        """Evaluates the model with testing data.

        Args:
            test_dataloader (:class:`torch.utils.data.DataLoader`, optional): Dataloader for testing.
            hooks (list, optional): A list of hooks used in evaluation. Defaults to None.
            display_progress (bool, optional): If True, the evaluation progress will be printed. Defaults to False.
            return_output_label (bool, optional): If True, the output of model and the label
                will be returned. Defaults to True.
        """
        # set display
        display_progress = self._should_display_progress(display_progress)

        # reset hooks
        self._reset_states()
        if hooks is not None:
            assert isinstance(hooks, list), f"expected argument hooks to be a list, but got {type(hooks)}"
        else:
            hooks = []
        self.hooks = hooks
        self.hooks.sort(key=lambda hook: hook.priority)
        if self._verbose:
            for hook in self.hooks:
                self._logger.info(
                    f"Using {hook.__class__.__name__} for training, priority = {hook.priority}",
                    ranks=[0],
                )
            self._logger.info("Lower value means higher priority for calling hook function", ranks=[0])
        self._call_hooks("after_hook_is_attached")

        # eval
        self._eval(
            test_dataloader=test_dataloader,
            display_progress=display_progress,
            return_output_label=return_output_label,
        )

    def predict(self, data: Union[Any, List[Any]]):
        """Uses trained model to make a prediction for a tensor or a tensor list.

        Args:
            data (Union[:class:`torch.tensor`, List[:class:`torch.tensor`]]): Data as the input.

        Returns:
            :class:`torch.tensor`: The output of model as the prediction
        """
        # predict without labels
        self._engine.eval()

        # prepare a list of (data, label) to make it iterable
        # for compatibility with schedule
        simple_dataloader = [(data, None)]
        data_iter = iter(simple_dataloader)
        output, _, _ = self.engine.execute_schedule(data_iter, forward_only=True, return_loss=False)
        return output
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from abc import ABC, abstractmethod
from typing import Callable

import torch
import torch.distributed as dist

from colossalai.communication import all_reduce
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.registry import HOOKS
from colossalai.utils import get_current_device, is_no_pp_or_last_stage

from ._base_hook import BaseHook
from ._commons_ import _format_number


class Metric(ABC):
    """A basic class of metric collectors. It collects a specific
    metric during training or evaluation and would always be used with
    :class:`MetricHook` to help it update its states and show the
    metric. So please use the corresponding hook class to make the metric
    collector work.

    Args:
        epoch_only (bool): Whether the metric is only read for the full epoch.
    """

    def __init__(self, epoch_only: bool):
        # is the metric only read for the full epoch
        self._epoch_only = epoch_only

    @property
    def epoch_only(self):
        """Returns :attr:`epoch_only`.
        """
        return self._epoch_only

    @abstractmethod
    def reset(self) -> None:
        """Resets the metric to its initial state.
        By default, this is called at the start of each epoch.
        """
        pass

    @abstractmethod
    def update(self, *args, **kwargs) -> None:
        """Updates the metric's state using the passed batch output.
        By default, this is called once for each batch.
        """
        pass

    @abstractmethod
    def get_last_step_value(self) -> float:
        """Returns the metric value in the last iteration.
        """
        pass

    @abstractmethod
    def get_accumulated_value(self):
        """Computes the metric based on its accumulated state.
        By default, this is called at the end of each epoch.

        :return: the actual quantity of interest
        :rtype: Any
        """
        pass

    @staticmethod
    @abstractmethod
    def is_better(a, b) -> bool:
        """Compares a and b, and returns whether a is better than b

        :return: The result of comparison
        :rtype: bool
        """
        pass


class LossMetric(Metric):
    """A metric collector for loss.

    Args:
        epoch_only (bool): Whether the metric is only read for the full epoch.
    """

    def __init__(self, epoch_only):
        super().__init__(epoch_only=epoch_only)
        self.last_step_loss = torch.zeros(1, device=get_current_device())
        self.accum_loss = torch.zeros(1, device=get_current_device())
        self.count = 0

    def reset(self) -> None:
        """Sets :attr:`last_step_loss` and :attr:`accum_loss` to zero.
        """
        self.last_step_loss.zero_()
        self.accum_loss.zero_()
        self.count = 0

    def update(self, loss) -> None:
        """Updates :attr:`last_step_loss` and :attr:`accum_loss` with current loss.
        It expects the output has loss.

        Args:
            loss (:class:`torch.tensor`): Current loss of the output.
        """
        # expect output to be logits, label and loss
        loss_ = loss.detach()
        self.last_step_loss.copy_(loss_)
        self.accum_loss.add_(loss_)
        self.count += 1

    def get_accumulated_value(self):
        """Returns accumulated loss.
        """
        if gpc.is_initialized(ParallelMode.DATA):
            dist.all_reduce(self.accum_loss, op=dist.ReduceOp.SUM, group=gpc.get_group(ParallelMode.DATA))
            self.accum_loss.div_(gpc.get_world_size(ParallelMode.DATA))

        self.accum_loss.div_(self.count)
        return self.accum_loss.item()

    def get_last_step_value(self) -> float:
        """Returns :attr:`last_step_loss`.
        """
        return self.last_step_loss.cpu().item()

    @staticmethod
    def is_better(a, b):
        return a < b


class LearningRateMetric(Metric):
    """A metric collector for learning rate.

    Args:
        epoch_only (bool): Whether the metric is only read for the full epoch.
        initial_lr (float, optional): Initial learning rate, defaults to 0.0.
    """

    def __init__(self, epoch_only: bool, initial_lr: float = 0.):
        super().__init__(epoch_only=epoch_only)
        self.lr = initial_lr

    def reset(self) -> None:
        pass

    def update(self, lr) -> None:
        self.lr = lr

    def get_last_step_value(self) -> float:
        return self.lr

    def get_accumulated_value(self):
        return self.lr

    @staticmethod
    def is_better(a, b) -> bool:
        pass


class AccuracyMetric(Metric):
    """A metric collector for accuracy. It only works for classification
    tasks.

    Args:
        epoch_only (bool): Whether the metric is only read for the full epoch.
        accuracy_func (:class:`typing.Callable`): Accuracy function for the classification task.
    """

    def __init__(self, epoch_only: bool, accuracy_func: Callable):
        super().__init__(epoch_only=epoch_only)
        self.acc = accuracy_func
        self.last_step_sum = torch.zeros(1, device=get_current_device())
        self.last_step_correct = torch.zeros(1, device=get_current_device())
        self.accumulated_sum = torch.zeros(1, device=get_current_device())
        self.accumulated_correct = torch.zeros(1, device=get_current_device())

    def reset(self) -> None:
        self.last_step_sum.zero_()
        self.last_step_correct.zero_()
        self.accumulated_sum.zero_()
        self.accumulated_correct.zero_()

    def update(self, logits, targets, batch_size) -> None:
        """Updates last step accuracy and accumulated accuracy with current logits
        and labels. It expects the output has logits and labels.

        Args:
            logits (:class:`torch.tensor`): The logits output of the model.
            targets (:class:`torch.tensor`): Real labels of the dataset.
            batch_size (int): Batch size of the task.
        """
        if isinstance(logits, (list, tuple)):
            logits = logits[0]
        if isinstance(targets, (list, tuple)):
            targets = targets[0]
        # update
        correct = self.acc(logits, targets)

        self.last_step_sum.fill_(batch_size)
        self.last_step_correct.fill_(correct)
        self.accumulated_sum += self.last_step_sum
        self.accumulated_correct += self.last_step_correct

    def get_last_step_value(self) -> float:
        self.last_step_sum = all_reduce(self.last_step_sum, ParallelMode.DATA)
        self.last_step_correct = all_reduce(self.last_step_correct, ParallelMode.DATA)
        return _format_number((self.last_step_correct / self.last_step_sum).cpu().item())

    def get_accumulated_value(self):
        self.accumulated_sum = all_reduce(self.accumulated_sum, ParallelMode.DATA)
        self.accumulated_correct = all_reduce(self.accumulated_correct, ParallelMode.DATA)
        return (self.accumulated_correct / self.accumulated_sum).item()

    @staticmethod
    def is_better(a, b) -> bool:
        return a > b


class MetricHook(BaseHook):
    """Specialized hook classes for :class:`Metric`.
    Some help metric collectors initialize, reset and
    update their states. Others are used to display and
    record the metric.

    Args:
        priority (int): Priority in the printing, hooks with small priority will be printed in front,
            defaults to 1. If different hooks share same priority, the order of printing would
            depend on the hooks order in the hook list.
    """

    def __init__(
        self,
        priority: int,
    ):
        super().__init__(priority)
        self._is_stage_to_compute = is_no_pp_or_last_stage()

    def _check_metric_states_initialization(self, trainer):
        if 'metrics' not in trainer.states:
            self.init_runner_states(trainer, 'metrics', dict(train={}, test={}))


@HOOKS.register_module
class LossHook(MetricHook):
    """Specialized hook class for :class:`Loss`.

    Args:
        priority (int, optional): Priority in the printing, hooks with small priority will be printed in front,
            defaults to 0. If different hooks share same priority, the order of printing would
            depend on the hooks order in the hook list.
    """

    def __init__(self, priority: int = 0):
        super().__init__(priority)

    def after_hook_is_attached(self, trainer):
        self._check_metric_states_initialization(trainer)

        if self._is_stage_to_compute:
            self.train_loss = LossMetric(epoch_only=False)
            self.test_loss = LossMetric(epoch_only=True)

            # register the metric calculator
            trainer.states['metrics']['train']['Loss'] = self.train_loss
            trainer.states['metrics']['test']['Loss'] = self.test_loss

    def before_train_epoch(self, trainer):
        if self._is_stage_to_compute:
            self.train_loss.reset()

    def after_train_iter(self, trainer, logits, label, loss):
        if self._is_stage_to_compute:
            self.train_loss.update(loss)

    def before_test_epoch(self, trainer):
        if self._is_stage_to_compute:
            self.test_loss.reset()

    def after_test_iter(self, trainer, logits, label, loss):
        if self._is_stage_to_compute:
            self.test_loss.update(loss)


@HOOKS.register_module
class AccuracyHook(MetricHook):
    """Specialized hook class for :class:`Accuracy`.

    Args:
        accuracy_func (:class:`typing.Callable`): Accuracy function for the classification task.
        priority (int, optional): Priority in the printing, hooks with small priority will be printed in front,
            defaults to 0. If different hooks share same priority, the order of printing would
            depend on the hooks order in the hook list.
    """

    def __init__(self, accuracy_func: Callable, priority: int = 0):
        super().__init__(priority)
        self.accuracy_func = accuracy_func

    def after_hook_is_attached(self, trainer):
        self._check_metric_states_initialization(trainer)
        if self._is_stage_to_compute:
            self.metric = AccuracyMetric(epoch_only=True, accuracy_func=self.accuracy_func)

            # register the metric
            trainer.states['metrics']['test']['Accuracy'] = self.metric

    def before_test(self, trainer):
        if self._is_stage_to_compute:
            self.metric.reset()

    def after_test_iter(self, trainer, logits, targets, *args):
        if self._is_stage_to_compute:
            batch_size = trainer.engine.schedule.batch_size
            self.metric.update(logits, targets, batch_size)


class ThroughputMetric(Metric):
    """Metric for :class:`Throughput`.

    Args:
        epoch_only (bool): Whether the metric is only read for the full epoch.
    """

    def __init__(self, epoch_only: bool, ignored_steps: int = 0, tflop_per_step: int = 0, use_local: bool = False):
        super().__init__(epoch_only=epoch_only)
        self.ignored_steps = ignored_steps
        self.cur_steps = 0
        self.accumulated_num_samples = torch.zeros(1, device=get_current_device())
        self.accumulated_used_time = torch.zeros(1, device=get_current_device())
        self.last_step_num_samples = torch.zeros(1, device=get_current_device())
        self.last_step_used_time = torch.zeros(1, device=get_current_device())
        self._tflop_per_step = tflop_per_step
        self._use_local = use_local

    def reset(self) -> None:
        # self.cur_steps = 0
        self.accumulated_num_samples.zero_()
        self.accumulated_used_time.zero_()
        self.last_step_num_samples.zero_()
        self.last_step_used_time.zero_()

    def update(self, num_samples, time) -> None:
        self.cur_steps += 1
        self.last_step_num_samples.fill_(num_samples)
        self.last_step_used_time.fill_(time)
        if self.cur_steps >= self.ignored_steps:
            self.accumulated_num_samples += self.last_step_num_samples
            self.accumulated_used_time += self.last_step_used_time

    def get_last_step_value(self) -> float:
        if self._use_local:
            self.last_step_num_samples *= gpc.get_world_size(ParallelMode.DATA)
        else:
            self.last_step_used_time = all_reduce(self.last_step_used_time, ParallelMode.DATA) / \
                gpc.get_world_size(ParallelMode.DATA)
            self.last_step_num_samples = all_reduce(self.last_step_num_samples, ParallelMode.DATA)

        sample_per_sec = _format_number(self.last_step_num_samples / (self.last_step_used_time + 1e-12).item())
        return sample_per_sec

    def get_last_step_info(self) -> str:
        if self._use_local:
            self.last_step_num_samples *= gpc.get_world_size(ParallelMode.DATA)
        else:
            self.last_step_used_time = all_reduce(self.last_step_used_time, ParallelMode.DATA) / \
                gpc.get_world_size(ParallelMode.DATA)
            self.last_step_num_samples = all_reduce(self.last_step_num_samples, ParallelMode.DATA)

        sample_per_sec = _format_number(self.last_step_num_samples / (self.last_step_used_time + 1e-12).item())
        if self._tflop_per_step > 0:
            tflops = _format_number(self._tflop_per_step / (self.last_step_used_time.item() + 1e-12))
            return f"{sample_per_sec} sample_per_sec, {tflops} Tflops"
        else:
            return f"{sample_per_sec} sample_per_sec"

    def get_accumulated_value(self) -> float:
        self.accumulated_used_time = all_reduce(self.accumulated_used_time, ParallelMode.DATA) / \
            gpc.get_world_size(ParallelMode.DATA)
        self.accumulated_num_samples = all_reduce(self.accumulated_num_samples, ParallelMode.DATA)
        return (self.accumulated_num_samples / (self.accumulated_used_time + 1e-12)).item()

    @staticmethod
    def is_better(a, b) -> bool:
        pass


@HOOKS.register_module
class ThroughputHook(MetricHook):
    """Specialized hook class for :class:`Throughput`. Hook to measure execution throughput (samples/sec).

    Args:
        ignored_steps (int, optional): the number of initial training steps to ignore.
        priority (int, optional): Priority in the printing, hooks with small priority will be printed in front,
            defaults to 10. If different hooks share same priority, the order of printing would
            depend on the hooks order in the hook list.
        tflop_per_step (int, optional): tera floating point operations per step.
        use_local (bool, optional): Whether to use local time for throughput calculation.
    """

    def __init__(self, ignored_steps: int = 0, priority: int = 10, tflop_per_step: int = 0, use_local=False):
        super().__init__(priority)
        self.ignored_steps = ignored_steps
        self._tflop_per_step = tflop_per_step
        self._use_local = use_local

    def after_hook_is_attached(self, trainer):
        self._check_metric_states_initialization(trainer)
        if self._is_stage_to_compute:
            self.metric = ThroughputMetric(epoch_only=True,
                                           ignored_steps=self.ignored_steps,
                                           tflop_per_step=self._tflop_per_step,
                                           use_local=self._use_local)

            # register the metric
            trainer.states['metrics']['train']['Throughput'] = self.metric
            trainer.states['metrics']['test']['Throughput'] = self.metric

    def before_train_epoch(self, trainer):
        if self._is_stage_to_compute:
            self.metric.reset()

    def after_train_iter(self, trainer, *args):
        if self._is_stage_to_compute:
            self.metric.update(trainer.engine.schedule.batch_size,
                               trainer._timer.get_timer('Train-step').get_elapsed_time())

    def before_test(self, trainer):
        if self._is_stage_to_compute:
            self.metric.reset()

    def after_test_iter(self, trainer, *args):
        if self._is_stage_to_compute:
            self.metric.update(trainer.engine.schedule.batch_size,
                               trainer._timer.get_timer('Test-step').get_elapsed_time())
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from abc import ABC

from torch import Tensor


class BaseHook(ABC):
    """This class allows users to add desired actions in specific time points
    during training or evaluation.

    :param priority: Priority in the printing, hooks with small priority will be printed in front
    :type priority: int
    """

    def __init__(self, priority: int) -> None:
        self.priority = priority

    def after_hook_is_attached(self, trainer):
        """Actions after hooks are attached to trainer.
        """
        pass

    def before_train(self, trainer):
        """Actions before training.
        """
        pass

    def after_train(self, trainer):
        """Actions after training.
        """
        pass

    def before_train_iter(self, trainer):
        """Actions before running a training iteration.
        """
        pass

    def after_train_iter(self, trainer, output: Tensor, label: Tensor, loss: Tensor):
        """Actions after running a training iteration.

        Args:
            trainer (:class:`Trainer`): Trainer which is using this hook.
            output (:class:`torch.Tensor`): Output of the model.
            label (:class:`torch.Tensor`): Labels of the input data.
            loss (:class:`torch.Tensor`): Loss between the output and input data.
        """
        pass

    def before_train_epoch(self, trainer):
        """Actions before starting a training epoch.
        """
        pass

    def after_train_epoch(self, trainer):
        """Actions after finishing a training epoch.
        """
        pass

    def before_test(self, trainer):
        """Actions before evaluation.
        """
        pass

    def after_test(self, trainer):
        """Actions after evaluation.
        """
        pass

    def before_test_epoch(self, trainer):
        """Actions before starting a testing epoch.
        """
        pass

    def after_test_epoch(self, trainer):
        """Actions after finishing a testing epoch.
        """
        pass

    def before_test_iter(self, trainer):
        """Actions before running a testing iteration.
        """
        pass

    def after_test_iter(self, trainer, output: Tensor, label: Tensor, loss: Tensor):
        """Actions after running a testing iteration.

        Args:
            trainer (:class:`Trainer`): Trainer which is using this hook
            output (:class:`torch.Tensor`): Output of the model
            label (:class:`torch.Tensor`): Labels of the input data
            loss (:class:`torch.Tensor`): Loss between the output and input data
        """
        pass

    def init_runner_states(self, trainer, key, val):
        """Initializes trainer's state.

        Args:
            trainer (:class:`Trainer`): Trainer which is using this hook
            key: Key of state to be reset
            val: Value of state to be reset
        """
        if key not in trainer.states:
            trainer.states[key] = val
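# A minimal custom-hook sketch built on BaseHook above: log the loss every N training iterations.
# `EveryNStepsLossHook` and its parameters are illustrative, not part of the library.
from colossalai.logging import get_dist_logger
from colossalai.trainer.hooks import BaseHook


class EveryNStepsLossHook(BaseHook):

    def __init__(self, every_n_steps: int = 100, priority: int = 10):
        super().__init__(priority)
        self.every_n_steps = every_n_steps
        self.logger = get_dist_logger()

    def after_train_iter(self, trainer, output, label, loss):
        # called with the unpacked (output, label, loss) tuple produced by the trainer
        if trainer.cur_step % self.every_n_steps == 0:
            self.logger.info(f'step {trainer.cur_step}: loss = {loss.item():.4f}', ranks=[0])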
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import os
import os.path as osp
from typing import List

from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.registry import HOOKS
from colossalai.logging import DistributedLogger
from colossalai.utils import report_memory_usage, is_dp_rank_0, \
    is_tp_rank_0, is_no_pp_or_last_stage, MultiTimer
from ._base_hook import BaseHook
from ._commons_ import _format_number
from colossalai.trainer.hooks._metric_hook import ThroughputMetric


class LogByEpochHook(BaseHook):
    """Hook to log by epoch.

    Args:
        logger (:class:`colossalai.logging.DistributedLogger`): Logger for recording the log information.
        interval (int, optional): Interval of printing log information, defaults to 1.
        priority (int, optional): Priority in the printing, hooks with small priority will be printed in front,
            defaults to 1. If different hooks share same priority, the order of printing would
            depend on the hooks order in the hook list.
    """

    def __init__(self, logger, interval: int = 1, priority: int = 1):
        super().__init__(priority)
        self.logger = logger
        self._interval = interval

    def _is_epoch_to_log(self, trainer):
        return trainer.cur_epoch % self._interval == 0


@HOOKS.register_module
class LogMetricByStepHook(BaseHook):
    """Hook to log metric by step.

    Args:
        priority (int, optional): Priority in the printing, hooks with small priority will be printed in front,
            defaults to 10. If different hooks share same priority, the order of printing would
            depend on the hooks order in the hook list.
    """

    def __init__(self, priority: int = 10):
        super().__init__(priority)

    def after_train_iter(self, trainer, *args):
        trainer.states['step_metrics'] = dict()
        for metric_name, metric_calculator in trainer.states['metrics']['train'].items():
            if isinstance(metric_calculator, ThroughputMetric):
                trainer.states['step_metrics'][metric_name.lower()] = metric_calculator.get_last_step_info()
            else:
                trainer.states['step_metrics'][metric_name.lower()] = metric_calculator.get_last_step_value()

    def after_test_iter(self, trainer, *args):
        trainer.states['step_metrics'] = dict()
        for metric_name, metric_calculator in trainer.states['metrics']['test'].items():
            if isinstance(metric_calculator, ThroughputMetric):
                trainer.states['step_metrics'][metric_name.lower()] = metric_calculator.get_last_step_info()
            else:
                trainer.states['step_metrics'][metric_name.lower()] = metric_calculator.get_last_step_value()


@HOOKS.register_module
class LogMetricByEpochHook(LogByEpochHook):
    """Specialized hook to record the metric to log.

    Args:
        logger (:class:`colossalai.logging.DistributedLogger`): Logger for recording the log information.
        interval (int, optional): Interval of printing log information, defaults to 1.
        priority (int, optional): Priority in the printing, hooks with small priority will be printed in front,
            defaults to 10. If different hooks share same priority, the order of printing would
            depend on the hooks order in the hook list.
    """

    def __init__(self, logger, interval: int = 1, priority: int = 10) -> None:
        super().__init__(logger, interval, priority)
        self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0() and is_no_pp_or_last_stage()

    def _get_str(self, trainer, mode):
        msg = []
        for metric_name, metric_calculator in trainer.states['metrics'][mode].items():
            msg.append(f'{metric_name} = {_format_number(metric_calculator.get_accumulated_value())}')
        msg = ' | '.join(msg)
        return msg

    def after_train_epoch(self, trainer):
        if self._is_epoch_to_log(trainer):
            msg = self._get_str(trainer=trainer, mode='train')

            if self._is_rank_to_log:
                self.logger.info(f'[Epoch {trainer.cur_epoch} / Train]: {msg}')
                # f'Training - Epoch {trainer.cur_epoch} - {self.__class__.__name__}: {msg}')

    def after_test_epoch(self, trainer):
        if self._is_epoch_to_log(trainer):
            msg = self._get_str(trainer=trainer, mode='test')
            if self._is_rank_to_log:
                self.logger.info(f'[Epoch {trainer.cur_epoch} / Test]: {msg}')
                # f'Testing - Epoch {trainer.cur_epoch} - {self.__class__.__name__}: {msg}')


@HOOKS.register_module
class TensorboardHook(BaseHook):
    """Specialized hook to record the metric to Tensorboard.

    Args:
        log_dir (str): Directory of log.
        ranks (list): Ranks of processors.
        parallel_mode (:class:`colossalai.context.parallel_mode.ParallelMode`, optional): Parallel mode
            used in trainer, defaults to colossalai.context.parallel_mode.ParallelMode.GLOBAL.
        priority (int, optional): Priority in the printing, hooks with small priority will be printed in front,
            defaults to 10. If different hooks share same priority, the order of printing would
            depend on the hooks order in the hook list.
    """

    def __init__(
        self,
        log_dir: str,
        ranks: List = None,
        parallel_mode: ParallelMode = ParallelMode.GLOBAL,
        priority: int = 10,
    ) -> None:
        super().__init__(priority=priority)
        from torch.utils.tensorboard import SummaryWriter

        # create log dir
        if not gpc.is_initialized(ParallelMode.GLOBAL) or gpc.get_global_rank() == 0:
            os.makedirs(log_dir, exist_ok=True)

        # determine the ranks to generate tensorboard logs
        self._is_valid_rank_to_log = False
        if not gpc.is_initialized(parallel_mode):
            self._is_valid_rank_to_log = True
        else:
            local_rank = gpc.get_local_rank(parallel_mode)

            if ranks is None or local_rank in ranks:
                self._is_valid_rank_to_log = True

        # check for the last rank of the pipeline process group
        if gpc.is_initialized(ParallelMode.PIPELINE) and \
                not gpc.is_last_rank(ParallelMode.PIPELINE) and self._is_valid_rank_to_log:
            raise ValueError("Tensorboard hook can only log on the last rank of pipeline process group")

        if self._is_valid_rank_to_log:
            # create workspace on only one rank
            if gpc.is_initialized(parallel_mode):
                rank = gpc.get_local_rank(parallel_mode)
            else:
                rank = 0

            # create workspace
            log_dir = osp.join(log_dir, f'{parallel_mode}_rank_{rank}')
            os.makedirs(log_dir, exist_ok=True)

            self.writer = SummaryWriter(log_dir=log_dir, filename_suffix=f'_rank_{rank}')

    def _log_by_iter(self, trainer, mode: str):
        for metric_name, metric_calculator in trainer.states['metrics'][mode].items():
            if metric_calculator.epoch_only:
                continue
            val = metric_calculator.get_last_step_value()

            if self._is_valid_rank_to_log:
                self.writer.add_scalar(f'{metric_name}/{mode}', val, trainer.cur_step)

    def _log_by_epoch(self, trainer, mode: str):
        for metric_name, metric_calculator in trainer.states['metrics'][mode].items():
            if metric_calculator.epoch_only:
                val = metric_calculator.get_accumulated_value()
                if self._is_valid_rank_to_log:
                    self.writer.add_scalar(f'{metric_name}/{mode}', val, trainer.cur_step)

    def after_test_iter(self, trainer, *args):
        self._log_by_iter(trainer, mode='test')

    def after_test_epoch(self, trainer):
        self._log_by_epoch(trainer, mode='test')

    def after_train_iter(self, trainer, *args):
        self._log_by_iter(trainer, mode='train')

    def after_train_epoch(self, trainer):
        self._log_by_epoch(trainer, mode='train')


@HOOKS.register_module
class LogTimingByEpochHook(LogByEpochHook):
    """Specialized hook to write timing record to log.

    Args:
        timer (:class:`colossalai.utils.MultiTimer`): Timer for the hook.
        logger (:class:`colossalai.logging.DistributedLogger`): Logger for recording the log information.
        interval (int, optional): Interval of printing log information, defaults to 1.
        priority (int, optional): Priority in the printing, hooks with small priority will be printed in front,
            defaults to 10. If different hooks share same priority, the order of printing would
            depend on the hooks order in the hook list.
        log_eval (bool, optional): Whether to write logs during evaluation, defaults to True.
        ignore_num_train_steps (int, optional): Number of training steps to ignore, defaults to 0.
    """

    def __init__(self,
                 timer: MultiTimer,
                 logger: DistributedLogger,
                 interval: int = 1,
                 priority: int = 10,
                 log_eval: bool = True,
                 ignore_num_train_steps: int = 0) -> None:
        super().__init__(logger=logger, interval=interval, priority=priority)
        self._timer = timer
        self._log_eval = log_eval
        self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0() and is_no_pp_or_last_stage()

        # extra handling to avoid the unstable readings of the first
        # few training steps to affect the history mean time
        self._ignore_num_train_steps = ignore_num_train_steps
        self._is_train_step_history_trimmed = False

    def _get_message(self, mode):
        msg = []
        for timer_name, timer in self._timer:
            if timer_name.startswith(mode):
                last_elapsed_time = timer.get_elapsed_time()
                if timer.has_history:
                    if timer_name == 'Train-step' and not self._is_train_step_history_trimmed:
                        timer._history = timer._history[self._ignore_num_train_steps:]
                        self._is_train_step_history_trimmed = True
                    history_mean = timer.get_history_mean()
                    history_sum = timer.get_history_sum()
                    msg.append(
                        f'{timer_name}: last = {_format_number(last_elapsed_time)} s, mean = {_format_number(history_mean)} s'
                    )
                else:
                    msg.append(f'{timer_name}: last = {_format_number(last_elapsed_time)} s')

        msg = ' | '.join(msg)
        return msg

    def after_train_epoch(self, trainer):
        """Writes log after finishing a training epoch.
        """
        if self._is_epoch_to_log(trainer) and self._is_rank_to_log:
            msg = self._get_message('Train')
            self.logger.info(f'[Epoch {trainer.cur_epoch} / Train]: {msg} | #steps/epoch = {trainer.steps_per_epoch}')

    def after_test_epoch(self, trainer):
        """Writes log after finishing a testing epoch.
        """
        if self._is_epoch_to_log(trainer) and self._is_rank_to_log and self._log_eval:
            msg = self._get_message('Test')
            self.logger.info(f'[Epoch {trainer.cur_epoch} / Test]: {msg}')


@HOOKS.register_module
class LogMemoryByEpochHook(LogByEpochHook):
    """Specialized Hook to write memory usage record to log.

    Args:
        logger (:class:`colossalai.logging.DistributedLogger`): Logger for recording the log information.
        interval (int, optional): Interval of printing log information, defaults to 1.
        priority (int, optional): Priority in the printing, hooks with small priority will be printed in front,
            defaults to 1. If different hooks share same priority, the order of printing would
            depend on the hooks order in the hook list.
        log_eval (bool, optional): Whether to write logs during evaluation, defaults to True.
    """

    def __init__(
            self,
            logger: DistributedLogger,
            interval: int = 1,
            priority: int = 10,
            log_eval: bool = True,
            report_cpu: bool = False,    # no reference
    ) -> None:
        super().__init__(logger=logger, interval=interval, priority=priority)
        self._log_eval = log_eval
        self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0()

    def before_train(self, trainer):
        """Resets before training.
        """
        if self._is_epoch_to_log(trainer) and self._is_rank_to_log:
            report_memory_usage('Before-train', self.logger)

    def after_train_epoch(self, trainer):
        """Writes log after finishing a training epoch.
        """
        if self._is_epoch_to_log(trainer) and self._is_rank_to_log:
            report_memory_usage(f'[Epoch {trainer.cur_epoch} / Train]', self.logger)

    def after_test(self, trainer):
        """Reports after testing.
        """
        if self._is_epoch_to_log(trainer) and self._is_rank_to_log and self._log_eval:
            report_memory_usage(f'[Epoch {trainer.cur_epoch} / Test]', self.logger)
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch

from colossalai.logging import get_dist_logger
from colossalai.registry import HOOKS
from colossalai.trainer.hooks import BaseHook
from colossalai.utils.checkpointing import save_checkpoint
from ._lr_scheduler_hook import LRSchedulerHook


@HOOKS.register_module
class SaveCheckpointHook(BaseHook):
    """Saves the model at given intervals during training.

    Args:
        interval (int, optional): Number of epochs between saving the checkpoint, defaults to 1.
            If save_by_iter is True, this arg refers to the number of iters between saving.
        checkpoint_dir (str, optional): File name to save the checkpoint, defaults to None.
        model (torch.nn.Module, Optional): The model to save, defaults to None. When not passing,
            'trainer.engine.model' will be used. We encourage you to pass the model in it to avoid some
            unexpected bugs, especially when using **DDP**.
        save_by_iter (bool, optional): Whether saving the checkpoint by iter, default to False.
        priority (int, optional): Priority in the printing, hooks with small priority will be printed in front,
            defaults to 10. If different hooks share same priority, the order of printing would
            depend on the hooks order in the hook list.
    """

    def __init__(self,
                 interval: int = 1,
                 checkpoint_dir: str = None,
                 model: torch.nn.Module = None,
                 save_by_iter: bool = False,
                 priority: int = 10):
        super().__init__(priority=priority)
        self.interval = interval
        self.checkpoint_dir = checkpoint_dir
        self.model = model
        self.save_by_iter = save_by_iter
        self.logger = get_dist_logger()

        # get lr scheduler from the LRSchedulerHook before train
        self._lr_scheduler = None

    def after_hook_is_attached(self, trainer):
        # get lr scheduler if exists
        for hook in trainer.hooks:
            if isinstance(hook, LRSchedulerHook):
                self._lr_scheduler = hook.lr_scheduler
                break
        self.model = self.model if self.model is not None else trainer.engine.model

    def after_train_iter(self, trainer, output, label, loss):
        """Saves the model after a training iter.
        """
        # save by interval
        if self.save_by_iter and trainer.cur_step % self.interval == 0:
            save_checkpoint(self.checkpoint_dir, trainer.cur_epoch, self.model, trainer.engine.optimizer,
                            self._lr_scheduler)
            self.logger.info(f'checkpoint for iteration {trainer.cur_step} is saved to {self.checkpoint_dir}',
                             ranks=[0])
        else:
            pass

    def after_train_epoch(self, trainer):
        """Saves the model after a training epoch.
        """
        # save by interval
        if trainer.cur_epoch % self.interval == 0:
            save_checkpoint(self.checkpoint_dir, trainer.cur_epoch, self.model, trainer.engine.optimizer,
                            self._lr_scheduler)
            self.logger.info(f'checkpoint for epoch {trainer.cur_epoch} is saved to {self.checkpoint_dir}', ranks=[0])
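# Usage sketch: save a checkpoint every 5 epochs. Passing the model explicitly is recommended
# (see the docstring above); the Linear model and './checkpoints' path are illustrative placeholders.
import torch
from colossalai.trainer.hooks import SaveCheckpointHook

model = torch.nn.Linear(16, 16)    # stand-in for your real model
ckpt_hook = SaveCheckpointHook(interval=5, checkpoint_dir='./checkpoints', model=model)
# add `ckpt_hook` to the hook list passed to Trainer.fit so it runs after each epoch (or iteration)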
from ._base_hook import BaseHook
from ._checkpoint_hook import SaveCheckpointHook
from ._log_hook import (LogMemoryByEpochHook, LogMetricByEpochHook, LogMetricByStepHook, LogTimingByEpochHook,
                        TensorboardHook)
from ._lr_scheduler_hook import LRSchedulerHook
from ._metric_hook import AccuracyHook, LossHook, MetricHook, ThroughputHook

__all__ = [
    'BaseHook', 'MetricHook', 'LossHook', 'AccuracyHook', 'LogMetricByEpochHook', 'TensorboardHook',
    'LogTimingByEpochHook', 'LogMemoryByEpochHook', 'LRSchedulerHook', 'ThroughputHook', 'LogMetricByStepHook',
    'SaveCheckpointHook'
]
from colossalai.registry import HOOKS
from torch import Tensor

from ._metric_hook import LearningRateMetric, MetricHook


@HOOKS.register_module
class LRSchedulerHook(MetricHook):
    r"""Build LR scheduler for trainer.

    Args:
        lr_scheduler (:class:`colossalai.nn.lr_scheduler`): The specific LR scheduler
            in range of ``colossalai.nn.lr_scheduler``, more details about ``lr_scheduler`` could be found in
            `lr_scheduler <https://github.com/hpcaitech/ColossalAI/tree/main/colossalai/nn/lr_scheduler>`_.
        by_epoch (bool): If `True`, the LR will be scheduled every epoch. Else, the LR will be scheduled every batch.
        store_lr_in_state (bool, optional): If `True`, store the learning rate in each state, defaults to `True`.
        priority (int, optional): Priority in the printing, hooks with small priority will be printed in front,
            defaults to 1. If different hooks share same priority, the order of printing would
            depend on the hooks order in the hook list.
    """

    def __init__(
        self,
        lr_scheduler,
        by_epoch: bool,
        store_lr_in_state: bool = True,
        priority: int = 1,
    ):
        super().__init__(priority=priority)
        self.by_epoch = by_epoch
        self.lr_scheduler = lr_scheduler
        self.store_lr_in_state = store_lr_in_state

    def after_hook_is_attached(self, trainer):
        self._check_metric_states_initialization(trainer)
        trainer.states['metrics']['train']['LR'] = LearningRateMetric(epoch_only=self.by_epoch,
                                                                      initial_lr=self.lr_scheduler.get_last_lr()[0])

    def after_train_epoch(self, trainer):
        if self.by_epoch:
            self.lr_scheduler.step()
            trainer.states['metrics']['train']['LR'].update(self.lr_scheduler.get_last_lr()[0])

    def after_train_iter(self, trainer, output: Tensor, label: Tensor, loss: Tensor):
        if not self.by_epoch:
            self.lr_scheduler.step()
            trainer.states['metrics']['train']['LR'].update(self.lr_scheduler.get_last_lr()[0])
import torch


def _format_number(val, prec=5):
    if isinstance(val, float):
        return f'{val:.{prec}g}'
    elif torch.is_tensor(val) and torch.is_floating_point(val):
        return f'{val.item():.{prec}g}'
    return val
from .cuda_native import FusedScaleMaskSoftmax, LayerNorm, MultiHeadAttention

__all__ = [
    "LayerNorm",
    "FusedScaleMaskSoftmax",
    "MultiHeadAttention",
]
import torch

from colossalai.nn.layer.colossalai_layer import Embedding, Linear
from colossalai.utils import get_current_device

from .bias_dropout_add import bias_dropout_add_fused_train
from .bias_gelu import bias_gelu_impl

JIT_OPTIONS_SET = False


def set_jit_fusion_options():
    """Set PyTorch JIT layer fusion options.
    """
    # LSG: the latest pytorch and CUDA versions may not support
    # the following jit settings
    global JIT_OPTIONS_SET
    if not JIT_OPTIONS_SET:
        # flags required to enable jit fusion kernels
        TORCH_MAJOR = int(torch.__version__.split('.')[0])
        TORCH_MINOR = int(torch.__version__.split('.')[1])
        if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10):
            # nvfuser
            torch._C._jit_set_profiling_executor(True)
            torch._C._jit_set_profiling_mode(True)
            torch._C._jit_override_can_fuse_on_cpu(False)
            torch._C._jit_override_can_fuse_on_gpu(False)
            torch._C._jit_set_texpr_fuser_enabled(False)
            torch._C._jit_set_nvfuser_enabled(True)
            torch._C._debug_set_autodiff_subgraph_inlining(False)
        else:
            # legacy pytorch fuser
            torch._C._jit_set_profiling_mode(False)
            torch._C._jit_set_profiling_executor(False)
            torch._C._jit_override_can_fuse_on_cpu(True)
            torch._C._jit_override_can_fuse_on_gpu(True)

        JIT_OPTIONS_SET = True


def warmup_jit_fusion(batch_size: int,
                      hidden_size: int,
                      seq_length: int = 512,
                      vocab_size: int = 32768,
                      dtype: torch.dtype = torch.float32):
    """Compile JIT functions before the main training steps."""

    embed = Embedding(vocab_size, hidden_size).to(get_current_device())
    linear_1 = Linear(hidden_size, hidden_size * 4, skip_bias_add=True).to(get_current_device())
    linear_2 = Linear(hidden_size * 4, hidden_size, skip_bias_add=True).to(get_current_device())

    x = torch.randint(vocab_size, (batch_size, seq_length), dtype=torch.long, device=get_current_device())
    x = embed(x)
    y, y_bias = linear_1(x)
    z, z_bias = linear_2(y)

    # Warmup JIT fusions with the input grad_enable state of both forward
    # prop and recomputation
    for bias_grad, input_grad in zip([True, True], [False, True]):
        for _ in range(10):
            bias = torch.rand_like(y_bias, dtype=dtype, device=get_current_device())
            input_ = torch.rand_like(y, dtype=dtype, device=get_current_device())
            bias.requires_grad, input_.requires_grad = bias_grad, input_grad
            bias_gelu_impl(input_, bias)

    # Warmup fused bias+dropout+add
    dropout_rate = 0.1
    # Warmup JIT fusions with the input grad_enable state of both forward
    # prop and recomputation
    for input_grad, bias_grad, residual_grad in zip([False, True], [True, True], [True, True]):
        for _ in range(10):
            input_ = torch.rand_like(z, dtype=dtype, device=get_current_device())
            residual = torch.rand_like(x, dtype=dtype, device=get_current_device())
            bias = torch.rand_like(z_bias, dtype=dtype, device=get_current_device())
            input_.requires_grad = input_grad
            bias.requires_grad = bias_grad
            residual.requires_grad = residual_grad
            bias_dropout_add_fused_train(input_, bias, residual, dropout_rate)

    torch.cuda.empty_cache()
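# Usage sketch: enable the fusers and warm up the fused kernels once per process before training.
# Assumptions: a CUDA device is available, the sizes below are illustrative, and this module is
# importable as colossalai.kernel.jit (adjust the import path to wherever the module lives in your tree;
# warmup_jit_fusion is defined in option.py but not re-exported by the package __init__).
import torch
from colossalai.kernel.jit import set_jit_fusion_options
from colossalai.kernel.jit.option import warmup_jit_fusion

set_jit_fusion_options()
warmup_jit_fusion(batch_size=8, hidden_size=1024, seq_length=512, vocab_size=32768, dtype=torch.float16)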
from .option import set_jit_fusion_options
from .bias_dropout_add import bias_dropout_add_fused_train, bias_dropout_add_fused_inference
from .bias_gelu import bias_gelu_impl

__all__ = [
    "bias_dropout_add_fused_train", "bias_dropout_add_fused_inference", "bias_gelu_impl", "set_jit_fusion_options"
]
import torch


def bias_dropout_add(x, bias, residual, prob, training):
    # type: (Tensor, Tensor, Tensor, float, bool) -> Tensor
    out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
    out = residual + out
    return out


@torch.jit.script
def bias_dropout_add_fused_train(x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor,
                                 prob: float) -> torch.Tensor:
    return bias_dropout_add(x, bias, residual, prob, True)


@torch.jit.script
def bias_dropout_add_fused_inference(x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor,
                                     prob: float) -> torch.Tensor:
    return bias_dropout_add(x, bias, residual, prob, False)
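# Quick sanity sketch: with dropout disabled (the inference variant), the fused op reduces to
# residual + (x + bias). The shapes are illustrative, and the import path assumes this module is
# exposed as colossalai.kernel.jit.
import torch
from colossalai.kernel.jit import bias_dropout_add_fused_inference

x = torch.randn(4, 16)
bias = torch.randn(16)
residual = torch.randn(4, 16)

fused = bias_dropout_add_fused_inference(x, bias, residual, 0.1)
reference = residual + x + bias
assert torch.allclose(fused, reference)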
import torch

###### BIAS GELU FUSION/ NO AUTOGRAD ################
# 1/sqrt(2*pi)-> 0.3989423
# 1/sqrt(2)   -> 0.70710678
# sqrt(2/pi)  -> 0.79788456
# this function is tanh approximation of gelu
# actual gelu is:
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))


@torch.jit.script
def bias_gelu(bias, y):
    x = bias + y
    return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))


# gradient of tanh approximation of gelu
# gradient of actual gelu is:
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
@torch.jit.script
def bias_gelu_back(g, bias, y):
    x = bias + y
    tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
    # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
    ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
    return ff * g


class GeLUFunction(torch.autograd.Function):

    @staticmethod
    # bias is an optional argument
    def forward(ctx, input, bias):
        ctx.save_for_backward(input, bias)
        return bias_gelu(bias, input)

    @staticmethod
    def backward(ctx, grad_output):
        input, bias = ctx.saved_tensors
        tmp = bias_gelu_back(grad_output, bias, input)
        return tmp, tmp


bias_gelu_impl = GeLUFunction.apply
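# Sanity sketch: the fused op implements the tanh approximation of GELU applied to (input + bias).
# Assumptions: a PyTorch version that supports F.gelu(..., approximate='tanh'), and that this module
# is exposed as colossalai.kernel.jit. Forward-only check; tensors here are illustrative.
import torch
import torch.nn.functional as F
from colossalai.kernel.jit import bias_gelu_impl

y = torch.randn(8, 32, dtype=torch.double)
bias = torch.randn(32, dtype=torch.double)

fused = bias_gelu_impl(y, bias)                      # computes bias_gelu(bias, y)
reference = F.gelu(y + bias, approximate='tanh')     # unfused tanh-approximation reference

assert torch.allclose(fused, reference, atol=1e-6)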
import math from dataclasses import dataclass import torch from torch import nn from torch.autograd import Function def check_config(config): if config.hidden_size % config.nhead != 0: raise Exception("hidden_size % nhead != 0") factor = 8 if config.fp16 else 4 upbound = factor * 1024 * 4 if config.hidden_size > upbound: # as required by ln backward kernel currently raise Exception(f"hidden_size > {upbound}") head_dim = config.hidden_size // config.nhead if head_dim % factor != 0: # as required by reshape kernel raise Exception(f"head_dim({head_dim}) % {factor} != 0") def calc_offset(sizes): offsets = [0] tmp = 0 for x in sizes: tmp += x offsets.append(tmp) return offsets colossal_multihead_attention = None @dataclass class Config: max_batch_tokens: int # max batch token numbers max_seq_len: int # max sequence length hidden_size: int # size of transformer hidden layers nhead: int # number of heads in attention attn_prob_dropout_ratio: float # attention score dropout ratio hidden_dropout_ratio: float # dropout ration before residual norm_first: bool # norm_first fp16: bool # fp16 presion class MultiHeadAttention1DFunc(Function): @staticmethod def forward(ctx, input, input_mask, in_proj_weight, in_proj_bias, out_proj_weight, out_proj_bias, norm_weight, norm_bias, config): cuda_module = colossal_multihead_attention forward_func = (cuda_module.multihead_attention_fw_fp16 if config.fp16 else cuda_module.multihead_attention_fw_fp32) if config.fp16: input = input.to(torch.half) input_mask = input_mask.to(torch.half) (output,) = forward_func(config.layer_id, input, input_mask, in_proj_weight, in_proj_bias, out_proj_weight, out_proj_bias, norm_weight, norm_bias, config.training, config.norm_first) if config.is_grad_enabled and config.training: ctx.save_for_backward(output, input, input_mask, in_proj_weight, in_proj_bias, out_proj_weight, out_proj_bias, norm_weight, norm_bias) ctx.config = config return output @staticmethod def backward(ctx, grad_output): assert ctx.config.training cuda_module = colossal_multihead_attention backward_func = (cuda_module.multihead_attention_bw_fp16 if ctx.config.fp16 else cuda_module.multihead_attention_bw_fp32) output, input, input_mask, in_proj_weight, in_proj_bias, out_proj_weight, \ out_proj_bias, norm_weight, norm_bias = ctx.saved_tensors grad_input = None grad_in_proj_weight = None grad_in_proj_bias = None grad_out_proj_weight = None grad_out_proj_bias = None grad_norm_weight = None grad_norm_bias = None if ctx.config.fp16: grad_output = grad_output.to(torch.half) output = output.to(torch.half) input = input.to(torch.half) input_mask = input_mask.to(torch.half) grad_input, grad_in_proj_weight, grad_in_proj_bias, grad_out_proj_weight, \ grad_out_proj_bias, grad_norm_weight, grad_norm_bias = backward_func( ctx.config.layer_id, grad_output, output, input, input_mask, in_proj_weight, in_proj_bias, out_proj_weight, out_proj_bias, norm_weight, norm_bias) return (grad_input, None, grad_in_proj_weight, grad_in_proj_bias, grad_out_proj_weight, grad_out_proj_bias, grad_norm_weight, grad_norm_bias, None) class MultiHeadAttention(nn.Module): """Initialize the MultiHeadAttention. Static variable: layer_id: The layer-index counter starting from 0 and incrementing by 1 every time a layer object is instantiated, e.g. if a model has 24 transformer layers, layer_id goes from 0 to 23. Arguments: hidden_size: Total dimension of hidden_size. nhead: Number of parallel attention heads. 
batch_size: Batch Size for one foward max_seq_len: Max length of input sequence dropout: Dropout probability norm_first: perform LayerNorms before attention """ layer_id = 0 def __init__(self, hidden_size, nhead, batch_size, max_seq_len, dropout=0.0, norm_first=False, fp16=True, pg=None): super(MultiHeadAttention, self).__init__() self.config = Config(batch_size * max_seq_len, max_seq_len, hidden_size, nhead, dropout, dropout, norm_first, fp16) check_config(self.config) self.pg = pg self.pg_size = 1 if self.pg: self.pg_size = pg.size() self.config.layer_id = MultiHeadAttention.layer_id MultiHeadAttention.layer_id = MultiHeadAttention.layer_id + 1 # Load cuda modules if needed global colossal_multihead_attention if colossal_multihead_attention is None: from colossalai.kernel.op_builder import MultiHeadAttnBuilder multihead_attention = MultiHeadAttnBuilder().load() colossal_multihead_attention = multihead_attention # create the layer in cuda kernels. cuda_module = colossal_multihead_attention create_layer_func = (cuda_module.create_multihead_attention_fp16 if self.config.fp16 else cuda_module.create_multihead_attention_fp32) create_layer_func( self.config.layer_id, self.config.max_batch_tokens, self.config.max_seq_len, self.config.hidden_size, self.config.nhead, self.config.attn_prob_dropout_ratio, self.config.hidden_dropout_ratio, self.config.norm_first, self.pg, ) hs = self.config.hidden_size self.precision = torch.float32 if self.config.fp16: self.precision = torch.half self.hs_per_rank = int(hs / self.pg_size) self.in_proj_weight = nn.Parameter(torch.Tensor(3, self.hs_per_rank, hs)) self.in_proj_bias = nn.Parameter(torch.Tensor(3, self.hs_per_rank)) self.out_proj_weight = nn.Parameter(torch.Tensor(hs, self.hs_per_rank)) self.out_proj_bias = nn.Parameter(torch.Tensor(hs)) self.norm_weight = nn.Parameter(torch.Tensor(hs)) self.norm_bias = nn.Parameter(torch.Tensor(hs)) self.reset_parameters() torch.cuda.empty_cache() def calc_bound(self, w): fan_in, _ = nn.init._calculate_fan_in_and_fan_out(w) bound = 1.0 / math.sqrt(fan_in) return bound def reset_parameters(self): hs = self.config.hidden_size nn.init.zeros_(self.out_proj_bias) nn.init.ones_(self.norm_weight) nn.init.zeros_(self.norm_bias) if self.pg_size > 1: rank_in_pg = torch.distributed.get_rank(self.pg) attn_qkvw_global = torch.empty(hs * 3, hs) attn_qkvb_global = torch.empty(hs * 3) nn.init.xavier_uniform_(attn_qkvw_global, 1.0 / math.sqrt(2.0)) bound = self.calc_bound(attn_qkvw_global) nn.init.uniform_(attn_qkvb_global, -bound, bound) attn_qkvw_global = attn_qkvw_global.cuda() attn_qkvb_global = attn_qkvb_global.cuda() torch.distributed.broadcast(attn_qkvw_global, src=0, group=self.pg) torch.distributed.broadcast(attn_qkvb_global, src=0, group=self.pg) attn_qkvw_global = attn_qkvw_global.cpu() attn_qkvb_global = attn_qkvb_global.cpu() with torch.no_grad(): self.in_proj_weight.copy_( attn_qkvw_global.view(3, hs, hs)[:, int(hs * rank_in_pg / self.pg_size):int(hs * (rank_in_pg + 1) / self.pg_size), :]) self.in_proj_bias.copy_( attn_qkvb_global.view(3, hs)[:, int(hs * rank_in_pg / self.pg_size):int(hs * (rank_in_pg + 1) / self.pg_size)]) attn_ow_global = torch.empty(hs, hs) nn.init.xavier_uniform_(attn_ow_global, 1.0) attn_ow_global = attn_ow_global.cuda() torch.distributed.broadcast(attn_ow_global, src=0, group=self.pg) attn_ow_global = attn_ow_global.cpu() with torch.no_grad(): self.out_proj_weight.copy_(attn_ow_global[:, int(hs * rank_in_pg / self.pg_size):int(hs * (rank_in_pg + 1) / self.pg_size)]) else: attn_qkvw = 
self.in_proj_weight.view(-1, hs) nn.init.xavier_uniform_(attn_qkvw, 1.0 / math.sqrt(2.0)) bound = self.calc_bound(attn_qkvw) nn.init.uniform_(self.in_proj_bias, -bound, bound) nn.init.xavier_uniform_(self.out_proj_weight, 1.0) def state_dict(self, destination=None, prefix="", keep_vars=False): destination = torch.nn.Module.state_dict(self, destination=destination, prefix=prefix, keep_vars=keep_vars) return destination def forward(self, hidden_states, encoder_padding_mask): self.config.training = self.training self.config.is_grad_enabled = torch.is_grad_enabled() hidden_states = hidden_states.contiguous() encoder_padding_mask = ((encoder_padding_mask * -1e8).type_as(hidden_states).contiguous()) bs, sl, dim = hidden_states.size() if bs * sl > self.config.max_batch_tokens: raise ValueError(f"Batch token numbers {bs * sl} exceeds the limit {self.config.max_batch_tokens}.") if sl > self.config.max_seq_len: raise ValueError(f"Sequence length {sl} exceeds the limit {self.config.max_seq_len}.") if len(encoder_padding_mask.size()) == 1: assert bs == 1 and sl == encoder_padding_mask.size(0) else: assert bs == encoder_padding_mask.size(0) and sl == encoder_padding_mask.size(1) output = MultiHeadAttention1DFunc.apply(hidden_states, encoder_padding_mask, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.norm_weight, self.norm_bias, self.config) return output.to(self.precision)
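# --- Hedged usage sketch (not part of the original file) ---
# Shapes below are arbitrary; running this requires a CUDA device and the
# ColossalAI multi-head attention extension, which is JIT-built on first use.
if __name__ == '__main__' and torch.cuda.is_available():
    attn = MultiHeadAttention(hidden_size=1024, nhead=16, batch_size=8,
                              max_seq_len=256, dropout=0.1, fp16=True).cuda()
    hidden_states = torch.randn(8, 256, 1024, dtype=torch.half, device='cuda')
    padding_mask = torch.zeros(8, 256, device='cuda')    # 1 marks padded tokens
    out = attn(hidden_states, padding_mask)              # -> (8, 256, 1024)
    print(out.shape)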
from .layer_norm import MixedFusedLayerNorm as LayerNorm from .multihead_attention import MultiHeadAttention from .scaled_softmax import FusedScaleMaskSoftmax, ScaledUpperTriangMaskedSoftmax __all__ = ['LayerNorm', 'MultiHeadAttention', 'FusedScaleMaskSoftmax', 'ScaledUpperTriangMaskedSoftmax']
import enum

import torch
import torch.nn as nn

from colossalai.kernel.op_builder.scaled_masked_softmax import ScaledMaskedSoftmaxBuilder
from colossalai.kernel.op_builder.scaled_upper_triangle_masked_softmax import ScaledUpperTrainglemaskedSoftmaxBuilder

try:
    from colossalai._C import scaled_masked_softmax, scaled_upper_triang_masked_softmax
except ImportError:
    scaled_masked_softmax = None
    scaled_upper_triang_masked_softmax = None


class AttnMaskType(enum.Enum):
    padding = 1
    causal = 2


class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
    """
    Fused operation which performs the following three operations in sequence
    1. Scale the tensor.
    2. Apply upper triangular mask (typically used in gpt models).
    3. Perform softmax.
    """

    @staticmethod
    def forward(ctx, inputs, scale):
        # build and load kernel if not pre-built
        global scaled_upper_triang_masked_softmax
        if scaled_upper_triang_masked_softmax is None:
            scaled_upper_triang_masked_softmax = ScaledUpperTrainglemaskedSoftmaxBuilder().load()

        scale_t = torch.tensor([scale])
        softmax_results = scaled_upper_triang_masked_softmax.forward(inputs, scale_t[0])

        ctx.save_for_backward(softmax_results, scale_t)
        return softmax_results

    @staticmethod
    def backward(ctx, output_grads):
        softmax_results, scale_t = ctx.saved_tensors
        input_grads = scaled_upper_triang_masked_softmax.backward(output_grads, softmax_results, scale_t[0])

        return input_grads, None


class ScaledMaskedSoftmax(torch.autograd.Function):
    """
    Fused operation which performs the following three operations in sequence
    1. Scale the tensor.
    2. Apply the mask.
    3. Perform softmax.
    """

    @staticmethod
    def forward(ctx, inputs, mask, scale):
        scale_t = torch.tensor([scale])

        # build and load kernel if not pre-built
        global scaled_masked_softmax
        if scaled_masked_softmax is None:
            scaled_masked_softmax = ScaledMaskedSoftmaxBuilder().load()

        softmax_results = scaled_masked_softmax.forward(inputs, mask, scale_t[0])
        ctx.save_for_backward(softmax_results, scale_t)
        return softmax_results

    @staticmethod
    def backward(ctx, output_grads):
        softmax_results, scale_t = ctx.saved_tensors

        input_grads = scaled_masked_softmax.backward(output_grads, softmax_results, scale_t[0])
        return input_grads, None, None, None


class FusedScaleMaskSoftmax(nn.Module):
    """
    Fused operation: scaling + mask + softmax

    Arguments:
        input_in_fp16: Flag to indicate if input is in fp16 data format.
        input_in_bf16: Flag to indicate if input is in bf16 data format.
        attn_mask_type: Attention mask type (pad or causal)
        scaled_masked_softmax_fusion: Flag to indicate whether the user wants to use softmax fusion
        mask_func: Mask function to be applied.
        softmax_in_fp32: If True, softmax is performed at fp32 precision.
        scale: Scaling factor used in input tensor scaling.
    """

    def __init__(
        self,
        input_in_fp16,
        input_in_bf16,
        attn_mask_type,
        scaled_masked_softmax_fusion,
        mask_func,
        softmax_in_fp32,
        scale,
    ):
        super(FusedScaleMaskSoftmax, self).__init__()
        self.input_in_fp16 = input_in_fp16
        self.input_in_bf16 = input_in_bf16
        assert not (self.input_in_fp16
                    and self.input_in_bf16), "both fp16 and bf16 flags cannot be active at the same time."
self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16 self.attn_mask_type = attn_mask_type self.scaled_masked_softmax_fusion = scaled_masked_softmax_fusion self.mask_func = mask_func self.softmax_in_fp32 = softmax_in_fp32 self.scale = scale assert (self.scale is None or softmax_in_fp32), "softmax should be in fp32 when scaled" def forward(self, input, mask): # [b, np, sq, sk] assert input.dim() == 4 if self.is_kernel_available(mask, *input.size()): return self.forward_fused_softmax(input, mask) else: return self.forward_torch_softmax(input, mask) def is_kernel_available(self, mask, b, np, sq, sk): attn_batches = b * np if (self.scaled_masked_softmax_fusion # user want to fuse and self.input_in_float16 # input must be fp16 and mask is not None # mask tensor must not be None and 16 < sk <= 2048 # sk must be 16 ~ 2048 and sq % 4 == 0 # sq must be divisor of 4 and attn_batches % 4 == 0 # np * b must be divisor of 4 ): if 0 <= sk <= 2048: batch_per_block = self.get_batch_per_block(sq, sk, b, np) if self.attn_mask_type == AttnMaskType.causal: if attn_batches % batch_per_block == 0: return True else: if sq % batch_per_block == 0: return True return False def forward_fused_softmax(self, input, mask): b, np, sq, sk = input.size() scale = self.scale if self.scale is not None else 1.0 if self.attn_mask_type == AttnMaskType.causal: assert sq == sk, "causal mask is only for self attention" # input is 3D tensor (attn_batches, sq, sk) input = input.view(-1, sq, sk) probs = ScaledUpperTriangMaskedSoftmax.apply(input, scale) return probs.view(b, np, sq, sk) else: # input is 4D tensor (b, np, sq, sk) return ScaledMaskedSoftmax.apply(input, mask, scale) def forward_torch_softmax(self, input, mask): if self.input_in_float16 and self.softmax_in_fp32: input = input.float() if self.scale is not None: input = input * self.scale mask_output = self.mask_func(input, mask) if mask is not None else input probs = torch.nn.Softmax(dim=-1)(mask_output) if self.input_in_float16 and self.softmax_in_fp32: if self.input_in_fp16: probs = probs.half() else: probs = probs.bfloat16() return probs def get_batch_per_block(self, sq, sk, b, np): return scaled_masked_softmax.get_batch_per_block(sq, sk, b, np)
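# --- Hedged usage sketch (not part of the original file) ---
# With fusion disabled the module takes the plain PyTorch softmax path, so this
# runs without the CUDA kernels. _illustrative_mask_func is a stand-in mask
# function introduced here for the example only; shapes are arbitrary.
if __name__ == '__main__':
    def _illustrative_mask_func(scores, mask):
        return scores.masked_fill(mask, -10000.0)

    softmax_layer = FusedScaleMaskSoftmax(input_in_fp16=False,
                                          input_in_bf16=False,
                                          attn_mask_type=AttnMaskType.padding,
                                          scaled_masked_softmax_fusion=False,
                                          mask_func=_illustrative_mask_func,
                                          softmax_in_fp32=True,
                                          scale=None)
    scores = torch.randn(2, 4, 8, 8)                      # [b, np, sq, sk]
    mask = torch.zeros(2, 1, 8, 8, dtype=torch.bool)      # nothing masked
    probs = softmax_layer(scores, mask)
    print(probs.sum(dim=-1)[0, 0])                        # each row sums to 1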
""" Fused Attention =============== This is a Triton implementation of the Flash Attention algorithm (see: Dao et al., https://arxiv.org/pdf/2205.14135v2.pdf; Rabe and Staats https://arxiv.org/pdf/2112.05682v2.pdf; Triton https://github.com/openai/triton) """ import math import os import subprocess import torch def triton_cuda_check(): cuda_home = os.getenv("CUDA_HOME", default="/usr/local/cuda") cuda_version = subprocess.check_output([os.path.join(cuda_home, "bin/nvcc"), "--version"]).decode().strip() cuda_version = cuda_version.split('release ')[1] cuda_version = cuda_version.split(',')[0] cuda_version = cuda_version.split('.') if len(cuda_version) == 2 and \ (int(cuda_version[0]) == 11 and int(cuda_version[1]) >= 4) or \ int(cuda_version[0]) > 11: return True return False try: import triton import triton.language as tl if triton_cuda_check(): HAS_TRITON = True else: print("triton requires cuda >= 11.4") HAS_TRITON = False except ImportError: print('please install triton from https://github.com/openai/triton') HAS_TRITON = False try: from flash_attn.flash_attention import FlashAttention from flash_attn.flash_attn_interface import ( flash_attn_unpadded_func, flash_attn_unpadded_kvpacked_func, flash_attn_unpadded_qkvpacked_func, ) HAS_FLASH_ATTN = True except ImportError: HAS_FLASH_ATTN = False print('please install flash_attn from https://github.com/HazyResearch/flash-attention') try: from xformers.ops.fmha import memory_efficient_attention HAS_MEM_EFF_ATTN = True except ImportError: HAS_MEM_EFF_ATTN = False print('please install xformers from https://github.com/facebookresearch/xformers') if HAS_TRITON: @triton.jit def _fwd_kernel( Q, K, V, sm_scale, TMP, L, M, # NOTE: TMP is a scratchpad buffer to workaround a compiler bug Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, Z, H, N_CTX, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, ): start_m = tl.program_id(0) off_hz = tl.program_id(1) # initialize offsets offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) offs_d = tl.arange(0, BLOCK_DMODEL) off_q = off_hz * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk off_k = off_hz * stride_qh + offs_n[:, None] * stride_kn + offs_d[None, :] * stride_kk off_v = off_hz * stride_qh + offs_n[:, None] * stride_qm + offs_d[None, :] * stride_qk # Initialize pointers to Q, K, V q_ptrs = Q + off_q k_ptrs = K + off_k v_ptrs = V + off_v # initialize pointer to m and l t_ptrs = TMP + off_hz * N_CTX + offs_m m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") l_i = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) # load q: it will stay in SRAM throughout q = tl.load(q_ptrs) # loop over k, v and update accumulator for start_n in range(0, (start_m + 1) * BLOCK_M, BLOCK_N): start_n = tl.multiple_of(start_n, BLOCK_N) # -- compute qk ---- k = tl.load(k_ptrs + start_n * stride_kn) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) qk += tl.dot(q, k, trans_b=True) qk *= sm_scale qk += tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), 0, float("-inf")) # -- compute m_ij, p, l_ij m_ij = tl.max(qk, 1) p = tl.exp(qk - m_ij[:, None]) l_ij = tl.sum(p, 1) # -- update m_i and l_i m_i_new = tl.maximum(m_i, m_ij) alpha = tl.exp(m_i - m_i_new) beta = tl.exp(m_ij - m_i_new) l_i_new = alpha * l_i + beta * l_ij # -- update output accumulator -- # scale p p_scale = 
beta / l_i_new p = p * p_scale[:, None] # scale acc acc_scale = l_i / l_i_new * alpha tl.store(t_ptrs, acc_scale) acc_scale = tl.load(t_ptrs) # BUG: have to store and immediately load acc = acc * acc_scale[:, None] # update acc v = tl.load(v_ptrs + start_n * stride_vk) p = p.to(tl.float16) acc += tl.dot(p, v) # update m_i and l_i l_i = l_i_new m_i = m_i_new # rematerialize offsets to save registers start_m = tl.program_id(0) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) # write back l and m l_ptrs = L + off_hz * N_CTX + offs_m m_ptrs = M + off_hz * N_CTX + offs_m tl.store(l_ptrs, l_i) tl.store(m_ptrs, m_i) # initialize pointers to output offs_n = tl.arange(0, BLOCK_DMODEL) off_o = off_hz * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on out_ptrs = Out + off_o tl.store(out_ptrs, acc) @triton.jit def _bwd_preprocess( Out, DO, L, NewDO, Delta, BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr, ): off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M) off_n = tl.arange(0, D_HEAD) # load o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32) do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32) denom = tl.load(L + off_m).to(tl.float32) # compute do = do / denom[:, None] delta = tl.sum(o * do, axis=1) # write-back tl.store(NewDO + off_m[:, None] * D_HEAD + off_n[None, :], do) tl.store(Delta + off_m, delta) @triton.jit def _bwd_kernel( Q, K, V, sm_scale, Out, DO, DQ, DK, DV, L, M, D, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, Z, H, N_CTX, num_block, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, ): off_hz = tl.program_id(0) off_z = off_hz // H off_h = off_hz % H # offset pointers for batch/head Q += off_z * stride_qz + off_h * stride_qh K += off_z * stride_qz + off_h * stride_qh V += off_z * stride_qz + off_h * stride_qh DO += off_z * stride_qz + off_h * stride_qh DQ += off_z * stride_qz + off_h * stride_qh DK += off_z * stride_qz + off_h * stride_qh DV += off_z * stride_qz + off_h * stride_qh for start_n in range(0, num_block): lo = start_n * BLOCK_M # initialize row/col offsets offs_qm = lo + tl.arange(0, BLOCK_M) offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M) offs_m = tl.arange(0, BLOCK_N) offs_k = tl.arange(0, BLOCK_DMODEL) # initialize pointers to value-like data q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk) k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) v_ptrs = V + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk) do_ptrs = DO + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk) dq_ptrs = DQ + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk) # pointer to row-wise quantities in value-like data D_ptrs = D + off_hz * N_CTX m_ptrs = M + off_hz * N_CTX # initialize dv amd dk dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) # k and v stay in SRAM throughout k = tl.load(k_ptrs) v = tl.load(v_ptrs) # loop over rows for start_m in range(lo, num_block * BLOCK_M, BLOCK_M): offs_m_curr = start_m + offs_m # load q, k, v, do on-chip q = tl.load(q_ptrs) # recompute p = softmax(qk, dim=-1).T # NOTE: `do` is pre-divided by `l`; no normalization here qk = tl.dot(q, k, trans_b=True) qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf")) m = tl.load(m_ptrs + offs_m_curr) p = tl.exp(qk * sm_scale - m[:, None]) # compute dv do = tl.load(do_ptrs) dv += 
tl.dot(p.to(tl.float16), do, trans_a=True) # compute dp = dot(v, do) Di = tl.load(D_ptrs + offs_m_curr) dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None] dp += tl.dot(do, v, trans_b=True) # compute ds = p * (dp - delta[:, None]) ds = p * dp * sm_scale # compute dk = dot(ds.T, q) dk += tl.dot(ds.to(tl.float16), q, trans_a=True) # # compute dq dq = tl.load(dq_ptrs, eviction_policy="evict_last") dq += tl.dot(ds.to(tl.float16), k) tl.store(dq_ptrs, dq, eviction_policy="evict_last") # # increment pointers dq_ptrs += BLOCK_M * stride_qm q_ptrs += BLOCK_M * stride_qm do_ptrs += BLOCK_M * stride_qm # write-back dv_ptrs = DV + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk) dk_ptrs = DK + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) tl.store(dv_ptrs, dv) tl.store(dk_ptrs, dk) class _TritonFlashAttention(torch.autograd.Function): @staticmethod def forward(ctx, q, k, v, sm_scale): BLOCK = 128 # shape constraints Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1] assert Lq == Lk and Lk == Lv assert Lk in {16, 32, 64, 128} o = torch.empty_like(q) grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1]) tmp = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32) L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32) m = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32) num_warps = 4 if Lk <= 64 else 8 _fwd_kernel[grid]( q, k, v, sm_scale, tmp, L, m, o, q.stride(0), q.stride(1), q.stride(2), q.stride(3), k.stride(0), k.stride(1), k.stride(2), k.stride(3), v.stride(0), v.stride(1), v.stride(2), v.stride(3), o.stride(0), o.stride(1), o.stride(2), o.stride(3), q.shape[0], q.shape[1], q.shape[2], BLOCK_M=BLOCK, BLOCK_N=BLOCK, BLOCK_DMODEL=Lk, num_warps=num_warps, num_stages=1, ) ctx.save_for_backward(q, k, v, o, L, m) ctx.BLOCK = BLOCK ctx.grid = grid ctx.sm_scale = sm_scale ctx.BLOCK_DMODEL = Lk return o @staticmethod def backward(ctx, do): q, k, v, o, l, m = ctx.saved_tensors do = do.contiguous() dq = torch.zeros_like(q, dtype=torch.float32) dk = torch.empty_like(k) dv = torch.empty_like(v) do_scaled = torch.empty_like(do) delta = torch.empty_like(l) _bwd_preprocess[(ctx.grid[0] * ctx.grid[1],)]( o, do, l, do_scaled, delta, BLOCK_M=ctx.BLOCK, D_HEAD=ctx.BLOCK_DMODEL, ) # NOTE: kernel currently buggy for other values of `num_warps` num_warps = 8 _bwd_kernel[(ctx.grid[1],)]( q, k, v, ctx.sm_scale, o, do_scaled, dq, dk, dv, l, m, delta, q.stride(0), q.stride(1), q.stride(2), q.stride(3), k.stride(0), k.stride(1), k.stride(2), k.stride(3), v.stride(0), v.stride(1), v.stride(2), v.stride(3), q.shape[0], q.shape[1], q.shape[2], ctx.grid[0], BLOCK_M=ctx.BLOCK, BLOCK_N=ctx.BLOCK, BLOCK_DMODEL=ctx.BLOCK_DMODEL, num_warps=num_warps, num_stages=1, ) return dq, dk, dv, None def triton_flash_attention(q, k, v, sm_scale): """ Arguments: q: (batch, nheads, seq, headdim) k: (batch, nheads, seq, headdim) v: (batch, nheads, seq, headdim) sm_scale: float. The scaling of QK^T before applying softmax. 
Return: out: (batch, nheads, seq, headdim) """ if HAS_TRITON: return _TritonFlashAttention.apply(q, k, v, sm_scale) else: raise RuntimeError("Triton kernel requires CUDA 11.4+!") if HAS_FLASH_ATTN: from einops import rearrange class MaskedFlashAttention(torch.nn.Module): def __init__(self, num_attention_heads: int, attention_head_size: int, attention_dropout: float) -> None: super().__init__() self.num_attention_heads = num_attention_heads self.attention_head_size = attention_head_size self.attention_func = FlashAttention(softmax_scale=math.sqrt(attention_head_size), attention_dropout=attention_dropout) def forward(self, query_key_value: torch.Tensor, attention_mask: torch.Tensor, causal=False): if attention_mask.dtype is not torch.bool: attention_mask = attention_mask.bool() qkv = rearrange(query_key_value, 'b s (three h d) -> b s three h d', three=3, h=self.num_attention_heads) context, _ = self.attention_func(qkv, key_padding_mask=attention_mask, causal=causal) context = rearrange(context, 'b s h d -> b s (h d)') return context def flash_attention_qkv(qkv, sm_scale, batch_size, seq_len, dropout_p=0., causal=False): """ Arguments: qkv: (batch * seqlen, 3, nheads, headdim) batch_size: int. seq_len: int. sm_scale: float. The scaling of QK^T before applying softmax. Default to 1 / sqrt(headdim). dropout_p: float. causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling). Return: out: (total, nheads, headdim). """ max_s = seq_len cu_seqlens = torch.arange(0, (batch_size + 1) * seq_len, step=seq_len, dtype=torch.int32, device=qkv.device) out = flash_attn_unpadded_qkvpacked_func(qkv, cu_seqlens, max_s, dropout_p, softmax_scale=sm_scale, causal=causal) return out def flash_attention_q_kv(q, kv, sm_scale, batch_size, q_seqlen, kv_seqlen, dropout_p=0., causal=False): """ Arguments: q: (batch * q_seqlen, nheads, headdim) kv: (batch * kv_seqlen, 2, nheads, headdim) batch_size: int. seq_len: int. sm_scale: float. The scaling of QK^T before applying softmax. Default to 1 / sqrt(headdim). dropout_p: float. causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling). Return: out: (total, nheads, headdim). """ cu_seqlens_q = torch.arange(0, (batch_size + 1) * q_seqlen, step=q_seqlen, dtype=torch.int32, device=q.device) cu_seqlens_k = torch.arange(0, (batch_size + 1) * kv_seqlen, step=kv_seqlen, dtype=torch.int32, device=kv.device) out = flash_attn_unpadded_kvpacked_func(q, kv, cu_seqlens_q, cu_seqlens_k, q_seqlen, kv_seqlen, dropout_p, sm_scale, causal) return out def flash_attention_q_k_v(q, k, v, sm_scale, batch_size, q_seqlen, kv_seqlen, dropout_p=0., causal=False): """ Arguments: q: (batch * q_seqlen, nheads, headdim) k: (batch * kv_seqlen, nheads, headdim) v: (batch * kv_seqlen, nheads, headdim) batch_size: int. seq_len: int. dropout_p: float. Dropout probability. sm_scale: float. The scaling of QK^T before applying softmax. Default to 1 / sqrt(headdim). causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling). Return: out: (total, nheads, headdim). 
""" cu_seqlens_q = torch.arange(0, (batch_size + 1) * q_seqlen, step=q_seqlen, dtype=torch.int32, device=q.device) cu_seqlens_kv = torch.arange(0, (batch_size + 1) * kv_seqlen, step=kv_seqlen, dtype=torch.int32, device=k.device) return flash_attn_unpadded_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, q_seqlen, kv_seqlen, dropout_p, sm_scale, causal) if HAS_MEM_EFF_ATTN: from einops import rearrange from xformers.ops.fmha import LowerTriangularMask class MemoryEfficientAttention(torch.nn.Module): def __init__(self, hidden_size: int, num_attention_heads: int, attention_dropout: float = 0.0): super().__init__() attention_head_size = hidden_size // num_attention_heads self.scale = 1 / attention_head_size**0.5 self.dropout = attention_dropout def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: torch.Tensor): context = memory_efficient_attention(query, key, value, attention_mask, self.dropout, self.scale) context = rearrange(context, 'b s h d -> b s (h d)') return context
"""This code is from NVIDIA apex: https://github.com/NVIDIA/apex with some changes. """ import numbers import torch from torch.cuda.amp import custom_bwd, custom_fwd from torch.nn import init from torch.nn.parameter import Parameter from colossalai.kernel.op_builder.layernorm import LayerNormBuilder try: from colossalai._C import layer_norm except ImportError: layer_norm = None class FusedLayerNormAffineFunction(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float32) def forward(ctx, input, weight, bias, normalized_shape, eps): ctx.normalized_shape = normalized_shape ctx.eps = eps input_ = input.contiguous() weight_ = weight.contiguous() bias_ = bias.contiguous() global layer_norm if layer_norm is None: layer_norm = LayerNormBuilder().load() output, mean, invvar = layer_norm.forward_affine(input_, ctx.normalized_shape, weight_, bias_, ctx.eps) ctx.layernorm_op = layer_norm ctx.save_for_backward(input_, weight_, bias_, mean, invvar) return output @staticmethod @custom_bwd def backward(ctx, grad_output): input_, weight_, bias_, mean, invvar = ctx.saved_tensors grad_input = grad_weight = grad_bias = None grad_input, grad_weight, grad_bias \ = layer_norm.backward_affine( grad_output.contiguous(), mean, invvar, input_, ctx.normalized_shape, weight_, bias_, ctx.eps) return grad_input, grad_weight, grad_bias, None, None class MixedFusedLayerNorm(torch.nn.Module): def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None): super(MixedFusedLayerNorm, self).__init__() if isinstance(normalized_shape, numbers.Integral): normalized_shape = (normalized_shape,) self.normalized_shape = torch.Size(normalized_shape) self.eps = eps self.weight = Parameter(torch.empty(*normalized_shape, device=device, dtype=dtype)) self.bias = Parameter(torch.empty(*normalized_shape, device=device, dtype=dtype)) self.reset_parameters() def reset_parameters(self): init.ones_(self.weight) init.zeros_(self.bias) def forward(self, input): return FusedLayerNormAffineFunction.apply(input, self.weight, self.bias, self.normalized_shape, self.eps) def __repr__(self): return f'MixedFusedLayerNorm(normalized_shape={self.normalized_shape}, eps={self.eps})'
import os from torchvision.datasets import CIFAR10 def main(): dir_path = os.path.dirname(os.path.realpath(__file__)) data_root = os.path.join(dir_path, 'data') dataset = CIFAR10(root=data_root, download=True) if __name__ == '__main__': main()
import torch from torchvision.models import resnet50 from tqdm import tqdm import colossalai from colossalai.auto_parallel.tensor_shard.initialize import initialize_model from colossalai.core import global_context as gpc from colossalai.device.device_mesh import DeviceMesh from colossalai.logging import get_dist_logger from colossalai.nn.lr_scheduler import CosineAnnealingLR def synthesize_data(): img = torch.rand(gpc.config.BATCH_SIZE, 3, 32, 32) label = torch.randint(low=0, high=10, size=(gpc.config.BATCH_SIZE,)) return img, label def main(): colossalai.launch_from_torch(config='./config.py') logger = get_dist_logger() # trace the model with meta data model = resnet50(num_classes=10).cuda() input_sample = {'x': torch.rand([gpc.config.BATCH_SIZE * torch.distributed.get_world_size(), 3, 32, 32]).to('meta')} device_mesh = DeviceMesh(physical_mesh_id=torch.tensor([0, 1, 2, 3]), mesh_shape=[2, 2], init_process_group=True) model, solution = initialize_model(model, input_sample, device_mesh=device_mesh, return_solution=True) if gpc.get_global_rank() == 0: for node_strategy in solution: print(node_strategy) # build criterion criterion = torch.nn.CrossEntropyLoss() # optimizer optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4) # lr_scheduler lr_scheduler = CosineAnnealingLR(optimizer, total_steps=gpc.config.NUM_EPOCHS) for epoch in range(gpc.config.NUM_EPOCHS): model.train() # if we use synthetic data # we assume it only has 10 steps per epoch num_steps = range(10) progress = tqdm(num_steps) for _ in progress: # generate fake data img, label = synthesize_data() img = img.cuda() label = label.cuda() optimizer.zero_grad() output = model(img) train_loss = criterion(output, label) train_loss.backward(train_loss) torch.cuda.synchronize() optimizer.step() lr_scheduler.step() # run evaluation model.eval() correct = 0 total = 0 # if we use synthetic data # we assume it only has 10 steps for evaluation num_steps = range(10) progress = tqdm(num_steps) for _ in progress: # generate fake data img, label = synthesize_data() img = img.cuda() label = label.cuda() with torch.no_grad(): output = model(img) test_loss = criterion(output, label) pred = torch.argmax(output, dim=-1) correct += torch.sum(pred == label) total += img.size(0) logger.info( f"Epoch {epoch} - train loss: {train_loss:.5}, test loss: {test_loss:.5}, acc: {correct / total:.5}, lr: {lr_scheduler.get_last_lr()[0]:.5g}", ranks=[0]) if __name__ == '__main__': main()
BATCH_SIZE = 32 NUM_EPOCHS = 2
import time from copy import deepcopy from functools import partial from typing import Callable, Tuple import numpy as np import torch import torch.nn as nn import torchvision.models as tm from transformers import GPT2Config, GPT2LMHeadModel from colossalai.auto_parallel.checkpoint import CheckpointSolverRotor from colossalai.fx import metainfo_trace def bench(gm: torch.fx.GraphModule, criterion: torch.nn.Module, data_gen: Callable, num_steps: int = 5) -> Tuple[int, int]: """Benchmarking a given graph module Args: gm (torch.fx.GraphModule): The graph module to benchmark. criterion (torch.nn.Module): Loss function. data_gen (Callable): Data generator. num_steps (int, optional): Number of test steps. Defaults to 5. Returns: Tuple[int, int]: peak memory in MB and step time in MS. """ gm.train() gm.cuda() step_time = float('inf') torch.cuda.synchronize() torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() cached = torch.cuda.max_memory_allocated(device="cuda") try: for _ in range(num_steps): args, label = data_gen() output, loss = None, None torch.cuda.synchronize(device="cuda") start = time.time() output = gm(*args) loss = criterion(output, label) loss.backward() torch.cuda.synchronize(device="cuda") step_time = min(step_time, time.time() - start) for child in gm.children(): for param in child.parameters(): param.grad = None del args, label, output, loss except: del args, label, output, loss gm.to("cpu") torch.cuda.empty_cache() peak_mem = (torch.cuda.max_memory_allocated(device="cuda") - cached) / 1024**2 return peak_mem, step_time * 1.0e3 def bench_rotor(gm: torch.fx.GraphModule, criterion: torch.nn.Module, data_gen: Callable, num_steps: int = 5, sample_points: int = 20, free_memory: int = torch.cuda.mem_get_info()[0], start_factor: int = 4) -> Tuple[np.array, list, list]: """Auto Checkpoint Rotor Algorithm benchmarking Benchmarks the Auto Checkpoint Rotor Algorithm for a given graph module and data. Args: gm (torch.fx.GraphModule): The graph module to benchmark. criterion (torch.nn.Module): Loss function. data_gen (Callable): Data generator. num_steps (int, optional): Number of test steps. Defaults to 5. sample_points (int, optional): Number of sample points. Defaults to 20. free_memory (int, optional): Max memory budget in Byte. Defaults to torch.cuda.mem_get_info()[0]. start_factor (int, optional): Start memory budget factor for benchmark, the start memory budget will be free_memory / start_factor. Defaults to 4. Returns: Tuple[np.array, list, list]: return budgets vector (MB), peak memory vector (MB), step time vector (MS). 
""" peak_hist, step_hist = [], [] raw_graph = deepcopy(gm.graph) for budget in np.linspace(free_memory // start_factor, free_memory, sample_points): gm = metainfo_trace(gm, *data_gen()[0]) solver = CheckpointSolverRotor(gm.graph, free_memory=budget) try: gm.graph = solver.solve(verbose=False) peak_memory, step_time = bench(gm, criterion, data_gen, num_steps=num_steps) except: peak_memory, step_time = budget / 1024**2, float('inf') peak_hist.append(peak_memory) step_hist.append(step_time) gm.graph = deepcopy(raw_graph) return np.linspace(free_memory // start_factor, free_memory, sample_points) / 1024**2, peak_hist, step_hist class GPTLMModel(nn.Module): """ GPT Model """ def __init__(self, hidden_size=768, num_layers=12, num_attention_heads=12, max_seq_len=1024, vocab_size=50257, checkpoint=False): super().__init__() self.checkpoint = checkpoint self.model = GPT2LMHeadModel( GPT2Config(n_embd=hidden_size, n_layer=num_layers, n_head=num_attention_heads, n_positions=max_seq_len, n_ctx=max_seq_len, vocab_size=vocab_size)) if checkpoint: self.model.gradient_checkpointing_enable() def forward(self, input_ids, attention_mask): # Only return lm_logits return self.model(input_ids=input_ids, attention_mask=attention_mask, use_cache=not self.checkpoint)[0] class GPTLMLoss(nn.Module): """ GPT Loss """ def __init__(self): super().__init__() self.loss_fn = nn.CrossEntropyLoss() def forward(self, logits, labels): shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens return self.loss_fn(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) def gpt2_medium(checkpoint=False): return GPTLMModel(hidden_size=1024, num_layers=24, num_attention_heads=16, checkpoint=checkpoint) def gpt2_xl(checkpoint=False): return GPTLMModel(hidden_size=1600, num_layers=48, num_attention_heads=32, checkpoint=checkpoint) def gpt2_6b(checkpoint=False): return GPTLMModel(hidden_size=4096, num_layers=30, num_attention_heads=16, checkpoint=checkpoint) def data_gen_gpt2(batch_size, seq_len, vocab_size, device='cuda:0'): """ Generate random data for gpt2 benchmarking """ input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=device) attention_mask = torch.ones_like(input_ids, device=device) return (input_ids, attention_mask), attention_mask def data_gen_resnet(batch_size, shape, device='cuda:0'): """ Generate random data for resnet benchmarking """ data = torch.empty(batch_size, *shape, device=device) label = torch.empty(batch_size, dtype=torch.long, device=device).random_(1000) return (data,), label
from setuptools import find_packages, setup setup( name='auto_parallel', version='0.0.1', description='', packages=find_packages(), install_requires=[ 'torch', 'numpy', 'tqdm', ], )
import time from argparse import ArgumentParser from functools import partial import matplotlib.pyplot as plt import torch import torch.multiprocessing as mp import torchvision.models as tm from bench_utils import GPTLMLoss, bench_rotor, data_gen_gpt2, data_gen_resnet, gpt2_medium import colossalai from colossalai.auto_parallel.checkpoint import CheckpointSolverRotor from colossalai.fx import metainfo_trace, symbolic_trace from colossalai.utils import free_port def _benchmark(rank, world_size, port, args): """ Auto activation checkpoint solver benchmark, we provide benchmark on two models: gpt2_medium and resnet50. The benchmark will sample in a range of memory budget for each model and output the benchmark summary and data visualization of peak memory vs. budget memory and relative step time vs. peak memory. """ colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl') if args.model == 'resnet50': model = tm.resnet50() data_gen = partial(data_gen_resnet, batch_size=128, shape=(3, 224, 224)) gm = symbolic_trace(model) gm = metainfo_trace(gm, torch.empty(128, 3, 224, 224, device='meta')) loss = torch.nn.CrossEntropyLoss() else: model = gpt2_medium() data_gen = partial(data_gen_gpt2, batch_size=8, seq_len=1024, vocab_size=50257) data, mask = data_gen(device='meta')[0] gm = symbolic_trace(model, meta_args={'input_ids': data, 'attention_mask': mask}) gm = metainfo_trace(gm, data, mask) loss = GPTLMLoss() free_memory = 11000 * 1024**2 if args.model == 'resnet50' else 56000 * 1024**2 start_factor = 4 if args.model == 'resnet50' else 10 # trace and benchmark budgets, peak_hist, step_hist = bench_rotor(gm, loss, data_gen, num_steps=5, sample_points=15, free_memory=free_memory, start_factor=start_factor) # print summary print("==============benchmark summary==============") for budget, peak, step in zip(budgets, peak_hist, step_hist): print(f'memory budget: {budget:.3f} MB, peak memory: {peak:.3f} MB, step time: {step:.3f} MS') # plot valid results fig, axs = plt.subplots(1, 2, figsize=(16, 8)) valid_idx = step_hist.index(next(step for step in step_hist if step != float("inf"))) # plot peak memory vs. budget memory axs[0].plot(budgets[valid_idx:], peak_hist[valid_idx:]) axs[0].plot([budgets[valid_idx], budgets[-1]], [budgets[valid_idx], budgets[-1]], linestyle='--') axs[0].set_xlabel("Budget Memory (MB)") axs[0].set_ylabel("Peak Memory (MB)") axs[0].set_title("Peak Memory vs. Budget Memory") # plot relative step time vs. budget memory axs[1].plot(peak_hist[valid_idx:], [step_time / step_hist[-1] for step_time in step_hist[valid_idx:]]) axs[1].plot([peak_hist[valid_idx], peak_hist[-1]], [1.0, 1.0], linestyle='--') axs[1].set_xlabel("Peak Memory (MB)") axs[1].set_ylabel("Relative Step Time") axs[1].set_title("Step Time vs. Peak Memory") axs[1].set_ylim(0.8, 1.5) # save plot fig.savefig(f"{args.model}_benchmark.png") def auto_activation_checkpoint_benchmark(args): world_size = 1 run_func_module = partial(_benchmark, world_size=world_size, port=free_port(), args=args) mp.spawn(run_func_module, nprocs=world_size) if __name__ == "__main__": parser = ArgumentParser("Auto Activation Checkpoint Solver Benchmark") parser.add_argument("--model", type=str, default='gpt2', choices=['gpt2', 'resnet50']) args = parser.parse_args() auto_activation_checkpoint_benchmark(args)
import time
from argparse import ArgumentParser
from copy import deepcopy
from functools import partial

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.multiprocessing as mp
import torchvision.models as tm
from bench_utils import bench, data_gen_resnet

import colossalai
from colossalai.auto_parallel.checkpoint import CheckpointSolverRotor
from colossalai.fx import metainfo_trace, symbolic_trace
from colossalai.utils import free_port


def _benchmark(rank, world_size, port):
    """Auto activation checkpoint batch-size benchmark.

    This benchmark measures the throughput of ResNet-152 with the activation
    checkpoint solver, given a memory budget of 95% of the maximum GPU memory
    and batch sizes of [512, 1024, 2048]. Because the solver provides an
    optimality guarantee, larger batch sizes can use a larger portion of the
    GPU FLOPS while the recomputation schedule causes only a minor performance
    drop, so a better training batch size may be found for the model (e.g.
    combined with a large-batch optimizer such as LAMB).
    """
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    model = tm.resnet152()
    gm = symbolic_trace(model)
    raw_graph = deepcopy(gm.graph)
    peak_mems, through_puts, batch_sizes = [], [], [512, 1024, 2048]
    for batch_size in batch_sizes:
        batch_size = int(batch_size)
        gm = metainfo_trace(gm, torch.empty(batch_size, 3, 224, 224, device='meta'))
        solver = CheckpointSolverRotor(gm.graph, free_memory=torch.cuda.mem_get_info()[0] * 0.95)
        gm.graph = solver.solve()
        peak_mem, step_time = bench(gm,
                                    torch.nn.CrossEntropyLoss(),
                                    partial(data_gen_resnet, batch_size=batch_size, shape=(3, 224, 224)),
                                    num_steps=5)
        peak_mems.append(peak_mem)
        through_puts.append(batch_size / step_time * 1.0e3)
        gm.graph = deepcopy(raw_graph)

    # print results
    print("===============benchmark summary================")
    for batch_size, peak_mem, through_put in zip(batch_sizes, peak_mems, through_puts):
        print(f'batch_size: {int(batch_size)}, peak memory: {peak_mem:.3f} MB, throughput: {through_put:.3f} images/s')


def auto_activation_checkpoint_batchsize_benchmark():
    world_size = 1
    run_func_module = partial(_benchmark, world_size=world_size, port=free_port())
    mp.spawn(run_func_module, nprocs=world_size)


if __name__ == "__main__":
    auto_activation_checkpoint_batchsize_benchmark()
from colossalai.amp import AMP_TYPE # hyper-parameters TRAIN_ITERS = 10 DECAY_ITERS = 4 WARMUP_FRACTION = 0.01 GLOBAL_BATCH_SIZE = 32 # dp world size * sentences per GPU EVAL_ITERS = 10 EVAL_INTERVAL = 10 LR = 0.0001 MIN_LR = 1e-05 WEIGHT_DECAY = 0.01 SEQ_LENGTH = 128 # BERT config DEPTH = 4 NUM_ATTENTION_HEADS = 4 HIDDEN_SIZE = 128 # model config ADD_BINARY_HEAD = False # random seed SEED = 1234 # pipeline config # only enabled when pipeline > 1 NUM_MICRO_BATCHES = 4 # colossalai config parallel = dict(pipeline=1, tensor=dict(size=2, mode='sequence')) fp16 = dict(mode=AMP_TYPE.NAIVE, verbose=True) gradient_handler = [dict(type='SequenceParallelGradientHandler')]
import argparse import torch from data.bert_helper import SequenceParallelDataIterator, get_batch_for_sequence_parallel from data.dummy_dataloader import DummyDataloader from loss_func.bert_loss import BertLoss from lr_scheduler import AnnealingLR from model.bert import BertForPretrain, build_pipeline_bert import colossalai from colossalai.amp import AMP_TYPE from colossalai.context.parallel_mode import ParallelMode from colossalai.core import global_context as gpc from colossalai.engine.schedule import PipelineSchedule from colossalai.kernel import LayerNorm from colossalai.logging import get_dist_logger from colossalai.nn.optimizer import FusedAdam from colossalai.utils import MultiTimer, is_using_pp def process_batch_data(batch_data): tokens, types, sentence_order, loss_mask, lm_labels, padding_mask = batch_data if gpc.is_first_rank(ParallelMode.PIPELINE): data = dict(input_ids=tokens, attention_masks=padding_mask, tokentype_ids=types, lm_labels=lm_labels) else: data = dict(attention_masks=padding_mask, tokentype_ids=types, lm_labels=lm_labels) label = dict(loss_mask=loss_mask, sentence_order=sentence_order) return data, label def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-s', '--synthetic', action="store_true", help="whether use synthetic data") return parser.parse_args() def pipeline_data_process_func(stage_output, micro_batch_data): tokens, types, sentence_order, loss_mask, lm_labels, padding_mask = micro_batch_data if gpc.is_first_rank(ParallelMode.PIPELINE): data = (tokens, padding_mask, types, lm_labels) label = (loss_mask, sentence_order) else: data = (stage_output, padding_mask, types, lm_labels) label = (loss_mask, sentence_order) return data, label def main(): # initialize args = parse_args() colossalai.launch_from_torch(config='./config.py', seed=1234, backend='nccl') logger = get_dist_logger() # build synthetic dataloader BATCH_SIZE_PER_GPUS = gpc.config.GLOBAL_BATCH_SIZE // gpc.get_world_size(ParallelMode.DATA) VOCAB_SIZE = 30528 trainloader = DummyDataloader(batch_size=BATCH_SIZE_PER_GPUS, vocab_size=VOCAB_SIZE, seq_length=gpc.config.SEQ_LENGTH) validloader = DummyDataloader(batch_size=BATCH_SIZE_PER_GPUS, vocab_size=VOCAB_SIZE, seq_length=gpc.config.SEQ_LENGTH) logger.info("Dataloaders are built", ranks=[0]) # build model if hasattr(gpc.config, 'fp16') and gpc.config.fp16.get('mode') == AMP_TYPE.NAIVE: is_naive_fp16 = True else: is_naive_fp16 = False use_pipeline = is_using_pp() kwargs = dict(vocab_size=VOCAB_SIZE, hidden_size=gpc.config.HIDDEN_SIZE, max_sequence_length=gpc.config.SEQ_LENGTH, num_attention_heads=gpc.config.NUM_ATTENTION_HEADS, convert_fp16_to_fp32_in_softmax=True, is_naive_fp16=is_naive_fp16, add_binary_head=gpc.config.ADD_BINARY_HEAD) if use_pipeline: model = build_pipeline_bert(num_layers=gpc.config.DEPTH, num_chunks=1, **kwargs) else: model = BertForPretrain(num_layers=gpc.config.DEPTH, **kwargs) model = model.half() model.reset_parameters() logger.info(f"Model is built with softmax in fp32 = {is_naive_fp16}", ranks=[0]) total_numel = 0 for p in model.parameters(): total_numel += p.numel() logger.info(f"This model has {total_numel} parameters") # build criterion criterion = BertLoss() logger.info("Criterion is built", ranks=[0]) # layernorm and bias has no weight decay weight_decay_params = {'params': []} no_weight_decay_params = {'params': [], 'weight_decay': 0.0} for module_ in model.modules(): if isinstance(module_, LayerNorm): no_weight_decay_params['params'].extend([p for p in list(module_._parameters.values()) if p is 
not None]) else: weight_decay_params['params'].extend( [p for n, p in list(module_._parameters.items()) if p is not None and n != 'bias']) no_weight_decay_params['params'].extend( [p for n, p in list(module_._parameters.items()) if p is not None and n == 'bias']) logger.info( f"without weight decay param: {len(no_weight_decay_params['params'])}, with weight decay param: {len(weight_decay_params['params'])}" ) # optimizer optimizer = FusedAdam((weight_decay_params, no_weight_decay_params), lr=gpc.config.LR, weight_decay=gpc.config.WEIGHT_DECAY) logger.info("Optimizer is built", ranks=[0]) # lr scheduler # follow Megatron-LM setting warmup_steps = int(gpc.config.DECAY_ITERS * gpc.config.WARMUP_FRACTION) lr_scheduler = AnnealingLR(optimizer=optimizer, max_lr=gpc.config.LR, min_lr=gpc.config.MIN_LR, warmup_steps=warmup_steps, decay_steps=gpc.config.DECAY_ITERS, decay_style='linear') logger.info(f"LR Scheduler is built with {warmup_steps} warmup steps and {gpc.config.DECAY_ITERS} decay steps") # # init engine, *dummy = colossalai.initialize(model, optimizer, criterion, verbose=True) # build timer timer = MultiTimer() skip_iters = 0 # build loss tracker accumulated_train_loss = torch.zeros(1, dtype=torch.float32).cuda() accumulated_eval_loss = torch.zeros(1, dtype=torch.float32).cuda() # build data iters for pipeline parallel if use_pipeline: train_data_iter = SequenceParallelDataIterator(trainloader) valid_data_iter = SequenceParallelDataIterator(validloader) engine.schedule.data_process_func = pipeline_data_process_func logger.info("start training") for step in range(1, gpc.config.TRAIN_ITERS + 1): timer.start('train-iterations') engine.train() if use_pipeline: engine.zero_grad() _, _, train_loss = engine.execute_schedule(train_data_iter, return_output_label=False) engine.step() else: tokens, types, sentence_order, loss_mask, lm_labels, padding_mask = get_batch_for_sequence_parallel( trainloader) engine.zero_grad() lm_loss, sop_output = engine(tokens, padding_mask, types, lm_labels) train_loss = engine.criterion(lm_loss, sop_output, loss_mask, sentence_order) engine.backward(train_loss) engine.step() timer.stop('train-iterations', keep_in_history=True) if not gpc.is_initialized(ParallelMode.PIPELINE) or gpc.is_last_rank(ParallelMode.PIPELINE): accumulated_train_loss += train_loss lr_scheduler.step() if step % gpc.config.EVAL_INTERVAL == 0: engine.eval() for j in range(gpc.config.EVAL_ITERS): with torch.no_grad(): if use_pipeline: _, _, eval_loss = engine.execute_schedule(valid_data_iter, forward_only=True, return_output_label=False) else: tokens, types, sentence_order, loss_mask, lm_labels, padding_mask = get_batch_for_sequence_parallel( validloader) lm_loss, sop_output = engine(tokens, padding_mask, types, lm_labels) eval_loss = engine.criterion(lm_loss, sop_output, loss_mask, sentence_order) if not gpc.is_initialized(ParallelMode.PIPELINE) or gpc.is_last_rank(ParallelMode.PIPELINE): accumulated_eval_loss += eval_loss if not gpc.is_initialized(ParallelMode.PIPELINE) or gpc.is_last_rank(ParallelMode.PIPELINE): accumulated_eval_loss /= gpc.config.EVAL_ITERS accumulated_train_loss /= gpc.config.EVAL_INTERVAL timer_string = [] for n, t in timer: timer_string.append(f"{n}: {t.get_history_mean()*1000:.5f}") timer_string = ' | '.join(timer_string) lr = list(engine.optimizer.param_groups)[0]['lr'] loss_scale = engine.optimizer.optim.loss_scale.item() if gpc.is_initialized(ParallelMode.PIPELINE): ranks = [gpc.get_ranks_in_group(ParallelMode.PIPELINE)[-1]] else: ranks = [0] logger.info(f'Step {step} / 
{gpc.config.TRAIN_ITERS} | Train Loss: {accumulated_train_loss.item():.5g} ' + f'| Eval Loss: {accumulated_eval_loss.item():.5g} ' + f'| Loss Scale: {loss_scale}' + f"| Learning rate: {lr} | " + timer_string, ranks=ranks) for n, t in timer: t.reset() accumulated_eval_loss.zero_() accumulated_train_loss.zero_() if __name__ == '__main__': main()
from .annealing_lr import AnnealingLR
# coding=utf-8 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Learning rate decay functions.""" import math class AnnealingLR(object): """Anneals the learning rate.""" def __init__(self, optimizer, max_lr, min_lr, warmup_steps, decay_steps, decay_style, use_checkpoint_lr_scheduler=True, override_lr_scheduler=False): # Class values. self.optimizer = optimizer self.max_lr = float(max_lr) self.min_lr = min_lr assert self.min_lr >= 0.0 assert self.max_lr >= self.min_lr self.warmup_steps = warmup_steps self.num_steps = 0 self.decay_steps = decay_steps assert self.decay_steps > 0 assert self.warmup_steps < self.decay_steps self.decay_style = decay_style self.override_lr_scheduler = override_lr_scheduler self.use_checkpoint_lr_scheduler = use_checkpoint_lr_scheduler if self.override_lr_scheduler: assert not self.use_checkpoint_lr_scheduler, 'both override and '\ 'use-checkpoint are set.' # Set the learning rate self.step(0) def get_lr(self): """Learning rate decay functions from: https://openreview.net/pdf?id=BJYwwY9ll pg. 4""" # Use linear warmup for the initial part. if self.warmup_steps > 0 and self.num_steps <= self.warmup_steps: return self.max_lr * float(self.num_steps) / \ float(self.warmup_steps) # If the learning rate is constant, just return the initial value. if self.decay_style == 'constant': return self.max_lr # For any steps larger than `self.decay_steps`, use `self.min_lr`. if self.num_steps > self.decay_steps: return self.min_lr # If we are done with the warmup period, use the decay style. 
num_steps_ = self.num_steps - self.warmup_steps decay_steps_ = self.decay_steps - self.warmup_steps decay_ratio = float(num_steps_) / float(decay_steps_) assert decay_ratio >= 0.0 assert decay_ratio <= 1.0 delta_lr = self.max_lr - self.min_lr if self.decay_style == 'linear': coeff = (1.0 - decay_ratio) elif self.decay_style == 'cosine': coeff = 0.5 * (math.cos(math.pi * decay_ratio) + 1.0) else: raise Exception('{} decay style is not supported.'.format( self.decay_style)) return self.min_lr + coeff * delta_lr def step(self, increment=1): """Set lr for all parameters groups.""" self.num_steps += increment new_lr = self.get_lr() for group in self.optimizer.param_groups: group['lr'] = new_lr def state_dict(self): state_dict = { 'max_lr': self.max_lr, 'warmup_steps': self.warmup_steps, 'num_steps': self.num_steps, 'decay_style': self.decay_style, 'decay_steps': self.decay_steps, 'min_lr': self.min_lr } return state_dict def _check_and_set(self, cls_value, sd_value, name): """Auxiliary function for checking the values in the checkpoint and setting them.""" if self.override_lr_scheduler: return cls_value if not self.use_checkpoint_lr_scheduler: assert cls_value == sd_value, \ f'AnnealingLR: class input value {cls_value} and checkpoint' \ f'value {sd_value} for {name} do not match' return sd_value def load_state_dict(self, sd): if 'start_lr' in sd: max_lr_ = sd['start_lr'] else: max_lr_ = sd['max_lr'] self.max_lr = self._check_and_set(self.max_lr, max_lr_, 'learning rate') self.min_lr = self._check_and_set(self.min_lr, sd['min_lr'], 'minimum learning rate') if 'warmup_iter' in sd: warmup_steps_ = sd['warmup_iter'] else: warmup_steps_ = sd['warmup_steps'] self.warmup_steps = self._check_and_set(self.warmup_steps, warmup_steps_, 'warmup iterations') if 'end_iter' in sd: decay_steps_ = sd['end_iter'] else: decay_steps_ = sd['decay_steps'] self.decay_steps = self._check_and_set(self.decay_steps, decay_steps_, 'total number of iterations') self.decay_style = self._check_and_set(self.decay_style, sd['decay_style'], 'decay style') if 'num_iters' in sd: num_steps = sd['num_iters'] else: num_steps = sd['num_steps'] self.step(increment=num_steps)
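# --- Hedged usage sketch (not part of the original file) ---
# A dummy SGD optimizer is enough to see the linear warmup followed by the
# chosen decay; the hyper-parameter values below are arbitrary.
if __name__ == '__main__':
    import torch
    opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.0)
    scheduler = AnnealingLR(opt, max_lr=1e-4, min_lr=1e-5, warmup_steps=10,
                            decay_steps=100, decay_style='linear')
    for _ in range(100):
        scheduler.step()
    print(scheduler.num_steps, opt.param_groups[0]['lr'])    # after 100 steps the lr reaches min_lr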
from colossalai.context.parallel_mode import ParallelMode import torch from torch.cuda.amp import custom_bwd, custom_fwd class _VocabCrossEntropy(torch.autograd.Function): @staticmethod @custom_fwd def forward(ctx, vocab_parallel_logits, target): # Maximum value along vocab dimension across all GPUs. logits_max = torch.max(vocab_parallel_logits, dim=-1)[0] # Subtract the maximum value. vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1)) # Create a mask of valid vocab ids (1 means it needs to be masked). target_mask = target < 0 masked_target = target.clone() masked_target[target_mask] = 0 # Get predicted-logits = logits[target]. # For Simplicity, we convert logits to a 2-D tensor with size # [*, partition-vocab-size] and target to a 1-D tensor of size [*]. logits_2d = vocab_parallel_logits.view(-1, vocab_parallel_logits.size(-1)) masked_target_1d = masked_target.view(-1) arange_1d = torch.arange(start=0, end=logits_2d.size()[0], device=logits_2d.device) predicted_logits_1d = logits_2d[arange_1d, masked_target_1d] predicted_logits_1d = predicted_logits_1d.clone().contiguous() predicted_logits = predicted_logits_1d.view_as(target) predicted_logits[target_mask] = 0.0 # Sum of exponential of logits along vocab dimension across all GPUs. exp_logits = vocab_parallel_logits torch.exp(vocab_parallel_logits, out=exp_logits) sum_exp_logits = exp_logits.sum(dim=-1) # Loss = log(sum(exp(logits))) - predicted-logit. loss = torch.log(sum_exp_logits) - predicted_logits # Store softmax, target-mask and masked-target for backward pass. exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1)) ctx.save_for_backward(exp_logits, target_mask, masked_target_1d) return loss @staticmethod @custom_bwd def backward(ctx, grad_output): # Retreive tensors from the forward path. softmax, target_mask, masked_target_1d = ctx.saved_tensors # All the inputs have softmax as their gradient. grad_input = softmax # For simplicity, work with the 2D gradient. partition_vocab_size = softmax.size()[-1] grad_2d = grad_input.view(-1, partition_vocab_size) # Add the gradient from matching classes. arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device) grad_2d[arange_1d, masked_target_1d] -= ( 1.0 - target_mask.view(-1).float()) # Finally elementwise multiplication with the output gradients. grad_input.mul_(grad_output.unsqueeze(dim=-1)) return grad_input, None def vocab_cross_entropy(vocab_logits, target): """helper function for the cross entropy.""" return _VocabCrossEntropy.apply(vocab_logits, target)
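# --- Hedged sanity-check sketch (not part of the original file) ---
# With a single partition holding the whole vocabulary and no masked targets,
# the fused loss should match the per-token cross entropy computed directly.
# Note the clone: the custom forward normalizes its logits input in place.
if __name__ == '__main__':
    logits = torch.randn(4, 8, 50)
    targets = torch.randint(0, 50, (4, 8))
    fused = vocab_cross_entropy(logits.clone(), targets)
    reference = torch.nn.functional.cross_entropy(logits.view(-1, 50), targets.view(-1),
                                                  reduction='none').view(4, 8)
    print(torch.allclose(fused, reference, atol=1e-5))    # expected: True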
import torch import torch.nn as nn from colossalai.core import global_context as gpc from colossalai.context import ParallelMode from colossalai.logging import get_dist_logger import torch.nn.functional as F import torch.distributed as dist from .cross_entropy import vocab_cross_entropy class BertLoss(nn.Module): def forward(self, lm_loss, sop_logits, loss_mask, sentence_order): lm_loss_ = lm_loss.float() loss_mask = loss_mask.float() loss_mask_sum = loss_mask.sum() lm_loss = torch.sum( lm_loss_.view(-1) * loss_mask.reshape(-1)) lm_loss /= loss_mask_sum torch.distributed.all_reduce( lm_loss, group=gpc.get_group(ParallelMode.SEQUENCE) ) if sop_logits is not None: sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), sentence_order.view(-1), ignore_index=-1) sop_loss = sop_loss.float() loss = lm_loss + sop_loss * gpc.get_world_size(ParallelMode.SEQUENCE) else: sop_loss = None loss = lm_loss return loss
import torch


def ensure_divisibility(numerator, denominator):
    """Ensure that numerator is divisible by the denominator."""
    assert numerator % denominator == 0, '{} is not divisible by {}'.format(
        numerator, denominator)


def divide(numerator, denominator):
    """Ensure that numerator is divisible by the denominator and return
    the division value."""
    ensure_divisibility(numerator, denominator)
    return numerator // denominator


def split_tensor_along_last_dim(tensor, num_partitions, contiguous_split_chunks=False):
    """Split a tensor along its last dimension.
    Arguments:
        tensor: input tensor.
        num_partitions: number of partitions to split the tensor
        contiguous_split_chunks: If True, make each chunk contiguous in memory.
    """
    # Get the size and dimension.
    last_dim = tensor.dim() - 1
    last_dim_size = divide(tensor.size()[last_dim], num_partitions)
    # Split.
    tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
    # Note: torch.split does not create contiguous tensors by default.
    if contiguous_split_chunks:
        return tuple(chunk.contiguous() for chunk in tensor_list)

    return tensor_list


class VocabUtility:
    """Split the vocabulary into `world_size` chunks and return the first
    and last index of the vocabulary belonging to the `rank` partition.
    Note that indices are in [first, last)."""

    @staticmethod
    def vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank, world_size):
        index_f = rank * per_partition_vocab_size
        index_l = index_f + per_partition_vocab_size
        return index_f, index_l

    @staticmethod
    def vocab_range_from_global_vocab_size(global_vocab_size, rank, world_size):
        per_partition_vocab_size = divide(global_vocab_size, world_size)
        return VocabUtility.vocab_range_from_per_partition_vocab_size(
            per_partition_vocab_size, rank, world_size)
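# --- Hedged usage sketch (not part of the original file) ---
# Illustrative sizes: split a (2, 12) tensor into three last-dim chunks and
# query the vocabulary range owned by rank 1 out of 4 ranks.
if __name__ == '__main__':
    chunks = split_tensor_along_last_dim(torch.randn(2, 12), num_partitions=3)
    print([tuple(c.shape) for c in chunks])    # [(2, 4), (2, 4), (2, 4)]
    print(VocabUtility.vocab_range_from_global_vocab_size(1000, rank=1, world_size=4))    # (250, 500)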
from colossalai.context.parallel_mode import ParallelMode import torch import torch.nn as nn import inspect from .layers import Embedding, BertLayer, BertDualHead, PreProcessor, VocabEmbedding from .layers.init_method import init_normal, output_init_normal from colossalai.core import global_context as gpc from colossalai.context import ParallelMode from colossalai.kernel import LayerNorm from colossalai.nn.layer.wrapper import PipelineSharedModuleWrapper from colossalai.logging import get_dist_logger from colossalai.pipeline.utils import partition_uniform class BertForPretrain(nn.Module): def __init__(self, vocab_size, hidden_size, max_sequence_length, num_attention_heads, num_layers, add_binary_head, is_naive_fp16, num_tokentypes=2, dropout_prob=0.1, mlp_ratio=4, init_std=0.02, convert_fp16_to_fp32_in_softmax=False, ): super().__init__() self.seq_parallel_size = gpc.get_world_size(ParallelMode.SEQUENCE) assert max_sequence_length % self.seq_parallel_size == 0, 'sequence length is not divisible by the sequence parallel size' self.sub_seq_length = max_sequence_length // self.seq_parallel_size self.init_std = init_std self.num_layers = num_layers if not add_binary_head: num_tokentypes = 0 self.preprocessor = PreProcessor(self.sub_seq_length) self.embedding = Embedding(hidden_size=hidden_size, vocab_size=vocab_size, max_sequence_length=max_sequence_length, embedding_dropout_prob=dropout_prob, num_tokentypes=num_tokentypes) self.bert_layers = nn.ModuleList() for i in range(num_layers): bert_layer = BertLayer(layer_number=i+1, hidden_size=hidden_size, num_attention_heads=num_attention_heads, attention_dropout=dropout_prob, mlp_ratio=mlp_ratio, hidden_dropout=dropout_prob, convert_fp16_to_fp32_in_softmax=convert_fp16_to_fp32_in_softmax, is_naive_fp16=is_naive_fp16 ) self.bert_layers.append(bert_layer) self.layer_norm = LayerNorm(hidden_size) self.head = BertDualHead(hidden_size, self.embedding.word_embedding_weight.size(0), add_binary_head=add_binary_head) self.reset_parameters() def _init_normal(self, tensor): init_normal(tensor, sigma=self.init_std) def _output_init_normal(self, tensor): output_init_normal(tensor, sigma=self.init_std, num_layers=self.num_layers) def reset_parameters(self): # initialize embedding self._init_normal(self.embedding.word_embedding_weight) self._init_normal(self.embedding.position_embeddings.weight) if self.embedding.tokentype_embeddings: self._init_normal(self.embedding.tokentype_embeddings.weight) # initialize bert layer for layer in self.bert_layers: # initialize self attention self._init_normal(layer.self_attention.query_key_value.weight) self._output_init_normal(layer.self_attention.dense.weight) self._init_normal(layer.mlp.dense_h_to_4h.weight) self._output_init_normal(layer.mlp.dense_4h_to_h.weight) # initializer head self._init_normal(self.head.lm_head.dense.weight) if self.head.binary_head is not None: self._init_normal(self.head.binary_head.pooler.dense.weight) self._init_normal(self.head.binary_head.dense.weight) def forward(self, input_ids, attention_masks, tokentype_ids, lm_labels): # inputs of the forward function # input_ids: [batch_size, sub_seq_len] # attention_mask: [batch_size, seq_len] # tokentype_ids: [batch_size, sub_seq_len] # outputs of preprocessor # pos_ids: [batch_size, sub_seq_len] # attention_masks: [batch_size, 1, sub_seq_len, seq_len] pos_ids, attention_masks = self.preprocessor(input_ids, attention_masks) hidden_states = self.embedding(input_ids, pos_ids, tokentype_ids) # hidden_states shape change: # [batch_size, sub_seq_len, 
hidden_size] -> [sub_seq_len, batch_size, hidden_size] hidden_states = hidden_states.transpose(0, 1).contiguous() for idx, layer in enumerate(self.bert_layers): hidden_states = layer(hidden_states, attention_masks) hidden_states = hidden_states.transpose(0, 1).contiguous() output = self.layer_norm(hidden_states) # hidden_states: [sub_seq_len, batch_size, hidden_size] # word_embedding: [vocab_size, hidden_size] return self.head(output, self.embedding.word_embedding_weight, lm_labels) class PipelineBertForPretrain(nn.Module): def __init__(self, vocab_size, hidden_size, max_sequence_length, num_attention_heads, num_layers, add_binary_head, is_naive_fp16, num_tokentypes=2, dropout_prob=0.1, mlp_ratio=4, init_std=0.02, convert_fp16_to_fp32_in_softmax=False, first_stage=True, last_stage=True, start_idx=None, end_idx=None): super().__init__() self.seq_parallel_size = gpc.get_world_size(ParallelMode.SEQUENCE) assert max_sequence_length % self.seq_parallel_size == 0, 'sequence length is not divisible by the sequence parallel size' self.sub_seq_length = max_sequence_length // self.seq_parallel_size self.init_std = init_std self.num_layers = num_layers if not add_binary_head: num_tokentypes = 0 self.first_stage = first_stage self.last_stage = last_stage self.preprocessor = PreProcessor(self.sub_seq_length) if self.first_stage: self.embedding = Embedding(hidden_size=hidden_size, vocab_size=vocab_size, max_sequence_length=max_sequence_length, embedding_dropout_prob=dropout_prob, num_tokentypes=num_tokentypes) # transformer layers self.bert_layers = nn.ModuleList() if start_idx is None and end_idx is None: start_idx = 0 end_idx = num_layers for i in range(start_idx, end_idx): bert_layer = BertLayer(layer_number=i+1, hidden_size=hidden_size, num_attention_heads=num_attention_heads, attention_dropout=dropout_prob, mlp_ratio=mlp_ratio, hidden_dropout=dropout_prob, convert_fp16_to_fp32_in_softmax=convert_fp16_to_fp32_in_softmax, is_naive_fp16=is_naive_fp16 ) self.bert_layers.append(bert_layer) if self.last_stage: self.word_embeddings = VocabEmbedding(vocab_size, hidden_size) self.layer_norm = LayerNorm(hidden_size) self.head = BertDualHead(hidden_size, vocab_size, add_binary_head=add_binary_head) self.reset_parameters() def _init_normal(self, tensor): init_normal(tensor, sigma=self.init_std) def _output_init_normal(self, tensor): output_init_normal(tensor, sigma=self.init_std, num_layers=self.num_layers) def reset_parameters(self): # initialize embedding if self.first_stage: self._init_normal(self.embedding.word_embedding_weight) self._init_normal(self.embedding.position_embeddings.weight) if self.embedding.tokentype_embeddings: self._init_normal(self.embedding.tokentype_embeddings.weight) # initialize bert layer for layer in self.bert_layers: # initialize self attention self._init_normal(layer.self_attention.query_key_value.weight) self._output_init_normal(layer.self_attention.dense.weight) self._init_normal(layer.mlp.dense_h_to_4h.weight) self._output_init_normal(layer.mlp.dense_4h_to_h.weight) # initializer head if self.last_stage: self._init_normal(self.head.lm_head.dense.weight) if self.head.binary_head is not None: self._init_normal(self.head.binary_head.pooler.dense.weight) self._init_normal(self.head.binary_head.dense.weight) def forward(self, input_ids, attention_masks, tokentype_ids, lm_labels): # inputs of the forward function # input_ids: [batch_size, sub_seq_len] # attention_mask: [batch_size, seq_len] # tokentype_ids: [batch_size, sub_seq_len] # outputs of preprocessor # pos_ids: [batch_size, 
sub_seq_len] # attention_masks: [batch_size, 1, sub_seq_len, seq_len] if self.first_stage: pos_ids, attention_masks = self.preprocessor(input_ids, attention_masks) else: _, attention_masks = self.preprocessor(None, attention_masks) if self.first_stage: hidden_states = self.embedding(input_ids, pos_ids, tokentype_ids) hidden_states = hidden_states.transpose(0, 1).contiguous() else: hidden_states = input_ids # hidden_states shape change: # [batch_size, sub_seq_len, hidden_size] -> [sub_seq_len, batch_size, hidden_size] for idx, layer in enumerate(self.bert_layers): hidden_states = layer(hidden_states, attention_masks) if self.last_stage: hidden_states = hidden_states.transpose(0, 1).contiguous() output = self.layer_norm(hidden_states) output = self.head(output, self.word_embeddings.weight, lm_labels) else: output = hidden_states # hidden_states: [sub_seq_len, batch_size, hidden_size] # word_embedding: [vocab_size, hidden_size] return output def _filter_kwargs(func, kwargs): sig = inspect.signature(func) return {k: v for k, v in kwargs.items() if k in sig.parameters} def build_pipeline_bert(num_layers, num_chunks, device=torch.device('cuda'), **kwargs): logger = get_dist_logger() pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE) pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE) rank = gpc.get_global_rank() wrapper = PipelineSharedModuleWrapper([0, pipeline_size - 1]) parts = partition_uniform(num_layers, pipeline_size, num_chunks)[pipeline_rank] models = [] for start, end in parts: kwargs['num_layers'] = num_layers kwargs['start_idx'] = start kwargs['end_idx'] = end kwargs['first_stage'] = start == 0 kwargs['last_stage'] = end == num_layers logger.info(f'Rank{rank} build layer {start}-{end}, {end-start}/{num_layers} layers') chunk = PipelineBertForPretrain(**_filter_kwargs(PipelineBertForPretrain.__init__, kwargs)).to(device) if start == 0: wrapper.register_module(chunk.embedding.word_embeddings) elif end == num_layers: wrapper.register_module(chunk.word_embeddings) models.append(chunk) if len(models) == 1: model = models[0] else: model = nn.ModuleList(models) return model
from colossalai.context.parallel_mode import ParallelMode
import torch
import torch.nn as nn
from colossalai.core import global_context as gpc


class PreProcessor(nn.Module):

    def __init__(self, sub_seq_length):
        super().__init__()
        self.sub_seq_length = sub_seq_length

    def bert_position_ids(self, token_ids):
        # Create position ids for the local sub-sequence.
        seq_length = token_ids.size(1)
        local_rank = gpc.get_local_rank(ParallelMode.SEQUENCE)
        position_ids = torch.arange(seq_length * local_rank,
                                    seq_length * (local_rank + 1),
                                    dtype=torch.long,
                                    device=token_ids.device)
        position_ids = position_ids.unsqueeze(0).expand_as(token_ids)

        return position_ids

    def bert_extended_attention_mask(self, attention_mask):
        local_rank = gpc.get_local_rank(ParallelMode.SEQUENCE)
        start_index = local_rank * self.sub_seq_length
        end_index = (local_rank + 1) * self.sub_seq_length

        # We create a 3D attention mask from a 2D tensor mask.
        # [b, 1, s]
        attention_mask_b1s = attention_mask.unsqueeze(1)
        # [b, s, 1]
        attention_mask_bs1 = attention_mask.unsqueeze(2)
        # [b, s, s] -> [b, s/D, s]: keep only the rows of the local sub-sequence.
        attention_mask_bss = attention_mask_b1s * attention_mask_bs1
        attention_mask_bss = attention_mask_bss[:, start_index:end_index, :]
        # [b, 1, s/D, s]
        extended_attention_mask = attention_mask_bss.unsqueeze(1)

        # Convert the attention mask to binary; True marks positions that must
        # be masked out by the attention kernel.
        extended_attention_mask = (extended_attention_mask < 0.5)

        return extended_attention_mask

    def forward(self, input_ids=None, attention_mask=None):
        if attention_mask is not None:
            extended_attention_mask = self.bert_extended_attention_mask(attention_mask)
        else:
            extended_attention_mask = None

        if input_ids is not None:
            position_ids = self.bert_position_ids(input_ids)
        else:
            position_ids = None

        return position_ids, extended_attention_mask
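# --- Hedged shape sketch (not part of the original file) ----------------------
# PreProcessor needs an initialized ColossalAI sequence-parallel context, so
# this standalone snippet only re-traces the mask shape math with the local
# rank fixed to 0 and a sub-sequence length of 4 (made-up values).
if __name__ == "__main__":
    batch_size, seq_len, sub_seq_len, local_rank = 2, 8, 4, 0
    attention_mask = torch.ones(batch_size, seq_len)
    mask_bss = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2)   # [b, s, s]
    mask_bss = mask_bss[:, local_rank * sub_seq_len:(local_rank + 1) * sub_seq_len, :]
    extended = (mask_bss.unsqueeze(1) < 0.5)   # [b, 1, s/D, s], True = masked out
    print(extended.shape)                      # torch.Size([2, 1, 4, 8])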
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init


class VocabEmbedding(torch.nn.Module):

    def __init__(self, num_embeddings, embedding_dim):
        super(VocabEmbedding, self).__init__()
        # Keep the input dimensions.
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.padding_idx = None
        self.max_norm = None
        self.norm_type = 2.
        self.scale_grad_by_freq = False
        self.sparse = False
        self._weight = None

        # Allocate weights and initialize.
        self.weight = nn.Parameter(torch.empty(
            self.num_embeddings, self.embedding_dim))
        init.xavier_uniform_(self.weight)

    def forward(self, hidden_state):
        output = F.embedding(hidden_state, self.weight,
                             self.padding_idx, self.max_norm,
                             self.norm_type, self.scale_grad_by_freq,
                             self.sparse)
        return output

    def __repr__(self):
        return f'VocabEmbedding(num_embeddings={self.num_embeddings}, ' \
               f'embedding_dim={self.embedding_dim})'


class Embedding(nn.Module):
    """Language model embeddings.

    Arguments:
        hidden_size: hidden size
        vocab_size: vocabulary size
        max_sequence_length: maximum size of the sequence, used for the
            positional embedding
        embedding_dropout_prob: dropout probability for embeddings
        num_tokentypes: size of the token-type embeddings; a value of 0
            disables this embedding
    """

    def __init__(self,
                 hidden_size,
                 vocab_size,
                 max_sequence_length,
                 embedding_dropout_prob,
                 num_tokentypes):
        super(Embedding, self).__init__()

        self.hidden_size = hidden_size
        self.num_tokentypes = num_tokentypes

        self.word_embeddings = VocabEmbedding(vocab_size, self.hidden_size)

        # Position embedding (serial).
        self.position_embeddings = torch.nn.Embedding(
            max_sequence_length, self.hidden_size)

        # Token type embedding.
        # Add this as an optional field that can be added through
        # a method call so we can load a pretrained model without
        # token types and add them as needed.
        if self.num_tokentypes > 0:
            self.tokentype_embeddings = torch.nn.Embedding(self.num_tokentypes,
                                                           self.hidden_size)
        else:
            self.tokentype_embeddings = None

        # Embeddings dropout
        self.embedding_dropout = torch.nn.Dropout(embedding_dropout_prob)

    @property
    def word_embedding_weight(self):
        return self.word_embeddings.weight

    def forward(self, input_ids, position_ids, tokentype_ids=None):
        # Embeddings.
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        embeddings = words_embeddings + position_embeddings
        if tokentype_ids is not None and self.tokentype_embeddings is not None:
            embeddings = embeddings + self.tokentype_embeddings(tokentype_ids)

        # Dropout.
        embeddings = self.embedding_dropout(embeddings)

        return embeddings
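# --- Hedged usage sketch (not part of the original file) ----------------------
# Builds the embedding module with small made-up sizes and runs a forward pass;
# in the real example the sizes come from the training config.
if __name__ == "__main__":
    emb = Embedding(hidden_size=64, vocab_size=100, max_sequence_length=32,
                    embedding_dropout_prob=0.1, num_tokentypes=2)
    input_ids = torch.randint(0, 100, (2, 16))
    position_ids = torch.arange(16).unsqueeze(0).expand_as(input_ids)
    tokentype_ids = torch.zeros_like(input_ids)
    out = emb(input_ids, position_ids, tokentype_ids)
    print(out.shape)                            # torch.Size([2, 16, 64])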
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
import torch.nn.init as init


class Linear(nn.Module):
    """Linear layer.

    The linear layer is defined as Y = XA + b. Unlike Megatron's
    column-parallel version, the weight here is kept whole (no tensor
    parallelism).

    Arguments:
        input_size: first dimension of matrix A.
        output_size: second dimension of matrix A.
        bias: if True, add a bias term (always initialized to zero).
        skip_bias_add: enables performance optimizations where the bias can
            be fused with other elementwise operations; when True, the bias
            is not added here but returned alongside the output instead.
    """

    def __init__(self,
                 input_size,
                 output_size,
                 bias=True,
                 skip_bias_add=False):
        super(Linear, self).__init__()

        # Keep input parameters
        self.input_size = input_size
        self.output_size = output_size
        self.skip_bias_add = skip_bias_add

        self.weight = Parameter(torch.empty(self.output_size, self.input_size))
        init.normal_(self.weight)

        if bias:
            self.bias = Parameter(torch.empty(self.output_size))
            # Always initialize bias to zero.
            with torch.no_grad():
                self.bias.zero_()
        else:
            self.register_parameter('bias', None)

    def forward(self, input_):
        # Matrix multiply.
        bias = self.bias if not self.skip_bias_add else None
        output = F.linear(input_, self.weight, bias)

        if self.skip_bias_add:
            return output, self.bias
        else:
            return output

    def __repr__(self):
        return f'Linear(in_features={self.input_size}, out_features={self.output_size}, ' + \
               f'bias={self.bias is not None}, skip_bias_add={self.skip_bias_add})'
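# --- Hedged usage sketch (not part of the original file) ----------------------
# Shows the two call conventions of the layer above; sizes are made up.
if __name__ == "__main__":
    layer = Linear(16, 32, skip_bias_add=True)
    x = torch.randn(4, 16)
    out, bias = layer(x)                # bias is returned, not yet added
    fused = out + bias                  # the caller fuses the bias add elsewhere
    plain = Linear(16, 32)(x)           # regular behaviour: bias already added
    print(out.shape, fused.shape, plain.shape)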
import torch import torch.nn as nn from colossalai.nn.layer.parallel_sequence import TransformerSelfAttentionRing from colossalai.kernel.jit import bias_dropout_add_fused_train, bias_dropout_add_fused_inference from colossalai.kernel.cuda_native import LayerNorm from .mlp import TransformerMLP from .dropout import get_bias_dropout_add def attention_mask_func(attention_scores, attention_mask): attention_scores.masked_fill_(attention_mask, -10000.0) return attention_scores class BertLayer(nn.Module): """A single transformer layer. Transformer layer takes input with size [b, s, h] and returns an output of the same size. """ def __init__(self, layer_number, hidden_size, num_attention_heads, attention_dropout, mlp_ratio, hidden_dropout, is_naive_fp16, apply_residual_connection_post_layernorm=False, fp32_residual_connection=False, bias_dropout_fusion: bool = True, convert_fp16_to_fp32_in_softmax: bool = False): super().__init__() self.layer_number = layer_number self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm self.fp32_residual_connection = fp32_residual_connection # Layernorm on the input data. self.input_layernorm = LayerNorm(hidden_size) # Self attention. self.self_attention = TransformerSelfAttentionRing( hidden_size=hidden_size, num_attention_heads=num_attention_heads, attention_dropout=attention_dropout, attention_mask_func=attention_mask_func, layer_number=layer_number, apply_query_key_layer_scaling=True, convert_fp16_to_fp32_in_softmax=convert_fp16_to_fp32_in_softmax, fp16=is_naive_fp16 ) self.hidden_dropout = hidden_dropout self.bias_dropout_fusion = bias_dropout_fusion # Layernorm on the attention output self.post_attention_layernorm = LayerNorm(hidden_size) self.mlp = TransformerMLP(hidden_size=hidden_size, mlp_ratio=mlp_ratio) def forward(self, hidden_states, attention_mask): # hidden_states: [batch_size, sub_seq_len, hidden_size] # attention_mask: [batch_size, 1, sub_seq_len, seq_len] # Layer norm at the beginning of the transformer layer. layernorm_output = self.input_layernorm(hidden_states) # Self attention. attention_output, attention_bias = self.self_attention(layernorm_output, attention_mask) # Residual connection. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states # jit scripting for a nn.module (with dropout) is not # trigerring the fusion kernel. For now, we use two # different nn.functional routines to account for varying # dropout semantics during training and inference phases. if self.bias_dropout_fusion: if self.training: bias_dropout_add_func = bias_dropout_add_fused_train else: bias_dropout_add_func = bias_dropout_add_fused_inference else: bias_dropout_add_func = get_bias_dropout_add(self.training) # re-enable torch grad to enable fused optimization. with torch.enable_grad(): layernorm_input = bias_dropout_add_func( attention_output, attention_bias.expand_as(residual), residual, self.hidden_dropout) # Layer norm post the self attention. layernorm_output = self.post_attention_layernorm(layernorm_input) # MLP. mlp_output, mlp_bias = self.mlp(layernorm_output) # Second residual connection. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = layernorm_input # re-enable torch grad to enable fused optimization. with torch.enable_grad(): output = bias_dropout_add_func( mlp_output, mlp_bias.expand_as(residual), residual, self.hidden_dropout) return output
from .embedding import VocabEmbedding, Embedding
from .bert_layer import BertLayer
from .head import BertDualHead
from .preprocess import PreProcessor
import torch
import torch.nn as nn
import torch.nn.functional as F

from .linear import Linear
from colossalai.kernel.jit import bias_gelu_impl


class TransformerMLP(nn.Module):
    """MLP.

    The MLP takes an input with hidden size h, projects it to mlp_ratio * h,
    applies a GeLU non-linearity, and projects the state back to hidden size h.
    The bias of the output projection is returned separately so that dropout
    and the residual add can be fused in the transformer layer.
    """

    def __init__(self, hidden_size, mlp_ratio, fuse_gelu=True):
        super(TransformerMLP, self).__init__()

        # Project to mlp_ratio * h.
        self.dense_h_to_4h = Linear(hidden_size,
                                    int(hidden_size * mlp_ratio),
                                    skip_bias_add=True)

        self.bias_gelu_fusion = fuse_gelu
        self.activation_func = F.gelu

        # Project back to h.
        self.dense_4h_to_h = Linear(int(hidden_size * mlp_ratio),
                                    hidden_size,
                                    skip_bias_add=True)

    def forward(self, hidden_states):
        # Hidden states should be in the shape of [s, b, h]: they are projected
        # to [s, b, mlp_ratio * h] and then back to [s, b, h].
        intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states)

        if self.bias_gelu_fusion:
            intermediate_parallel = \
                bias_gelu_impl(intermediate_parallel, bias_parallel)
        else:
            intermediate_parallel = \
                self.activation_func(intermediate_parallel + bias_parallel)

        # [s, b, h]
        output, output_bias = self.dense_4h_to_h(intermediate_parallel)
        return output, output_bias
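# --- Hedged usage sketch (not part of the original file) ----------------------
# Runs the MLP with the unfused GeLU path (fuse_gelu=False) so it does not
# depend on the JIT-fused kernel; sizes are made up.
if __name__ == "__main__":
    mlp = TransformerMLP(hidden_size=64, mlp_ratio=4, fuse_gelu=False)
    hidden = torch.randn(8, 2, 64)              # [s, b, h]
    out, out_bias = mlp(hidden)
    print(out.shape, out_bias.shape)            # [8, 2, 64] and [64]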
import torch
import math


def init_normal(tensor, sigma):
    """Init method based on N(0, sigma)."""
    torch.nn.init.normal_(tensor, mean=0.0, std=sigma)


def output_init_normal(tensor, sigma, num_layers):
    """Init method based on N(0, sigma / sqrt(2 * num_layers))."""
    std = sigma / math.sqrt(2.0 * num_layers)
    torch.nn.init.normal_(tensor, mean=0.0, std=std)
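# --- Hedged usage sketch (not part of the original file) ----------------------
# The output projections use a smaller std so that the residual-stream variance
# stays roughly constant as depth grows; sizes and sigma below are made up.
if __name__ == "__main__":
    w_in = torch.empty(64, 64)
    w_out = torch.empty(64, 64)
    init_normal(w_in, sigma=0.02)
    output_init_normal(w_out, sigma=0.02, num_layers=12)
    print(w_in.std().item(), w_out.std().item())   # ~0.02 vs ~0.02 / sqrt(24)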
import colossalai
import torch
import torch.nn as nn
import torch.nn.functional as F

from .pooler import Pooler
from .linear import Linear
from .embedding import VocabEmbedding

from colossalai.core import global_context as gpc
from colossalai.context import ParallelMode
from colossalai.kernel import LayerNorm
from loss_func.cross_entropy import vocab_cross_entropy


class BertLMHead(nn.Module):
    """Masked LM head for BERT.

    Arguments:
        vocab_size: vocabulary size
        hidden_size: hidden size
    """

    def __init__(self, vocab_size, hidden_size):
        super(BertLMHead, self).__init__()
        self.bias = torch.nn.Parameter(torch.zeros(vocab_size))

        self.dense = Linear(hidden_size, hidden_size)
        self.layernorm = LayerNorm(hidden_size)
        self.gelu = torch.nn.functional.gelu

    def forward(self, hidden_states, word_embeddings_weight, lm_labels):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.gelu(hidden_states)
        hidden_states = self.layernorm(hidden_states)

        # Project onto the (tied) word embedding matrix and compute the
        # per-token cross entropy against the masked-LM labels.
        output = F.linear(hidden_states, word_embeddings_weight, self.bias)
        lm_loss = vocab_cross_entropy(output, lm_labels)
        return lm_loss


class BertBinaryHead(nn.Module):

    def __init__(self, hidden_size):
        super().__init__()
        self.pooler = Pooler(hidden_size)
        self.dense = Linear(hidden_size, 2)

    def forward(self, hidden_states):
        # Only the first sequence-parallel rank holds the [CLS] position,
        # so only that rank computes the sentence-order logits.
        if gpc.get_local_rank(ParallelMode.SEQUENCE) == 0:
            output = self.pooler(hidden_states)
            output = self.dense(output)
        else:
            output = None
        return output


class BertDualHead(nn.Module):

    def __init__(self, hidden_size, vocab_size, add_binary_head):
        super().__init__()
        self.lm_head = BertLMHead(vocab_size, hidden_size)
        self.add_binary_head = add_binary_head
        if add_binary_head:
            self.binary_head = BertBinaryHead(hidden_size)
        else:
            self.binary_head = None

    def forward(self, hidden_states, word_embeddings_weight, lm_labels):
        if self.add_binary_head:
            binary_output = self.binary_head(hidden_states)
        else:
            binary_output = None
        lm_loss = self.lm_head(hidden_states, word_embeddings_weight, lm_labels)
        return lm_loss, binary_output
import torch


def bias_dropout_add(x, bias, residual, prob, training):
    # type: (Tensor, Tensor, Tensor, float, bool) -> Tensor
    out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
    out = residual + out
    return out


def get_bias_dropout_add(training):
    def _bias_dropout_add(x, bias, residual, prob):
        return bias_dropout_add(x, bias, residual, prob, training)
    return _bias_dropout_add
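# --- Hedged usage sketch (not part of the original file) ----------------------
# The closure returned by get_bias_dropout_add has the same call signature as
# the JIT-fused kernels used in bert_layer.py, so the two can be swapped
# freely. Shapes below are made up.
if __name__ == "__main__":
    fn = get_bias_dropout_add(training=True)
    x = torch.randn(8, 2, 64)          # e.g. attention output [s, b, h]
    bias = torch.zeros(8, 2, 64)       # bias already broadcast to x's shape
    residual = torch.randn(8, 2, 64)
    print(fn(x, bias, residual, 0.1).shape)     # torch.Size([8, 2, 64])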
import torch
import torch.nn as nn

from .linear import Linear


class Pooler(nn.Module):
    """Pooler layer.

    Pool the hidden state of a specific token (for example the start of the
    sequence) and apply a linear transformation followed by a tanh.

    Arguments:
        hidden_size: hidden size
    """

    def __init__(self, hidden_size):
        super(Pooler, self).__init__()
        self.dense = Linear(hidden_size, hidden_size)

    def forward(self, hidden_states, sequence_index=0):
        # hidden_states: [b, s, h]
        # sequence_index: index of the token to pool.
        pooled = hidden_states[:, sequence_index, :]
        pooled = self.dense(pooled)
        pooled = torch.tanh(pooled)
        return pooled
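# --- Hedged usage sketch (not part of the original file) ----------------------
# Pools the [CLS] position (index 0) of a made-up hidden-state batch.
if __name__ == "__main__":
    pooler = Pooler(hidden_size=64)
    hidden_states = torch.randn(2, 16, 64)      # [b, s, h]
    print(pooler(hidden_states).shape)          # torch.Size([2, 64])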
from colossalai.core import global_context as gpc from colossalai.context import ParallelMode import torch _MAX_DATA_DIM = 5 def _build_key_size_numel_dictionaries(keys, data): """Build the size on rank 0 and broadcast.""" max_dim = _MAX_DATA_DIM sizes = [0 for _ in range(max_dim) for _ in keys] # Pack the sizes on rank zero. if not gpc.is_initialized(ParallelMode.TENSOR) or gpc.get_local_rank(ParallelMode.TENSOR) == 0: offset = 0 for key in keys: assert data[key].dim() < max_dim, 'you should increase MAX_DATA_DIM' size = data[key].size() for i, s in enumerate(size): sizes[i + offset] = s offset += max_dim # Move to GPU and broadcast. sizes_cuda = torch.cuda.LongTensor(sizes) torch.distributed.broadcast(sizes_cuda, gpc.get_ranks_in_group(ParallelMode.TENSOR)[0], group=gpc.get_group(ParallelMode.TENSOR)) # Move back to cpu and unpack. sizes_cpu = sizes_cuda.cpu() key_size = {} key_numel = {} total_numel = 0 offset = 0 for key in keys: i = 0 size = [] numel = 1 while sizes_cpu[offset + i] > 0: this_size = sizes_cpu[offset + i] size.append(this_size) numel *= this_size i += 1 key_size[key] = size key_numel[key] = numel total_numel += numel offset += max_dim return key_size, key_numel, total_numel def broadcast_data(keys, data, datatype): """Broadcast data from rank zero of each model parallel group to the members of the same model parallel group. Arguments: keys: list of keys in the data dictionary to be broadcasted data: data dictionary of string keys and cpu tensor values. datatype: torch data type of all tensors in data associated with keys. """ # Build (key, size) and (key, number of elements) dictionaries along # with the total number of elements on all ranks. key_size, key_numel, total_numel = _build_key_size_numel_dictionaries(keys, data) # Pack on rank zero. if not gpc.is_initialized(ParallelMode.TENSOR) or gpc.get_local_rank(ParallelMode.TENSOR) == 0: # Check that all keys have the same data type. # Flatten the data associated with the keys flatten_data = torch.cat( [data[key].contiguous().view(-1) for key in keys], dim=0).cuda() else: flatten_data = torch.empty(total_numel, device=torch.cuda.current_device(), dtype=datatype) # Broadcast torch.distributed.broadcast(flatten_data, gpc.get_ranks_in_group(ParallelMode.TENSOR)[0], group=gpc.get_group(ParallelMode.TENSOR)) # Unpack output = {} offset = 0 for key in keys: size = key_size[key] numel = key_numel[key] output[key] = flatten_data.narrow(0, offset, numel).view(size) offset += numel return output def get_batch(data_iterator): """Build the batch.""" # Items and their type. keys = ['text', 'types', 'labels', 'is_random', 'loss_mask', 'padding_mask'] datatype = torch.int64 # Broadcast data. if data_iterator is not None: data = next(data_iterator) else: data = None data_b = broadcast_data(keys, data, datatype) # Unpack. tokens = data_b['text'].long() types = data_b['types'].long() sentence_order = data_b['is_random'].long() loss_mask = data_b['loss_mask'].float() lm_labels = data_b['labels'].long() padding_mask = data_b['padding_mask'].long() return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask def get_batch_for_sequence_parallel(data_iterator): """Build the batch.""" # Items and their type. keys = ['text', 'types', 'labels', 'is_random', 'loss_mask', 'padding_mask'] datatype = torch.int64 # Broadcast data. 
if data_iterator is not None: data = next(data_iterator) else: data = None # unpack data_b = broadcast_data(keys, data, datatype) # # get tensor parallel local rank global_rank = torch.distributed.get_rank() local_world_size = 1 if not gpc.is_initialized(ParallelMode.TENSOR) else gpc.get_world_size(ParallelMode.TENSOR) local_rank = global_rank % local_world_size seq_length = data_b['text'].size(1) sub_seq_length = seq_length // local_world_size sub_seq_start = local_rank * sub_seq_length sub_seq_end = (local_rank+1) * sub_seq_length # # # Unpack. tokens = data_b['text'][:, sub_seq_start:sub_seq_end].long() types = data_b['types'][:, sub_seq_start:sub_seq_end].long() sentence_order = data_b['is_random'].long() loss_mask = data_b['loss_mask'][:, sub_seq_start:sub_seq_end].float() lm_labels = data_b['labels'][:, sub_seq_start:sub_seq_end].long() padding_mask = data_b['padding_mask'].long() return tokens, types, sentence_order, loss_mask, lm_labels, padding_mask class SequenceParallelDataIterator: def __init__(self, data_iter): self.data_iter = data_iter def __iter__(self): return self.data_iter def __next__(self): return get_batch_for_sequence_parallel(self.data_iter)
import torch


class DummyDataloader():

    def __init__(self, batch_size, vocab_size, seq_length):
        self.batch_size = batch_size
        self.vocab_size = vocab_size
        self.seq_length = seq_length
        self.step = 0

    def generate(self):
        tokens = torch.randint(low=0, high=self.vocab_size,
                               size=(self.batch_size, self.seq_length))
        types = torch.randint(low=0, high=3,
                              size=(self.batch_size, self.seq_length))
        sentence_order = torch.randint(low=0, high=2, size=(self.batch_size,))
        loss_mask = torch.randint(low=0, high=2,
                                  size=(self.batch_size, self.seq_length))
        lm_labels = torch.randint(low=0, high=self.vocab_size,
                                  size=(self.batch_size, self.seq_length))
        padding_mask = torch.randint(low=0, high=2,
                                     size=(self.batch_size, self.seq_length))
        return dict(text=tokens,
                    types=types,
                    is_random=sentence_order,
                    loss_mask=loss_mask,
                    labels=lm_labels,
                    padding_mask=padding_mask)

    def __iter__(self):
        return self

    def __next__(self):
        return self.generate()
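# --- Hedged usage sketch (not part of the original file) ----------------------
# The dummy loader yields batches with the same keys as the real BERT dataset,
# which is handy for smoke-testing the training loop without data files; the
# sizes below are made up.
if __name__ == "__main__":
    loader = DummyDataloader(batch_size=4, vocab_size=30522, seq_length=128)
    batch = next(iter(loader))
    print({k: tuple(v.shape) for k, v in batch.items()})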
from colossalai.context.parallel_context import ParallelContext from colossalai.core import global_context as gpc from colossalai.logging import get_dist_logger from colossalai.context import ParallelMode from .datasets.data_samplers import build_pretraining_data_loader from .datasets.builder import build_train_valid_test_datasets import torch def cyclic_iter(iter): while True: for x in iter: yield x def build_train_valid_test_data_iterators(train_iters, global_batch_size, eval_interval, eval_iters, dataloader_type='single', **kwargs ): (train_dataloader, valid_dataloader, test_dataloader) = (None, None, None) logger = get_dist_logger() logger.info('> building train, validation, and test datasets ...', ranks=[0]) # Backward compatibility, assume fixed batch size. # if iteration > 0 and consumed_train_samples == 0: # assert train_samples is None, \ # 'only backward compatibility support for iteration-based training' # consumed_train_samples = iteration * global_batch_size # if iteration > 0 and consumed_valid_samples == 0: # if train_samples is None: # consumed_valid_samples = (iteration // eval_interval) * \ # eval_iters * global_batch_size # Data loader only on rank 0 of each model parallel group. if not gpc.is_initialized(ParallelMode.TENSOR) or gpc.get_local_rank(ParallelMode.TENSOR) == 0: # Number of train/valid/test samples. train_samples = train_iters * global_batch_size eval_iters_ = (train_iters // eval_interval + 1) * eval_iters test_iters = eval_iters train_val_test_num_samples = [train_samples, eval_iters_ * global_batch_size, test_iters * global_batch_size] logger.info(' > datasets target sizes (minimum size):') logger.info(' train: {}'.format(train_val_test_num_samples[0]), ranks=[0]) logger.info(' validation: {}'.format(train_val_test_num_samples[1]), ranks=[0]) logger.info(' test: {}'.format(train_val_test_num_samples[2]), ranks=[0]) # Build the datasets. train_ds, valid_ds, test_ds = build_train_valid_test_datasets( train_valid_test_num_samples=train_val_test_num_samples, **kwargs) # Build dataloaders. dp_size = gpc.get_world_size(ParallelMode.DATA) train_dataloader = build_pretraining_data_loader( train_ds, consumed_samples=0, micro_batch_size=global_batch_size//dp_size) valid_dataloader = build_pretraining_data_loader( valid_ds, consumed_samples=0, micro_batch_size=global_batch_size//dp_size) test_dataloader = build_pretraining_data_loader(test_ds, 0, micro_batch_size=global_batch_size//dp_size) # Flags to know if we need to do training/validation/testing. do_train = train_dataloader is not None and train_iters > 0 do_valid = valid_dataloader is not None and eval_iters > 0 do_test = test_dataloader is not None and eval_iters > 0 # Need to broadcast num_tokens and num_type_tokens. flags = torch.cuda.LongTensor( [int(do_train), int(do_valid), int(do_test)]) else: flags = torch.cuda.LongTensor([0, 0, 0]) # Broadcast num tokens. torch.distributed.broadcast(flags, gpc.get_ranks_in_group(ParallelMode.TENSOR)[0], group=gpc.get_group(ParallelMode.TENSOR)) # Build iterators. 
dl_type = dataloader_type assert dl_type in ['single', 'cyclic'] if train_dataloader is not None: train_data_iterator = iter(train_dataloader) if dl_type == 'single' \ else iter(cyclic_iter(train_dataloader)) else: train_data_iterator = None if valid_dataloader is not None: valid_data_iterator = iter(valid_dataloader) if dl_type == 'single' \ else iter(cyclic_iter(valid_dataloader)) else: valid_data_iterator = None if test_dataloader is not None: test_data_iterator = iter(test_dataloader) if dl_type == 'single' \ else iter(cyclic_iter(test_dataloader)) else: test_data_iterator = None return train_data_iterator, valid_data_iterator, test_data_iterator
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re import unicodedata import six def validate_case_matches_checkpoint(do_lower_case, init_checkpoint): """Checks whether the casing config is consistent with the checkpoint name.""" # The casing has to be passed in by the user and there is no explicit check # as to whether it matches the checkpoint. The casing information probably # should have been stored in the bert_config.json file, but it's not, so # we have to heuristically detect it to validate. if not init_checkpoint: return m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint) if m is None: return model_name = m.group(1) lower_models = [ "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12", "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12" ] cased_models = [ "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16", "multi_cased_L-12_H-768_A-12" ] is_bad_config = False if model_name in lower_models and not do_lower_case: is_bad_config = True actual_flag = "False" case_name = "lowercased" opposite_flag = "True" if model_name in cased_models and do_lower_case: is_bad_config = True actual_flag = "True" case_name = "cased" opposite_flag = "False" if is_bad_config: raise ValueError( "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. " "However, `%s` seems to be a %s model, so you " "should pass in `--do_lower_case=%s` so that the fine-tuning matches " "how the model was pre-training. If this error is wrong, please " "just comment out this check." % (actual_flag, init_checkpoint, model_name, case_name, opposite_flag)) def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text.decode("utf-8", "ignore") elif isinstance(text, unicode): return text else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def printable_text(text): """Returns text encoded in a way suitable for print or `tf.logging`.""" # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string. 
if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text elif isinstance(text, unicode): return text.encode("utf-8") else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() index = 0 with open(vocab_file, "r") as reader: while True: token = convert_to_unicode(reader.readline()) if not token: break token = token.strip() vocab[token] = index index += 1 return vocab def convert_by_vocab(vocab, items): """Converts a sequence of [tokens|ids] using the vocab.""" output = [] for item in items: output.append(vocab[item]) return output def convert_tokens_to_ids(vocab, tokens): return convert_by_vocab(vocab, tokens) def convert_ids_to_tokens(inv_vocab, ids): return convert_by_vocab(inv_vocab, ids) def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class FullTokenizer(object): """Runs end-to-end tokenization.""" def __init__(self, vocab_file, do_lower_case=True): self.vocab = load_vocab(vocab_file) self.inv_vocab = {v: k for k, v in self.vocab.items()} self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) def tokenize(self, text): split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) return split_tokens def convert_tokens_to_ids(self, tokens): return convert_by_vocab(self.vocab, tokens) def convert_ids_to_tokens(self, ids): return convert_by_vocab(self.inv_vocab, ids) @staticmethod def convert_tokens_to_string(tokens, clean_up_tokenization_spaces=True): """ Converts a sequence of tokens (string) in a single string. """ def clean_up_tokenization(out_string): """ Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms. """ out_string = ( out_string.replace(" .", ".") .replace(" ?", "?") .replace(" !", "!") .replace(" ,", ",") .replace(" ' ", "'") .replace(" n't", "n't") .replace(" 'm", "'m") .replace(" 's", "'s") .replace(" 've", "'ve") .replace(" 're", "'re") ) return out_string text = ' '.join(tokens).replace(' ##', '').strip() if clean_up_tokenization_spaces: clean_text = clean_up_tokenization(text) return clean_text else: return text def vocab_size(self): return len(self.vocab) class BasicTokenizer(object): """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True): """Constructs a BasicTokenizer. Args: do_lower_case: Whether to lower case the input. """ self.do_lower_case = do_lower_case def tokenize(self, text): """Tokenizes a piece of text.""" text = convert_to_unicode(text) text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ((cp >= 0x4E00 and cp <= 0x9FFF) or # (cp >= 0x3400 and cp <= 0x4DBF) or # (cp >= 0x20000 and cp <= 0x2A6DF) or # (cp >= 0x2A700 and cp <= 0x2B73F) or # (cp >= 0x2B740 and cp <= 0x2B81F) or # (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or # (cp >= 0x2F800 and cp <= 0x2FA1F)): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer. Returns: A list of wordpiece tokens. 
""" text = convert_to_unicode(text) output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat in ("Cc", "Cf"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .tokenizer import build_tokenizer

_TOKENIZER = None
_PADDED_VOCAB_SIZE = -1


def initialize_tokenizer(vocab_file, tokenizer_type, vocab_extra_ids=0):
    tokenizer, padded_vocab_size = build_tokenizer(vocab_file, tokenizer_type, vocab_extra_ids)
    global _TOKENIZER, _PADDED_VOCAB_SIZE
    _TOKENIZER = tokenizer
    _PADDED_VOCAB_SIZE = padded_vocab_size


def get_tokenizer():
    global _TOKENIZER
    return _TOKENIZER


def get_padded_vocab_size():
    global _PADDED_VOCAB_SIZE
    return _PADDED_VOCAB_SIZE
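# --- Hedged usage sketch (not part of the original file) ----------------------
# Typical call order in the pretraining script. The vocab path below is a
# placeholder (taken from the VOCAB_FILE environment variable if set) and must
# point at a real BERT WordPiece vocab file for the snippet to do anything.
if __name__ == "__main__":
    import os
    vocab_path = os.environ.get("VOCAB_FILE", "bert-large-uncased-vocab.txt")
    if os.path.isfile(vocab_path):
        initialize_tokenizer(vocab_path, "BertWordPieceLowerCase")
        print(get_tokenizer().tokenize("sequence parallelism"),
              get_padded_vocab_size())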
# coding=utf-8 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Megatron tokenizers.""" from abc import ABC from abc import abstractmethod from colossalai.core import global_context as gpc from colossalai.context import ParallelMode from .bert_tokenization import FullTokenizer as FullBertTokenizer def build_tokenizer(vocab_file, tokenizer_type, vocab_extra_ids=0): """Initialize tokenizer.""" if not gpc.is_initialized(ParallelMode.GLOBAL) or gpc.get_global_rank() == 0: print('> building {} tokenizer ...'.format(tokenizer_type), flush=True) # Select and instantiate the tokenizer. if tokenizer_type == 'BertWordPieceLowerCase': tokenizer = _BertWordPieceTokenizer(vocab_file=vocab_file, lower_case=True, vocab_extra_ids=vocab_extra_ids) elif tokenizer_type == 'BertWordPieceCase': tokenizer = _BertWordPieceTokenizer(vocab_file=vocab_file, lower_case=False, vocab_extra_ids=vocab_extra_ids) else: raise NotImplementedError('{} tokenizer is not ' 'implemented.'.format(tokenizer_type)) # Add vocab size. padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size) return tokenizer, padded_vocab_size def _vocab_size_with_padding(orig_vocab_size, make_vocab_size_divisible_by=128): """Pad vocab size so it is divisible by model parallel size and still having GPU friendly size.""" after = orig_vocab_size if gpc.is_initialized(ParallelMode.TENSOR): multiple = make_vocab_size_divisible_by * gpc.get_world_size(ParallelMode.TENSOR) else: multiple = make_vocab_size_divisible_by while (after % multiple) != 0: after += 1 if not gpc.is_initialized(ParallelMode.GLOBAL) or gpc.get_global_rank() == 0: print(' > padded vocab (size: {}) with {} dummy tokens ' '(new size: {})'.format( orig_vocab_size, after - orig_vocab_size, after), flush=True) return after class AbstractTokenizer(ABC): """Abstract class for tokenizer.""" def __init__(self, name): self.name = name super().__init__() @property @abstractmethod def vocab_size(self): pass @property @abstractmethod def vocab(self): """Dictionary from vocab text token to id token.""" pass @property @abstractmethod def inv_vocab(self): """Dictionary from vocab id token to text token.""" pass @abstractmethod def tokenize(self, text): pass def detokenize(self, token_ids): raise NotImplementedError('detokenizer is not implemented for {} ' 'tokenizer'.format(self.name)) @property def cls(self): raise NotImplementedError('CLS is not provided for {} ' 'tokenizer'.format(self.name)) @property def sep(self): raise NotImplementedError('SEP is not provided for {} ' 'tokenizer'.format(self.name)) @property def pad(self): raise NotImplementedError('PAD is not provided for {} ' 'tokenizer'.format(self.name)) @property def eod(self): raise NotImplementedError('EOD is not provided for {} ' 'tokenizer'.format(self.name)) @property def mask(self): raise NotImplementedError('MASK is not provided for {} ' 'tokenizer'.format(self.name)) class _BertWordPieceTokenizer(AbstractTokenizer): """Original BERT wordpiece tokenizer.""" def 
__init__(self, vocab_file, lower_case=True, vocab_extra_ids=0): if lower_case: name = 'BERT Lower Case' else: name = 'BERT Upper Case' super().__init__(name) self.tokenizer = FullBertTokenizer(vocab_file, do_lower_case=lower_case) self.cls_id = self.tokenizer.vocab['[CLS]'] self.sep_id = self.tokenizer.vocab['[SEP]'] self.pad_id = self.tokenizer.vocab['[PAD]'] self.mask_id = self.tokenizer.vocab['[MASK]'] self._additional_special_tokens = [] # (dsachan) Add BOS and EOS tokens SPECIAL_TOKENS = {'eos_token': '[EOS]', 'bos_token': '[BOS]'} self._bos_token = '[BOS]' self.add_token(self._bos_token) self._bos_token_id = self.vocab.get(self._bos_token) self._eos_token = '[EOS]' self.add_token(self._eos_token) self._eos_token_id = self.vocab.get(self._eos_token) # (dsachan) Add additional special tokens # These can be used as sentinel tokens in T5 model inputs additional_special_tokens = [] additional_special_tokens.extend( ["<extra_id_{}>".format(i) for i in range(vocab_extra_ids)]) self.add_additional_special_tokens(additional_special_tokens) def add_token(self, token): if token not in self.vocab: self.inv_vocab[self.vocab_size] = token # self.vocab_size comes from len(vocab) # and it will increase as we add elements self.vocab[token] = self.vocab_size def add_additional_special_tokens(self, tokens_list): setattr(self, "additional_special_tokens", tokens_list) for value in tokens_list: self.add_token(value) @property def vocab_size(self): return self.tokenizer.vocab_size() @property def vocab(self): return self.tokenizer.vocab @property def inv_vocab(self): return self.tokenizer.inv_vocab def tokenize(self, text): text_tokens = self.tokenizer.tokenize(text) return self.tokenizer.convert_tokens_to_ids(text_tokens) def decode(self, ids): tokens = self.tokenizer.convert_ids_to_tokens(ids) return self.tokenizer.convert_tokens_to_string(tokens) def decode_token_ids(self, token_ids): tokens = self.tokenizer.convert_ids_to_tokens(token_ids) exclude_list = ['[PAD]', '[CLS]'] non_pads = [t for t in tokens if t not in exclude_list] result = "" for s in non_pads: if s.startswith("##"): result += s[2:] else: result += " " + s return result @property def cls(self): return self.cls_id @property def sep(self): return self.sep_id @property def pad(self): return self.pad_id @property def mask(self): return self.mask_id @property def bos_token(self): """ Beginning of sentence token id """ return self._bos_token @property def eos_token(self): """ End of sentence token id """ return self._eos_token @property def additional_special_tokens(self): """ All the additional special tokens you may want to use (list of strings).""" return self._additional_special_tokens @property def bos_token_id(self): """ Id of the beginning of sentence token in the vocabulary.""" return self._bos_token_id @property def eos_token_id(self): """ Id of the end of sentence token in the vocabulary.""" return self._eos_token_id @property def additional_special_tokens_ids(self): """ Ids of all the additional special tokens in the vocabulary (list of integers).""" return [self.vocab.get(token) for token in self._additional_special_tokens] @additional_special_tokens.setter def additional_special_tokens(self, value): self._additional_special_tokens = value
# coding=utf-8 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Blendable dataset.""" import time import numpy as np import torch class BlendableDataset(torch.utils.data.Dataset): def __init__(self, datasets, weights): self.datasets = datasets num_datasets = len(datasets) assert num_datasets == len(weights) self.size = 0 for dataset in self.datasets: self.size += len(dataset) # Normalize weights. weights = np.array(weights, dtype=np.float64) sum_weights = np.sum(weights) assert sum_weights > 0.0 weights /= sum_weights # Build indices. start_time = time.time() assert num_datasets < 255 self.dataset_index = np.zeros(self.size, dtype=np.uint8) self.dataset_sample_index = np.zeros(self.size, dtype=np.int64) from . import helpers helpers.build_blending_indices(self.dataset_index, self.dataset_sample_index, weights, num_datasets, self.size, torch.distributed.get_rank() == 0) print('> elapsed time for building blendable dataset indices: ' '{:.2f} (sec)'.format(time.time() - start_time)) def __len__(self): return self.size def __getitem__(self, idx): dataset_idx = self.dataset_index[idx] sample_idx = self.dataset_sample_index[idx] return self.datasets[dataset_idx][sample_idx]
# coding=utf-8 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT Style dataset.""" import os import time import numpy as np import torch from torch.utils.data import Dataset from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.logging import get_dist_logger from ..tokenizer import get_tokenizer from .dataset_utils import ( create_masked_lm_predictions, create_tokens_and_tokentypes, get_a_and_b_segments, pad_and_convert_to_numpy, truncate_segments, ) try: from . import helpers except: print("helper is not built, ignore this message if you are using synthetic data.") class BertDataset(Dataset): def __init__(self, name, indexed_dataset, data_prefix, num_epochs, max_num_samples, masked_lm_prob, max_seq_length, short_seq_prob, seed, binary_head): # Params to store. self.name = name self.seed = seed self.masked_lm_prob = masked_lm_prob self.max_seq_length = max_seq_length self.binary_head = binary_head # Dataset. self.indexed_dataset = indexed_dataset # Build the samples mapping. self.samples_mapping = get_samples_mapping_( self.indexed_dataset, data_prefix, num_epochs, max_num_samples, self.max_seq_length - 3, # account for added tokens, short_seq_prob, self.seed, self.name, self.binary_head) # Vocab stuff. tokenizer = get_tokenizer() self.vocab_id_list = list(tokenizer.inv_vocab.keys()) self.vocab_id_to_token_dict = tokenizer.inv_vocab self.cls_id = tokenizer.cls self.sep_id = tokenizer.sep self.mask_id = tokenizer.mask self.pad_id = tokenizer.pad def __len__(self): return self.samples_mapping.shape[0] def __getitem__(self, idx): start_idx, end_idx, seq_length = self.samples_mapping[idx] sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)] # Note that this rng state should be numpy and not python since # python randint is inclusive whereas the numpy one is exclusive. 
# We % 2**32 since numpy requires the seed to be between 0 and 2**32 - 1 np_rng = np.random.RandomState(seed=((self.seed + idx) % 2**32)) return build_training_sample( sample, seq_length, self.max_seq_length, # needed for padding self.vocab_id_list, self.vocab_id_to_token_dict, self.cls_id, self.sep_id, self.mask_id, self.pad_id, self.masked_lm_prob, np_rng, self.binary_head) def get_samples_mapping_(indexed_dataset, data_prefix, num_epochs, max_num_samples, max_seq_length, short_seq_prob, seed, name, binary_head): logger = get_dist_logger() if not num_epochs: if not max_num_samples: raise ValueError("Need to specify either max_num_samples " "or num_epochs") num_epochs = np.iinfo(np.int32).max - 1 if not max_num_samples: max_num_samples = np.iinfo(np.int64).max - 1 # Filename of the index mapping indexmap_filename = data_prefix indexmap_filename += '_{}_indexmap'.format(name) if num_epochs != (np.iinfo(np.int32).max - 1): indexmap_filename += '_{}ep'.format(num_epochs) if max_num_samples != (np.iinfo(np.int64).max - 1): indexmap_filename += '_{}mns'.format(max_num_samples) indexmap_filename += '_{}msl'.format(max_seq_length) indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob) indexmap_filename += '_{}s'.format(seed) indexmap_filename += '.npy' # Build the indexed mapping if not exist. if torch.distributed.get_rank() == 0 and \ not os.path.isfile(indexmap_filename): print(' > WARNING: could not find index map file {}, building ' 'the indices on rank 0 ...'.format(indexmap_filename)) # Make sure the types match the helpers input types. assert indexed_dataset.doc_idx.dtype == np.int64 assert indexed_dataset.sizes.dtype == np.int32 # Build samples mapping verbose = torch.distributed.get_rank() == 0 start_time = time.time() logger.info('\n > building samples index mapping for {} ...'.format(name), ranks=[0]) # First compile and then import. samples_mapping = helpers.build_mapping(indexed_dataset.doc_idx, indexed_dataset.sizes, num_epochs, max_num_samples, max_seq_length, short_seq_prob, seed, verbose, 2 if binary_head else 1) logger.info('\n > done building samples index maping', ranks=[0]) np.save(indexmap_filename, samples_mapping, allow_pickle=True) logger.info('\n > saved the index mapping in {}'.format(indexmap_filename), ranks=[0]) # Make sure all the ranks have built the mapping logger.info('\n > elapsed time to build and save samples mapping ' '(seconds): {:4f}'.format(time.time() - start_time), ranks=[0]) # This should be a barrier but nccl barrier assumes # device_index=rank which is not the case for model # parallel case counts = torch.cuda.LongTensor([1]) torch.distributed.all_reduce(counts, group=gpc.get_group(ParallelMode.DATA)) if gpc.is_initialized(ParallelMode.PIPELINE): torch.distributed.all_reduce(counts, group=gpc.get_group(ParallelMode.PIPELINE)) assert counts[0].item() == (torch.distributed.get_world_size() // torch.distributed.get_world_size(group=gpc.get_group(ParallelMode.SEQUENCE))) # Load indexed dataset. start_time = time.time() samples_mapping = np.load(indexmap_filename, allow_pickle=True, mmap_mode='r') logger.info('\n > loading indexed mapping from {}'.format(indexmap_filename) + '\n loaded indexed file in {:3.3f} seconds'.format(time.time() - start_time) + '\n total number of samples: {}'.format(samples_mapping.shape[0]), ranks=[0]) return samples_mapping def build_training_sample(sample, target_seq_length, max_seq_length, vocab_id_list, vocab_id_to_token_dict, cls_id, sep_id, mask_id, pad_id, masked_lm_prob, np_rng, binary_head): """Build training sample. 
Arguments: sample: A list of sentences in which each sentence is a list token ids. target_seq_length: Desired sequence length. max_seq_length: Maximum length of the sequence. All values are padded to this length. vocab_id_list: List of vocabulary ids. Used to pick a random id. vocab_id_to_token_dict: A dictionary from vocab ids to text tokens. cls_id: Start of example id. sep_id: Separator id. mask_id: Mask token id. pad_id: Padding token id. masked_lm_prob: Probability to mask tokens. np_rng: Random number genenrator. Note that this rng state should be numpy and not python since python randint is inclusive for the opper bound whereas the numpy one is exclusive. """ if binary_head: # We assume that we have at least two sentences in the sample assert len(sample) > 1 assert target_seq_length <= max_seq_length # Divide sample into two segments (A and B). if binary_head: tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample, np_rng) else: tokens_a = [] for j in range(len(sample)): tokens_a.extend(sample[j]) tokens_b = [] is_next_random = False # Truncate to `target_sequence_length`. max_num_tokens = target_seq_length truncated = truncate_segments(tokens_a, tokens_b, len(tokens_a), len(tokens_b), max_num_tokens, np_rng) # Build tokens and toketypes. tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id) # Masking. max_predictions_per_seq = masked_lm_prob * max_num_tokens (tokens, masked_positions, masked_labels, _) = create_masked_lm_predictions(tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob, cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng) # Padding. tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np \ = pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, masked_labels, pad_id, max_seq_length) train_sample = { 'text': tokens_np, 'types': tokentypes_np, 'labels': labels_np, 'is_random': int(is_next_random), 'loss_mask': loss_mask_np, 'padding_mask': padding_mask_np, 'truncated': int(truncated) } return train_sample
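# Hedged sketch: a minimal, self-contained illustration of the
# [CLS] A [SEP] B [SEP] layout that build_training_sample() above assembles
# (via create_tokens_and_tokentypes). The token ids used here (101 for [CLS],
# 102 for [SEP], small integers for words) are assumptions chosen only for
# illustration; real ids come from the tokenizer vocabulary.

def _sketch_tokens_and_tokentypes(tokens_a, tokens_b, cls_id=101, sep_id=102):
    """Merge segments A and B the way the BERT sample builder does."""
    tokens = [cls_id] + list(tokens_a) + [sep_id]
    tokentypes = [0] * (len(tokens_a) + 2)       # [CLS], segment A and its [SEP] are type 0
    if tokens_b:
        tokens += list(tokens_b) + [sep_id]
        tokentypes += [1] * (len(tokens_b) + 1)  # segment B and the trailing [SEP] are type 1
    return tokens, tokentypes


if __name__ == '__main__':
    toks, types = _sketch_tokens_and_tokentypes([7, 8], [9])
    assert toks == [101, 7, 8, 102, 9, 102]
    assert types == [0, 0, 0, 0, 1, 1]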
# coding=utf-8 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Dataloaders.""" import torch import random from colossalai.core import global_context as gpc from colossalai.context import ParallelMode def build_pretraining_data_loader(dataset, consumed_samples, micro_batch_size, dataloader_type='single', num_workers=0): """Build dataloader given an input dataset.""" if dataset is None: return None # Megatron sampler if dataloader_type == 'single': batch_sampler = MegatronPretrainingSampler(total_samples=len(dataset), consumed_samples=consumed_samples, micro_batch_size=micro_batch_size, data_parallel_rank=gpc.get_local_rank(ParallelMode.DATA), data_parallel_size=gpc.get_world_size(ParallelMode.DATA)) elif dataloader_type == 'cyclic': batch_sampler = MegatronPretrainingRandomSampler(total_samples=len(dataset), consumed_samples=consumed_samples, micro_batch_size=micro_batch_size, data_parallel_rank=gpc.get_local_rank(ParallelMode.DATA), data_parallel_size=gpc.get_world_size(ParallelMode.DATA)) else: raise Exception('{} dataloader type is not supported.'.format(dataloader_type)) # Torch dataloader. return torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler, num_workers=num_workers, pin_memory=True) class MegatronPretrainingSampler: def __init__(self, total_samples, consumed_samples, micro_batch_size, data_parallel_rank, data_parallel_size, drop_last=True): # Keep a copy of input params for later use. self.total_samples = total_samples self.consumed_samples = consumed_samples self.micro_batch_size = micro_batch_size self.data_parallel_rank = data_parallel_rank self.micro_batch_times_data_parallel_size = \ self.micro_batch_size * data_parallel_size self.drop_last = drop_last # Sanity checks. 
assert self.total_samples > 0, \ 'no sample to consume: {}'.format(self.total_samples) assert self.consumed_samples < self.total_samples, \ 'no samples left to consume: {}, {}'.format(self.consumed_samples, self.total_samples) assert self.micro_batch_size > 0 assert data_parallel_size > 0 assert self.data_parallel_rank < data_parallel_size, \ 'data_parallel_rank should be smaller than data size: {}, ' \ '{}'.format(self.data_parallel_rank, data_parallel_size) def __len__(self): return self.total_samples def get_start_end_idx(self): start_idx = self.data_parallel_rank * self.micro_batch_size end_idx = start_idx + self.micro_batch_size return start_idx, end_idx def __iter__(self): batch = [] # Last batch will be dropped if drop_last is not set False for idx in range(self.consumed_samples, self.total_samples): batch.append(idx) if len(batch) == self.micro_batch_times_data_parallel_size: start_idx, end_idx = self.get_start_end_idx() yield batch[start_idx:end_idx] batch = [] # Check the last partial batch and see drop_last is set if len(batch) > 0 and not self.drop_last: start_idx, end_idx = self.get_start_end_idx() yield batch[start_idx:end_idx] class MegatronPretrainingRandomSampler: def __init__(self, total_samples, consumed_samples, micro_batch_size, data_parallel_rank, data_parallel_size): # Keep a copy of input params for later use. self.total_samples = total_samples self.consumed_samples = consumed_samples self.micro_batch_size = micro_batch_size self.data_parallel_rank = data_parallel_rank self.data_parallel_size = data_parallel_size self.micro_batch_times_data_parallel_size = \ self.micro_batch_size * data_parallel_size self.last_batch_size = \ self.total_samples % self.micro_batch_times_data_parallel_size # Sanity checks. assert self.total_samples > 0, \ 'no sample to consume: {}'.format(self.total_samples) assert self.micro_batch_size > 0 assert data_parallel_size > 0 assert self.data_parallel_rank < data_parallel_size, \ 'data_parallel_rank should be smaller than data size: {}, ' \ '{}'.format(self.data_parallel_rank, data_parallel_size) def __len__(self): return self.total_samples def __iter__(self): active_total_samples = self.total_samples - self.last_batch_size self.epoch = self.consumed_samples // active_total_samples current_epoch_samples = self.consumed_samples % active_total_samples assert current_epoch_samples % self.micro_batch_times_data_parallel_size == 0 # data sharding and random sampling bucket_size = (self.total_samples // self.micro_batch_times_data_parallel_size) \ * self.micro_batch_size bucket_offset = current_epoch_samples // self.data_parallel_size start_idx = self.data_parallel_rank * bucket_size g = torch.Generator() g.manual_seed(self.epoch) random_idx = torch.randperm(bucket_size, generator=g).tolist() idx_range = [start_idx + x for x in random_idx[bucket_offset:]] batch = [] # Last batch if not complete will be dropped. for idx in idx_range: batch.append(idx) if len(batch) == self.micro_batch_size: self.consumed_samples += self.micro_batch_times_data_parallel_size yield batch batch = []
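# Hedged sketch: how MegatronPretrainingSampler above shards one global batch
# across data-parallel ranks. It accumulates micro_batch_size * data_parallel_size
# consecutive sample indices and each rank keeps only its own contiguous slice.
# The sizes below are made up for illustration.

def _sketch_rank_batches(consumed_samples, total_samples, micro_batch_size, dp_size):
    """Return, per global batch, the index slice seen by every data-parallel rank."""
    global_batch_size = micro_batch_size * dp_size
    global_batches, batch = [], []
    for idx in range(consumed_samples, total_samples):
        batch.append(idx)
        if len(batch) == global_batch_size:
            global_batches.append([batch[r * micro_batch_size:(r + 1) * micro_batch_size]
                                   for r in range(dp_size)])
            batch = []
    return global_batches


if __name__ == '__main__':
    # With micro_batch_size=2 and 2 data-parallel ranks, rank 0 sees [0, 1] and
    # rank 1 sees [2, 3] in the first global batch.
    assert _sketch_rank_batches(0, 8, 2, 2)[0] == [[0, 1], [2, 3]]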
from . import indexed_dataset
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, and NVIDIA. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Most of the code here has been copied from: # https://github.com/google-research/albert/blob/master/create_pretraining_data.py # with some modifications. import math import time import collections from colossalai.logging import get_dist_logger import numpy as np from .blendable_dataset import BlendableDataset from .indexed_dataset import make_dataset as make_indexed_dataset DSET_TYPE_STD = 'standard_bert' DSET_TYPE_ICT = 'ict' DSET_TYPES = [DSET_TYPE_ICT, DSET_TYPE_STD] def get_datasets_weights_and_num_samples(data_prefix, train_valid_test_num_samples): # The data prefix should be in the format of: # weight-1, data-prefix-1, weight-2, data-prefix-2, .. assert len(data_prefix) % 2 == 0 num_datasets = len(data_prefix) // 2 weights = [0]*num_datasets prefixes = [0]*num_datasets for i in range(num_datasets): weights[i] = float(data_prefix[2*i]) prefixes[i] = (data_prefix[2*i+1]).strip() # Normalize weights weight_sum = 0.0 for weight in weights: weight_sum += weight assert weight_sum > 0.0 weights = [weight / weight_sum for weight in weights] # Add 0.5% (the 1.005 factor) so in case the bleding dataset does # not uniformly distribute the number of samples, we still have # samples left to feed to the network. datasets_train_valid_test_num_samples = [] for weight in weights: datasets_train_valid_test_num_samples.append( [int(math.ceil(val * weight * 1.005)) for val in train_valid_test_num_samples]) return prefixes, weights, datasets_train_valid_test_num_samples def compile_helper(): """Compile helper function ar runtime. Make sure this is invoked on a single process.""" import os import subprocess path = os.path.abspath(os.path.dirname(__file__)) ret = subprocess.run(['make', '-C', path]) if ret.returncode != 0: print("Making C++ dataset helpers module failed, exiting.") import sys sys.exit(1) def get_a_and_b_segments(sample, np_rng): """Divide sample into a and b segments.""" # Number of sentences in the sample. n_sentences = len(sample) # Make sure we always have two sentences. assert n_sentences > 1, 'make sure each sample has at least two sentences.' # First part: # `a_end` is how many sentences go into the `A`. a_end = 1 if n_sentences >= 3: # Note that randin in numpy is exclusive. 
a_end = np_rng.randint(1, n_sentences) tokens_a = [] for j in range(a_end): tokens_a.extend(sample[j]) # Second part: tokens_b = [] for j in range(a_end, n_sentences): tokens_b.extend(sample[j]) # Random next: is_next_random = False if np_rng.random() < 0.5: is_next_random = True tokens_a, tokens_b = tokens_b, tokens_a return tokens_a, tokens_b, is_next_random def truncate_segments(tokens_a, tokens_b, len_a, len_b, max_num_tokens, np_rng): """Truncates a pair of sequences to a maximum sequence length.""" #print(len_a, len_b, max_num_tokens) assert len_a > 0 if len_a + len_b <= max_num_tokens: return False while len_a + len_b > max_num_tokens: if len_a > len_b: len_a -= 1 tokens = tokens_a else: len_b -= 1 tokens = tokens_b if np_rng.random() < 0.5: del tokens[0] else: tokens.pop() return True def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id): """Merge segments A and B, add [CLS] and [SEP] and build tokentypes.""" tokens = [] tokentypes = [] # [CLS]. tokens.append(cls_id) tokentypes.append(0) # Segment A. for token in tokens_a: tokens.append(token) tokentypes.append(0) # [SEP]. tokens.append(sep_id) tokentypes.append(0) # Segment B. for token in tokens_b: tokens.append(token) tokentypes.append(1) if tokens_b: # [SEP]. tokens.append(sep_id) tokentypes.append(1) return tokens, tokentypes MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"]) def is_start_piece(piece): """Check if the current word piece is the starting piece (BERT).""" # When a word has been split into # WordPieces, the first token does not have any marker and any subsequence # tokens are prefixed with ##. So whenever we see the ## token, we # append it to the previous set of word indexes. return not piece.startswith("##") def create_masked_lm_predictions(tokens, vocab_id_list, vocab_id_to_token_dict, masked_lm_prob, cls_id, sep_id, mask_id, max_predictions_per_seq, np_rng, max_ngrams=3, do_whole_word_mask=True, favor_longer_ngram=False, do_permutation=False): """Creates the predictions for the masked LM objective. Note: Tokens here are vocab ids and not text tokens.""" cand_indexes = [] # Note(mingdachen): We create a list for recording if the piece is # the starting piece of current token, where 1 means true, so that # on-the-fly whole word masking is possible. token_boundary = [0] * len(tokens) for (i, token) in enumerate(tokens): if token == cls_id or token == sep_id: token_boundary[i] = 1 continue # Whole Word Masking means that if we mask all of the wordpieces # corresponding to an original word. # # Note that Whole Word Masking does *not* change the training code # at all -- we still predict each WordPiece independently, softmaxed # over the entire vocabulary. if (do_whole_word_mask and len(cand_indexes) >= 1 and not is_start_piece(vocab_id_to_token_dict[token])): cand_indexes[-1].append(i) else: cand_indexes.append([i]) if is_start_piece(vocab_id_to_token_dict[token]): token_boundary[i] = 1 output_tokens = list(tokens) masked_lm_positions = [] masked_lm_labels = [] if masked_lm_prob == 0: return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary) num_to_predict = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob)))) # Note(mingdachen): # By default, we set the probabilities to favor shorter ngram sequences. ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64) pvals = 1. 
/ np.arange(1, max_ngrams + 1) pvals /= pvals.sum(keepdims=True) if favor_longer_ngram: pvals = pvals[::-1] ngram_indexes = [] for idx in range(len(cand_indexes)): ngram_index = [] for n in ngrams: ngram_index.append(cand_indexes[idx:idx + n]) ngram_indexes.append(ngram_index) np_rng.shuffle(ngram_indexes) masked_lms = [] covered_indexes = set() for cand_index_set in ngram_indexes: if len(masked_lms) >= num_to_predict: break if not cand_index_set: continue # Note(mingdachen): # Skip current piece if they are covered in lm masking or previous ngrams. for index_set in cand_index_set[0]: for index in index_set: if index in covered_indexes: continue n = np_rng.choice(ngrams[:len(cand_index_set)], p=pvals[:len(cand_index_set)] / pvals[:len(cand_index_set)].sum(keepdims=True)) index_set = sum(cand_index_set[n - 1], []) n -= 1 # Note(mingdachen): # Repeatedly looking for a candidate that does not exceed the # maximum number of predictions by trying shorter ngrams. while len(masked_lms) + len(index_set) > num_to_predict: if n == 0: break index_set = sum(cand_index_set[n - 1], []) n -= 1 # If adding a whole-word mask would exceed the maximum number of # predictions, then just skip this candidate. if len(masked_lms) + len(index_set) > num_to_predict: continue is_any_index_covered = False for index in index_set: if index in covered_indexes: is_any_index_covered = True break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_token = None # 80% of the time, replace with [MASK] if np_rng.random() < 0.8: masked_token = mask_id else: # 10% of the time, keep original if np_rng.random() < 0.5: masked_token = tokens[index] # 10% of the time, replace with random word else: masked_token = vocab_id_list[np_rng.randint(0, len(vocab_id_list))] output_tokens[index] = masked_token masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) assert len(masked_lms) <= num_to_predict np_rng.shuffle(ngram_indexes) select_indexes = set() if do_permutation: for cand_index_set in ngram_indexes: if len(select_indexes) >= num_to_predict: break if not cand_index_set: continue # Note(mingdachen): # Skip current piece if they are covered in lm masking or previous ngrams. for index_set in cand_index_set[0]: for index in index_set: if index in covered_indexes or index in select_indexes: continue n = np.random.choice(ngrams[:len(cand_index_set)], p=pvals[:len(cand_index_set)] / pvals[:len(cand_index_set)].sum(keepdims=True)) index_set = sum(cand_index_set[n - 1], []) n -= 1 while len(select_indexes) + len(index_set) > num_to_predict: if n == 0: break index_set = sum(cand_index_set[n - 1], []) n -= 1 # If adding a whole-word mask would exceed the maximum number of # predictions, then just skip this candidate. 
if len(select_indexes) + len(index_set) > num_to_predict: continue is_any_index_covered = False for index in index_set: if index in covered_indexes or index in select_indexes: is_any_index_covered = True break if is_any_index_covered: continue for index in index_set: select_indexes.add(index) assert len(select_indexes) <= num_to_predict select_indexes = sorted(select_indexes) permute_indexes = list(select_indexes) np_rng.shuffle(permute_indexes) orig_token = list(output_tokens) for src_i, tgt_i in zip(select_indexes, permute_indexes): output_tokens[src_i] = orig_token[tgt_i] masked_lms.append(MaskedLmInstance(index=src_i, label=orig_token[src_i])) masked_lms = sorted(masked_lms, key=lambda x: x.index) for p in masked_lms: masked_lm_positions.append(p.index) masked_lm_labels.append(p.label) return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary) def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, masked_labels, pad_id, max_seq_length): """Pad sequences and convert them to numpy.""" # Some checks. num_tokens = len(tokens) padding_length = max_seq_length - num_tokens assert padding_length >= 0 assert len(tokentypes) == num_tokens assert len(masked_positions) == len(masked_labels) # Tokens and token types. filler = [pad_id] * padding_length tokens_np = np.array(tokens + filler, dtype=np.int64) tokentypes_np = np.array(tokentypes + filler, dtype=np.int64) # Padding mask. padding_mask_np = np.array([1] * num_tokens + [0] * padding_length, dtype=np.int64) # Lables and loss mask. labels = [-1] * max_seq_length loss_mask = [0] * max_seq_length for i in range(len(masked_positions)): assert masked_positions[i] < num_tokens labels[masked_positions[i]] = masked_labels[i] loss_mask[masked_positions[i]] = 1 labels_np = np.array(labels, dtype=np.int64) loss_mask_np = np.array(loss_mask, dtype=np.int64) return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np def build_train_valid_test_datasets(data_prefix, data_impl, splits_string, train_valid_test_num_samples, max_seq_length, masked_lm_prob, short_seq_prob, seed, skip_warmup, binary_head, dataset_type='standard_bert'): if len(data_prefix) == 1: return _build_train_valid_test_datasets(data_prefix[0], data_impl, splits_string, train_valid_test_num_samples, max_seq_length, masked_lm_prob, short_seq_prob, seed, skip_warmup, binary_head, dataset_type=dataset_type) # Blending dataset. # Parse the values. output = get_datasets_weights_and_num_samples(data_prefix, train_valid_test_num_samples) prefixes, weights, datasets_train_valid_test_num_samples = output # Build individual datasets. train_datasets = [] valid_datasets = [] test_datasets = [] for i in range(len(prefixes)): train_ds, valid_ds, test_ds = _build_train_valid_test_datasets( prefixes[i], data_impl, splits_string, datasets_train_valid_test_num_samples[i], max_seq_length, masked_lm_prob, short_seq_prob, seed, skip_warmup, binary_head, dataset_type=dataset_type) if train_ds: train_datasets.append(train_ds) if valid_ds: valid_datasets.append(valid_ds) if test_ds: test_datasets.append(test_ds) # Blend. 
blending_train_dataset = None if train_datasets: blending_train_dataset = BlendableDataset(train_datasets, weights) blending_valid_dataset = None if valid_datasets: blending_valid_dataset = BlendableDataset(valid_datasets, weights) blending_test_dataset = None if test_datasets: blending_test_dataset = BlendableDataset(test_datasets, weights) return (blending_train_dataset, blending_valid_dataset, blending_test_dataset) def _build_train_valid_test_datasets(data_prefix, data_impl, splits_string, train_valid_test_num_samples, max_seq_length, masked_lm_prob, short_seq_prob, seed, skip_warmup, binary_head, dataset_type='standard_bert'): logger = get_dist_logger() if dataset_type not in DSET_TYPES: raise ValueError("Invalid dataset_type: ", dataset_type) # Indexed dataset. indexed_dataset = get_indexed_dataset_(data_prefix, data_impl, skip_warmup) if dataset_type == DSET_TYPE_ICT: args = get_args() title_dataset = get_indexed_dataset_(args.titles_data_path, data_impl, skip_warmup) # Get start and end indices of train/valid/train into doc-idx # Note that doc-idx is designed to be num-docs + 1 so we can # easily iterate over it. total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1 splits = get_train_valid_test_split_(splits_string, total_num_of_documents) # Print stats about the splits. logger.info('\n > dataset split:') def print_split_stats(name, index): start_index = indexed_dataset.doc_idx[splits[index]] end_index = indexed_dataset.doc_idx[splits[index + 1]] logger.info('\n {}:'.format(name) + '\n document indices in [{}, {}) total of {} documents'.format( splits[index], splits[index + 1], splits[index + 1] - splits[index]) + '\n sentence indices in [{}, {}) total of {} sentences'.format( start_index, end_index, end_index - start_index), ranks=[0]) print_split_stats('train', 0) print_split_stats('validation', 1) print_split_stats('test', 2) def build_dataset(index, name): from .bert_dataset import BertDataset dataset = None if splits[index + 1] > splits[index]: # Get the pointer to the original doc-idx so we can set it later. doc_idx_ptr = indexed_dataset.get_doc_idx() # Slice the doc-idx start_index = splits[index] # Add +1 so we can index into the dataset to get the upper bound. end_index = splits[index + 1] + 1 # New doc_idx view. indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index]) # Build the dataset accordingly. kwargs = dict( name=name, data_prefix=data_prefix, num_epochs=None, max_num_samples=train_valid_test_num_samples[index], max_seq_length=max_seq_length, seed=seed, binary_head=binary_head ) if dataset_type == DSET_TYPE_ICT: args = get_args() dataset = ICTDataset( block_dataset=indexed_dataset, title_dataset=title_dataset, query_in_block_prob=args.query_in_block_prob, use_one_sent_docs=args.use_one_sent_docs, **kwargs ) else: dataset = BertDataset( indexed_dataset=indexed_dataset, masked_lm_prob=masked_lm_prob, short_seq_prob=short_seq_prob, **kwargs ) # Set the original pointer so dataset remains the main dataset. indexed_dataset.set_doc_idx(doc_idx_ptr) # Checks. 
assert indexed_dataset.doc_idx[0] == 0 assert indexed_dataset.doc_idx.shape[0] == \ (total_num_of_documents + 1) return dataset train_dataset = build_dataset(0, 'train') valid_dataset = build_dataset(1, 'valid') test_dataset = build_dataset(2, 'test') return (train_dataset, valid_dataset, test_dataset) def get_indexed_dataset_(data_prefix, data_impl, skip_warmup): logger = get_dist_logger() start_time = time.time() indexed_dataset = make_indexed_dataset(data_prefix, data_impl, skip_warmup) assert indexed_dataset.sizes.shape[0] == indexed_dataset.doc_idx[-1] logger.info('\n > building dataset index ...', ranks=[0]) logger.info('\n > finished creating indexed dataset in {:4f} ' 'seconds'.format(time.time() - start_time), ranks=[0]) logger.info('\n > indexed dataset stats:' + '\n number of documents: {}'.format( indexed_dataset.doc_idx.shape[0] - 1) + '\n number of sentences: {}'.format( indexed_dataset.sizes.shape[0]), ranks=[0] ) return indexed_dataset def get_train_valid_test_split_(splits_string, size): """ Get dataset splits from comma or '/' separated string list.""" splits = [] if splits_string.find(',') != -1: splits = [float(s) for s in splits_string.split(',')] elif splits_string.find('/') != -1: splits = [float(s) for s in splits_string.split('/')] else: splits = [float(splits_string)] while len(splits) < 3: splits.append(0.) splits = splits[:3] splits_sum = sum(splits) assert splits_sum > 0.0 splits = [split / splits_sum for split in splits] splits_index = [0] for index, split in enumerate(splits): splits_index.append(splits_index[index] + int(round(split * float(size)))) diff = splits_index[-1] - size for index in range(1, len(splits_index)): splits_index[index] -= diff assert len(splits_index) == 4 assert splits_index[-1] == size return splits_index
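# Hedged sketch: what get_train_valid_test_split_() above returns for a typical
# "949,50,1" split of 1000 documents. The helper mirrors its logic (normalize
# the weights, accumulate rounded boundaries, correct rounding drift) purely
# for illustration; the numbers are made up.

def _sketch_split(splits_string, size):
    splits = [float(s) for s in splits_string.replace('/', ',').split(',')]
    splits = (splits + [0., 0., 0.])[:3]
    total = sum(splits)
    splits = [s / total for s in splits]
    index = [0]
    for i, s in enumerate(splits):
        index.append(index[i] + int(round(s * float(size))))
    diff = index[-1] - size
    for i in range(1, len(index)):
        index[i] -= diff
    return index


if __name__ == '__main__':
    # 1000 documents split roughly 94.9% train / 5% validation / 0.1% test.
    assert _sketch_split('949,50,1', 1000) == [0, 949, 999, 1000]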
from .blendable_dataset import BlendableDataset from .dataset_utils import get_datasets_weights_and_num_samples, get_indexed_dataset_, get_train_valid_test_split_ from .bert_dataset import BertDataset from colossalai.logging import get_dist_logger DSET_TYPE_BERT = 'standard_bert' DSET_TYPE_ICT = 'ict' DSET_TYPE_T5 = 't5' DSET_TYPES = [DSET_TYPE_BERT, DSET_TYPE_ICT, DSET_TYPE_T5] def _build_train_valid_test_datasets(data_prefix, data_impl, splits_string, train_valid_test_num_samples, max_seq_length, masked_lm_prob, short_seq_prob, seed, skip_warmup, binary_head, dataset_type='standard_bert'): if dataset_type not in DSET_TYPES: raise ValueError("Invalid dataset_type: ", dataset_type) # Indexed dataset. indexed_dataset = get_indexed_dataset_(data_prefix, data_impl, skip_warmup) # Get start and end indices of train/valid/train into doc-idx # Note that doc-idx is designed to be num-docs + 1 so we can # easily iterate over it. total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1 splits = get_train_valid_test_split_(splits_string, total_num_of_documents) logger = get_dist_logger() # Print stats about the splits. logger.info('\n > dataset split:', ranks=[0]) def print_split_stats(name, index): start_index = indexed_dataset.doc_idx[splits[index]] end_index = indexed_dataset.doc_idx[splits[index + 1]] logger.info('\n {}:'.format(name) + '\n document indices in [{}, {}) total of {} documents'.format( splits[index], splits[index + 1], splits[index + 1] - splits[index]) + '\n sentence indices in [{}, {}) total of {} sentences'.format( start_index, end_index, end_index - start_index), ranks=[0]) print_split_stats('train', 0) print_split_stats('validation', 1) print_split_stats('test', 2) def build_dataset(index, name): dataset = None if splits[index + 1] > splits[index]: # Get the pointer to the original doc-idx so we can set it later. doc_idx_ptr = indexed_dataset.get_doc_idx() # Slice the doc-idx start_index = splits[index] # Add +1 so we can index into the dataset to get the upper bound. end_index = splits[index + 1] + 1 # New doc_idx view. indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index]) # Build the dataset accordingly. kwargs = dict( name=name, data_prefix=data_prefix, num_epochs=None, max_num_samples=train_valid_test_num_samples[index], max_seq_length=max_seq_length, seed=seed, ) if dataset_type != DSET_TYPE_BERT: raise NotImplementedError("Only BERT dataset is supported") else: dataset = BertDataset( indexed_dataset=indexed_dataset, masked_lm_prob=masked_lm_prob, short_seq_prob=short_seq_prob, binary_head=binary_head, **kwargs ) # Set the original pointer so dataset remains the main dataset. indexed_dataset.set_doc_idx(doc_idx_ptr) # Checks. assert indexed_dataset.doc_idx[0] == 0 assert indexed_dataset.doc_idx.shape[0] == \ (total_num_of_documents + 1) return dataset train_dataset = build_dataset(0, 'train') valid_dataset = build_dataset(1, 'valid') test_dataset = build_dataset(2, 'test') return (train_dataset, valid_dataset, test_dataset) def build_train_valid_test_datasets(data_prefix, data_impl, splits_string, train_valid_test_num_samples, max_seq_length, masked_lm_prob, short_seq_prob, seed, skip_warmup, binary_head, dataset_type='standard_bert'): if len(data_prefix) == 1: return _build_train_valid_test_datasets(data_prefix[0], data_impl, splits_string, train_valid_test_num_samples, max_seq_length, masked_lm_prob, short_seq_prob, seed, skip_warmup, binary_head, dataset_type=dataset_type) # Blending dataset. # Parse the values. 
output = get_datasets_weights_and_num_samples(data_prefix, train_valid_test_num_samples) prefixes, weights, datasets_train_valid_test_num_samples = output # Build individual datasets. train_datasets = [] valid_datasets = [] test_datasets = [] for i in range(len(prefixes)): train_ds, valid_ds, test_ds = _build_train_valid_test_datasets( prefixes[i], data_impl, splits_string, datasets_train_valid_test_num_samples[i], max_seq_length, masked_lm_prob, short_seq_prob, seed, skip_warmup, binary_head, dataset_type=dataset_type) if train_ds: train_datasets.append(train_ds) if valid_ds: valid_datasets.append(valid_ds) if test_ds: test_datasets.append(test_ds) # Blend. blending_train_dataset = None if train_datasets: blending_train_dataset = BlendableDataset(train_datasets, weights) blending_valid_dataset = None if valid_datasets: blending_valid_dataset = BlendableDataset(valid_datasets, weights) blending_test_dataset = None if test_datasets: blending_test_dataset = BlendableDataset(test_datasets, weights) return (blending_train_dataset, blending_valid_dataset, blending_test_dataset)
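# Hedged sketch: how a blended data_prefix list such as
# ['0.3', 'corpus-a', '0.7', 'corpus-b'] is interpreted by
# get_datasets_weights_and_num_samples(): weights are normalized and each
# dataset is asked for roughly 0.5% more samples than its share so the blend
# never runs dry. All names and numbers below are made up for illustration.
import math


def _sketch_weights_and_samples(data_prefix, train_valid_test_num_samples):
    assert len(data_prefix) % 2 == 0
    num = len(data_prefix) // 2
    weights = [float(data_prefix[2 * i]) for i in range(num)]
    prefixes = [data_prefix[2 * i + 1].strip() for i in range(num)]
    total = sum(weights)
    weights = [w / total for w in weights]
    per_dataset = [[int(math.ceil(n * w * 1.005)) for n in train_valid_test_num_samples]
                   for w in weights]
    return prefixes, weights, per_dataset


if __name__ == '__main__':
    prefixes, weights, counts = _sketch_weights_and_samples(
        ['0.3', 'corpus-a', '0.7', 'corpus-b'], [1000, 100, 10])
    assert prefixes == ['corpus-a', 'corpus-b']
    assert counts[0] == [302, 31, 4]   # ceil of 1000*0.3*1.005, 100*0.3*1.005, 10*0.3*1.005
    assert counts[1] == [704, 71, 8]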
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # copied from fairseq/fairseq/data/indexed_dataset.py # Removed IndexedRawTextDataset since it relied on Fairseq dictionary # other slight modifications to remove fairseq dependencies # Added document index to index file and made it accessible. # An empty sentence no longer separates documents. from functools import lru_cache import os import shutil import struct from itertools import accumulate import numpy as np import torch def __best_fitting_dtype(vocab_size=None): if vocab_size is not None and vocab_size < 65500: return np.uint16 else: return np.int32 def get_available_dataset_impl(): return ['lazy', 'cached', 'mmap'] def infer_dataset_impl(path): if IndexedDataset.exists(path): with open(index_file_path(path), 'rb') as f: magic = f.read(8) if magic == IndexedDataset._HDR_MAGIC: return 'cached' elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]: return 'mmap' else: return None else: print(f"Dataset does not exist: {path}") print("Path should be a basename that both .idx and .bin can be appended to get full filenames.") return None def make_builder(out_file, impl, vocab_size=None): if impl == 'mmap': return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size)) else: return IndexedDatasetBuilder(out_file) def make_dataset(path, impl, skip_warmup=False): if not IndexedDataset.exists(path): print(f"Dataset does not exist: {path}") print("Path should be a basename that both .idx and .bin can be appended to get full filenames.") return None if impl == 'infer': impl = infer_dataset_impl(path) if impl == 'lazy' and IndexedDataset.exists(path): return IndexedDataset(path) elif impl == 'cached' and IndexedDataset.exists(path): return IndexedCachedDataset(path) elif impl == 'mmap' and MMapIndexedDataset.exists(path): return MMapIndexedDataset(path, skip_warmup) print(f"Unknown dataset implementation: {impl}") return None def dataset_exists(path, impl): if impl == 'mmap': return MMapIndexedDataset.exists(path) else: return IndexedDataset.exists(path) def read_longs(f, n): a = np.empty(n, dtype=np.int64) f.readinto(a) return a def write_longs(f, a): f.write(np.array(a, dtype=np.int64)) dtypes = { 1: np.uint8, 2: np.int8, 3: np.int16, 4: np.int32, 5: np.int64, 6: np.float, 7: np.double, 8: np.uint16 } def code(dtype): for k in dtypes.keys(): if dtypes[k] == dtype: return k raise ValueError(dtype) def index_file_path(prefix_path): return prefix_path + '.idx' def data_file_path(prefix_path): return prefix_path + '.bin' def create_doc_idx(sizes): doc_idx = [0] for i, s in enumerate(sizes): if s == 0: doc_idx.append(i + 1) return doc_idx class IndexedDataset(torch.utils.data.Dataset): """Loader for IndexedDataset""" _HDR_MAGIC = b'TNTIDX\x00\x00' def __init__(self, path): super().__init__() self.path = path self.data_file = None self.read_index(path) def read_index(self, path): with open(index_file_path(path), 'rb') as f: magic = f.read(8) assert magic == self._HDR_MAGIC, ( 'Index file doesn\'t match expected format. ' 'Make sure that --dataset-impl is configured properly.' 
) version = f.read(8) assert struct.unpack('<Q', version) == (1,) code, self.element_size = struct.unpack('<QQ', f.read(16)) self.dtype = dtypes[code] self._len, self.s = struct.unpack('<QQ', f.read(16)) self.doc_count = struct.unpack('<Q', f.read(8)) self.dim_offsets = read_longs(f, self._len + 1) self.data_offsets = read_longs(f, self._len + 1) self.sizes = read_longs(f, self.s) self.doc_idx = read_longs(f, self.doc_count) def read_data(self, path): self.data_file = open(data_file_path(path), 'rb', buffering=0) def check_index(self, i): if i < 0 or i >= self._len: raise IndexError('index out of range') def __del__(self): if self.data_file: self.data_file.close() # @lru_cache(maxsize=8) def __getitem__(self, idx): if not self.data_file: self.read_data(self.path) if isinstance(idx, int): i = idx self.check_index(i) tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]] a = np.empty(tensor_size, dtype=self.dtype) self.data_file.seek(self.data_offsets[i] * self.element_size) self.data_file.readinto(a) return a elif isinstance(idx, slice): start, stop, step = idx.indices(len(self)) if step != 1: raise ValueError("Slices into indexed_dataset must be contiguous") sizes = self.sizes[self.dim_offsets[start]:self.dim_offsets[stop]] size = sum(sizes) a = np.empty(size, dtype=self.dtype) self.data_file.seek(self.data_offsets[start] * self.element_size) self.data_file.readinto(a) offsets = list(accumulate(sizes)) sents = np.split(a, offsets[:-1]) return sents def __len__(self): return self._len def num_tokens(self, index): return self.sizes[index] def size(self, index): return self.sizes[index] @staticmethod def exists(path): return ( os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)) ) @property def supports_prefetch(self): return False # avoid prefetching to save memory class IndexedCachedDataset(IndexedDataset): def __init__(self, path): super().__init__(path) self.cache = None self.cache_index = {} @property def supports_prefetch(self): return True def prefetch(self, indices): if all(i in self.cache_index for i in indices): return if not self.data_file: self.read_data(self.path) indices = sorted(set(indices)) total_size = 0 for i in indices: total_size += self.data_offsets[i + 1] - self.data_offsets[i] self.cache = np.empty(total_size, dtype=self.dtype) ptx = 0 self.cache_index.clear() for i in indices: self.cache_index[i] = ptx size = self.data_offsets[i + 1] - self.data_offsets[i] a = self.cache[ptx: ptx + size] self.data_file.seek(self.data_offsets[i] * self.element_size) self.data_file.readinto(a) ptx += size if self.data_file: # close and delete data file after prefetch so we can pickle self.data_file.close() self.data_file = None # @lru_cache(maxsize=8) def __getitem__(self, idx): if isinstance(idx, int): i = idx self.check_index(i) tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]] a = np.empty(tensor_size, dtype=self.dtype) ptx = self.cache_index[i] np.copyto(a, self.cache[ptx: ptx + a.size]) return a elif isinstance(idx, slice): # Hack just to make this work, can optimizer later if necessary sents = [] for i in range(*idx.indices(len(self))): sents.append(self[i]) return sents class IndexedDatasetBuilder(object): element_sizes = { np.uint8: 1, np.int8: 1, np.int16: 2, np.int32: 4, np.int64: 8, np.float: 4, np.double: 8 } def __init__(self, out_file, dtype=np.int32): self.out_file = open(out_file, 'wb') self.dtype = dtype self.data_offsets = [0] self.dim_offsets = [0] self.sizes = [] self.element_size = 
self.element_sizes[self.dtype] self.doc_idx = [0] def add_item(self, tensor): bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype)) self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size) for s in tensor.size(): self.sizes.append(s) self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size())) def end_document(self): self.doc_idx.append(len(self.sizes)) def merge_file_(self, another_file): index = IndexedDataset(another_file) assert index.dtype == self.dtype begin = self.data_offsets[-1] for offset in index.data_offsets[1:]: self.data_offsets.append(begin + offset) self.sizes.extend(index.sizes) begin = self.dim_offsets[-1] for dim_offset in index.dim_offsets[1:]: self.dim_offsets.append(begin + dim_offset) with open(data_file_path(another_file), 'rb') as f: while True: data = f.read(1024) if data: self.out_file.write(data) else: break def finalize(self, index_file): self.out_file.close() index = open(index_file, 'wb') index.write(b'TNTIDX\x00\x00') index.write(struct.pack('<Q', 1)) index.write(struct.pack('<QQ', code(self.dtype), self.element_size)) index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes))) index.write(struct.pack('<Q', len(self.doc_idx))) write_longs(index, self.dim_offsets) write_longs(index, self.data_offsets) write_longs(index, self.sizes) write_longs(index, self.doc_idx) index.close() def _warmup_mmap_file(path): with open(path, 'rb') as stream: while stream.read(100 * 1024 * 1024): pass class MMapIndexedDataset(torch.utils.data.Dataset): class Index(object): _HDR_MAGIC = b'MMIDIDX\x00\x00' @classmethod def writer(cls, path, dtype): class _Writer(object): def __enter__(self): self._file = open(path, 'wb') self._file.write(cls._HDR_MAGIC) self._file.write(struct.pack('<Q', 1)) self._file.write(struct.pack('<B', code(dtype))) return self @staticmethod def _get_pointers(sizes): dtype_size = dtype().itemsize address = 0 pointers = [] for size in sizes: pointers.append(address) address += size * dtype_size return pointers def write(self, sizes, doc_idx): pointers = self._get_pointers(sizes) self._file.write(struct.pack('<Q', len(sizes))) self._file.write(struct.pack('<Q', len(doc_idx))) sizes = np.array(sizes, dtype=np.int32) self._file.write(sizes.tobytes(order='C')) del sizes pointers = np.array(pointers, dtype=np.int64) self._file.write(pointers.tobytes(order='C')) del pointers doc_idx = np.array(doc_idx, dtype=np.int64) self._file.write(doc_idx.tobytes(order='C')) def __exit__(self, exc_type, exc_val, exc_tb): self._file.close() return _Writer() def __init__(self, path, skip_warmup=False): with open(path, 'rb') as stream: magic_test = stream.read(9) assert self._HDR_MAGIC == magic_test, ( 'Index file doesn\'t match expected format. ' 'Make sure that --dataset-impl is configured properly.' 
) version = struct.unpack('<Q', stream.read(8)) assert (1,) == version dtype_code, = struct.unpack('<B', stream.read(1)) self._dtype = dtypes[dtype_code] self._dtype_size = self._dtype().itemsize self._len = struct.unpack('<Q', stream.read(8))[0] self._doc_count = struct.unpack('<Q', stream.read(8))[0] offset = stream.tell() if not skip_warmup: print(" warming up index mmap file...") _warmup_mmap_file(path) self._bin_buffer_mmap = np.memmap(path, mode='r', order='C') self._bin_buffer = memoryview(self._bin_buffer_mmap) print(" reading sizes...") self._sizes = np.frombuffer( self._bin_buffer, dtype=np.int32, count=self._len, offset=offset) print(" reading pointers...") self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len, offset=offset + self._sizes.nbytes) print(" reading document index...") self._doc_idx = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._doc_count, offset=offset + self._sizes.nbytes + self._pointers.nbytes) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap @property def dtype(self): return self._dtype @property def sizes(self): return self._sizes @property def doc_idx(self): return self._doc_idx @lru_cache(maxsize=8) def __getitem__(self, i): return self._pointers[i], self._sizes[i] def __len__(self): return self._len def __init__(self, path, skip_warmup=False): super().__init__() self._path = None self._index = None self._bin_buffer = None self._do_init(path, skip_warmup) def __getstate__(self): return self._path def __setstate__(self, state): self._do_init(state) def _do_init(self, path, skip_warmup): self._path = path self._index = self.Index(index_file_path(self._path), skip_warmup) if not skip_warmup: print(" warming up data mmap file...") _warmup_mmap_file(data_file_path(self._path)) print(" creating numpy buffer of mmap...") self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C') print(" creating memory view of numpy buffer...") self._bin_buffer = memoryview(self._bin_buffer_mmap) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap del self._index def __len__(self): return len(self._index) # @lru_cache(maxsize=8) def __getitem__(self, idx): if isinstance(idx, int): ptr, size = self._index[idx] np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr) return np_array elif isinstance(idx, slice): start, stop, step = idx.indices(len(self)) if step != 1: raise ValueError("Slices into indexed_dataset must be contiguous") ptr = self._index._pointers[start] sizes = self._index._sizes[idx] offsets = list(accumulate(sizes)) total_size = sum(sizes) np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr) sents = np.split(np_array, offsets[:-1]) return sents def get(self, idx, offset=0, length=None): """ Retrieves a single item from the dataset with the option to only return a portion of the item. get(idx) is the same as [idx] but get() does not support slicing. 
""" ptr, size = self._index[idx] if length is None: length = size - offset ptr += offset * np.dtype(self._index.dtype).itemsize np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr) return np_array @property def sizes(self): return self._index.sizes @property def doc_idx(self): return self._index.doc_idx def get_doc_idx(self): return self._index._doc_idx def set_doc_idx(self, doc_idx_): self._index._doc_idx = doc_idx_ @property def supports_prefetch(self): return False @staticmethod def exists(path): return ( os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)) ) class MMapIndexedDatasetBuilder(object): def __init__(self, out_file, dtype=np.int64): self._data_file = open(out_file, 'wb') self._dtype = dtype self._sizes = [] self._doc_idx = [0] def add_item(self, tensor): np_array = np.array(tensor.numpy(), dtype=self._dtype) self._data_file.write(np_array.tobytes(order='C')) self._sizes.append(np_array.size) def end_document(self): self._doc_idx.append(len(self._sizes)) def merge_file_(self, another_file): # Concatenate index index = MMapIndexedDataset.Index(index_file_path(another_file)) assert index.dtype == self._dtype for size in index.sizes: self._sizes.append(size) # Concatenate data with open(data_file_path(another_file), 'rb') as f: shutil.copyfileobj(f, self._data_file) def finalize(self, index_file): self._data_file.close() with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index: index.write(self._sizes, self._doc_idx)
import itertools import random import numpy as np from torch.utils.data import Dataset from megatron import get_tokenizer from megatron import get_args from megatron.data.dataset_utils import get_indexed_dataset_ from megatron.data.realm_dataset_utils import get_block_samples_mapping def make_attention_mask(source_block, target_block): """ Returns a 2-dimensional (2-D) attention mask :param source_block: 1-D array :param target_block: 1-D array """ mask = (target_block[None, :] >= 1) * (source_block[:, None] >= 1) mask = mask.astype(np.int64) # (source_length, target_length) return mask def get_ict_dataset(use_titles=True, query_in_block_prob=1): """Get a dataset which uses block samples mappings to get ICT/block indexing data (via get_block()) rather than for training, since it is only built with a single epoch sample mapping. """ args = get_args() block_dataset = get_indexed_dataset_(args.data_path, 'mmap', True) titles_dataset = get_indexed_dataset_(args.titles_data_path, 'mmap', True) kwargs = dict( name='full', block_dataset=block_dataset, title_dataset=titles_dataset, data_prefix=args.data_path, num_epochs=1, max_num_samples=None, max_seq_length=args.seq_length, seed=1, query_in_block_prob=query_in_block_prob, use_titles=use_titles, use_one_sent_docs=args.use_one_sent_docs ) dataset = ICTDataset(**kwargs) return dataset class ICTDataset(Dataset): """Dataset containing sentences and their blocks for an inverse cloze task.""" def __init__(self, name, block_dataset, title_dataset, data_prefix, num_epochs, max_num_samples, max_seq_length, query_in_block_prob, seed, use_titles=True, use_one_sent_docs=False, binary_head=False): self.name = name self.seed = seed self.max_seq_length = max_seq_length self.query_in_block_prob = query_in_block_prob self.block_dataset = block_dataset self.title_dataset = title_dataset self.rng = random.Random(self.seed) self.use_titles = use_titles self.use_one_sent_docs = use_one_sent_docs self.samples_mapping = get_block_samples_mapping( block_dataset, title_dataset, data_prefix, num_epochs, max_num_samples, max_seq_length, seed, name, use_one_sent_docs) self.tokenizer = get_tokenizer() self.vocab_id_list = list(self.tokenizer.inv_vocab.keys()) self.vocab_id_to_token_list = self.tokenizer.inv_vocab self.cls_id = self.tokenizer.cls self.sep_id = self.tokenizer.sep self.mask_id = self.tokenizer.mask self.pad_id = self.tokenizer.pad def __len__(self): return len(self.samples_mapping) def __getitem__(self, idx): """Get an ICT example of a pseudo-query and the block of text from which it was extracted""" sample_data = self.samples_mapping[idx] start_idx, end_idx, doc_idx, block_idx = sample_data.as_tuple() if self.use_titles: title = self.title_dataset[int(doc_idx)] title_pad_offset = 3 + len(title) else: title = None title_pad_offset = 2 block = [self.block_dataset[i] for i in range(start_idx, end_idx)] assert len(block) > 1 or self.use_one_sent_docs or self.query_in_block_prob == 1 # randint() is inclusive for Python rng rand_sent_idx = self.rng.randint(0, len(block) - 1) # keep the query in the context query_in_block_prob fraction of the time. if self.rng.random() < self.query_in_block_prob: query = block[rand_sent_idx].copy() else: query = block.pop(rand_sent_idx) # still need to truncate because blocks are concluded when # the sentence lengths have exceeded max_seq_length. 
query = query[:self.max_seq_length - 2] block = list(itertools.chain(*block))[:self.max_seq_length - title_pad_offset] query_tokens, query_pad_mask = self.concat_and_pad_tokens(query) context_tokens, context_pad_mask = self.concat_and_pad_tokens(block, title) query_mask = make_attention_mask(query_tokens, query_tokens) context_mask = make_attention_mask(context_tokens, context_tokens) block_data = sample_data.as_array() sample = { 'query_tokens': query_tokens, 'query_mask': query_mask, 'query_pad_mask': query_pad_mask, 'context_tokens': context_tokens, 'context_mask': context_mask, 'context_pad_mask': context_pad_mask, 'block_data': block_data, } return sample def get_block(self, start_idx, end_idx, doc_idx): """Get the IDs for an evidence block plus the title of the corresponding document""" block = [self.block_dataset[i] for i in range(start_idx, end_idx)] title = self.title_dataset[int(doc_idx)] block = list(itertools.chain(*block))[:self.max_seq_length - (3 + len(title))] block_tokens, block_pad_mask = self.concat_and_pad_tokens(block, title) return block_tokens, block_pad_mask def get_null_block(self): """Get empty block and title - used in REALM pretraining""" block, title = [], [] block_tokens, block_pad_mask = self.concat_and_pad_tokens(block, title) return block_tokens, block_pad_mask def concat_and_pad_tokens(self, tokens, title=None): """Concat with special tokens and pad sequence to self.max_seq_length""" tokens = list(tokens) if title is None: tokens = [self.cls_id] + tokens + [self.sep_id] else: title = list(title) tokens = [self.cls_id] + title + [self.sep_id] + tokens + [self.sep_id] assert len(tokens) <= self.max_seq_length num_pad = self.max_seq_length - len(tokens) pad_mask = [1] * len(tokens) + [0] * num_pad tokens += [self.pad_id] * num_pad return np.array(tokens), np.array(pad_mask)
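# Hedged sketch: what make_attention_mask() above computes. Positions with an
# id >= 1 count as real tokens; padding (id 0) is masked out along both axes.
# The ids below are made up for illustration.
import numpy as np


def _sketch_attention_mask():
    source = np.array([5, 6, 0])      # last source position is padding
    target = np.array([7, 0])         # last target position is padding
    mask = make_attention_mask(source, target)
    # rows index source positions, columns index target positions
    assert mask.tolist() == [[1, 0], [1, 0], [0, 0]]
    return mask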
# This file isn't really a formal automated test, it's just a place to # put some code used during development and manual testing of # indexed_dataset. from megatron.data import indexed_dataset from megatron.tokenizer import build_tokenizer import argparse import os import sys import torch script_dir = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(script_dir, "../../../")) def test_indexed_dataset(args): ds = indexed_dataset.make_dataset(args.data, args.dataset_impl) tokenizer = build_tokenizer(args) print(len(ds.doc_idx)) print(len(ds)) print(ds.doc_idx[-1]) if ds.supports_prefetch: # just prefetch the whole thing in test (so assume it is small) ds.prefetch(range(len(ds))) if args.count > len(ds.doc_idx) - 1: args.count = len(ds.doc_idx) - 1 for i in range(args.count): start = ds.doc_idx[i] end = ds.doc_idx[i + 1] ids = ds[start:end] print(f"Document {i}:") print("--------------") for s in ids: assert len(s) > 0 l = s.data.tolist() text = tokenizer.detokenize(l) print(text) print("---") def test_indexed_dataset_get(args): ds = indexed_dataset.make_dataset(args.data, args.dataset_impl) tokenizer = build_tokenizer(args) size = ds.sizes[0] print(f"size: {size}") full = ds.get(0) print(full) # print(tokenizer.detokenize(full.data.tolist())) print("---") end = ds.get(0, offset=size - 10) print(end) # print(tokenizer.detokenize(end.data.tolist())) start = ds.get(0, length=10) print(start) # print(tokenizer.detokenize(start.data.tolist())) part = ds.get(0, offset=2, length=8) print(part) # print(tokenizer.detokenize(part.data.tolist())) # def test_albert_dataset(args): # # tokenizer = FullBertTokenizer(args.vocab, do_lower_case=True) # # idataset = indexed_dataset.make_dataset(args.data, args.dataset_impl) # # ds = AlbertDataset(idataset, tokenizer) # ds = AlbertDataset.from_paths(args.vocab, args.data, args.dataset_impl, # args.epochs, args.max_num_samples, # args.masked_lm_prob, args.seq_length, # args.short_seq_prob, args.seed) # truncated = 0 # total = 0 # for i, s in enumerate(ds): # ids = s['text'] # tokens = ds.tokenizer.convert_ids_to_tokens(ids) # print(tokens) # if i >= args.count-1: # exit() def main(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, help='prefix to data files') parser.add_argument('--dataset-impl', type=str, default='infer', choices=['lazy', 'cached', 'mmap', 'infer']) parser.add_argument('--count', type=int, default=10, help='Number of samples/documents to print') group = parser.add_argument_group(title='tokenizer') group.add_argument('--tokenizer-type', type=str, required=True, choices=['BertWordPieceLowerCase', 'GPT2BPETokenizer'], help='What type of tokenizer to use.') group.add_argument('--vocab-file', type=str, default=None, help='Path to the vocab file') group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file (if necessary).') parser.add_argument('--epochs', type=int, default=5, help='Number of epochs to plan for') parser.add_argument('--max-num-samples', type=int, default=None, help='Maximum number of samples to plan for') parser.add_argument('--masked-lm-prob', type=float, default=0.15, help='probability of masking tokens') parser.add_argument('--seq-length', type=int, default=512, help='maximum sequence length') parser.add_argument('--short-seq-prob', type=float, default=0.1, help='probability of creating a short sequence') parser.add_argument('--seed', type=int, default=1234, help='random seed') args = parser.parse_args() args.rank = 0 
args.make_vocab_size_divisible_by = 128 args.tensor_model_parallel_size = 1 if args.dataset_impl == "infer": args.dataset_impl = indexed_dataset.infer_dataset_impl(args.data) # test_albert_dataset(args) test_indexed_dataset_get(args) if __name__ == "__main__": main()
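# Hedged usage note: a typical manual invocation of this helper script might
# look like the following. The script name, data path and vocab file are
# placeholders, not values taken from the original code.
#
#   python test_indexed_dataset.py \
#       --data /path/to/my-corpus_text_sentence \
#       --dataset-impl mmap \
#       --tokenizer-type BertWordPieceLowerCase \
#       --vocab-file /path/to/bert-vocab.txt \
#       --count 5
#
# --data is the basename to which .bin/.idx are appended, as explained in
# indexed_dataset.make_dataset().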
from colossalai.amp import AMP_TYPE

# hyperparameters
# BATCH_SIZE is the batch size per GPU (i.e. per data-parallel rank)
# global batch size = BATCH_SIZE x data parallel size
BATCH_SIZE = 4
LEARNING_RATE = 3e-3
WEIGHT_DECAY = 0.3
NUM_EPOCHS = 2
WARMUP_EPOCHS = 1

# model config
IMG_SIZE = 224
PATCH_SIZE = 16
HIDDEN_SIZE = 128
DEPTH = 4
NUM_HEADS = 4
MLP_RATIO = 2
NUM_CLASSES = 10
CHECKPOINT = False
SEQ_LENGTH = (IMG_SIZE // PATCH_SIZE)**2 + 1    # add 1 for cls token

# parallel setting
TENSOR_PARALLEL_SIZE = 2
TENSOR_PARALLEL_MODE = '1d'

parallel = dict(
    pipeline=2,
    tensor=dict(mode=TENSOR_PARALLEL_MODE, size=TENSOR_PARALLEL_SIZE),
)

fp16 = dict(mode=AMP_TYPE.NAIVE)
clip_grad_norm = 1.0

# pipeline config
NUM_MICRO_BATCHES = parallel['pipeline']
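# Hedged sketch: derived quantities implied by the settings above. The world
# size used here (8 GPUs) is an assumption made only for this example; the
# data-parallel size is whatever remains after tensor and pipeline parallelism.
if __name__ == '__main__':
    num_patches = (IMG_SIZE // PATCH_SIZE) ** 2           # 14 * 14 = 196
    assert SEQ_LENGTH == num_patches + 1 == 197           # +1 for the cls token
    world_size = 8                                        # assumed launch size, illustration only
    data_parallel_size = world_size // (TENSOR_PARALLEL_SIZE * parallel['pipeline'])
    print(f'global batch size = {BATCH_SIZE * data_parallel_size}')  # 4 * 2 = 8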
import os import torch from titans.model.vit.vit import _create_vit_model from tqdm import tqdm import colossalai from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.logging import get_dist_logger from colossalai.nn import CrossEntropyLoss from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR from colossalai.pipeline.pipelinable import PipelinableContext from colossalai.utils import is_using_pp class DummyDataloader(): def __init__(self, length, batch_size): self.length = length self.batch_size = batch_size def generate(self): data = torch.rand(self.batch_size, 3, 224, 224) label = torch.randint(low=0, high=10, size=(self.batch_size,)) return data, label def __iter__(self): self.step = 0 return self def __next__(self): if self.step < self.length: self.step += 1 return self.generate() else: raise StopIteration def __len__(self): return self.length def main(): # launch from torch parser = colossalai.get_default_parser() args = parser.parse_args() colossalai.launch_from_torch(config=args.config) # get logger logger = get_dist_logger() logger.info("initialized distributed environment", ranks=[0]) if hasattr(gpc.config, 'LOG_PATH'): if gpc.get_global_rank() == 0: log_path = gpc.config.LOG_PATH if not os.path.exists(log_path): os.mkdir(log_path) logger.log_to_file(log_path) use_pipeline = is_using_pp() # create model model_kwargs = dict(img_size=gpc.config.IMG_SIZE, patch_size=gpc.config.PATCH_SIZE, hidden_size=gpc.config.HIDDEN_SIZE, depth=gpc.config.DEPTH, num_heads=gpc.config.NUM_HEADS, mlp_ratio=gpc.config.MLP_RATIO, num_classes=10, init_method='jax', checkpoint=gpc.config.CHECKPOINT) if use_pipeline: pipelinable = PipelinableContext() with pipelinable: model = _create_vit_model(**model_kwargs) pipelinable.to_layer_list() pipelinable.policy = "uniform" model = pipelinable.partition(1, gpc.pipeline_parallel_size, gpc.get_local_rank(ParallelMode.PIPELINE)) else: model = _create_vit_model(**model_kwargs) # count number of parameters total_numel = 0 for p in model.parameters(): total_numel += p.numel() if not gpc.is_initialized(ParallelMode.PIPELINE): pipeline_stage = 0 else: pipeline_stage = gpc.get_local_rank(ParallelMode.PIPELINE) logger.info(f"number of parameters: {total_numel} on pipeline stage {pipeline_stage}") # use synthetic dataset # we train for 10 steps and eval for 5 steps per epoch train_dataloader = DummyDataloader(length=10, batch_size=gpc.config.BATCH_SIZE) test_dataloader = DummyDataloader(length=5, batch_size=gpc.config.BATCH_SIZE) # create loss function criterion = CrossEntropyLoss(label_smoothing=0.1) # create optimizer optimizer = torch.optim.AdamW(model.parameters(), lr=gpc.config.LEARNING_RATE, weight_decay=gpc.config.WEIGHT_DECAY) # create lr scheduler lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer, total_steps=gpc.config.NUM_EPOCHS, warmup_steps=gpc.config.WARMUP_EPOCHS) # initialize engine, train_dataloader, test_dataloader, _ = colossalai.initialize(model=model, optimizer=optimizer, criterion=criterion, train_dataloader=train_dataloader, test_dataloader=test_dataloader) logger.info("Engine is built", ranks=[0]) for epoch in range(gpc.config.NUM_EPOCHS): # training engine.train() data_iter = iter(train_dataloader) if gpc.get_global_rank() == 0: description = 'Epoch {} / {}'.format(epoch, gpc.config.NUM_EPOCHS) progress = tqdm(range(len(train_dataloader)), desc=description) else: progress = range(len(train_dataloader)) for _ in progress: engine.zero_grad() engine.execute_schedule(data_iter, 
return_output_label=False) engine.step() lr_scheduler.step() gpc.destroy() if __name__ == '__main__': main()
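# Hedged usage note: this script calls colossalai.launch_from_torch, so it is
# meant to be started with the PyTorch distributed launcher. With the 2-way
# tensor x 2-stage pipeline config above it needs at least 4 GPUs. The file
# names below are placeholders:
#
#   torchrun --nproc_per_node 4 train.py --config config.py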
import argparse import logging import random from typing import Optional import uvicorn from energonai import QueueFullError, launch_engine from energonai.model import opt_6B, opt_30B, opt_125M, opt_175B from fastapi import FastAPI, HTTPException, Request from pydantic import BaseModel, Field from transformers import GPT2Tokenizer from batch import BatchManagerForGeneration from cache import ListCache, MissCacheError class GenerationTaskReq(BaseModel): max_tokens: int = Field(gt=0, le=256, example=64) prompt: str = Field( min_length=1, example='Question: Where were the 2004 Olympics held?\nAnswer: Athens, Greece\n\nQuestion: What is the longest river on the earth?\nAnswer:') top_k: Optional[int] = Field(default=None, gt=0, example=50) top_p: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.5) temperature: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.7) app = FastAPI() @app.post('/generation') async def generate(data: GenerationTaskReq, request: Request): logger.info(f'{request.client.host}:{request.client.port} - "{request.method} {request.url.path}" - {data}') key = (data.prompt, data.max_tokens) try: if cache is None: raise MissCacheError() outputs = cache.get(key) output = random.choice(outputs) logger.info('Cache hit') except MissCacheError: inputs = tokenizer(data.prompt, truncation=True, max_length=512) inputs['max_tokens'] = data.max_tokens inputs['top_k'] = data.top_k inputs['top_p'] = data.top_p inputs['temperature'] = data.temperature try: uid = id(data) engine.submit(uid, inputs) output = await engine.wait(uid) output = tokenizer.decode(output, skip_special_tokens=True) if cache is not None: cache.add(key, output) except QueueFullError as e: raise HTTPException(status_code=406, detail=e.args[0]) return {'text': output} @app.on_event("shutdown") async def shutdown(*_): engine.shutdown() server.should_exit = True server.force_exit = True await server.shutdown() def get_model_fn(model_name: str): model_map = { 'opt-125m': opt_125M, 'opt-6.7b': opt_6B, 'opt-30b': opt_30B, 'opt-175b': opt_175B } return model_map[model_name] def print_args(args: argparse.Namespace): print('\n==> Args:') for k, v in args.__dict__.items(): print(f'{k} = {v}') FIXED_CACHE_KEYS = [ ('Question: What is the name of the largest continent on earth?\nAnswer: Asia\n\nQuestion: What is at the center of the solar system?\nAnswer:', 64), ('A chat between a salesman and a student.\n\nSalesman: Hi boy, are you looking for a new phone?\nStudent: Yes, my phone is not functioning well.\nSalesman: What is your budget? 
\nStudent: I have received my scholarship so I am fine with any phone.\nSalesman: Great, then perhaps this latest flagship phone is just right for you.', 64), ("English: I am happy today.\nChinese: 我今天很开心。\n\nEnglish: I am going to play basketball.\nChinese: 我一会去打篮球。\n\nEnglish: Let's celebrate our anniversary.\nChinese:", 64) ] if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('model', choices=['opt-125m', 'opt-6.7b', 'opt-30b', 'opt-175b']) parser.add_argument('--tp', type=int, default=1) parser.add_argument('--master_host', default='localhost') parser.add_argument('--master_port', type=int, default=19990) parser.add_argument('--rpc_port', type=int, default=19980) parser.add_argument('--max_batch_size', type=int, default=8) parser.add_argument('--pipe_size', type=int, default=1) parser.add_argument('--queue_size', type=int, default=0) parser.add_argument('--http_host', default='0.0.0.0') parser.add_argument('--http_port', type=int, default=7070) parser.add_argument('--checkpoint', default=None) parser.add_argument('--cache_size', type=int, default=0) parser.add_argument('--cache_list_size', type=int, default=1) args = parser.parse_args() print_args(args) model_kwargs = {} if args.checkpoint is not None: model_kwargs['checkpoint'] = args.checkpoint logger = logging.getLogger(__name__) tokenizer = GPT2Tokenizer.from_pretrained('facebook/opt-30b') if args.cache_size > 0: cache = ListCache(args.cache_size, args.cache_list_size, fixed_keys=FIXED_CACHE_KEYS) else: cache = None engine = launch_engine(args.tp, 1, args.master_host, args.master_port, args.rpc_port, get_model_fn(args.model), batch_manager=BatchManagerForGeneration(max_batch_size=args.max_batch_size, pad_token_id=tokenizer.pad_token_id), pipe_size=args.pipe_size, queue_size=args.queue_size, **model_kwargs) config = uvicorn.Config(app, host=args.http_host, port=args.http_port) server = uvicorn.Server(config=config) server.run()
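# Hedged client-side sketch for the FastAPI generation service above. The endpoint
# path, default port and request fields come from the server code; the host name,
# prompt and sampling values are assumptions.
import requests

resp = requests.post(
    'http://localhost:7070/generation',
    json={
        'prompt': 'Question: What is the longest river on the earth?\nAnswer:',
        'max_tokens': 64,
        'top_k': 50,
        'top_p': 0.5,
        'temperature': 0.7,
    },
    timeout=300,
)
resp.raise_for_status()
print(resp.json()['text'])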
import torch
from typing import List, Deque, Tuple, Hashable, Any
from energonai import BatchManager, SubmitEntry, TaskEntry


class BatchManagerForGeneration(BatchManager):

    def __init__(self, max_batch_size: int = 1, pad_token_id: int = 0) -> None:
        super().__init__()
        self.max_batch_size = max_batch_size
        self.pad_token_id = pad_token_id

    def _left_padding(self, batch_inputs):
        max_len = max(len(inputs['input_ids']) for inputs in batch_inputs)
        outputs = {'input_ids': [], 'attention_mask': []}
        for inputs in batch_inputs:
            input_ids, attention_mask = inputs['input_ids'], inputs['attention_mask']
            padding_len = max_len - len(input_ids)
            input_ids = [self.pad_token_id] * padding_len + input_ids
            attention_mask = [0] * padding_len + attention_mask
            outputs['input_ids'].append(input_ids)
            outputs['attention_mask'].append(attention_mask)
        for k in outputs:
            outputs[k] = torch.tensor(outputs[k])
        return outputs, max_len

    @staticmethod
    def _make_batch_key(entry: SubmitEntry) -> tuple:
        data = entry.data
        return (data['top_k'], data['top_p'], data['temperature'])

    def make_batch(self, q: Deque[SubmitEntry]) -> Tuple[TaskEntry, dict]:
        entry = q.popleft()
        uids = [entry.uid]
        batch = [entry.data]
        while len(batch) < self.max_batch_size:
            if len(q) == 0:
                break
            if self._make_batch_key(entry) != self._make_batch_key(q[0]):
                break
            if q[0].data['max_tokens'] > entry.data['max_tokens']:
                break
            e = q.popleft()
            batch.append(e.data)
            uids.append(e.uid)
        inputs, max_len = self._left_padding(batch)
        trunc_lens = []
        for data in batch:
            trunc_lens.append(max_len + data['max_tokens'])
        inputs['top_k'] = entry.data['top_k']
        inputs['top_p'] = entry.data['top_p']
        inputs['temperature'] = entry.data['temperature']
        inputs['max_tokens'] = max_len + entry.data['max_tokens']
        return TaskEntry(tuple(uids), inputs), {'trunc_lens': trunc_lens}

    def split_batch(self, task_entry: TaskEntry, trunc_lens: List[int] = []) -> List[Tuple[Hashable, Any]]:
        retval = []
        for uid, output, trunc_len in zip(task_entry.uids, task_entry.batch, trunc_lens):
            retval.append((uid, output[:trunc_len]))
        return retval
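# Minimal standalone sketch of the left-padding convention used by
# BatchManagerForGeneration above: shorter prompts are padded on the left so that
# generation starts from the same position for every sequence in the batch.
# The token ids below are made up for illustration.
import torch

pad_token_id = 1
batch_inputs = [
    {'input_ids': [5, 6, 7], 'attention_mask': [1, 1, 1]},
    {'input_ids': [8, 9], 'attention_mask': [1, 1]},
]
max_len = max(len(x['input_ids']) for x in batch_inputs)
input_ids, attention_mask = [], []
for x in batch_inputs:
    pad = max_len - len(x['input_ids'])
    input_ids.append([pad_token_id] * pad + x['input_ids'])
    attention_mask.append([0] * pad + x['attention_mask'])
print(torch.tensor(input_ids))        # tensor([[5, 6, 7], [1, 8, 9]])
print(torch.tensor(attention_mask))   # tensor([[1, 1, 1], [0, 1, 1]])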
from collections import OrderedDict
from threading import Lock
from contextlib import contextmanager
from typing import List, Any, Hashable, Dict


class MissCacheError(Exception):
    pass


class ListCache:

    def __init__(self, cache_size: int, list_size: int, fixed_keys: List[Hashable] = []) -> None:
        """Cache a list of values under each key. Fixed keys are never evicted; other keys follow an LRU policy.
        A lookup misses until the value list for a key is full, after which it hits. Duplicate values are not added.

        Args:
            cache_size (int): Max size for LRU cache.
            list_size (int): Value list size.
            fixed_keys (List[Hashable], optional): The keys which won't be removed. Defaults to [].
        """
        self.cache_size = cache_size
        self.list_size = list_size
        self.cache: OrderedDict[Hashable, List[Any]] = OrderedDict()
        self.fixed_cache: Dict[Hashable, List[Any]] = {}
        for key in fixed_keys:
            self.fixed_cache[key] = []
        self._lock = Lock()

    def get(self, key: Hashable) -> List[Any]:
        with self.lock():
            if key in self.fixed_cache:
                l = self.fixed_cache[key]
                if len(l) >= self.list_size:
                    return l
            elif key in self.cache:
                self.cache.move_to_end(key)
                l = self.cache[key]
                if len(l) >= self.list_size:
                    return l
        raise MissCacheError()

    def add(self, key: Hashable, value: Any) -> None:
        with self.lock():
            if key in self.fixed_cache:
                l = self.fixed_cache[key]
                if len(l) < self.list_size and value not in l:
                    l.append(value)
            elif key in self.cache:
                self.cache.move_to_end(key)
                l = self.cache[key]
                if len(l) < self.list_size and value not in l:
                    l.append(value)
            else:
                if len(self.cache) >= self.cache_size:
                    self.cache.popitem(last=False)
                self.cache[key] = [value]

    @contextmanager
    def lock(self):
        try:
            self._lock.acquire()
            yield
        finally:
            self._lock.release()
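# Hedged usage sketch for ListCache above: a lookup raises MissCacheError until
# `list_size` distinct values have been stored under a key. The module name `cache`
# matches the import used by the serving scripts; the key and values are illustrative.
from cache import ListCache, MissCacheError

cache = ListCache(cache_size=2, list_size=2)
key = ('some prompt', 64)
try:
    cache.get(key)
except MissCacheError:
    cache.add(key, 'first completion')
    cache.add(key, 'second completion')
print(cache.get(key))   # ['first completion', 'second completion']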
import logging import argparse import random from torch import Tensor from pydantic import BaseModel, Field from typing import Optional from energonai.model import opt_125M, opt_30B, opt_175B, opt_6B from transformers import GPT2Tokenizer from energonai import launch_engine, QueueFullError from sanic import Sanic from sanic.request import Request from sanic.response import json from sanic_ext import validate, openapi from batch import BatchManagerForGeneration from cache import ListCache, MissCacheError class GenerationTaskReq(BaseModel): max_tokens: int = Field(gt=0, le=256, example=64) prompt: str = Field( min_length=1, example='Question: Where were the 2004 Olympics held?\nAnswer: Athens, Greece\n\nQuestion: What is the longest river on the earth?\nAnswer:') top_k: Optional[int] = Field(default=None, gt=0, example=50) top_p: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.5) temperature: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.7) app = Sanic('opt') @app.post('/generation') @openapi.body(GenerationTaskReq) @validate(json=GenerationTaskReq) async def generate(request: Request, body: GenerationTaskReq): logger.info(f'{request.ip}:{request.port} - "{request.method} {request.path}" - {body}') key = (body.prompt, body.max_tokens) try: if cache is None: raise MissCacheError() outputs = cache.get(key) output = random.choice(outputs) logger.info('Cache hit') except MissCacheError: inputs = tokenizer(body.prompt, truncation=True, max_length=512) inputs['max_tokens'] = body.max_tokens inputs['top_k'] = body.top_k inputs['top_p'] = body.top_p inputs['temperature'] = body.temperature try: uid = id(body) engine.submit(uid, inputs) output = await engine.wait(uid) assert isinstance(output, Tensor) output = tokenizer.decode(output, skip_special_tokens=True) if cache is not None: cache.add(key, output) except QueueFullError as e: return json({'detail': e.args[0]}, status=406) return json({'text': output}) @app.after_server_stop def shutdown(*_): engine.shutdown() def get_model_fn(model_name: str): model_map = { 'opt-125m': opt_125M, 'opt-6.7b': opt_6B, 'opt-30b': opt_30B, 'opt-175b': opt_175B } return model_map[model_name] def print_args(args: argparse.Namespace): print('\n==> Args:') for k, v in args.__dict__.items(): print(f'{k} = {v}') FIXED_CACHE_KEYS = [ ('Question: What is the name of the largest continent on earth?\nAnswer: Asia\n\nQuestion: What is at the center of the solar system?\nAnswer:', 64), ('A chat between a salesman and a student.\n\nSalesman: Hi boy, are you looking for a new phone?\nStudent: Yes, my phone is not functioning well.\nSalesman: What is your budget? 
\nStudent: I have received my scholarship so I am fine with any phone.\nSalesman: Great, then perhaps this latest flagship phone is just right for you.', 64), ("English: I am happy today.\nChinese: 我今天很开心。\n\nEnglish: I am going to play basketball.\nChinese: 我一会去打篮球。\n\nEnglish: Let's celebrate our anniversary.\nChinese:", 64) ] if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('model', choices=['opt-125m', 'opt-6.7b', 'opt-30b', 'opt-175b']) parser.add_argument('--tp', type=int, default=1) parser.add_argument('--master_host', default='localhost') parser.add_argument('--master_port', type=int, default=19990) parser.add_argument('--rpc_port', type=int, default=19980) parser.add_argument('--max_batch_size', type=int, default=8) parser.add_argument('--pipe_size', type=int, default=1) parser.add_argument('--queue_size', type=int, default=0) parser.add_argument('--http_host', default='0.0.0.0') parser.add_argument('--http_port', type=int, default=7070) parser.add_argument('--checkpoint', default=None) parser.add_argument('--cache_size', type=int, default=0) parser.add_argument('--cache_list_size', type=int, default=1) args = parser.parse_args() print_args(args) model_kwargs = {} if args.checkpoint is not None: model_kwargs['checkpoint'] = args.checkpoint logger = logging.getLogger(__name__) tokenizer = GPT2Tokenizer.from_pretrained('facebook/opt-30b') if args.cache_size > 0: cache = ListCache(args.cache_size, args.cache_list_size, fixed_keys=FIXED_CACHE_KEYS) else: cache = None engine = launch_engine(args.tp, 1, args.master_host, args.master_port, args.rpc_port, get_model_fn(args.model), batch_manager=BatchManagerForGeneration(max_batch_size=args.max_batch_size, pad_token_id=tokenizer.pad_token_id), pipe_size=args.pipe_size, queue_size=args.queue_size, **model_kwargs) app.run(args.http_host, args.http_port)
from locust import HttpUser, task
from json import JSONDecodeError


class GenerationUser(HttpUser):

    @task
    def generate(self):
        prompt = 'Question: What is the longest river on the earth? Answer:'
        for i in range(4, 9):
            data = {'max_tokens': 2**i, 'prompt': prompt}
            with self.client.post('/generation', json=data, catch_response=True) as response:
                if response.status_code in (200, 406):
                    response.success()
                else:
                    response.failure('Response wrong')
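# Hedged smoke-test sketch mirroring the locust task above with plain requests:
# sweep max_tokens over the same powers of two and treat 200 (served) and 406
# (queue full) as expected outcomes. Host and port are assumptions.
import requests

prompt = 'Question: What is the longest river on the earth? Answer:'
for i in range(4, 9):
    data = {'max_tokens': 2**i, 'prompt': prompt}
    resp = requests.post('http://localhost:7070/generation', json=data, timeout=300)
    assert resp.status_code in (200, 406), f'unexpected status {resp.status_code}'
    print(2**i, resp.status_code)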
import os
import torch
from multiprocessing import Pool

# Download the PyTorch model checkpoint from https://huggingface.co/facebook/opt-66b/tree/main
# (you can use either wget or git lfs).
path = "/path/to/your/ckpt"
new_path = "/path/to/the/processed/ckpt/"

assert os.path.isdir(path)
files = []
for filename in os.listdir(path):
    filepath = os.path.join(path, filename)
    if os.path.isfile(filepath):
        files.append(filepath)

# load all shard files in parallel
with Pool(14) as pool:
    ckpts = pool.map(torch.load, files)

# merge the shards into a single state dict and rename keys to the expected layout
restored = {}
for ckpt in ckpts:
    for k, v in ckpt.items():
        if k[0] == 'm':
            k = k[6:]    # drop a leading "model." prefix (6 characters)
        if k == "lm_head.weight":
            k = "head.dense.weight"
        if k == "decoder.final_layer_norm.weight":
            k = "decoder.layer_norm.weight"
        if k == "decoder.final_layer_norm.bias":
            k = "decoder.layer_norm.bias"
        restored[k] = v
restored["decoder.version"] = "0.0"

# re-split the merged state dict into roughly 60 files
split_num = len(restored.keys()) // 60
count = 0
file_count = 1
tmp = {}
for k, v in restored.items():
    print(k)
    tmp[k] = v
    count = count + 1
    if count == split_num:
        filename = str(file_count) + "-restored.pt"
        torch.save(tmp, os.path.join(new_path, filename))
        file_count = file_count + 1
        count = 0
        tmp = {}

# save the remaining keys as the last shard
filename = str(file_count) + "-restored.pt"
torch.save(tmp, os.path.join(new_path, filename))
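# Hedged verification sketch for the shard-splitting script above: reload the
# "*-restored.pt" files it wrote and check that merging them reproduces the renamed
# keys. The directory path is an assumption.
import os
import torch

new_path = "/path/to/the/processed/ckpt/"
merged = {}
for filename in sorted(os.listdir(new_path)):
    if filename.endswith("-restored.pt"):
        merged.update(torch.load(os.path.join(new_path, filename), map_location="cpu"))
print(len(merged), "tensors;", "decoder.version" in merged)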
import argparse
import json
import os
import re
from collections import defaultdict

import numpy as np
import torch


def load_json(path: str):
    with open(path) as f:
        return json.load(f)


def parse_shape_info(flat_dir: str):
    data = load_json(os.path.join(flat_dir, 'shape.json'))
    flat_info = defaultdict(lambda: defaultdict(list))
    for k, shape in data.items():
        matched = re.match(r'decoder.layers.\d+', k)
        if matched is None:
            flat_key = 'flat_param_0'
        else:
            flat_key = f'{matched[0]}.flat_param_0'
        flat_info[flat_key]['names'].append(k)
        flat_info[flat_key]['shapes'].append(shape)
        flat_info[flat_key]['numels'].append(int(np.prod(shape)))
    return flat_info


def convert(flat_dir: str, output_dir: str, part: int):
    flat_path = os.path.join(flat_dir, f'reshard-model_part-{part}-shard0.pt')
    output_path = os.path.join(output_dir, f'reshard-model_part-{part}.pt')
    flat_meta = load_json(os.path.join(flat_dir, 'flat-meta.json'))
    flat_sd = torch.load(flat_path)
    print(f'Loaded flat state dict from {flat_path}')
    output_sd = {}
    for flat_key, param_meta in flat_meta.items():
        flat_param = flat_sd['model'][flat_key]
        assert sum(param_meta['numels']) == flat_param.numel(), \
            f'flat {flat_key} {flat_param.numel()} vs {sum(param_meta["numels"])}'
        for name, shape, param in zip(param_meta['names'], param_meta['shapes'],
                                      flat_param.split(param_meta['numels'])):
            output_sd[name] = param.view(shape)
    torch.save(output_sd, output_path)
    print(f'Saved unflat state dict to {output_path}')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('flat_dir')
    parser.add_argument('output_dir')
    parser.add_argument('part', type=int)
    args = parser.parse_args()
    convert(args.flat_dir, args.output_dir, args.part)
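# Standalone sketch of the core unflatten step used in convert() above: a flat 1-D
# tensor is split by per-parameter numels and each chunk is viewed back to its
# recorded shape. The shapes here are made up for illustration.
import numpy as np
import torch

shapes = [(2, 3), (4,)]
numels = [int(np.prod(s)) for s in shapes]
flat_param = torch.arange(sum(numels), dtype=torch.float32)
params = [chunk.view(shape) for chunk, shape in zip(flat_param.split(numels), shapes)]
print([p.shape for p in params])   # [torch.Size([2, 3]), torch.Size([4])]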
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset without using HuggingFace Trainer. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import math import os import time from itertools import chain import datasets import torch import torch.distributed as dist from accelerate.utils import set_seed from context import barrier_context from datasets import load_dataset from packaging import version from torch.utils.data import DataLoader from tqdm.auto import tqdm import colossalai import transformers from colossalai.context import ParallelMode from colossalai.core import global_context as gpc from colossalai.logging import disable_existing_loggers, get_dist_logger from colossalai.nn.optimizer import HybridAdam from colossalai.nn.optimizer.zero_optimizer import ZeroOptimizer from colossalai.nn.parallel import ZeroDDP from colossalai.tensor import ProcessGroup from colossalai.utils import get_current_device, get_dataloader from colossalai.utils.model.colo_init_context import ColoInitContext from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoTokenizer, GPT2Tokenizer, OPTForCausalLM, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils.versions import require_version require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def get_time_stamp(): torch.cuda.synchronize() return time.time() def parse_args(): parser = colossalai.get_default_parser() parser.add_argument("-s", "--synthetic", action="store_true") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument("--train_file", type=str, default=None, help="A csv or a json file containing the training data.") parser.add_argument("--validation_file", type=str, default=None, help="A csv or a json file containing the validation data.") parser.add_argument( "--validation_split_percentage", default=5, help="The percentage of the train set used as validation set in case there's no validation split", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) 
parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument("--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler.") parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument( "--block_size", type=int, default=None, help=("Optional input sequence length after tokenization. The training dataset will be truncated in block of" " this size for training. Default to the model max input length for single sentence inputs (take into" " account special tokens)."), ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument("--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets") parser.add_argument("--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`.") parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=('The integration to report the results and logs to. 
Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed."), ) parser.add_argument("--mem_cap", type=int, default=0, help="use mem cap") parser.add_argument("--init_in_cpu", action='store_true', default=False, help="init training model in cpu") args = parser.parse_args() # Sanity checks if not args.synthetic: if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args def colo_memory_cap(size_in_GB): from colossalai.utils import colo_device_memory_capacity, colo_set_process_memory_fraction, get_current_device cuda_capacity = colo_device_memory_capacity(get_current_device()) if size_in_GB * (1024**3) < cuda_capacity: colo_set_process_memory_fraction(size_in_GB * (1024**3) / cuda_capacity) print("Using {} GB of GPU memory".format(size_in_GB)) class DummyDataloader: def __init__(self, length, batch_size, seq_len, vocab_size): self.length = length self.batch_size = batch_size self.seq_len = seq_len self.vocab_size = vocab_size def generate(self): input_ids = torch.randint(0, self.vocab_size, (self.batch_size, self.seq_len), device=get_current_device()) attention_mask = torch.ones_like(input_ids) return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": input_ids} def __iter__(self): self.step = 0 return self def __next__(self): if self.step < self.length: self.step += 1 return self.generate() else: raise StopIteration def __len__(self): return self.length def main(): args = parse_args() disable_existing_loggers() colossalai.launch_from_torch(config=dict()) logger = get_dist_logger() is_main_process = dist.get_rank() == 0 if is_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() if args.mem_cap > 0: colo_memory_cap(args.mem_cap) # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) logger.info(f"Rank {dist.get_rank()}: random seed is set to {args.seed}") # Handle the repository creation with barrier_context(): if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. 
logger.info("Start preparing dataset", ranks=[0]) if not args.synthetic: if args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[:{args.validation_split_percentage}%]", ) raw_datasets["train"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[{args.validation_split_percentage}%:]", ) else: data_files = {} dataset_args = {} if args.train_file is not None: data_files["train"] = args.train_file if args.validation_file is not None: data_files["validation"] = args.validation_file extension = args.train_file.split(".")[-1] if extension == "txt": extension = "text" dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args) # If no validation data is there, validation_split_percentage will be used to divide the dataset. if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{args.validation_split_percentage}%]", **dataset_args, ) raw_datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{args.validation_split_percentage}%:]", **dataset_args, ) logger.info("Dataset is prepared", ranks=[0]) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
if args.config_name: config = AutoConfig.from_pretrained(args.config_name) elif args.model_name_or_path: config = AutoConfig.from_pretrained(args.model_name_or_path) else: config = CONFIG_MAPPING[args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") logger.info("Model config has been created", ranks=[0]) if args.model_name_or_path == 'facebook/opt-13b': tokenizer = GPT2Tokenizer.from_pretrained(args.model_name_or_path) else: print(f'load model from {args.model_name_or_path}') tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer) logger.info(f"{tokenizer.__class__.__name__} has been created", ranks=[0]) if args.init_in_cpu: init_dev = torch.device('cpu') else: init_dev = get_current_device() # build model if args.model_name_or_path is None or args.model_name_or_path == 'facebook/opt-13b': # currently, there has a bug in pretrained opt-13b # we can not import it until huggingface fix it logger.info("Train a new model from scratch", ranks=[0]) with ColoInitContext(device=init_dev): model = OPTForCausalLM(config) else: logger.info("Finetune a pre-trained model", ranks=[0]) with ColoInitContext(device=init_dev): model = OPTForCausalLM.from_pretrained(args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, local_files_only=False) # enable graident checkpointing model.gradient_checkpointing_enable() PLACEMENT_POLICY = 'auto' cai_version = colossalai.__version__ logger.info(f'using Colossal-AI version {cai_version}') if version.parse(cai_version) > version.parse("0.1.10"): from colossalai.nn.parallel import GeminiDDP model = GeminiDDP(model, device=get_current_device(), placement_policy=PLACEMENT_POLICY, pin_memory=True) elif version.parse(cai_version) <= version.parse("0.1.10") and version.parse(cai_version) >= version.parse("0.1.9"): from colossalai.gemini import ChunkManager, GeminiManager pg = ProcessGroup() chunk_size = ChunkManager.search_chunk_size(model, 64 * 1024**2, 32) chunk_manager = ChunkManager(chunk_size, pg, enable_distributed_storage=True, init_device=GeminiManager.get_default_device(PLACEMENT_POLICY)) gemini_manager = GeminiManager(PLACEMENT_POLICY, chunk_manager) model = ZeroDDP(model, gemini_manager) logger.info(f'{model.__class__.__name__} has been created', ranks=[0]) if not args.synthetic: # Preprocessing the datasets. # First we tokenize all the texts. column_names = raw_datasets["train"].column_names text_column_name = "text" if "text" in column_names else column_names[0] def tokenize_function(examples): return tokenizer(examples[text_column_name]) with barrier_context(executor_rank=0, parallel_mode=ParallelMode.DATA): tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on dataset", ) if args.block_size is None: block_size = tokenizer.model_max_length if block_size > 1024: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " "Picking 1024 instead. You can change that default value by passing --block_size xxx.") block_size = 1024 else: if args.block_size > tokenizer.model_max_length: logger.warning(f"The block_size passed ({args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). 
Using block_size={tokenizer.model_max_length}.") block_size = min(args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i:i + block_size] for i in range(0, total_length, block_size) ] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result if not args.synthetic: # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. # # To speed up this part, we use multiprocessing. See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map with barrier_context(executor_rank=0, parallel_mode=ParallelMode.DATA): lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=args.preprocessing_num_workers, load_from_cache_file=not args.overwrite_cache, desc=f"Grouping texts in chunks of {block_size}", ) train_dataset = lm_datasets["train"] eval_dataset = lm_datasets["validation"] # Log a few random samples from the training set: # for index in random.sample(range(len(train_dataset)), 3): # logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # DataLoaders creation: train_dataloader = get_dataloader(train_dataset, shuffle=True, add_sampler=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size) eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size) else: train_dataloader = DummyDataloader(30, args.per_device_train_batch_size, config.max_position_embeddings, config.vocab_size) eval_dataloader = DummyDataloader(10, args.per_device_train_batch_size, config.max_position_embeddings, config.vocab_size) logger.info("Dataloaders have been created", ranks=[0]) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = HybridAdam(optimizer_grouped_parameters, lr=args.learning_rate) optimizer = ZeroOptimizer(optimizer, model, initial_scale=2**14) # Scheduler and math around the number of training steps. 
overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Train! total_batch_size = args.per_device_train_batch_size * gpc.get_world_size(ParallelMode.DATA) num_train_samples = len(train_dataset) if not args.synthetic else 30 * total_batch_size num_eval_samples = len(eval_dataset) if not args.synthetic else 10 * total_batch_size logger.info("***** Running training *****", ranks=[0]) logger.info(f" Num examples = {num_train_samples}", ranks=[0]) logger.info(f" Num Epochs = {args.num_train_epochs}", ranks=[0]) logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}", ranks=[0]) logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}", ranks=[0]) logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}", ranks=[0]) logger.info(f" Total optimization steps = {args.max_train_steps}", ranks=[0]) # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not is_main_process) completed_steps = 0 starting_epoch = 0 global_step = 0 for epoch in range(starting_epoch, args.num_train_epochs): if completed_steps >= args.max_train_steps: break model.train() for step, batch in enumerate(train_dataloader): batch = {k: v.cuda() for k, v in batch.items()} outputs = model(use_cache=False, **batch) loss = outputs['loss'] optimizer.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 global_step += 1 logger.info("Global step {} finished".format(global_step + 1), ranks=[0]) if completed_steps >= args.max_train_steps: break model.eval() losses = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): batch = {k: v.cuda() for k, v in batch.items()} outputs = model(**batch) loss = outputs['loss'].unsqueeze(0) losses.append(loss) losses = torch.cat(losses) losses = losses[:num_eval_samples] try: eval_loss = torch.mean(losses) perplexity = math.exp(eval_loss) except OverflowError: perplexity = float("inf") logger.info(f"Epoch {epoch}: perplexity: {perplexity} eval_loss: {eval_loss}", ranks=[0]) if args.output_dir is not None: model_state = model.state_dict() if is_main_process: torch.save(model_state, args.output_dir + '/epoch_{}_model.pth'.format(completed_steps)) dist.barrier() # load_state = torch.load(args.output_dir + '/epoch_{}_model.pth'.format(completed_steps)) # model.load_state_dict(load_state, strict=False) logger.info("Training finished", ranks=[0]) if __name__ == "__main__": main()
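# Toy illustration mirroring the group_texts() preprocessing in the fine-tuning
# script above: tokenized examples are concatenated, the remainder is dropped, the
# stream is re-chunked into fixed blocks, and labels are a copy of input_ids.
# The block size and token ids are made up.
from itertools import chain

examples = {'input_ids': [[1, 2, 3], [4, 5], [6, 7, 8, 9]]}
block_size = 4
concatenated = {k: list(chain(*v)) for k, v in examples.items()}
total_length = (len(concatenated['input_ids']) // block_size) * block_size
result = {
    k: [t[i:i + block_size] for i in range(0, total_length, block_size)]
    for k, t in concatenated.items()
}
result['labels'] = result['input_ids'].copy()
print(result['input_ids'])   # [[1, 2, 3, 4], [5, 6, 7, 8]]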
from colossalai.zero.shard_utils import TensorShardStrategy

zero = dict(model_config=dict(shard_strategy=TensorShardStrategy(),
                              tensor_placement_policy="auto",
                              reuse_fp16_shard=True),
            optimizer_config=dict(gpu_margin_mem_ratio=0.8, initial_scale=16384))
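# Hedged sketch of how a ZeRO config file like the one above is consumed: it is
# passed as the `config` argument of colossalai.launch_from_torch and then read back
# through gpc.config. The file name './zero_config.py' is an assumption.
import colossalai
from colossalai.core import global_context as gpc

colossalai.launch_from_torch(config='./zero_config.py')
print(gpc.config.zero)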
import torch.distributed as dist

from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc


class barrier_context():
    """
    This context manager is used to allow one process to execute while blocking all
    other processes in the same process group. This is often useful when downloading is required
    as we only want to download in one process to prevent file corruption.

    Args:
        executor_rank (int): the process rank to execute without blocking, all other processes will be blocked
        parallel_mode (ParallelMode): the parallel mode corresponding to a process group

    Usage:
        with barrier_context():
            dataset = CIFAR10(root='./data', download=True)
    """

    def __init__(self, executor_rank: int = 0, parallel_mode: ParallelMode = ParallelMode.GLOBAL):
        # the class name is lowercase by convention
        current_rank = gpc.get_local_rank(parallel_mode=parallel_mode)
        self.should_block = current_rank != executor_rank
        self.group = gpc.get_group(parallel_mode=parallel_mode)

    def __enter__(self):
        if self.should_block:
            dist.barrier(group=self.group)

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if not self.should_block:
            dist.barrier(group=self.group)
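# Usage sketch for barrier_context above, following its own docstring: only the
# executor rank downloads the dataset while the rest of the process group waits,
# avoiding corrupted downloads. The dataset and path are illustrative; the module
# name `context` matches the import used by the fine-tuning script above.
from torchvision.datasets import CIFAR10
from context import barrier_context

with barrier_context(executor_rank=0):
    dataset = CIFAR10(root='./data', download=True)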
from colossalai.amp import AMP_TYPE

# hyperparameters
# BATCH_SIZE is as per GPU
# global batch size = BATCH_SIZE x data parallel size
BATCH_SIZE = 512
LEARNING_RATE = 3e-3
WEIGHT_DECAY = 0.3
NUM_EPOCHS = 2
WARMUP_EPOCHS = 1

# model config
NUM_CLASSES = 10

fp16 = dict(mode=AMP_TYPE.NAIVE)
clip_grad_norm = 1.0
import torch
import torch.nn as nn
from torchvision.models import resnet18
from tqdm import tqdm

import colossalai
from colossalai.core import global_context as gpc
from colossalai.logging import get_dist_logger
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
from colossalai.nn.optimizer import Lamb, Lars


class DummyDataloader():

    def __init__(self, length, batch_size):
        self.length = length
        self.batch_size = batch_size

    def generate(self):
        data = torch.rand(self.batch_size, 3, 224, 224)
        label = torch.randint(low=0, high=10, size=(self.batch_size,))
        return data, label

    def __iter__(self):
        self.step = 0
        return self

    def __next__(self):
        if self.step < self.length:
            self.step += 1
            return self.generate()
        else:
            raise StopIteration

    def __len__(self):
        return self.length


def main():
    # initialize distributed setting
    parser = colossalai.get_default_parser()
    parser.add_argument('--optimizer',
                        choices=['lars', 'lamb'],
                        help="Choose your large-batch optimizer",
                        required=True)
    args = parser.parse_args()

    # launch from torch
    colossalai.launch_from_torch(config=args.config)

    # get logger
    logger = get_dist_logger()
    logger.info("initialized distributed environment", ranks=[0])

    # create synthetic dataloaders
    train_dataloader = DummyDataloader(length=10, batch_size=gpc.config.BATCH_SIZE)
    test_dataloader = DummyDataloader(length=5, batch_size=gpc.config.BATCH_SIZE)

    # build model
    model = resnet18(num_classes=gpc.config.NUM_CLASSES)

    # create loss function
    criterion = nn.CrossEntropyLoss()

    # create optimizer
    if args.optimizer == "lars":
        optim_cls = Lars
    elif args.optimizer == "lamb":
        optim_cls = Lamb
    optimizer = optim_cls(model.parameters(), lr=gpc.config.LEARNING_RATE, weight_decay=gpc.config.WEIGHT_DECAY)

    # create lr scheduler
    lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer,
                                           total_steps=gpc.config.NUM_EPOCHS,
                                           warmup_steps=gpc.config.WARMUP_EPOCHS)

    # initialize
    engine, train_dataloader, test_dataloader, _ = colossalai.initialize(model=model,
                                                                         optimizer=optimizer,
                                                                         criterion=criterion,
                                                                         train_dataloader=train_dataloader,
                                                                         test_dataloader=test_dataloader)

    logger.info("Engine is built", ranks=[0])

    for epoch in range(gpc.config.NUM_EPOCHS):
        # training
        engine.train()
        data_iter = iter(train_dataloader)

        if gpc.get_global_rank() == 0:
            description = 'Epoch {} / {}'.format(epoch, gpc.config.NUM_EPOCHS)
            progress = tqdm(range(len(train_dataloader)), desc=description)
        else:
            progress = range(len(train_dataloader))

        for _ in progress:
            engine.zero_grad()
            engine.execute_schedule(data_iter, return_output_label=False)
            engine.step()
            lr_scheduler.step()


if __name__ == '__main__':
    main()
from setuptools import setup, find_packages

setup(
    name='latent-diffusion',
    version='0.0.1',
    description='',
    packages=find_packages(),
    install_requires=[
        'torch',
        'numpy',
        'tqdm',
    ],
)
import argparse import csv import datetime import glob import importlib import os import sys import time import numpy as np import torch import torchvision try: import lightning.pytorch as pl except: import pytorch_lightning as pl from functools import partial from omegaconf import OmegaConf from packaging import version from PIL import Image from prefetch_generator import BackgroundGenerator from torch.utils.data import DataLoader, Dataset, Subset, random_split try: from lightning.pytorch import seed_everything from lightning.pytorch.callbacks import Callback, LearningRateMonitor, ModelCheckpoint from lightning.pytorch.trainer import Trainer from lightning.pytorch.utilities import rank_zero_info, rank_zero_only LIGHTNING_PACK_NAME = "lightning.pytorch." except: from pytorch_lightning import seed_everything from pytorch_lightning.callbacks import Callback, LearningRateMonitor, ModelCheckpoint from pytorch_lightning.trainer import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only LIGHTNING_PACK_NAME = "pytorch_lightning." from ldm.data.base import Txt2ImgIterableBaseDataset from ldm.util import instantiate_from_config # from ldm.modules.attention import enable_flash_attentions class DataLoaderX(DataLoader): def __iter__(self): return BackgroundGenerator(super().__iter__()) def get_parser(**parser_kwargs): def str2bool(v): if isinstance(v, bool): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("Boolean value expected.") parser = argparse.ArgumentParser(**parser_kwargs) parser.add_argument( "-n", "--name", type=str, const=True, default="", nargs="?", help="postfix for logdir", ) parser.add_argument( "-r", "--resume", type=str, const=True, default="", nargs="?", help="resume from logdir or checkpoint in logdir", ) parser.add_argument( "-b", "--base", nargs="*", metavar="base_config.yaml", help="paths to base configs. Loaded from left-to-right. 
" "Parameters can be overwritten or added with command-line options of the form `--key value`.", default=list(), ) parser.add_argument( "-t", "--train", type=str2bool, const=True, default=False, nargs="?", help="train", ) parser.add_argument( "--no-test", type=str2bool, const=True, default=False, nargs="?", help="disable test", ) parser.add_argument( "-p", "--project", help="name of new or path to existing project", ) parser.add_argument( "-c", "--ckpt", type=str, const=True, default="", nargs="?", help="load pretrained checkpoint from stable AI", ) parser.add_argument( "-d", "--debug", type=str2bool, nargs="?", const=True, default=False, help="enable post-mortem debugging", ) parser.add_argument( "-s", "--seed", type=int, default=23, help="seed for seed_everything", ) parser.add_argument( "-f", "--postfix", type=str, default="", help="post-postfix for default name", ) parser.add_argument( "-l", "--logdir", type=str, default="logs", help="directory for logging dat shit", ) parser.add_argument( "--scale_lr", type=str2bool, nargs="?", const=True, default=True, help="scale base-lr by ngpu * batch_size * n_accumulate", ) return parser def nondefault_trainer_args(opt): parser = argparse.ArgumentParser() parser = Trainer.add_argparse_args(parser) args = parser.parse_args([]) return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k)) class WrappedDataset(Dataset): """Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset""" def __init__(self, dataset): self.data = dataset def __len__(self): return len(self.data) def __getitem__(self, idx): return self.data[idx] def worker_init_fn(_): worker_info = torch.utils.data.get_worker_info() dataset = worker_info.dataset worker_id = worker_info.id if isinstance(dataset, Txt2ImgIterableBaseDataset): split_size = dataset.num_records // worker_info.num_workers # reset num_records to the true number to retain reliable length information dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size] current_id = np.random.choice(len(np.random.get_state()[1]), 1) return np.random.seed(np.random.get_state()[1][current_id] + worker_id) else: return np.random.seed(np.random.get_state()[1][0] + worker_id) class DataModuleFromConfig(pl.LightningDataModule): def __init__(self, batch_size, train=None, validation=None, test=None, predict=None, wrap=False, num_workers=None, shuffle_test_loader=False, use_worker_init_fn=False, shuffle_val_dataloader=False): super().__init__() self.batch_size = batch_size self.dataset_configs = dict() self.num_workers = num_workers if num_workers is not None else batch_size * 2 self.use_worker_init_fn = use_worker_init_fn if train is not None: self.dataset_configs["train"] = train self.train_dataloader = self._train_dataloader if validation is not None: self.dataset_configs["validation"] = validation self.val_dataloader = partial(self._val_dataloader, shuffle=shuffle_val_dataloader) if test is not None: self.dataset_configs["test"] = test self.test_dataloader = partial(self._test_dataloader, shuffle=shuffle_test_loader) if predict is not None: self.dataset_configs["predict"] = predict self.predict_dataloader = self._predict_dataloader self.wrap = wrap def prepare_data(self): for data_cfg in self.dataset_configs.values(): instantiate_from_config(data_cfg) def setup(self, stage=None): self.datasets = dict((k, instantiate_from_config(self.dataset_configs[k])) for k in self.dataset_configs) if self.wrap: for k in self.datasets: self.datasets[k] = 
WrappedDataset(self.datasets[k]) def _train_dataloader(self): is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset) if is_iterable_dataset or self.use_worker_init_fn: init_fn = worker_init_fn else: init_fn = None return DataLoaderX(self.datasets["train"], batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False if is_iterable_dataset else True, worker_init_fn=init_fn) def _val_dataloader(self, shuffle=False): if isinstance(self.datasets['validation'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn: init_fn = worker_init_fn else: init_fn = None return DataLoaderX(self.datasets["validation"], batch_size=self.batch_size, num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=shuffle) def _test_dataloader(self, shuffle=False): is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset) if is_iterable_dataset or self.use_worker_init_fn: init_fn = worker_init_fn else: init_fn = None # do not shuffle dataloader for iterable dataset shuffle = shuffle and (not is_iterable_dataset) return DataLoaderX(self.datasets["test"], batch_size=self.batch_size, num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=shuffle) def _predict_dataloader(self, shuffle=False): if isinstance(self.datasets['predict'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn: init_fn = worker_init_fn else: init_fn = None return DataLoaderX(self.datasets["predict"], batch_size=self.batch_size, num_workers=self.num_workers, worker_init_fn=init_fn) class SetupCallback(Callback): def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config): super().__init__() self.resume = resume self.now = now self.logdir = logdir self.ckptdir = ckptdir self.cfgdir = cfgdir self.config = config self.lightning_config = lightning_config def on_keyboard_interrupt(self, trainer, pl_module): if trainer.global_rank == 0: print("Summoning checkpoint.") ckpt_path = os.path.join(self.ckptdir, "last.ckpt") trainer.save_checkpoint(ckpt_path) # def on_pretrain_routine_start(self, trainer, pl_module): def on_fit_start(self, trainer, pl_module): if trainer.global_rank == 0: # Create logdirs and save configs os.makedirs(self.logdir, exist_ok=True) os.makedirs(self.ckptdir, exist_ok=True) os.makedirs(self.cfgdir, exist_ok=True) if "callbacks" in self.lightning_config: if 'metrics_over_trainsteps_checkpoint' in self.lightning_config['callbacks']: os.makedirs(os.path.join(self.ckptdir, 'trainstep_checkpoints'), exist_ok=True) print("Project config") print(OmegaConf.to_yaml(self.config)) OmegaConf.save(self.config, os.path.join(self.cfgdir, "{}-project.yaml".format(self.now))) print("Lightning config") print(OmegaConf.to_yaml(self.lightning_config)) OmegaConf.save(OmegaConf.create({"lightning": self.lightning_config}), os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now))) else: # ModelCheckpoint callback created log directory --- remove it if not self.resume and os.path.exists(self.logdir): dst, name = os.path.split(self.logdir) dst = os.path.join(dst, "child_runs", name) os.makedirs(os.path.split(dst)[0], exist_ok=True) try: os.rename(self.logdir, dst) except FileNotFoundError: pass # def on_fit_end(self, trainer, pl_module): # if trainer.global_rank == 0: # ckpt_path = os.path.join(self.ckptdir, "last.ckpt") # rank_zero_info(f"Saving final checkpoint in {ckpt_path}.") # trainer.save_checkpoint(ckpt_path) class ImageLogger(Callback): def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True, rescale=True, 
disabled=False, log_on_batch_idx=False, log_first_step=False, log_images_kwargs=None): super().__init__() self.rescale = rescale self.batch_freq = batch_frequency self.max_images = max_images self.logger_log_images = { pl.loggers.CSVLogger: self._testtube, } self.log_steps = [2**n for n in range(int(np.log2(self.batch_freq)) + 1)] if not increase_log_steps: self.log_steps = [self.batch_freq] self.clamp = clamp self.disabled = disabled self.log_on_batch_idx = log_on_batch_idx self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {} self.log_first_step = log_first_step @rank_zero_only def _testtube(self, pl_module, images, batch_idx, split): for k in images: grid = torchvision.utils.make_grid(images[k]) grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w tag = f"{split}/{k}" pl_module.logger.experiment.add_image(tag, grid, global_step=pl_module.global_step) @rank_zero_only def log_local(self, save_dir, split, images, global_step, current_epoch, batch_idx): root = os.path.join(save_dir, "images", split) for k in images: grid = torchvision.utils.make_grid(images[k], nrow=4) if self.rescale: grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1) grid = grid.numpy() grid = (grid * 255).astype(np.uint8) filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(k, global_step, current_epoch, batch_idx) path = os.path.join(root, filename) os.makedirs(os.path.split(path)[0], exist_ok=True) Image.fromarray(grid).save(path) def log_img(self, pl_module, batch, batch_idx, split="train"): check_idx = batch_idx if self.log_on_batch_idx else pl_module.global_step if (self.check_frequency(check_idx) and # batch_idx % self.batch_freq == 0 hasattr(pl_module, "log_images") and callable(pl_module.log_images) and self.max_images > 0): logger = type(pl_module.logger) is_train = pl_module.training if is_train: pl_module.eval() with torch.no_grad(): images = pl_module.log_images(batch, split=split, **self.log_images_kwargs) for k in images: N = min(images[k].shape[0], self.max_images) images[k] = images[k][:N] if isinstance(images[k], torch.Tensor): images[k] = images[k].detach().cpu() if self.clamp: images[k] = torch.clamp(images[k], -1., 1.) 
self.log_local(pl_module.logger.save_dir, split, images, pl_module.global_step, pl_module.current_epoch, batch_idx) logger_log_images = self.logger_log_images.get(logger, lambda *args, **kwargs: None) logger_log_images(pl_module, images, pl_module.global_step, split) if is_train: pl_module.train() def check_frequency(self, check_idx): if ((check_idx % self.batch_freq) == 0 or (check_idx in self.log_steps)) and (check_idx > 0 or self.log_first_step): try: self.log_steps.pop(0) except IndexError as e: print(e) pass return True return False def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): # if not self.disabled and (pl_module.global_step > 0 or self.log_first_step): # self.log_img(pl_module, batch, batch_idx, split="train") pass def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): if not self.disabled and pl_module.global_step > 0: self.log_img(pl_module, batch, batch_idx, split="val") if hasattr(pl_module, 'calibrate_grad_norm'): if (pl_module.calibrate_grad_norm and batch_idx % 25 == 0) and batch_idx > 0: self.log_gradients(trainer, pl_module, batch_idx=batch_idx) class CUDACallback(Callback): # see https://github.com/SeanNaren/minGPT/blob/master/mingpt/callback.py def on_train_start(self, trainer, pl_module): rank_zero_info("Training is starting") def on_train_end(self, trainer, pl_module): rank_zero_info("Training is ending") def on_train_epoch_start(self, trainer, pl_module): # Reset the memory use counter torch.cuda.reset_peak_memory_stats(trainer.strategy.root_device.index) torch.cuda.synchronize(trainer.strategy.root_device.index) self.start_time = time.time() def on_train_epoch_end(self, trainer, pl_module): torch.cuda.synchronize(trainer.strategy.root_device.index) max_memory = torch.cuda.max_memory_allocated(trainer.strategy.root_device.index) / 2**20 epoch_time = time.time() - self.start_time try: max_memory = trainer.strategy.reduce(max_memory) epoch_time = trainer.strategy.reduce(epoch_time) rank_zero_info(f"Average Epoch time: {epoch_time:.2f} seconds") rank_zero_info(f"Average Peak memory {max_memory:.2f}MiB") except AttributeError: pass if __name__ == "__main__": # custom parser to specify config files, train, test and debug mode, # postfix, resume. # `--key value` arguments are interpreted as arguments to the trainer. # `nested.key=value` arguments are interpreted as config parameters. # configs are merged from left-to-right followed by command line parameters. 
# model: # base_learning_rate: float # target: path to lightning module # params: # key: value # data: # target: main.DataModuleFromConfig # params: # batch_size: int # wrap: bool # train: # target: path to train dataset # params: # key: value # validation: # target: path to validation dataset # params: # key: value # test: # target: path to test dataset # params: # key: value # lightning: (optional, has sane defaults and can be specified on cmdline) # trainer: # additional arguments to trainer # logger: # logger to instantiate # modelcheckpoint: # modelcheckpoint to instantiate # callbacks: # callback1: # target: importpath # params: # key: value now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") # add cwd for convenience and to make classes in this file available when # running as `python main.py` # (in particular `main.DataModuleFromConfig`) sys.path.append(os.getcwd()) parser = get_parser() parser = Trainer.add_argparse_args(parser) opt, unknown = parser.parse_known_args() if opt.name and opt.resume: raise ValueError("-n/--name and -r/--resume cannot be specified both." "If you want to resume training in a new log folder, " "use -n/--name in combination with --resume_from_checkpoint") ckpt = None if opt.resume: rank_zero_info("Resuming from {}".format(opt.resume)) if not os.path.exists(opt.resume): raise ValueError("Cannot find {}".format(opt.resume)) if os.path.isfile(opt.resume): paths = opt.resume.split("/") # idx = len(paths)-paths[::-1].index("logs")+1 # logdir = "/".join(paths[:idx]) logdir = "/".join(paths[:-2]) rank_zero_info("logdir: {}".format(logdir)) ckpt = opt.resume else: assert os.path.isdir(opt.resume), opt.resume logdir = opt.resume.rstrip("/") ckpt = os.path.join(logdir, "checkpoints", "last.ckpt") base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml"))) opt.base = base_configs + opt.base _tmp = logdir.split("/") nowname = _tmp[-1] else: if opt.name: name = "_" + opt.name elif opt.base: rank_zero_info("Using base config {}".format(opt.base)) cfg_fname = os.path.split(opt.base[0])[-1] cfg_name = os.path.splitext(cfg_fname)[0] name = "_" + cfg_name else: name = "" nowname = now + name + opt.postfix logdir = os.path.join(opt.logdir, nowname) if opt.ckpt: ckpt = opt.ckpt ckptdir = os.path.join(logdir, "checkpoints") cfgdir = os.path.join(logdir, "configs") seed_everything(opt.seed) try: # init and save configs configs = [OmegaConf.load(cfg) for cfg in opt.base] cli = OmegaConf.from_dotlist(unknown) config = OmegaConf.merge(*configs, cli) lightning_config = config.pop("lightning", OmegaConf.create()) # merge trainer cli with config trainer_config = lightning_config.get("trainer", OmegaConf.create()) for k in nondefault_trainer_args(opt): trainer_config[k] = getattr(opt, k) if not trainer_config["accelerator"] == "gpu": del trainer_config["accelerator"] cpu = True else: cpu = False trainer_opt = argparse.Namespace(**trainer_config) lightning_config.trainer = trainer_config # model use_fp16 = trainer_config.get("precision", 32) == 16 if use_fp16: config.model["params"].update({"use_fp16": True}) else: config.model["params"].update({"use_fp16": False}) if ckpt is not None: config.model["params"].update({"ckpt": ckpt}) rank_zero_info("Using ckpt_path = {}".format(config.model["params"]["ckpt"])) model = instantiate_from_config(config.model) # trainer and callbacks trainer_kwargs = dict() # config the logger # default logger configs default_logger_cfgs = { "wandb": { "target": LIGHTNING_PACK_NAME + "loggers.WandbLogger", "params": { "name": nowname, 
"save_dir": logdir, "offline": opt.debug, "id": nowname, } }, "tensorboard": { "target": LIGHTNING_PACK_NAME + "loggers.TensorBoardLogger", "params": { "save_dir": logdir, "name": "diff_tb", "log_graph": True } } } default_logger_cfg = default_logger_cfgs["tensorboard"] if "logger" in lightning_config: logger_cfg = lightning_config.logger else: logger_cfg = default_logger_cfg logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg) trainer_kwargs["logger"] = instantiate_from_config(logger_cfg) # config the strategy, defualt is ddp if "strategy" in trainer_config: strategy_cfg = trainer_config["strategy"] strategy_cfg["target"] = LIGHTNING_PACK_NAME + strategy_cfg["target"] else: strategy_cfg = { "target": LIGHTNING_PACK_NAME + "strategies.DDPStrategy", "params": { "find_unused_parameters": False } } trainer_kwargs["strategy"] = instantiate_from_config(strategy_cfg) # modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to # specify which metric is used to determine best models default_modelckpt_cfg = { "target": LIGHTNING_PACK_NAME + "callbacks.ModelCheckpoint", "params": { "dirpath": ckptdir, "filename": "{epoch:06}", "verbose": True, "save_last": True, } } if hasattr(model, "monitor"): default_modelckpt_cfg["params"]["monitor"] = model.monitor default_modelckpt_cfg["params"]["save_top_k"] = 3 if "modelcheckpoint" in lightning_config: modelckpt_cfg = lightning_config.modelcheckpoint else: modelckpt_cfg = OmegaConf.create() modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg) if version.parse(pl.__version__) < version.parse('1.4.0'): trainer_kwargs["checkpoint_callback"] = instantiate_from_config(modelckpt_cfg) # add callback which sets up log directory default_callbacks_cfg = { "setup_callback": { "target": "main.SetupCallback", "params": { "resume": opt.resume, "now": now, "logdir": logdir, "ckptdir": ckptdir, "cfgdir": cfgdir, "config": config, "lightning_config": lightning_config, } }, "image_logger": { "target": "main.ImageLogger", "params": { "batch_frequency": 750, "max_images": 4, "clamp": True } }, "learning_rate_logger": { "target": "main.LearningRateMonitor", "params": { "logging_interval": "step", # "log_momentum": True } }, "cuda_callback": { "target": "main.CUDACallback" }, } if "callbacks" in lightning_config: callbacks_cfg = lightning_config.callbacks else: callbacks_cfg = OmegaConf.create() if 'metrics_over_trainsteps_checkpoint' in callbacks_cfg: print( 'Caution: Saving checkpoints every n train steps without deleting. This might require some free space.') default_metrics_over_trainsteps_ckpt_dict = { 'metrics_over_trainsteps_checkpoint': { "target": LIGHTNING_PACK_NAME + 'callbacks.ModelCheckpoint', 'params': { "dirpath": os.path.join(ckptdir, 'trainstep_checkpoints'), "filename": "{epoch:06}-{step:09}", "verbose": True, 'save_top_k': -1, 'every_n_train_steps': 10000, 'save_weights_only': True } } } default_callbacks_cfg.update(default_metrics_over_trainsteps_ckpt_dict) callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg) trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg] trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs) trainer.logdir = logdir # data data = instantiate_from_config(config.data) # NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html # calling these ourselves should not be necessary but it is. 
# lightning still takes care of proper multiprocessing though data.prepare_data() data.setup() for k in data.datasets: rank_zero_info(f"{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}") # configure learning rate bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate if not cpu: ngpu = trainer_config["devices"] else: ngpu = 1 if 'accumulate_grad_batches' in lightning_config.trainer: accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches else: accumulate_grad_batches = 1 rank_zero_info(f"accumulate_grad_batches = {accumulate_grad_batches}") lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches if opt.scale_lr: model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr rank_zero_info( "Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)" .format(model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr)) else: model.learning_rate = base_lr rank_zero_info("++++ NOT USING LR SCALING ++++") rank_zero_info(f"Setting learning rate to {model.learning_rate:.2e}") # allow checkpointing via USR1 def melk(*args, **kwargs): # run all checkpoint hooks if trainer.global_rank == 0: print("Summoning checkpoint.") ckpt_path = os.path.join(ckptdir, "last.ckpt") trainer.save_checkpoint(ckpt_path) def divein(*args, **kwargs): if trainer.global_rank == 0: import pudb pudb.set_trace() import signal signal.signal(signal.SIGUSR1, melk) signal.signal(signal.SIGUSR2, divein) # run if opt.train: try: trainer.fit(model, data) except Exception: melk() raise # if not opt.no_test and not trainer.interrupted: # trainer.test(model, data) except Exception: if opt.debug and trainer.global_rank == 0: try: import pudb as debugger except ImportError: import pdb as debugger debugger.post_mortem() raise finally: # move newly created debug project to debug_runs if opt.debug and not opt.resume and trainer.global_rank == 0: dst, name = os.path.split(logdir) dst = os.path.join(dst, "debug_runs", name) os.makedirs(os.path.split(dst)[0], exist_ok=True) os.rename(logdir, dst) if trainer.global_rank == 0: print(trainer.profiler.summary())
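# Tiny numeric sketch of the --scale_lr rule applied above (the values are
# illustrative, not taken from any shipped config): with 2 gradient-accumulation
# steps, 8 devices, batch size 4 and base_lr 1e-6 the effective learning rate
# becomes 2 * 8 * 4 * 1e-6 = 6.4e-5.
accumulate_grad_batches, ngpu, bs, base_lr = 2, 8, 4, 1.0e-6
print(f"{accumulate_grad_batches * ngpu * bs * base_lr:.2e}")  # 6.40e-05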
import argparse, os, sys, glob from omegaconf import OmegaConf from PIL import Image from tqdm import tqdm import numpy as np import torch from main import instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler def make_batch(image, mask, device): image = np.array(Image.open(image).convert("RGB")) image = image.astype(np.float32)/255.0 image = image[None].transpose(0,3,1,2) image = torch.from_numpy(image) mask = np.array(Image.open(mask).convert("L")) mask = mask.astype(np.float32)/255.0 mask = mask[None,None] mask[mask < 0.5] = 0 mask[mask >= 0.5] = 1 mask = torch.from_numpy(mask) masked_image = (1-mask)*image batch = {"image": image, "mask": mask, "masked_image": masked_image} for k in batch: batch[k] = batch[k].to(device=device) batch[k] = batch[k]*2.0-1.0 return batch if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--indir", type=str, nargs="?", help="dir containing image-mask pairs (`example.png` and `example_mask.png`)", ) parser.add_argument( "--outdir", type=str, nargs="?", help="dir to write results to", ) parser.add_argument( "--steps", type=int, default=50, help="number of ddim sampling steps", ) opt = parser.parse_args() masks = sorted(glob.glob(os.path.join(opt.indir, "*_mask.png"))) images = [x.replace("_mask.png", ".png") for x in masks] print(f"Found {len(masks)} inputs.") config = OmegaConf.load("models/ldm/inpainting_big/config.yaml") model = instantiate_from_config(config.model) model.load_state_dict(torch.load("models/ldm/inpainting_big/last.ckpt")["state_dict"], strict=False) device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") model = model.to(device) sampler = DDIMSampler(model) os.makedirs(opt.outdir, exist_ok=True) with torch.no_grad(): with model.ema_scope(): for image, mask in tqdm(zip(images, masks)): outpath = os.path.join(opt.outdir, os.path.split(image)[1]) batch = make_batch(image, mask, device=device) # encode masked image and concat downsampled mask c = model.cond_stage_model.encode(batch["masked_image"]) cc = torch.nn.functional.interpolate(batch["mask"], size=c.shape[-2:]) c = torch.cat((c, cc), dim=1) shape = (c.shape[1]-1,)+c.shape[2:] samples_ddim, _ = sampler.sample(S=opt.steps, conditioning=c, batch_size=c.shape[0], shape=shape, verbose=False) x_samples_ddim = model.decode_first_stage(samples_ddim) image = torch.clamp((batch["image"]+1.0)/2.0, min=0.0, max=1.0) mask = torch.clamp((batch["mask"]+1.0)/2.0, min=0.0, max=1.0) predicted_image = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) inpainted = (1-mask)*image+mask*predicted_image inpainted = inpainted.cpu().numpy().transpose(0,2,3,1)[0]*255 Image.fromarray(inpainted.astype(np.uint8)).save(outpath)
import argparse, os import cv2 import torch import numpy as np from omegaconf import OmegaConf from PIL import Image from tqdm import tqdm, trange from itertools import islice from einops import rearrange from torchvision.utils import make_grid try: from lightning.pytorch import seed_everything except: from pytorch_lightning import seed_everything from torch import autocast from contextlib import nullcontext from imwatermark import WatermarkEncoder from ldm.util import instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from ldm.models.diffusion.plms import PLMSSampler from ldm.models.diffusion.dpm_solver import DPMSolverSampler from utils import replace_module, getModelSize torch.set_grad_enabled(False) def chunk(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def load_model_from_config(config, ckpt, verbose=False): print(f"Loading model from {ckpt}") pl_sd = torch.load(ckpt, map_location="cpu") if "global_step" in pl_sd: print(f"Global Step: {pl_sd['global_step']}") sd = pl_sd["state_dict"] model = instantiate_from_config(config.model) m, u = model.load_state_dict(sd, strict=False) if len(m) > 0 and verbose: print("missing keys:") print(m) if len(u) > 0 and verbose: print("unexpected keys:") print(u) model.eval() return model def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--prompt", type=str, nargs="?", default="a professional photograph of an astronaut riding a triceratops", help="the prompt to render" ) parser.add_argument( "--outdir", type=str, nargs="?", help="dir to write results to", default="outputs/txt2img-samples" ) parser.add_argument( "--steps", type=int, default=50, help="number of ddim sampling steps", ) parser.add_argument( "--plms", action='store_true', help="use plms sampling", ) parser.add_argument( "--dpm", action='store_true', help="use DPM (2) sampler", ) parser.add_argument( "--fixed_code", action='store_true', help="if enabled, uses the same starting code across all samples ", ) parser.add_argument( "--ddim_eta", type=float, default=0.0, help="ddim eta (eta=0.0 corresponds to deterministic sampling", ) parser.add_argument( "--n_iter", type=int, default=3, help="sample this often", ) parser.add_argument( "--H", type=int, default=512, help="image height, in pixel space", ) parser.add_argument( "--W", type=int, default=512, help="image width, in pixel space", ) parser.add_argument( "--C", type=int, default=4, help="latent channels", ) parser.add_argument( "--f", type=int, default=8, help="downsampling factor, most often 8 or 16", ) parser.add_argument( "--n_samples", type=int, default=3, help="how many samples to produce for each given prompt. 
A.k.a batch size", ) parser.add_argument( "--n_rows", type=int, default=0, help="rows in the grid (default: n_samples)", ) parser.add_argument( "--scale", type=float, default=9.0, help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))", ) parser.add_argument( "--from-file", type=str, help="if specified, load prompts from this file, separated by newlines", ) parser.add_argument( "--config", type=str, default="configs/stable-diffusion/v2-inference.yaml", help="path to config which constructs model", ) parser.add_argument( "--ckpt", type=str, help="path to checkpoint of model", ) parser.add_argument( "--seed", type=int, default=42, help="the seed (for reproducible sampling)", ) parser.add_argument( "--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast" ) parser.add_argument( "--repeat", type=int, default=1, help="repeat each prompt in file this often", ) parser.add_argument( "--use_int8", type=bool, default=False, help="use int8 for inference", ) opt = parser.parse_args() return opt def put_watermark(img, wm_encoder=None): if wm_encoder is not None: img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) img = wm_encoder.encode(img, 'dwtDct') img = Image.fromarray(img[:, :, ::-1]) return img def main(opt): seed_everything(opt.seed) config = OmegaConf.load(f"{opt.config}") model = load_model_from_config(config, f"{opt.ckpt}") device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") model = model.to(device) # quantize model if opt.use_int8: model = replace_module(model) # # to compute the model size # getModelSize(model) if opt.plms: sampler = PLMSSampler(model) elif opt.dpm: sampler = DPMSolverSampler(model) else: sampler = DDIMSampler(model) os.makedirs(opt.outdir, exist_ok=True) outpath = opt.outdir print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...") wm = "SDV2" wm_encoder = WatermarkEncoder() wm_encoder.set_watermark('bytes', wm.encode('utf-8')) batch_size = opt.n_samples n_rows = opt.n_rows if opt.n_rows > 0 else batch_size if not opt.from_file: prompt = opt.prompt assert prompt is not None data = [batch_size * [prompt]] else: print(f"reading prompts from {opt.from_file}") with open(opt.from_file, "r") as f: data = f.read().splitlines() data = [p for p in data for i in range(opt.repeat)] data = list(chunk(data, batch_size)) sample_path = os.path.join(outpath, "samples") os.makedirs(sample_path, exist_ok=True) sample_count = 0 base_count = len(os.listdir(sample_path)) grid_count = len(os.listdir(outpath)) - 1 start_code = None if opt.fixed_code: start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device) precision_scope = autocast if opt.precision == "autocast" else nullcontext with torch.no_grad(), \ precision_scope("cuda"), \ model.ema_scope(): all_samples = list() for n in trange(opt.n_iter, desc="Sampling"): for prompts in tqdm(data, desc="data"): uc = None if opt.scale != 1.0: uc = model.get_learned_conditioning(batch_size * [""]) if isinstance(prompts, tuple): prompts = list(prompts) c = model.get_learned_conditioning(prompts) shape = [opt.C, opt.H // opt.f, opt.W // opt.f] samples, _ = sampler.sample(S=opt.steps, conditioning=c, batch_size=opt.n_samples, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code) x_samples = model.decode_first_stage(samples) x_samples = torch.clamp((x_samples + 1.0) / 2.0, 
min=0.0, max=1.0) for x_sample in x_samples: x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c') img = Image.fromarray(x_sample.astype(np.uint8)) img = put_watermark(img, wm_encoder) img.save(os.path.join(sample_path, f"{base_count:05}.png")) base_count += 1 sample_count += 1 all_samples.append(x_samples) # additionally, save as grid grid = torch.stack(all_samples, 0) grid = rearrange(grid, 'n b c h w -> (n b) c h w') grid = make_grid(grid, nrow=n_rows) # to image grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy() grid = Image.fromarray(grid.astype(np.uint8)) grid = put_watermark(grid, wm_encoder) grid.save(os.path.join(outpath, f'grid-{grid_count:04}.png')) grid_count += 1 print(f"Your samples are ready and waiting for you here: \n{outpath} \n" f" \nEnjoy.") if __name__ == "__main__": opt = parse_args() main(opt) # # to compute the mem allocated # print(torch.cuda.max_memory_allocated() / 1024 / 1024)
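# Sketch of the classifier-free guidance combination described by the --scale
# help text above; the tensors are stand-ins for the model's noise (eps)
# predictions under the unconditional and conditional prompts.
import torch


def guided_eps(eps_uncond: torch.Tensor, eps_cond: torch.Tensor, scale: float) -> torch.Tensor:
    # eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))
    return eps_uncond + scale * (eps_cond - eps_uncond)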
import argparse, os, sys, glob import clip import torch import torch.nn as nn import numpy as np from omegaconf import OmegaConf from PIL import Image from tqdm import tqdm, trange from itertools import islice from einops import rearrange, repeat from torchvision.utils import make_grid import scann import time from multiprocessing import cpu_count from ldm.util import instantiate_from_config, parallel_data_prefetch from ldm.models.diffusion.ddim import DDIMSampler from ldm.models.diffusion.plms import PLMSSampler from ldm.modules.encoders.modules import FrozenClipImageEmbedder, FrozenCLIPTextEmbedder DATABASES = [ "openimages", "artbench-art_nouveau", "artbench-baroque", "artbench-expressionism", "artbench-impressionism", "artbench-post_impressionism", "artbench-realism", "artbench-romanticism", "artbench-renaissance", "artbench-surrealism", "artbench-ukiyo_e", ] def chunk(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def load_model_from_config(config, ckpt, verbose=False): print(f"Loading model from {ckpt}") pl_sd = torch.load(ckpt, map_location="cpu") if "global_step" in pl_sd: print(f"Global Step: {pl_sd['global_step']}") sd = pl_sd["state_dict"] model = instantiate_from_config(config.model) m, u = model.load_state_dict(sd, strict=False) if len(m) > 0 and verbose: print("missing keys:") print(m) if len(u) > 0 and verbose: print("unexpected keys:") print(u) model.cuda() model.eval() return model class Searcher(object): def __init__(self, database, retriever_version='ViT-L/14'): assert database in DATABASES # self.database = self.load_database(database) self.database_name = database self.searcher_savedir = f'data/rdm/searchers/{self.database_name}' self.database_path = f'data/rdm/retrieval_databases/{self.database_name}' self.retriever = self.load_retriever(version=retriever_version) self.database = {'embedding': [], 'img_id': [], 'patch_coords': []} self.load_database() self.load_searcher() def train_searcher(self, k, metric='dot_product', searcher_savedir=None): print('Start training searcher') searcher = scann.scann_ops_pybind.builder(self.database['embedding'] / np.linalg.norm(self.database['embedding'], axis=1)[:, np.newaxis], k, metric) self.searcher = searcher.score_brute_force().build() print('Finish training searcher') if searcher_savedir is not None: print(f'Save trained searcher under "{searcher_savedir}"') os.makedirs(searcher_savedir, exist_ok=True) self.searcher.serialize(searcher_savedir) def load_single_file(self, saved_embeddings): compressed = np.load(saved_embeddings) self.database = {key: compressed[key] for key in compressed.files} print('Finished loading of clip embeddings.') def load_multi_files(self, data_archive): out_data = {key: [] for key in self.database} for d in tqdm(data_archive, desc=f'Loading datapool from {len(data_archive)} individual files.'): for key in d.files: out_data[key].append(d[key]) return out_data def load_database(self): print(f'Load saved patch embedding from "{self.database_path}"') file_content = glob.glob(os.path.join(self.database_path, '*.npz')) if len(file_content) == 1: self.load_single_file(file_content[0]) elif len(file_content) > 1: data = [np.load(f) for f in file_content] prefetched_data = parallel_data_prefetch(self.load_multi_files, data, n_proc=min(len(data), cpu_count()), target_data_type='dict') self.database = {key: np.concatenate([od[key] for od in prefetched_data], axis=1)[0] for key in self.database} else: raise ValueError(f'No npz-files in specified path "{self.database_path}" is this 
directory existing?') print(f'Finished loading of retrieval database of length {self.database["embedding"].shape[0]}.') def load_retriever(self, version='ViT-L/14', ): model = FrozenClipImageEmbedder(model=version) if torch.cuda.is_available(): model.cuda() model.eval() return model def load_searcher(self): print(f'load searcher for database {self.database_name} from {self.searcher_savedir}') self.searcher = scann.scann_ops_pybind.load_searcher(self.searcher_savedir) print('Finished loading searcher.') def search(self, x, k): if self.searcher is None and self.database['embedding'].shape[0] < 2e4: self.train_searcher(k) # quickly fit searcher on the fly for small databases assert self.searcher is not None, 'Cannot search with uninitialized searcher' if isinstance(x, torch.Tensor): x = x.detach().cpu().numpy() if len(x.shape) == 3: x = x[:, 0] query_embeddings = x / np.linalg.norm(x, axis=1)[:, np.newaxis] start = time.time() nns, distances = self.searcher.search_batched(query_embeddings, final_num_neighbors=k) end = time.time() out_embeddings = self.database['embedding'][nns] out_img_ids = self.database['img_id'][nns] out_pc = self.database['patch_coords'][nns] out = {'nn_embeddings': out_embeddings / np.linalg.norm(out_embeddings, axis=-1)[..., np.newaxis], 'img_ids': out_img_ids, 'patch_coords': out_pc, 'queries': x, 'exec_time': end - start, 'nns': nns, 'q_embeddings': query_embeddings} return out def __call__(self, x, n): return self.search(x, n) if __name__ == "__main__": parser = argparse.ArgumentParser() # TODO: add n_neighbors and modes (text-only, text-image-retrieval, image-image retrieval etc) # TODO: add 'image variation' mode when knn=0 but a single image is given instead of a text prompt? parser.add_argument( "--prompt", type=str, nargs="?", default="a painting of a virus monster playing guitar", help="the prompt to render" ) parser.add_argument( "--outdir", type=str, nargs="?", help="dir to write results to", default="outputs/txt2img-samples" ) parser.add_argument( "--skip_grid", action='store_true', help="do not save a grid, only individual samples. Helpful when evaluating lots of samples", ) parser.add_argument( "--ddim_steps", type=int, default=50, help="number of ddim sampling steps", ) parser.add_argument( "--n_repeat", type=int, default=1, help="number of repeats in CLIP latent space", ) parser.add_argument( "--plms", action='store_true', help="use plms sampling", ) parser.add_argument( "--ddim_eta", type=float, default=0.0, help="ddim eta (eta=0.0 corresponds to deterministic sampling", ) parser.add_argument( "--n_iter", type=int, default=1, help="sample this often", ) parser.add_argument( "--H", type=int, default=768, help="image height, in pixel space", ) parser.add_argument( "--W", type=int, default=768, help="image width, in pixel space", ) parser.add_argument( "--n_samples", type=int, default=3, help="how many samples to produce for each given prompt. 
A.k.a batch size", ) parser.add_argument( "--n_rows", type=int, default=0, help="rows in the grid (default: n_samples)", ) parser.add_argument( "--scale", type=float, default=5.0, help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))", ) parser.add_argument( "--from-file", type=str, help="if specified, load prompts from this file", ) parser.add_argument( "--config", type=str, default="configs/retrieval-augmented-diffusion/768x768.yaml", help="path to config which constructs model", ) parser.add_argument( "--ckpt", type=str, default="models/rdm/rdm768x768/model.ckpt", help="path to checkpoint of model", ) parser.add_argument( "--clip_type", type=str, default="ViT-L/14", help="which CLIP model to use for retrieval and NN encoding", ) parser.add_argument( "--database", type=str, default='artbench-surrealism', choices=DATABASES, help="The database used for the search, only applied when --use_neighbors=True", ) parser.add_argument( "--use_neighbors", default=False, action='store_true', help="Include neighbors in addition to text prompt for conditioning", ) parser.add_argument( "--knn", default=10, type=int, help="The number of included neighbors, only applied when --use_neighbors=True", ) opt = parser.parse_args() config = OmegaConf.load(f"{opt.config}") model = load_model_from_config(config, f"{opt.ckpt}") device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") model = model.to(device) clip_text_encoder = FrozenCLIPTextEmbedder(opt.clip_type).to(device) if opt.plms: sampler = PLMSSampler(model) else: sampler = DDIMSampler(model) os.makedirs(opt.outdir, exist_ok=True) outpath = opt.outdir batch_size = opt.n_samples n_rows = opt.n_rows if opt.n_rows > 0 else batch_size if not opt.from_file: prompt = opt.prompt assert prompt is not None data = [batch_size * [prompt]] else: print(f"reading prompts from {opt.from_file}") with open(opt.from_file, "r") as f: data = f.read().splitlines() data = list(chunk(data, batch_size)) sample_path = os.path.join(outpath, "samples") os.makedirs(sample_path, exist_ok=True) base_count = len(os.listdir(sample_path)) grid_count = len(os.listdir(outpath)) - 1 print(f"sampling scale for cfg is {opt.scale:.2f}") searcher = None if opt.use_neighbors: searcher = Searcher(opt.database) with torch.no_grad(): with model.ema_scope(): for n in trange(opt.n_iter, desc="Sampling"): all_samples = list() for prompts in tqdm(data, desc="data"): print("sampling prompts:", prompts) if isinstance(prompts, tuple): prompts = list(prompts) c = clip_text_encoder.encode(prompts) uc = None if searcher is not None: nn_dict = searcher(c, opt.knn) c = torch.cat([c, torch.from_numpy(nn_dict['nn_embeddings']).cuda()], dim=1) if opt.scale != 1.0: uc = torch.zeros_like(c) if isinstance(prompts, tuple): prompts = list(prompts) shape = [16, opt.H // 16, opt.W // 16] # note: currently hardcoded for f16 model samples_ddim, _ = sampler.sample(S=opt.ddim_steps, conditioning=c, batch_size=c.shape[0], shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, ) x_samples_ddim = model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) for x_sample in x_samples_ddim: x_sample = 255. 
* rearrange(x_sample.cpu().numpy(), 'c h w -> h w c') Image.fromarray(x_sample.astype(np.uint8)).save( os.path.join(sample_path, f"{base_count:05}.png")) base_count += 1 all_samples.append(x_samples_ddim) if not opt.skip_grid: # additionally, save as grid grid = torch.stack(all_samples, 0) grid = rearrange(grid, 'n b c h w -> (n b) c h w') grid = make_grid(grid, nrow=n_rows) # to image grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy() Image.fromarray(grid.astype(np.uint8)).save(os.path.join(outpath, f'grid-{grid_count:04}.png')) grid_count += 1 print(f"Your samples are ready and waiting for you here: \n{outpath} \nEnjoy.")
"""make variations of input image""" import argparse, os import PIL import torch import numpy as np from omegaconf import OmegaConf from PIL import Image from tqdm import tqdm, trange from itertools import islice from einops import rearrange, repeat from torchvision.utils import make_grid from torch import autocast from contextlib import nullcontext try: from lightning.pytorch import seed_everything except: from pytorch_lightning import seed_everything from imwatermark import WatermarkEncoder from scripts.txt2img import put_watermark from ldm.util import instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from utils import replace_module, getModelSize def chunk(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def load_model_from_config(config, ckpt, verbose=False): print(f"Loading model from {ckpt}") pl_sd = torch.load(ckpt, map_location="cpu") if "global_step" in pl_sd: print(f"Global Step: {pl_sd['global_step']}") sd = pl_sd["state_dict"] model = instantiate_from_config(config.model) m, u = model.load_state_dict(sd, strict=False) if len(m) > 0 and verbose: print("missing keys:") print(m) if len(u) > 0 and verbose: print("unexpected keys:") print(u) model.eval() return model def load_img(path): image = Image.open(path).convert("RGB") w, h = image.size print(f"loaded input image of size ({w}, {h}) from {path}") w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 64 image = image.resize((w, h), resample=PIL.Image.LANCZOS) image = np.array(image).astype(np.float32) / 255.0 image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image) return 2. * image - 1. def main(): parser = argparse.ArgumentParser() parser.add_argument( "--prompt", type=str, nargs="?", default="a painting of a virus monster playing guitar", help="the prompt to render" ) parser.add_argument( "--init-img", type=str, nargs="?", help="path to the input image" ) parser.add_argument( "--outdir", type=str, nargs="?", help="dir to write results to", default="outputs/img2img-samples" ) parser.add_argument( "--ddim_steps", type=int, default=50, help="number of ddim sampling steps", ) parser.add_argument( "--fixed_code", action='store_true', help="if enabled, uses the same starting code across all samples ", ) parser.add_argument( "--ddim_eta", type=float, default=0.0, help="ddim eta (eta=0.0 corresponds to deterministic sampling", ) parser.add_argument( "--n_iter", type=int, default=1, help="sample this often", ) parser.add_argument( "--C", type=int, default=4, help="latent channels", ) parser.add_argument( "--f", type=int, default=8, help="downsampling factor, most often 8 or 16", ) parser.add_argument( "--n_samples", type=int, default=2, help="how many samples to produce for each given prompt. A.k.a batch size", ) parser.add_argument( "--n_rows", type=int, default=0, help="rows in the grid (default: n_samples)", ) parser.add_argument( "--scale", type=float, default=9.0, help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))", ) parser.add_argument( "--strength", type=float, default=0.8, help="strength for noising/unnoising. 
1.0 corresponds to full destruction of information in init image", ) parser.add_argument( "--from-file", type=str, help="if specified, load prompts from this file", ) parser.add_argument( "--config", type=str, default="configs/stable-diffusion/v2-inference.yaml", help="path to config which constructs model", ) parser.add_argument( "--ckpt", type=str, help="path to checkpoint of model", ) parser.add_argument( "--seed", type=int, default=42, help="the seed (for reproducible sampling)", ) parser.add_argument( "--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast" ) parser.add_argument( "--use_int8", type=bool, default=False, help="use int8 for inference", ) opt = parser.parse_args() seed_everything(opt.seed) config = OmegaConf.load(f"{opt.config}") model = load_model_from_config(config, f"{opt.ckpt}") device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") model = model.to(device) # quantize model if opt.use_int8: model = replace_module(model) # # to compute the model size # getModelSize(model) sampler = DDIMSampler(model) os.makedirs(opt.outdir, exist_ok=True) outpath = opt.outdir print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...") wm = "SDV2" wm_encoder = WatermarkEncoder() wm_encoder.set_watermark('bytes', wm.encode('utf-8')) batch_size = opt.n_samples n_rows = opt.n_rows if opt.n_rows > 0 else batch_size if not opt.from_file: prompt = opt.prompt assert prompt is not None data = [batch_size * [prompt]] else: print(f"reading prompts from {opt.from_file}") with open(opt.from_file, "r") as f: data = f.read().splitlines() data = list(chunk(data, batch_size)) sample_path = os.path.join(outpath, "samples") os.makedirs(sample_path, exist_ok=True) base_count = len(os.listdir(sample_path)) grid_count = len(os.listdir(outpath)) - 1 assert os.path.isfile(opt.init_img) init_image = load_img(opt.init_img).to(device) init_image = repeat(init_image, '1 ... -> b ...', b=batch_size) init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image)) # move to latent space sampler.make_schedule(ddim_num_steps=opt.ddim_steps, ddim_eta=opt.ddim_eta, verbose=False) assert 0. <= opt.strength <= 1., 'can only work with strength in [0.0, 1.0]' t_enc = int(opt.strength * opt.ddim_steps) print(f"target t_enc is {t_enc} steps") precision_scope = autocast if opt.precision == "autocast" else nullcontext with torch.no_grad(): with precision_scope("cuda"): with model.ema_scope(): all_samples = list() for n in trange(opt.n_iter, desc="Sampling"): for prompts in tqdm(data, desc="data"): uc = None if opt.scale != 1.0: uc = model.get_learned_conditioning(batch_size * [""]) if isinstance(prompts, tuple): prompts = list(prompts) c = model.get_learned_conditioning(prompts) # encode (scaled latent) z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc] * batch_size).to(device)) # decode it samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, ) x_samples = model.decode_first_stage(samples) x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0) for x_sample in x_samples: x_sample = 255. 
* rearrange(x_sample.cpu().numpy(), 'c h w -> h w c') img = Image.fromarray(x_sample.astype(np.uint8)) img = put_watermark(img, wm_encoder) img.save(os.path.join(sample_path, f"{base_count:05}.png")) base_count += 1 all_samples.append(x_samples) # additionally, save as grid grid = torch.stack(all_samples, 0) grid = rearrange(grid, 'n b c h w -> (n b) c h w') grid = make_grid(grid, nrow=n_rows) # to image grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy() grid = Image.fromarray(grid.astype(np.uint8)) grid = put_watermark(grid, wm_encoder) grid.save(os.path.join(outpath, f'grid-{grid_count:04}.png')) grid_count += 1 print(f"Your samples are ready and waiting for you here: \n{outpath} \nEnjoy.") if __name__ == "__main__": main() # # to compute the mem allocated # print(torch.cuda.max_memory_allocated() / 1024 / 1024)
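# Tiny numeric sketch of how --strength maps to the number of decoding steps in
# the img2img script above: strength 0.8 with 50 DDIM steps noises the init
# latent up to step 40 and then denoises those 40 steps.
ddim_steps, strength = 50, 0.8
t_enc = int(strength * ddim_steps)
print(t_enc)  # 40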
import bitsandbytes as bnb import torch.nn as nn import torch class Linear8bit(nn.Linear): def __init__( self, input_features, output_features, bias=True, has_fp16_weights=False, memory_efficient_backward=False, threshold=6.0, weight_data=None, bias_data=None ): super(Linear8bit, self).__init__( input_features, output_features, bias ) self.state = bnb.MatmulLtState() self.bias = bias_data self.state.threshold = threshold self.state.has_fp16_weights = has_fp16_weights self.state.memory_efficient_backward = memory_efficient_backward if threshold > 0.0 and not has_fp16_weights: self.state.use_pool = True self.register_parameter("SCB", nn.Parameter(torch.empty(0), requires_grad=False)) self.weight = weight_data self.quant() def quant(self): weight = self.weight.data.contiguous().half().cuda() CB, _, SCB, _, _ = bnb.functional.double_quant(weight) delattr(self, "weight") setattr(self, "weight", nn.Parameter(CB, requires_grad=False)) delattr(self, "SCB") setattr(self, "SCB", nn.Parameter(SCB, requires_grad=False)) del weight def forward(self, x): self.state.is_training = self.training if self.bias is not None and self.bias.dtype != torch.float16: self.bias.data = self.bias.data.half() self.state.CB = self.weight.data self.state.SCB = self.SCB.data out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state) del self.state.CxB return out def replace_module(model): for name, module in model.named_children(): if len(list(module.children())) > 0: replace_module(module) if isinstance(module, nn.Linear) and "out_proj" not in name: model._modules[name] = Linear8bit( input_features=module.in_features, output_features=module.out_features, threshold=6.0, weight_data=module.weight, bias_data=module.bias, ) return model def getModelSize(model): param_size = 0 param_sum = 0 for param in model.parameters(): param_size += param.nelement() * param.element_size() param_sum += param.nelement() buffer_size = 0 buffer_sum = 0 for buffer in model.buffers(): buffer_size += buffer.nelement() * buffer.element_size() buffer_sum += buffer.nelement() all_size = (param_size + buffer_size) / 1024 / 1024 print('Model Size: {:.3f}MB'.format(all_size)) return (param_size, param_sum, buffer_size, buffer_sum, all_size)
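# Minimal sketch of exercising the helpers above on a toy model; it assumes a
# CUDA device and the bitsandbytes package, since Linear8bit.quant() moves the
# weights to the GPU before quantizing. The layer sizes are illustrative only.
import torch.nn as nn

toy = nn.Sequential(
    nn.Linear(512, 1024),
    nn.GELU(),
    nn.Linear(1024, 512),
)
getModelSize(toy)          # report the fp32 model size
toy = replace_module(toy)  # swap eligible nn.Linear layers for Linear8bit
getModelSize(toy)          # report the size after int8 quantization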
import argparse, os, sys, glob, datetime, yaml import torch import time import numpy as np from tqdm import trange from omegaconf import OmegaConf from PIL import Image from ldm.models.diffusion.ddim import DDIMSampler from ldm.util import instantiate_from_config rescale = lambda x: (x + 1.) / 2. def custom_to_pil(x): x = x.detach().cpu() x = torch.clamp(x, -1., 1.) x = (x + 1.) / 2. x = x.permute(1, 2, 0).numpy() x = (255 * x).astype(np.uint8) x = Image.fromarray(x) if not x.mode == "RGB": x = x.convert("RGB") return x def custom_to_np(x): # saves the batch in adm style as in https://github.com/openai/guided-diffusion/blob/main/scripts/image_sample.py sample = x.detach().cpu() sample = ((sample + 1) * 127.5).clamp(0, 255).to(torch.uint8) sample = sample.permute(0, 2, 3, 1) sample = sample.contiguous() return sample def logs2pil(logs, keys=["sample"]): imgs = dict() for k in logs: try: if len(logs[k].shape) == 4: img = custom_to_pil(logs[k][0, ...]) elif len(logs[k].shape) == 3: img = custom_to_pil(logs[k]) else: print(f"Unknown format for key {k}. ") img = None except: img = None imgs[k] = img return imgs @torch.no_grad() def convsample(model, shape, return_intermediates=True, verbose=True, make_prog_row=False): if not make_prog_row: return model.p_sample_loop(None, shape, return_intermediates=return_intermediates, verbose=verbose) else: return model.progressive_denoising( None, shape, verbose=True ) @torch.no_grad() def convsample_ddim(model, steps, shape, eta=1.0 ): ddim = DDIMSampler(model) bs = shape[0] shape = shape[1:] samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, eta=eta, verbose=False,) return samples, intermediates @torch.no_grad() def make_convolutional_sample(model, batch_size, vanilla=False, custom_steps=None, eta=1.0,): log = dict() shape = [batch_size, model.model.diffusion_model.in_channels, model.model.diffusion_model.image_size, model.model.diffusion_model.image_size] with model.ema_scope("Plotting"): t0 = time.time() if vanilla: sample, progrow = convsample(model, shape, make_prog_row=True) else: sample, intermediates = convsample_ddim(model, steps=custom_steps, shape=shape, eta=eta) t1 = time.time() x_sample = model.decode_first_stage(sample) log["sample"] = x_sample log["time"] = t1 - t0 log['throughput'] = sample.shape[0] / (t1 - t0) print(f'Throughput for this batch: {log["throughput"]}') return log def run(model, logdir, batch_size=50, vanilla=False, custom_steps=None, eta=None, n_samples=50000, nplog=None): if vanilla: print(f'Using Vanilla DDPM sampling with {model.num_timesteps} sampling steps.') else: print(f'Using DDIM sampling with {custom_steps} sampling steps and eta={eta}') tstart = time.time() n_saved = len(glob.glob(os.path.join(logdir,'*.png')))-1 # path = logdir if model.cond_stage_model is None: all_images = [] print(f"Running unconditional sampling for {n_samples} samples") for _ in trange(n_samples // batch_size, desc="Sampling Batches (unconditional)"): logs = make_convolutional_sample(model, batch_size=batch_size, vanilla=vanilla, custom_steps=custom_steps, eta=eta) n_saved = save_logs(logs, logdir, n_saved=n_saved, key="sample") all_images.extend([custom_to_np(logs["sample"])]) if n_saved >= n_samples: print(f'Finish after generating {n_saved} samples') break all_img = np.concatenate(all_images, axis=0) all_img = all_img[:n_samples] shape_str = "x".join([str(x) for x in all_img.shape]) nppath = os.path.join(nplog, f"{shape_str}-samples.npz") np.savez(nppath, all_img) else: raise NotImplementedError('Currently only 
sampling for unconditional models supported.') print(f"sampling of {n_saved} images finished in {(time.time() - tstart) / 60.:.2f} minutes.") def save_logs(logs, path, n_saved=0, key="sample", np_path=None): for k in logs: if k == key: batch = logs[key] if np_path is None: for x in batch: img = custom_to_pil(x) imgpath = os.path.join(path, f"{key}_{n_saved:06}.png") img.save(imgpath) n_saved += 1 else: npbatch = custom_to_np(batch) shape_str = "x".join([str(x) for x in npbatch.shape]) nppath = os.path.join(np_path, f"{n_saved}-{shape_str}-samples.npz") np.savez(nppath, npbatch) n_saved += npbatch.shape[0] return n_saved def get_parser(): parser = argparse.ArgumentParser() parser.add_argument( "-r", "--resume", type=str, nargs="?", help="load from logdir or checkpoint in logdir", ) parser.add_argument( "-n", "--n_samples", type=int, nargs="?", help="number of samples to draw", default=50000 ) parser.add_argument( "-e", "--eta", type=float, nargs="?", help="eta for ddim sampling (0.0 yields deterministic sampling)", default=1.0 ) parser.add_argument( "-v", "--vanilla_sample", default=False, action='store_true', help="vanilla sampling (default option is DDIM sampling)?", ) parser.add_argument( "-l", "--logdir", type=str, nargs="?", help="extra logdir", default="none" ) parser.add_argument( "-c", "--custom_steps", type=int, nargs="?", help="number of steps for ddim and fastdpm sampling", default=50 ) parser.add_argument( "--batch_size", type=int, nargs="?", help="the bs", default=10 ) return parser def load_model_from_config(config, sd): model = instantiate_from_config(config) model.load_state_dict(sd,strict=False) model.cuda() model.eval() return model def load_model(config, ckpt, gpu, eval_mode): if ckpt: print(f"Loading model from {ckpt}") pl_sd = torch.load(ckpt, map_location="cpu") global_step = pl_sd["global_step"] else: pl_sd = {"state_dict": None} global_step = None model = load_model_from_config(config.model, pl_sd["state_dict"]) return model, global_step if __name__ == "__main__": now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") sys.path.append(os.getcwd()) command = " ".join(sys.argv) parser = get_parser() opt, unknown = parser.parse_known_args() ckpt = None if not os.path.exists(opt.resume): raise ValueError("Cannot find {}".format(opt.resume)) if os.path.isfile(opt.resume): # paths = opt.resume.split("/") try: logdir = '/'.join(opt.resume.split('/')[:-1]) # idx = len(paths)-paths[::-1].index("logs")+1 print(f'Logdir is {logdir}') except ValueError: paths = opt.resume.split("/") idx = -2 # take a guess: path/to/logdir/checkpoints/model.ckpt logdir = "/".join(paths[:idx]) ckpt = opt.resume else: assert os.path.isdir(opt.resume), f"{opt.resume} is not a directory" logdir = opt.resume.rstrip("/") ckpt = os.path.join(logdir, "model.ckpt") base_configs = sorted(glob.glob(os.path.join(logdir, "config.yaml"))) opt.base = base_configs configs = [OmegaConf.load(cfg) for cfg in opt.base] cli = OmegaConf.from_dotlist(unknown) config = OmegaConf.merge(*configs, cli) gpu = True eval_mode = True if opt.logdir != "none": locallog = logdir.split(os.sep)[-1] if locallog == "": locallog = logdir.split(os.sep)[-2] print(f"Switching logdir from '{logdir}' to '{os.path.join(opt.logdir, locallog)}'") logdir = os.path.join(opt.logdir, locallog) print(config) model, global_step = load_model(config, ckpt, gpu, eval_mode) print(f"global step: {global_step}") print(75 * "=") print("logging to:") logdir = os.path.join(logdir, "samples", f"{global_step:08}", now) imglogdir = os.path.join(logdir, 
"img") numpylogdir = os.path.join(logdir, "numpy") os.makedirs(imglogdir) os.makedirs(numpylogdir) print(logdir) print(75 * "=") # write config out sampling_file = os.path.join(logdir, "sampling_config.yaml") sampling_conf = vars(opt) with open(sampling_file, 'w') as f: yaml.dump(sampling_conf, f, default_flow_style=False) print(sampling_conf) run(model, imglogdir, eta=opt.eta, vanilla=opt.vanilla_sample, n_samples=opt.n_samples, custom_steps=opt.custom_steps, batch_size=opt.batch_size, nplog=numpylogdir) print("done.")
import os, sys import numpy as np import scann import argparse import glob from multiprocessing import cpu_count from tqdm import tqdm from ldm.util import parallel_data_prefetch def search_bruteforce(searcher): return searcher.score_brute_force().build() def search_partioned_ah(searcher, dims_per_block, aiq_threshold, reorder_k, partioning_trainsize, num_leaves, num_leaves_to_search): return searcher.tree(num_leaves=num_leaves, num_leaves_to_search=num_leaves_to_search, training_sample_size=partioning_trainsize). \ score_ah(dims_per_block, anisotropic_quantization_threshold=aiq_threshold).reorder(reorder_k).build() def search_ah(searcher, dims_per_block, aiq_threshold, reorder_k): return searcher.score_ah(dims_per_block, anisotropic_quantization_threshold=aiq_threshold).reorder( reorder_k).build() def load_datapool(dpath): def load_single_file(saved_embeddings): compressed = np.load(saved_embeddings) database = {key: compressed[key] for key in compressed.files} return database def load_multi_files(data_archive): database = {key: [] for key in data_archive[0].files} for d in tqdm(data_archive, desc=f'Loading datapool from {len(data_archive)} individual files.'): for key in d.files: database[key].append(d[key]) return database print(f'Load saved patch embedding from "{dpath}"') file_content = glob.glob(os.path.join(dpath, '*.npz')) if len(file_content) == 1: data_pool = load_single_file(file_content[0]) elif len(file_content) > 1: data = [np.load(f) for f in file_content] prefetched_data = parallel_data_prefetch(load_multi_files, data, n_proc=min(len(data), cpu_count()), target_data_type='dict') data_pool = {key: np.concatenate([od[key] for od in prefetched_data], axis=1)[0] for key in prefetched_data[0].keys()} else: raise ValueError(f'No npz-files in specified path "{dpath}" is this directory existing?') print(f'Finished loading of retrieval database of length {data_pool["embedding"].shape[0]}.') return data_pool def train_searcher(opt, metric='dot_product', partioning_trainsize=None, reorder_k=None, # todo tune aiq_thld=0.2, dims_per_block=2, num_leaves=None, num_leaves_to_search=None,): data_pool = load_datapool(opt.database) k = opt.knn if not reorder_k: reorder_k = 2 * k # normalize # embeddings = searcher = scann.scann_ops_pybind.builder(data_pool['embedding'] / np.linalg.norm(data_pool['embedding'], axis=1)[:, np.newaxis], k, metric) pool_size = data_pool['embedding'].shape[0] print(*(['#'] * 100)) print('Initializing scaNN searcher with the following values:') print(f'k: {k}') print(f'metric: {metric}') print(f'reorder_k: {reorder_k}') print(f'anisotropic_quantization_threshold: {aiq_thld}') print(f'dims_per_block: {dims_per_block}') print(*(['#'] * 100)) print('Start training searcher....') print(f'N samples in pool is {pool_size}') # this reflects the recommended design choices proposed at # https://github.com/google-research/google-research/blob/aca5f2e44e301af172590bb8e65711f0c9ee0cfd/scann/docs/algorithms.md if pool_size < 2e4: print('Using brute force search.') searcher = search_bruteforce(searcher) elif 2e4 <= pool_size and pool_size < 1e5: print('Using asymmetric hashing search and reordering.') searcher = search_ah(searcher, dims_per_block, aiq_thld, reorder_k) else: print('Using using partioning, asymmetric hashing search and reordering.') if not partioning_trainsize: partioning_trainsize = data_pool['embedding'].shape[0] // 10 if not num_leaves: num_leaves = int(np.sqrt(pool_size)) if not num_leaves_to_search: num_leaves_to_search = max(num_leaves // 20, 1) 
print('Partitioning params:') print(f'num_leaves: {num_leaves}') print(f'num_leaves_to_search: {num_leaves_to_search}') # self.searcher = self.search_ah(searcher, dims_per_block, aiq_thld, reorder_k) searcher = search_partioned_ah(searcher, dims_per_block, aiq_thld, reorder_k, partioning_trainsize, num_leaves, num_leaves_to_search) print('Finish training searcher') searcher_savedir = opt.target_path os.makedirs(searcher_savedir, exist_ok=True) searcher.serialize(searcher_savedir) print(f'Saved trained searcher under "{searcher_savedir}"') if __name__ == '__main__': sys.path.append(os.getcwd()) parser = argparse.ArgumentParser() parser.add_argument('--database', '-d', default='data/rdm/retrieval_databases/openimages', type=str, help='path to folder containing the clip feature of the database') parser.add_argument('--target_path', '-t', default='data/rdm/searchers/openimages', type=str, help='path to the target folder where the searcher shall be stored.') parser.add_argument('--knn', '-k', default=20, type=int, help='number of nearest neighbors, for which the searcher shall be optimized') opt, _ = parser.parse_known_args() train_searcher(opt,)
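# Sketch of querying a searcher trained and serialized by train_searcher()
# above, assuming the default --target_path and 768-dim CLIP ViT-L/14
# embeddings; queries must be L2-normalized the same way as the database.
import numpy as np
import scann

searcher = scann.scann_ops_pybind.load_searcher('data/rdm/searchers/openimages')
queries = np.random.randn(4, 768).astype(np.float32)
queries /= np.linalg.norm(queries, axis=1, keepdims=True)
neighbors, distances = searcher.search_batched(queries, final_num_neighbors=20)
print(neighbors.shape)  # (4, 20)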
import cv2
import fire
from imwatermark import WatermarkDecoder


def testit(img_path):
    """Try to decode a 136-bit invisible watermark from the given image."""
    bgr = cv2.imread(img_path)
    decoder = WatermarkDecoder('bytes', 136)
    watermark = decoder.decode(bgr, 'dwtDct')
    try:
        dec = watermark.decode('utf-8')
    except Exception:
        # payload is not valid utf-8 (e.g. no watermark present)
        dec = "null"
    print(dec)


if __name__ == "__main__":
    fire.Fire(testit)
import yaml
import torch
from diffusers import StableDiffusionPipeline

from ldm.util import instantiate_from_config

if __name__ == "__main__":
    with torch.no_grad():
        # build the ldm UNet from the training config
        yaml_path = "../../train_colossalai.yaml"
        with open(yaml_path, 'r', encoding='utf-8') as f:
            base_config = yaml.load(f.read(), Loader=yaml.FullLoader)
        unet_config = base_config['model']['params']['unet_config']
        diffusion_model = instantiate_from_config(unet_config).to("cuda:0")

        # load the reference diffusers UNet for comparison
        pipe = StableDiffusionPipeline.from_pretrained(
            "/data/scratch/diffuser/stable-diffusion-v1-4"
        ).to("cuda:0")
        dif_model_2 = pipe.unet

        # identical random latents, timesteps and text context for both models
        random_input_ = torch.rand((4, 4, 32, 32)).to("cuda:0")
        random_input_2 = torch.clone(random_input_).to("cuda:0")
        time_stamp = torch.randint(20, (4,)).to("cuda:0")
        time_stamp2 = torch.clone(time_stamp).to("cuda:0")
        context_ = torch.rand((4, 77, 768)).to("cuda:0")
        context_2 = torch.clone(context_).to("cuda:0")

        # both outputs should have the same latent shape (4, 4, 32, 32)
        out_1 = diffusion_model(random_input_, time_stamp, context_)
        out_2 = dif_model_2(random_input_2, time_stamp2, context_2)
        print(out_1.shape)
        print(out_2['sample'].shape)
import numpy as np class LambdaWarmUpCosineScheduler: """ note: use with a base_lr of 1.0 """ def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): self.lr_warm_up_steps = warm_up_steps self.lr_start = lr_start self.lr_min = lr_min self.lr_max = lr_max self.lr_max_decay_steps = max_decay_steps self.last_lr = 0. self.verbosity_interval = verbosity_interval def schedule(self, n, **kwargs): if self.verbosity_interval > 0: if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") if n < self.lr_warm_up_steps: lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start self.last_lr = lr return lr else: t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) t = min(t, 1.0) lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( 1 + np.cos(t * np.pi)) self.last_lr = lr return lr def __call__(self, n, **kwargs): return self.schedule(n,**kwargs) class LambdaWarmUpCosineScheduler2: """ supports repeated iterations, configurable via lists note: use with a base_lr of 1.0. """ def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0): assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths) self.lr_warm_up_steps = warm_up_steps self.f_start = f_start self.f_min = f_min self.f_max = f_max self.cycle_lengths = cycle_lengths self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) self.last_f = 0. self.verbosity_interval = verbosity_interval def find_in_interval(self, n): interval = 0 for cl in self.cum_cycles[1:]: if n <= cl: return interval interval += 1 def schedule(self, n, **kwargs): cycle = self.find_in_interval(n) n = n - self.cum_cycles[cycle] if self.verbosity_interval > 0: if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " f"current cycle {cycle}") if n < self.lr_warm_up_steps[cycle]: f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] self.last_f = f return f else: t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]) t = min(t, 1.0) f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( 1 + np.cos(t * np.pi)) self.last_f = f return f def __call__(self, n, **kwargs): return self.schedule(n, **kwargs) class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): def schedule(self, n, **kwargs): cycle = self.find_in_interval(n) n = n - self.cum_cycles[cycle] if self.verbosity_interval > 0: if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " f"current cycle {cycle}") if n < self.lr_warm_up_steps[cycle]: f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] self.last_f = f return f else: f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle]) self.last_f = f return f
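# Minimal sketch of plugging one of the schedulers above into a PyTorch
# LambdaLR; the optimizer and the warm-up / cycle values are illustrative, not
# taken from a shipped config. The scheduler returns an lr multiplier, so the
# optimizer should be created with the real base learning rate.
import torch

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.AdamW(params, lr=1.0e-4)
multiplier = LambdaLinearScheduler(
    warm_up_steps=[100],
    f_min=[1.0],
    f_max=[1.0],
    f_start=[1.0e-6],
    cycle_lengths=[10_000_000_000_000],  # effectively a single, never-restarting cycle
)
sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=multiplier.schedule)
for _ in range(3):
    opt.step()
    sched.step()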
import importlib import torch from torch import optim import numpy as np from inspect import isfunction from PIL import Image, ImageDraw, ImageFont def log_txt_as_img(wh, xc, size=10): # wh a tuple of (width, height) # xc a list of captions to plot b = len(xc) txts = list() for bi in range(b): txt = Image.new("RGB", wh, color="white") draw = ImageDraw.Draw(txt) font = ImageFont.truetype('data/DejaVuSans.ttf', size=size) nc = int(40 * (wh[0] / 256)) lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) try: draw.text((0, 0), lines, fill="black", font=font) except UnicodeEncodeError: print("Cant encode string for logging. Skipping.") txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 txts.append(txt) txts = np.stack(txts) txts = torch.tensor(txts) return txts def ismap(x): if not isinstance(x, torch.Tensor): return False return (len(x.shape) == 4) and (x.shape[1] > 3) def isimage(x): if not isinstance(x,torch.Tensor): return False return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) def exists(x): return x is not None def default(val, d): if exists(val): return val return d() if isfunction(d) else d def mean_flat(tensor): """ https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 Take the mean over all non-batch dimensions. """ return tensor.mean(dim=list(range(1, len(tensor.shape)))) def count_params(model, verbose=False): total_params = sum(p.numel() for p in model.parameters()) if verbose: print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.") return total_params def instantiate_from_config(config): if not "target" in config: if config == '__is_first_stage__': return None elif config == "__is_unconditional__": return None raise KeyError("Expected key `target` to instantiate.") return get_obj_from_str(config["target"])(**config.get("params", dict())) def get_obj_from_str(string, reload=False): module, cls = string.rsplit(".", 1) if reload: module_imp = importlib.import_module(module) importlib.reload(module_imp) return getattr(importlib.import_module(module, package=None), cls) class AdamWwithEMAandWings(optim.Optimizer): # credit to https://gist.github.com/crowsonkb/65f7265353f403714fce3b2595e0b298 def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8, # TODO: check hyperparameters before using weight_decay=1.e-2, amsgrad=False, ema_decay=0.9999, # ema decay to match previous code ema_power=1., param_names=()): """AdamW that saves EMA versions of the parameters.""" if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) if not 0.0 <= ema_decay <= 1.0: raise ValueError("Invalid ema_decay value: {}".format(ema_decay)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay, ema_power=ema_power, param_names=param_names) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. 
Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] exp_avg_sqs = [] ema_params_with_grad = [] state_sums = [] max_exp_avg_sqs = [] state_steps = [] amsgrad = group['amsgrad'] beta1, beta2 = group['betas'] ema_decay = group['ema_decay'] ema_power = group['ema_power'] for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('AdamW does not support sparse gradients') grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) # Exponential moving average of parameter values state['param_exp_avg'] = p.detach().float().clone() exp_avgs.append(state['exp_avg']) exp_avg_sqs.append(state['exp_avg_sq']) ema_params_with_grad.append(state['param_exp_avg']) if amsgrad: max_exp_avg_sqs.append(state['max_exp_avg_sq']) # update the steps for each param group update state['step'] += 1 # record the step after step update state_steps.append(state['step']) optim._functional.adamw(params_with_grad, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, amsgrad=amsgrad, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'], maximize=False) cur_ema_decay = min(ema_decay, 1 - state['step'] ** -ema_power) for param, ema_param in zip(params_with_grad, ema_params_with_grad): ema_param.mul_(cur_ema_decay).add_(param.float(), alpha=1 - cur_ema_decay) return loss
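# Minimal sketch of instantiate_from_config with a hypothetical target; any
# importable "module.path.ClassName" works the same way, with the optional
# "params" dict forwarded as keyword arguments.
cfg = {
    "target": "torch.nn.Linear",
    "params": {"in_features": 16, "out_features": 4},
}
layer = instantiate_from_config(cfg)  # equivalent to torch.nn.Linear(16, 4)
print(type(layer).__name__)           # -> "Linear"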
import torch try: import lightning.pytorch as pl except: import pytorch_lightning as pl import torch.nn.functional as F from contextlib import contextmanager from ldm.modules.diffusionmodules.model import Encoder, Decoder from ldm.modules.distributions.distributions import DiagonalGaussianDistribution from ldm.util import instantiate_from_config from ldm.modules.ema import LitEma class AutoencoderKL(pl.LightningModule): def __init__(self, ddconfig, lossconfig, embed_dim, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, ema_decay=None, learn_logvar=False ): super().__init__() self.learn_logvar = learn_logvar self.image_key = image_key self.encoder = Encoder(**ddconfig) self.decoder = Decoder(**ddconfig) self.loss = instantiate_from_config(lossconfig) assert ddconfig["double_z"] self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) self.embed_dim = embed_dim if colorize_nlabels is not None: assert type(colorize_nlabels)==int self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) if monitor is not None: self.monitor = monitor self.use_ema = ema_decay is not None if self.use_ema: self.ema_decay = ema_decay assert 0. < ema_decay < 1. self.model_ema = LitEma(self, decay=ema_decay) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) def init_from_ckpt(self, path, ignore_keys=list()): sd = torch.load(path, map_location="cpu")["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] self.load_state_dict(sd, strict=False) print(f"Restored from {path}") @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.parameters()) self.model_ema.copy_to(self) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.parameters()) if context is not None: print(f"{context}: Restored training weights") def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self) def encode(self, x): h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) return posterior def decode(self, z): z = self.post_quant_conv(z) dec = self.decoder(z) return dec def forward(self, input, sample_posterior=True): posterior = self.encode(input) if sample_posterior: z = posterior.sample() else: z = posterior.mode() dec = self.decode(z) return dec, posterior def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() return x def training_step(self, batch, batch_idx, optimizer_idx): inputs = self.get_input(batch, self.image_key) reconstructions, posterior = self(inputs) if optimizer_idx == 0: # train encoder+decoder+logvar aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train") self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) return aeloss if optimizer_idx == 1: # train the discriminator discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, 
last_layer=self.get_last_layer(), split="train") self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) return discloss def validation_step(self, batch, batch_idx): log_dict = self._validation_step(batch, batch_idx) with self.ema_scope(): log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema") return log_dict def _validation_step(self, batch, batch_idx, postfix=""): inputs = self.get_input(batch, self.image_key) reconstructions, posterior = self(inputs) aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step, last_layer=self.get_last_layer(), split="val"+postfix) discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, last_layer=self.get_last_layer(), split="val"+postfix) self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"]) self.log_dict(log_dict_ae) self.log_dict(log_dict_disc) return self.log_dict def configure_optimizers(self): lr = self.learning_rate ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list( self.quant_conv.parameters()) + list(self.post_quant_conv.parameters()) if self.learn_logvar: print(f"{self.__class__.__name__}: Learning logvar") ae_params_list.append(self.loss.logvar) opt_ae = torch.optim.Adam(ae_params_list, lr=lr, betas=(0.5, 0.9)) opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)) return [opt_ae, opt_disc], [] def get_last_layer(self): return self.decoder.conv_out.weight @torch.no_grad() def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs): log = dict() x = self.get_input(batch, self.image_key) x = x.to(self.device) if not only_inputs: xrec, posterior = self(x) if x.shape[1] > 3: # colorize with random projection assert xrec.shape[1] > 3 x = self.to_rgb(x) xrec = self.to_rgb(xrec) log["samples"] = self.decode(torch.randn_like(posterior.sample())) log["reconstructions"] = xrec if log_ema or self.use_ema: with self.ema_scope(): xrec_ema, posterior_ema = self(x) if x.shape[1] > 3: # colorize with random projection assert xrec_ema.shape[1] > 3 xrec_ema = self.to_rgb(xrec_ema) log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample())) log["reconstructions_ema"] = xrec_ema log["inputs"] = x return log def to_rgb(self, x): assert self.image_key == "segmentation" if not hasattr(self, "colorize"): self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) x = F.conv2d(x, weight=self.colorize) x = 2.*(x-x.min())/(x.max()-x.min()) - 1. return x class IdentityFirstStage(torch.nn.Module): def __init__(self, *args, vq_interface=False, **kwargs): self.vq_interface = vq_interface super().__init__() def encode(self, x, *args, **kwargs): return x def decode(self, x, *args, **kwargs): return x def quantize(self, x, *args, **kwargs): if self.vq_interface: return x, None, [None, None, None] return x def forward(self, x, *args, **kwargs): return x
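# Hedged shape sketch for AutoencoderKL. `ae` stands for an already constructed instance
# with a typical f=8, double_z first-stage config; that instance and the 256x256 input
# size are assumptions for illustration, not values defined in this file.
x = torch.randn(2, 3, 256, 256)        # images scaled to [-1, 1], NCHW
posterior = ae.encode(x)               # DiagonalGaussianDistribution over the latent
z = posterior.sample()                 # (2, embed_dim, 32, 32) for an f=8 encoder
rec = ae.decode(z)                     # decoded back to (2, 3, 256, 256)
rec2, posterior2 = ae(x)               # forward() = encode, sample, decode in one call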
"""SAMPLING ONLY.""" import torch import numpy as np from tqdm import tqdm from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) alphas_cumprod = self.model.alphas_cumprod assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) self.register_buffer('betas', to_torch(self.model.betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) # ddim sampling parameters ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta,verbose=verbose) self.register_buffer('ddim_sigmas', ddim_sigmas) self.register_buffer('ddim_alphas', ddim_alphas) self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
dynamic_threshold=None, ucg_schedule=None, **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): ctmp = conditioning[list(conditioning.keys())[0]] while isinstance(ctmp, list): ctmp = ctmp[0] cbs = ctmp.shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") elif isinstance(conditioning, list): for ctmp in conditioning: if ctmp.shape[0] != batch_size: print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) # sampling C, H, W = shape size = (batch_size, C, H, W) print(f'Data shape for DDIM sampling is {size}, eta {eta}') samples, intermediates = self.ddim_sampling(conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold, ucg_schedule=ucg_schedule ) return samples, intermediates @torch.no_grad() def ddim_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, ucg_schedule=None): device = self.model.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T if timesteps is None: timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps elif timesteps is not None and not ddim_use_original_steps: subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 timesteps = self.ddim_timesteps[:subset_end] intermediates = {'x_inter': [img], 'pred_x0': [img]} time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] print(f"Running DDIM Sampling with {total_steps} timesteps") iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full((b,), step, device=device, dtype=torch.long) if mask is not None: assert x0 is not None img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? img = img_orig * mask + (1.
- mask) * img if ucg_schedule is not None: assert len(ucg_schedule) == len(time_range) unconditional_guidance_scale = ucg_schedule[i] outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold) img, pred_x0 = outs if callback: callback(i) if img_callback: img_callback(pred_x0, i) if index % log_every_t == 0 or index == total_steps - 1: intermediates['x_inter'].append(img) intermediates['pred_x0'].append(pred_x0) return img, intermediates @torch.no_grad() def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None): b, *_, device = *x.shape, x.device if unconditional_conditioning is None or unconditional_guidance_scale == 1.: model_output = self.model.apply_model(x, t, c) else: x_in = torch.cat([x] * 2) t_in = torch.cat([t] * 2) if isinstance(c, dict): assert isinstance(unconditional_conditioning, dict) c_in = dict() for k in c: if isinstance(c[k], list): c_in[k] = [torch.cat([ unconditional_conditioning[k][i], c[k][i]]) for i in range(len(c[k]))] else: c_in[k] = torch.cat([ unconditional_conditioning[k], c[k]]) elif isinstance(c, list): c_in = list() assert isinstance(unconditional_conditioning, list) for i in range(len(c)): c_in.append(torch.cat([unconditional_conditioning[i], c[i]])) else: c_in = torch.cat([unconditional_conditioning, c]) model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond) if self.model.parameterization == "v": e_t = self.model.predict_eps_from_z_and_v(x, t, model_output) else: e_t = model_output if score_corrector is not None: assert self.model.parameterization == "eps", 'not implemented' e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) # current prediction for x_0 if self.model.parameterization != "v": pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() else: pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output) if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) if dynamic_threshold is not None: raise NotImplementedError() # direction pointing to x_t dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0 @torch.no_grad() def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None): num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] assert t_enc <= num_reference_steps num_steps = t_enc if use_original_steps: alphas_next = self.alphas_cumprod[:num_steps] alphas = self.alphas_cumprod_prev[:num_steps] else: alphas_next = self.ddim_alphas[:num_steps] alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) x_next = x0 intermediates = [] inter_steps = [] for i in tqdm(range(num_steps), desc='Encoding Image'): t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long) if unconditional_guidance_scale == 1.: noise_pred = self.model.apply_model(x_next, t, c) else: assert unconditional_conditioning is not None e_t_uncond, noise_pred = torch.chunk( self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)), torch.cat((unconditional_conditioning, c))), 2) noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond) xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next weighted_noise_pred = alphas_next[i].sqrt() * ( (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred x_next = xt_weighted + weighted_noise_pred if return_intermediates and i % ( num_steps // return_intermediates) == 0 and i < num_steps - 1: intermediates.append(x_next) inter_steps.append(i) elif return_intermediates and i >= num_steps - 2: intermediates.append(x_next) inter_steps.append(i) if callback: callback(i) out = {'x_encoded': x_next, 'intermediate_steps': inter_steps} if return_intermediates: out.update({'intermediates': intermediates}) return x_next, out @torch.no_grad() def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): # fast, but does not allow for exact reconstruction # t serves as an index to gather the correct alphas if use_original_steps: sqrt_alphas_cumprod = self.sqrt_alphas_cumprod sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod else: sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas if noise is None: noise = torch.randn_like(x0) return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise) @torch.no_grad() def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, use_original_steps=False, callback=None): timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps timesteps = timesteps[:t_start] time_range = np.flip(timesteps) total_steps = timesteps.shape[0] print(f"Running DDIM Sampling with {total_steps} timesteps") iterator = tqdm(time_range, desc='Decoding image', total=total_steps) x_dec = x_latent for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning) 
if callback: callback(i) return x_dec
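# Hedged usage sketch for DDIMSampler. `model` stands for an already loaded LatentDiffusion
# checkpoint, and the (4, 64, 64) latent shape assumes a 512x512 image with a 4-channel,
# f=8 first stage; both are illustrative assumptions, not values defined in this file.
sampler = DDIMSampler(model)
c = model.get_learned_conditioning(["a photograph of an astronaut riding a horse"])
uc = model.get_learned_conditioning([""])
samples, intermediates = sampler.sample(S=50,                       # number of DDIM steps
                                        batch_size=1,
                                        shape=(4, 64, 64),          # (C, H // 8, W // 8)
                                        conditioning=c,
                                        eta=0.0,                    # deterministic DDIM
                                        unconditional_guidance_scale=7.5,
                                        unconditional_conditioning=uc,
                                        verbose=False)
images = model.decode_first_stage(samples)                          # latents -> pixel space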
import torch import numpy as np def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions. From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less') return x[(...,) + (None,) * dims_to_append] def norm_thresholding(x0, value): s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim) return x0 * (value / s) def spatial_norm_thresholding(x0, value): # b c h w s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value) return x0 * (value / s)
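# Small illustration of append_dims: it pads trailing singleton dimensions so a per-sample
# statistic can broadcast over an image batch (the shapes below are arbitrary examples).
x0 = torch.randn(2, 3, 8, 8)
rms = x0.pow(2).flatten(1).mean(1).sqrt()   # per-sample RMS, shape (2,)
rms = append_dims(rms, x0.ndim)             # shape (2, 1, 1, 1)
scaled = x0 * (1.0 / rms)                   # broadcasts cleanly over C, H, W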
import os import torch import lightning.pytorch as pl from omegaconf import OmegaConf from torch.nn import functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from copy import deepcopy from einops import rearrange from glob import glob from natsort import natsorted from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config __models__ = { 'class_label': EncoderUNetModel, 'segmentation': UNetModel } def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self class NoisyLatentImageClassifier(pl.LightningModule): def __init__(self, diffusion_path, num_classes, ckpt_path=None, pool='attention', label_key=None, diffusion_ckpt_path=None, scheduler_config=None, weight_decay=1.e-2, log_steps=10, monitor='val/loss', *args, **kwargs): super().__init__(*args, **kwargs) self.num_classes = num_classes # get latest config of diffusion model diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1] self.diffusion_config = OmegaConf.load(diffusion_config).model self.diffusion_config.params.ckpt_path = diffusion_ckpt_path self.load_diffusion() self.monitor = monitor self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 self.log_time_interval = self.diffusion_model.num_timesteps // log_steps self.log_steps = log_steps self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \ else self.diffusion_model.cond_stage_key assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params' if self.label_key not in __models__: raise NotImplementedError() self.load_classifier(ckpt_path, pool) self.scheduler_config = scheduler_config self.use_scheduler = self.scheduler_config is not None self.weight_decay = weight_decay def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def load_diffusion(self): model = instantiate_from_config(self.diffusion_config) self.diffusion_model = model.eval() self.diffusion_model.train = disabled_train for param in self.diffusion_model.parameters(): param.requires_grad = False def load_classifier(self, ckpt_path, pool): model_config = deepcopy(self.diffusion_config.params.unet_config.params) model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels model_config.out_channels = self.num_classes if self.label_key == 'class_label': model_config.pool = pool self.model = __models__[self.label_key](**model_config) if ckpt_path is not None: print('#####################################################################') print(f'load from ckpt "{ckpt_path}"') print('#####################################################################') self.init_from_ckpt(ckpt_path) @torch.no_grad() def get_x_noisy(self, x, t, noise=None): noise = 
default(noise, lambda: torch.randn_like(x)) continuous_sqrt_alpha_cumprod = None if self.diffusion_model.use_continuous_noise: continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) # todo: make sure t+1 is correct here return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise, continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod) def forward(self, x_noisy, t, *args, **kwargs): return self.model(x_noisy, t) @torch.no_grad() def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x @torch.no_grad() def get_conditioning(self, batch, k=None): if k is None: k = self.label_key assert k is not None, 'Needs to provide label key' targets = batch[k].to(self.device) if self.label_key == 'segmentation': targets = rearrange(targets, 'b h w c -> b c h w') for down in range(self.numd): h, w = targets.shape[-2:] targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest') # targets = rearrange(targets,'b c h w -> b h w c') return targets def compute_top_k(self, logits, labels, k, reduction="mean"): _, top_ks = torch.topk(logits, k, dim=1) if reduction == "mean": return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() elif reduction == "none": return (top_ks == labels[:, None]).float().sum(dim=-1) def on_train_epoch_start(self): # save some memory self.diffusion_model.model.to('cpu') @torch.no_grad() def write_logs(self, loss, logits, targets): log_prefix = 'train' if self.training else 'val' log = {} log[f"{log_prefix}/loss"] = loss.mean() log[f"{log_prefix}/acc@1"] = self.compute_top_k( logits, targets, k=1, reduction="mean" ) log[f"{log_prefix}/acc@5"] = self.compute_top_k( logits, targets, k=5, reduction="mean" ) self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True) self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False) self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True) lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True) def shared_step(self, batch, t=None): x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key) targets = self.get_conditioning(batch) if targets.dim() == 4: targets = targets.argmax(dim=1) if t is None: t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long() else: t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long() x_noisy = self.get_x_noisy(x, t) logits = self(x_noisy, t) loss = F.cross_entropy(logits, targets, reduction='none') self.write_logs(loss.detach(), logits.detach(), targets.detach()) loss = loss.mean() return loss, logits, x_noisy, targets def training_step(self, batch, batch_idx): loss, *_ = self.shared_step(batch) return loss def reset_noise_accs(self): self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)} def on_validation_start(self): self.reset_noise_accs() @torch.no_grad() def validation_step(self, batch, batch_idx): loss, *_ = self.shared_step(batch) for t in self.noisy_acc: _, logits, _, targets = self.shared_step(batch, t) self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean')) self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean')) return loss def 
configure_optimizers(self): optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) if self.use_scheduler: scheduler = instantiate_from_config(self.scheduler_config) print("Setting up LambdaLR scheduler...") scheduler = [ { 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule), 'interval': 'step', 'frequency': 1 }] return [optimizer], scheduler return optimizer @torch.no_grad() def log_images(self, batch, N=8, *args, **kwargs): log = dict() x = self.get_input(batch, self.diffusion_model.first_stage_key) log['inputs'] = x y = self.get_conditioning(batch) if self.label_key == 'class_label': y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['labels'] = y if ismap(y): log['labels'] = self.diffusion_model.to_rgb(y) for step in range(self.log_steps): current_time = step * self.log_time_interval _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) log[f'inputs@t{current_time}'] = x_noisy pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) pred = rearrange(pred, 'b h w c -> b c h w') log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred) for key in log: log[key] = log[key][:N] return log
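# The top-k accuracy computed by compute_top_k above reduces to the following standalone
# check (shown outside the class purely for clarity; the tensors are made-up examples).
logits = torch.tensor([[0.1, 2.0, 0.3],
                       [1.5, 0.2, 0.1]])
labels = torch.tensor([1, 0])
_, top_ks = torch.topk(logits, k=1, dim=1)
acc_at_1 = (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()   # -> 1.0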
"""SAMPLING ONLY.""" import torch import numpy as np from tqdm import tqdm from functools import partial from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like from ldm.models.diffusion.sampling_util import norm_thresholding class PLMSSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): if ddim_eta != 0: raise ValueError('ddim_eta must be 0 for PLMS') self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) alphas_cumprod = self.model.alphas_cumprod assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) self.register_buffer('betas', to_torch(self.model.betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) # ddim sampling parameters ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta,verbose=verbose) self.register_buffer('ddim_sigmas', ddim_sigmas) self.register_buffer('ddim_alphas', ddim_alphas) self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
dynamic_threshold=None, **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): cbs = conditioning[list(conditioning.keys())[0]].shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) # sampling C, H, W = shape size = (batch_size, C, H, W) print(f'Data shape for PLMS sampling is {size}') samples, intermediates = self.plms_sampling(conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold, ) return samples, intermediates @torch.no_grad() def plms_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None): device = self.model.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T if timesteps is None: timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps elif timesteps is not None and not ddim_use_original_steps: subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 timesteps = self.ddim_timesteps[:subset_end] intermediates = {'x_inter': [img], 'pred_x0': [img]} time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] print(f"Running PLMS Sampling with {total_steps} timesteps") iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) old_eps = [] for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full((b,), step, device=device, dtype=torch.long) ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) if mask is not None: assert x0 is not None img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? img = img_orig * mask + (1. 
- mask) * img outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, old_eps=old_eps, t_next=ts_next, dynamic_threshold=dynamic_threshold) img, pred_x0, e_t = outs old_eps.append(e_t) if len(old_eps) >= 4: old_eps.pop(0) if callback: callback(i) if img_callback: img_callback(pred_x0, i) if index % log_every_t == 0 or index == total_steps - 1: intermediates['x_inter'].append(img) intermediates['pred_x0'].append(pred_x0) return img, intermediates @torch.no_grad() def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None, dynamic_threshold=None): b, *_, device = *x.shape, x.device def get_model_output(x, t): if unconditional_conditioning is None or unconditional_guidance_scale == 1.: e_t = self.model.apply_model(x, t, c) else: x_in = torch.cat([x] * 2) t_in = torch.cat([t] * 2) c_in = torch.cat([unconditional_conditioning, c]) e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) if score_corrector is not None: assert self.model.parameterization == "eps" e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) return e_t alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas def get_x_prev_and_pred_x0(e_t, index): # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) # current prediction for x_0 pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) if dynamic_threshold is not None: pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) # direction pointing to x_t dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0 e_t = get_model_output(x, t) if len(old_eps) == 0: # Pseudo Improved Euler (2nd order) x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) e_t_next = get_model_output(x_prev, t_next) e_t_prime = (e_t + e_t_next) / 2 elif len(old_eps) == 1: # 2nd order Pseudo Linear Multistep (Adams-Bashforth) e_t_prime = (3 * e_t - old_eps[-1]) / 2 elif len(old_eps) == 2: # 3rd order Pseudo Linear Multistep (Adams-Bashforth) e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 elif len(old_eps) >= 3: # 4th order Pseudo Linear Multistep (Adams-Bashforth) e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) return x_prev, pred_x0, e_t
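# PLMSSampler is called the same way as DDIMSampler, with the extra restriction that eta
# must be 0. A hedged sketch under the same assumptions as the DDIM example above
# (a loaded LatentDiffusion `model` plus precomputed conditionings `c` and `uc`):
plms = PLMSSampler(model)
samples, _ = plms.sample(S=50, batch_size=1, shape=(4, 64, 64),
                         conditioning=c, eta=0.0,
                         unconditional_guidance_scale=7.5,
                         unconditional_conditioning=uc, verbose=False)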
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ import numpy as np import torch import torch.nn as nn try: import lightning.pytorch as pl from lightning.pytorch.utilities import rank_zero_info, rank_zero_only except: import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_only, rank_zero_info import itertools from contextlib import contextmanager, nullcontext from functools import partial from einops import rearrange, repeat from ldm.models.autoencoder import * from ldm.models.autoencoder import AutoencoderKL, IdentityFirstStage from ldm.models.diffusion.ddim import * from ldm.models.diffusion.ddim import DDIMSampler from ldm.modules.diffusionmodules.model import * from ldm.modules.diffusionmodules.model import Decoder, Encoder, Model from ldm.modules.diffusionmodules.openaimodel import * from ldm.modules.diffusionmodules.openaimodel import AttentionPool2d from ldm.modules.diffusionmodules.util import extract_into_tensor, make_beta_schedule, noise_like from ldm.modules.distributions.distributions import DiagonalGaussianDistribution, normal_kl from ldm.modules.ema import LitEma from ldm.modules.encoders.modules import * from ldm.util import count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat from omegaconf import ListConfig from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., use_fp16=True, make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization rank_zero_info(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size self.channels = channels self.use_positional_encodings = use_positional_encodings self.unet_config = unet_config self.conditioning_key = conditioning_key self.model = DiffusionWrapper(unet_config, 
conditioning_key) # count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) rank_zero_info(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit self.ckpt = ckpt self.ignore_keys = ignore_keys self.load_only_unet = load_only_unet self.reset_ema = reset_ema self.reset_num_ema_updates = reset_num_ema_updates if reset_ema: assert exists(ckpt) ''' Uncomment if you Use DDP Strategy ''' # if ckpt is not None: # self.init_from_ckpt(ckpt, ignore_keys=ignore_keys, only_model=load_only_unet) # if reset_ema: # assert self.use_ema # rank_zero_info(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") # self.model_ema = LitEma(self.model) if reset_num_ema_updates: rank_zero_info(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.timesteps = timesteps self.beta_schedule = beta_schedule self.given_betas = given_betas self.linear_start = linear_start self.linear_end = linear_end self.cosine_s = cosine_s self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.logvar_init = logvar_init self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.use_fp16 = use_fp16 self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. 
- alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch((1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas**2 / (2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas**2 / (2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: rank_zero_info(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: rank_zero_info(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): rank_zero_info("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm(itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) rank_zero_info(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: 
rank_zero_info(f"Missing Keys:\n {missing}") if len(unexpected) > 0: rank_zero_info(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v) def predict_eps_from_z_and_v(self, x_t, t, v): return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t) def q_posterior(self, x_start, x_t, t): posterior_mean = (extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = 
x[..., None] x = rearrange(x, 'b h w c -> b c h w') if self.use_fp16: x = x.to(memory_format=torch.contiguous_format).half() else: x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, use_fp16=True, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= 
kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.first_stage_config = first_stage_config self.cond_stage_config = cond_stage_config self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None ''' Uncomment if you Use DDP Strategy ''' # self.restarted_from_ckpt = False # if self.ckpt is not None: # self.init_from_ckpt(self.ckpt, self.ignore_keys) # self.restarted_from_ckpt = True # if self.reset_ema: # assert self.use_ema # rank_zero_info( # f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") # self.model_ema = LitEma(self.model) if self.reset_num_ema_updates: rank_zero_info(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def configure_sharded_model(self) -> None: rank_zero_info("Configure sharded model for LatentDiffusion") self.model = DiffusionWrapper(self.unet_config, self.conditioning_key) count_params(self.model, verbose=True) if self.use_ema: self.model_ema = LitEma(self.model) if self.ckpt is not None: self.init_from_ckpt(self.ckpt, ignore_keys=self.ignore_keys, only_model=self.load_only_unet) if self.reset_ema: assert self.use_ema rank_zero_info( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) self.register_schedule(given_betas=self.given_betas, beta_schedule=self.beta_schedule, timesteps=self.timesteps, linear_start=self.linear_start, linear_end=self.linear_end, cosine_s=self.cosine_s) self.logvar = torch.full(fill_value=self.logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) if self.ucg_training: self.ucg_prng = np.random.RandomState() self.instantiate_first_stage(self.first_stage_config) self.instantiate_cond_stage(self.cond_stage_config) if self.ckpt is not None: self.init_from_ckpt(self.ckpt, self.ignore_keys) self.restarted_from_ckpt = True if self.reset_ema: assert self.use_ema rank_zero_info( f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) def make_cond_schedule(self,): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings rank_zero_info("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) rank_zero_info(f"setting self.scale_factor to {self.scale_factor}") rank_zero_info("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": rank_zero_info("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": rank_zero_info(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z.half() if self.use_fp16 else self.scale_factor * z def get_learned_conditioning(self, 
c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = 
self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. 
/ self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == 
"eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list( map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed(range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, 
noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list( map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( self.first_stage_model, IdentityFirstStage): # also display when quantizing x0 while sampling with ema_scope("Plotting Quantized Denoised"): samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, quantize_denoised=True) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, # quantize_denoised=True) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_x0_quantized"] = x_samples if unconditional_guidance_scale > 1.0: uc = self.get_unconditional_conditioning(N, unconditional_guidance_label) if self.model.conditioning_key == "crossattn-adm": uc = {"c_crossattn": [uc], "c_adm": c["c_adm"]} with ema_scope("Sampling with classifier-free guidance"): samples_cfg, _ = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, 
unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=uc, ) x_samples_cfg = self.decode_first_stage(samples_cfg) log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg if inpaint: # make a simple center square b, h, w = z.shape[0], z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. mask = mask[:, None, ...] with ema_scope("Plotting Inpaint"): samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_inpainting"] = x_samples log["mask"] = mask # outpaint mask = 1. - mask with ema_scope("Plotting Outpaint"): samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta, ddim_steps=ddim_steps, x0=z[:N], mask=mask) x_samples = self.decode_first_stage(samples.to(self.device)) log["samples_outpainting"] = x_samples if plot_progressive_rows: with ema_scope("Plotting Progressives"): img, progressives = self.progressive_denoising(c, shape=(self.channels, self.image_size, self.image_size), batch_size=N) prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") log["progressive_row"] = prog_row if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.cond_stage_trainable: rank_zero_info(f"{self.__class__.__name__}: Also optimizing conditioner params!") params = params + list(self.cond_stage_model.parameters()) if self.learn_logvar: rank_zero_info('Diffusion model optimizing logvar') params.append(self.logvar) from colossalai.nn.optimizer import HybridAdam opt = HybridAdam(params, lr=lr) # opt = torch.optim.AdamW(params, lr=lr) if self.use_scheduler: assert 'target' in self.scheduler_config scheduler = instantiate_from_config(self.scheduler_config) rank_zero_info("Setting up LambdaLR scheduler...") scheduler = [{'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), 'interval': 'step', 'frequency': 1}] return [opt], scheduler return opt @torch.no_grad() def to_rgb(self, x): x = x.float() if not hasattr(self, "colorize"): self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) x = nn.functional.conv2d(x, weight=self.colorize) x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
return x class DiffusionWrapper(pl.LightningModule): def __init__(self, diff_model_config, conditioning_key): super().__init__() self.sequential_cross_attn = diff_model_config.pop("sequential_crossattn", False) self.diffusion_model = instantiate_from_config(diff_model_config) self.conditioning_key = conditioning_key assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm'] def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None): if self.conditioning_key is None: out = self.diffusion_model(x, t) elif self.conditioning_key == 'concat': xc = torch.cat([x] + c_concat, dim=1) out = self.diffusion_model(xc, t) elif self.conditioning_key == 'crossattn': if not self.sequential_cross_attn: cc = torch.cat(c_crossattn, 1) else: cc = c_crossattn out = self.diffusion_model(x, t, context=cc) elif self.conditioning_key == 'hybrid': xc = torch.cat([x] + c_concat, dim=1) cc = torch.cat(c_crossattn, 1) out = self.diffusion_model(xc, t, context=cc) elif self.conditioning_key == 'hybrid-adm': assert c_adm is not None xc = torch.cat([x] + c_concat, dim=1) cc = torch.cat(c_crossattn, 1) out = self.diffusion_model(xc, t, context=cc, y=c_adm) elif self.conditioning_key == 'crossattn-adm': assert c_adm is not None cc = torch.cat(c_crossattn, 1) out = self.diffusion_model(x, t, context=cc, y=c_adm) elif self.conditioning_key == 'adm': cc = c_crossattn[0] out = self.diffusion_model(x, t, y=cc) else: raise NotImplementedError() return out class LatentUpscaleDiffusion(LatentDiffusion): def __init__(self, *args, low_scale_config, low_scale_key="LR", noise_level_key=None, **kwargs): super().__init__(*args, **kwargs) # assumes that neither the cond_stage nor the low_scale_model contain trainable params assert not self.cond_stage_trainable self.instantiate_low_stage(low_scale_config) self.low_scale_key = low_scale_key self.noise_level_key = noise_level_key def instantiate_low_stage(self, config): model = instantiate_from_config(config) self.low_scale_model = model.eval() self.low_scale_model.train = disabled_train for param in self.low_scale_model.parameters(): param.requires_grad = False @torch.no_grad() def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False): if not log_mode: z, c = super().get_input(batch, k, force_c_encode=True, bs=bs) else: z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=bs) x_low = batch[self.low_scale_key][:bs] x_low = rearrange(x_low, 'b h w c -> b c h w') if self.use_fp16: x_low = x_low.to(memory_format=torch.contiguous_format).half() else: x_low = x_low.to(memory_format=torch.contiguous_format).float() zx, noise_level = self.low_scale_model(x_low) if self.noise_level_key is not None: # get noise level from batch instead, e.g. 
when extracting a custom noise level for bsr raise NotImplementedError('TODO') all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level} if log_mode: # TODO: maybe disable if too expensive x_low_rec = self.low_scale_model.decode(zx) return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level return z, all_conds @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N, log_mode=True) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec log["x_lr"] = x_low log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', 'cls']: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if unconditional_guidance_scale > 1.0: uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label) # TODO explore better "unconditional" choices for the other keys # maybe guide away from empty text label and highest noise level and maximally degraded zx? uc = dict() for k in c: if k == "c_crossattn": assert isinstance(c[k], list) and len(c[k]) == 1 uc[k] = [uc_tmp] elif k == "c_adm": # todo: only run with text-based guidance? 
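# Note (added comment): for the noise-level embedding key "c_adm" the conditional value is reused
# unchanged in the unconditional batch below, so classifier-free guidance in this upscaler only acts
# through the cross-attention (text) conditioning; an alternative using max_noise_level is left commented out.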
assert isinstance(c[k], torch.Tensor) #uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level uc[k] = c[k] elif isinstance(c[k], list): uc[k] = [c[k][i] for i in range(len(c[k]))] else: uc[k] = c[k] with ema_scope("Sampling with classifier-free guidance"): samples_cfg, _ = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=uc, ) x_samples_cfg = self.decode_first_stage(samples_cfg) log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg if plot_progressive_rows: with ema_scope("Plotting Progressives"): img, progressives = self.progressive_denoising(c, shape=(self.channels, self.image_size, self.image_size), batch_size=N) prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") log["progressive_row"] = prog_row return log class LatentFinetuneDiffusion(LatentDiffusion): """ Basis for different finetunas, such as inpainting or depth2image To disable finetuning mode, set finetune_keys to None """ def __init__( self, concat_keys: tuple, finetune_keys=("model.diffusion_model.input_blocks.0.0.weight", "model_ema.diffusion_modelinput_blocks00weight"), keep_finetune_dims=4, # if model was trained without concat mode before and we would like to keep these channels c_concat_log_start=None, # to log reconstruction of c_concat codes c_concat_log_end=None, *args, **kwargs): ckpt = kwargs.pop("ckpt", None) ignore_keys = kwargs.pop("ignore_keys", list()) super().__init__(*args, **kwargs) self.finetune_keys = finetune_keys self.concat_keys = concat_keys self.keep_dims = keep_finetune_dims self.c_concat_log_start = c_concat_log_start self.c_concat_log_end = c_concat_log_end if exists(self.finetune_keys): assert exists(ckpt), 'can only finetune from a given checkpoint' if exists(ckpt): self.init_from_ckpt(ckpt, ignore_keys) def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): rank_zero_info("Deleting key {} from state_dict.".format(k)) del sd[k] # make it explicit, finetune by including extra input channels if exists(self.finetune_keys) and k in self.finetune_keys: new_entry = None for name, param in self.named_parameters(): if name in self.finetune_keys: rank_zero_info( f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only" ) new_entry = torch.zeros_like(param) # zero init assert exists(new_entry), 'did not find matching parameter to modify' new_entry[:, :self.keep_dims, ...] 
= sd[k] sd[k] = new_entry missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) rank_zero_info(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: rank_zero_info(f"Missing Keys: {missing}") if len(unexpected) > 0: rank_zero_info(f"Unexpected Keys: {unexpected}") @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True) c_cat, c = c["c_concat"][0], c["c_crossattn"][0] N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', 'cls']: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if not (self.c_concat_log_start is None and self.c_concat_log_end is None): log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end]) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log(cond={ "c_concat": [c_cat], "c_crossattn": [c] }, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if unconditional_guidance_scale > 1.0: uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label) uc_cat = c_cat uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]} with ema_scope("Sampling with classifier-free guidance"): samples_cfg, _ = self.sample_log( cond={ "c_concat": [c_cat], "c_crossattn": [c] }, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, unconditional_guidance_scale=unconditional_guidance_scale, 
unconditional_conditioning=uc_full, ) x_samples_cfg = self.decode_first_stage(samples_cfg) log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg return log class LatentInpaintDiffusion(LatentFinetuneDiffusion): """ can either run as pure inpainting model (only concat mode) or with mixed conditionings, e.g. mask as concat and text via cross-attn. To disable finetuning mode, set finetune_keys to None """ def __init__(self, concat_keys=("mask", "masked_image"), masked_image_key="masked_image", *args, **kwargs): super().__init__(concat_keys, *args, **kwargs) self.masked_image_key = masked_image_key assert self.masked_image_key in concat_keys @torch.no_grad() def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False): # note: restricted to non-trainable encoders currently assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting' z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=bs) assert exists(self.concat_keys) c_cat = list() for ck in self.concat_keys: if self.use_fp16: cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).half() else: cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float() if bs is not None: cc = cc[:bs] cc = cc.to(self.device) bchw = z.shape if ck != self.masked_image_key: cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) else: cc = self.get_first_stage_encoding(self.encode_first_stage(cc)) c_cat.append(cc) c_cat = torch.cat(c_cat, dim=1) all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} if return_first_stage_outputs: return z, all_conds, x, xrec, xc return z, all_conds @torch.no_grad() def log_images(self, *args, **kwargs): log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs) log["masked_image"] = rearrange(args[0]["masked_image"], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float() return log class LatentDepth2ImageDiffusion(LatentFinetuneDiffusion): """ condition on monocular depth estimation """ def __init__(self, depth_stage_config, concat_keys=("midas_in",), *args, **kwargs): super().__init__(concat_keys=concat_keys, *args, **kwargs) self.depth_model = instantiate_from_config(depth_stage_config) self.depth_stage_key = concat_keys[0] @torch.no_grad() def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False): # note: restricted to non-trainable encoders currently assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for depth2img' z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=bs) assert exists(self.concat_keys) assert len(self.concat_keys) == 1 c_cat = list() for ck in self.concat_keys: cc = batch[ck] if bs is not None: cc = cc[:bs] cc = cc.to(self.device) cc = self.depth_model(cc) cc = torch.nn.functional.interpolate( cc, size=z.shape[2:], mode="bicubic", align_corners=False, ) depth_min, depth_max = torch.amin(cc, dim=[1, 2, 3], keepdim=True), torch.amax(cc, dim=[1, 2, 3], keepdim=True) cc = 2. * (cc - depth_min) / (depth_max - depth_min + 0.001) - 1. 
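# Note (added comment): the monocular depth prediction is rescaled per sample to [-1, 1]
# (with a small epsilon guarding against division by zero) before being appended to the
# concat-conditioning that is fed alongside the latent input.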
c_cat.append(cc) c_cat = torch.cat(c_cat, dim=1) all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} if return_first_stage_outputs: return z, all_conds, x, xrec, xc return z, all_conds @torch.no_grad() def log_images(self, *args, **kwargs): log = super().log_images(*args, **kwargs) depth = self.depth_model(args[0][self.depth_stage_key]) depth_min, depth_max = torch.amin(depth, dim=[1, 2, 3], keepdim=True), \ torch.amax(depth, dim=[1, 2, 3], keepdim=True) log["depth"] = 2. * (depth - depth_min) / (depth_max - depth_min) - 1. return log class LatentUpscaleFinetuneDiffusion(LatentFinetuneDiffusion): """ condition on low-res image (and optionally on some spatial noise augmentation) """ def __init__(self, concat_keys=("lr",), reshuffle_patch_size=None, low_scale_config=None, low_scale_key=None, *args, **kwargs): super().__init__(concat_keys=concat_keys, *args, **kwargs) self.reshuffle_patch_size = reshuffle_patch_size self.low_scale_model = None if low_scale_config is not None: rank_zero_info("Initializing a low-scale model") assert exists(low_scale_key) self.instantiate_low_stage(low_scale_config) self.low_scale_key = low_scale_key def instantiate_low_stage(self, config): model = instantiate_from_config(config) self.low_scale_model = model.eval() self.low_scale_model.train = disabled_train for param in self.low_scale_model.parameters(): param.requires_grad = False @torch.no_grad() def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False): # note: restricted to non-trainable encoders currently assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for upscaling-ft' z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=bs) assert exists(self.concat_keys) assert len(self.concat_keys) == 1 # optionally make spatial noise_level here c_cat = list() noise_level = None for ck in self.concat_keys: cc = batch[ck] cc = rearrange(cc, 'b h w c -> b c h w') if exists(self.reshuffle_patch_size): assert isinstance(self.reshuffle_patch_size, int) cc = rearrange(cc, 'b c (p1 h) (p2 w) -> b (p1 p2 c) h w', p1=self.reshuffle_patch_size, p2=self.reshuffle_patch_size) if bs is not None: cc = cc[:bs] cc = cc.to(self.device) if exists(self.low_scale_model) and ck == self.low_scale_key: cc, noise_level = self.low_scale_model(cc) c_cat.append(cc) c_cat = torch.cat(c_cat, dim=1) if exists(noise_level): all_conds = {"c_concat": [c_cat], "c_crossattn": [c], "c_adm": noise_level} else: all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} if return_first_stage_outputs: return z, all_conds, x, xrec, xc return z, all_conds @torch.no_grad() def log_images(self, *args, **kwargs): log = super().log_images(*args, **kwargs) log["lr"] = rearrange(args[0]["lr"], 'b h w c -> b c h w') return log
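# --------------------------------------------------------------------------
# Illustrative sketch (not part of the model code above): a minimal, self-contained
# recreation of the forward-diffusion step and the eps-prediction training target that
# DDPM.q_sample / DDPM.p_losses implement. All names below (toy_linear_schedule,
# toy_q_sample, the fake model output) are hypothetical helpers for illustration only;
# the real classes above register the same cumulative-product buffers via register_schedule.
# --------------------------------------------------------------------------
import torch


def toy_linear_schedule(num_timesteps: int = 1000):
    # Linear beta schedule; alphas_cumprod corresponds to the buffers used above.
    betas = torch.linspace(1e-4, 2e-2, num_timesteps)
    alphas_cumprod = torch.cumprod(1. - betas, dim=0)
    return alphas_cumprod.sqrt(), (1. - alphas_cumprod).sqrt()


def toy_q_sample(x_start, t, noise, sqrt_ac, sqrt_omac):
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    shape = (-1,) + (1,) * (x_start.dim() - 1)
    return sqrt_ac[t].reshape(shape) * x_start + sqrt_omac[t].reshape(shape) * noise


if __name__ == "__main__":
    sqrt_ac, sqrt_omac = toy_linear_schedule()
    x0 = torch.randn(4, 3, 32, 32)            # toy "latents"
    t = torch.randint(0, 1000, (4,))
    eps = torch.randn_like(x0)
    x_t = toy_q_sample(x0, t, eps, sqrt_ac, sqrt_omac)
    # With parameterization == "eps", the network output is regressed onto eps:
    fake_model_out = torch.randn_like(x_t)     # stands in for self.model(x_t, t)
    loss_simple = ((fake_model_out - eps) ** 2).mean(dim=[1, 2, 3])
    print(x_t.shape, loss_simple.shape)        # torch.Size([4, 3, 32, 32]) torch.Size([4])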
import torch import torch.nn.functional as F import math from tqdm import tqdm class NoiseScheduleVP: def __init__( self, schedule='discrete', betas=None, alphas_cumprod=None, continuous_beta_0=0.1, continuous_beta_1=20., ): """Create a wrapper class for the forward SDE (VP type). *** Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t. We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images. *** The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ). We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper). Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have: log_alpha_t = self.marginal_log_mean_coeff(t) sigma_t = self.marginal_std(t) lambda_t = self.marginal_lambda(t) Moreover, as lambda(t) is an invertible function, we also support its inverse function: t = self.inverse_lambda(lambda_t) =============================================================== We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]). 1. For discrete-time DPMs: For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by: t_i = (i + 1) / N e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1. We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3. Args: betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details) alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details) Note that we always have alphas_cumprod = cumprod(betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`. **Important**: Please pay special attention for the args for `alphas_cumprod`: The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ). Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have alpha_{t_n} = \sqrt{\hat{alpha_n}}, and log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}). 2. For continuous-time DPMs: We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise schedule are the default settings in DDPM and improved-DDPM: Args: beta_min: A `float` number. The smallest beta for the linear schedule. beta_max: A `float` number. The largest beta for the linear schedule. cosine_s: A `float` number. The hyperparameter in the cosine schedule. cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule. T: A `float` number. The ending time of the forward process. =============================================================== Args: schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs, 'linear' or 'cosine' for continuous-time DPMs. Returns: A wrapper object of the forward SDE (VP type). 
=============================================================== Example: # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1): >>> ns = NoiseScheduleVP('discrete', betas=betas) # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1): >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) # For continuous-time DPMs (VPSDE), linear schedule: >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.) """ if schedule not in ['discrete', 'linear', 'cosine']: raise ValueError( "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format( schedule)) self.schedule = schedule if schedule == 'discrete': if betas is not None: log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0) else: assert alphas_cumprod is not None log_alphas = 0.5 * torch.log(alphas_cumprod) self.total_N = len(log_alphas) self.T = 1. self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) self.log_alpha_array = log_alphas.reshape((1, -1,)) else: self.total_N = 1000 self.beta_0 = continuous_beta_0 self.beta_1 = continuous_beta_1 self.cosine_s = 0.008 self.cosine_beta_max = 999. self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * ( 1. + self.cosine_s) / math.pi - self.cosine_s self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) self.schedule = schedule if schedule == 'cosine': # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. self.T = 0.9946 else: self.T = 1. def marginal_log_mean_coeff(self, t): """ Compute log(alpha_t) of a given continuous-time label t in [0, T]. """ if self.schedule == 'discrete': return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1)) elif self.schedule == 'linear': return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 elif self.schedule == 'cosine': log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 return log_alpha_t def marginal_alpha(self, t): """ Compute alpha_t of a given continuous-time label t in [0, T]. """ return torch.exp(self.marginal_log_mean_coeff(t)) def marginal_std(self, t): """ Compute sigma_t of a given continuous-time label t in [0, T]. """ return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) def marginal_lambda(self, t): """ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. """ log_mean_coeff = self.marginal_log_mean_coeff(t) log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) return log_mean_coeff - log_std def inverse_lambda(self, lamb): """ Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. """ if self.schedule == 'linear': tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) Delta = self.beta_0 ** 2 + tmp return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) elif self.schedule == 'discrete': log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. 
* lamb) t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1])) return t.reshape((-1,)) else: log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * ( 1. + self.cosine_s) / math.pi - self.cosine_s t = t_fn(log_alpha) return t def model_wrapper( model, noise_schedule, model_type="noise", model_kwargs={}, guidance_type="uncond", condition=None, unconditional_condition=None, guidance_scale=1., classifier_fn=None, classifier_kwargs={}, ): """Create a wrapper function for the noise prediction model. DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. We support four types of the diffusion model by setting `model_type`: 1. "noise": noise prediction model. (Trained by predicting noise). 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). 3. "v": velocity prediction model. (Trained by predicting the velocity). The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." arXiv preprint arXiv:2202.00512 (2022). [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." arXiv preprint arXiv:2210.02303 (2022). 4. "score": marginal score function. (Trained by denoising score matching). Note that the score function and the noise prediction model follows a simple relationship: ``` noise(x_t, t) = -sigma_t * score(x_t, t) ``` We support three types of guided sampling by DPMs by setting `guidance_type`: 1. "uncond": unconditional sampling by DPMs. The input `model` has the following format: `` model(x, t_input, **model_kwargs) -> noise | x_start | v | score `` 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. The input `model` has the following format: `` model(x, t_input, **model_kwargs) -> noise | x_start | v | score `` The input `classifier_fn` has the following format: `` classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) `` [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. The input `model` has the following format: `` model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score `` And if cond == `unconditional_condition`, the model output is the unconditional DPM output. [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." arXiv preprint arXiv:2207.12598 (2022). The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) or continuous-time labels (i.e. epsilon to T). We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: `` def model_fn(x, t_continuous) -> noise: t_input = get_model_input_time(t_continuous) return noise_pred(model, x, t_input, **model_kwargs) `` where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. 
=============================================================== Args: model: A diffusion model with the corresponding format described above. noise_schedule: A noise schedule object, such as NoiseScheduleVP. model_type: A `str`. The parameterization type of the diffusion model. "noise" or "x_start" or "v" or "score". model_kwargs: A `dict`. A dict for the other inputs of the model function. guidance_type: A `str`. The type of the guidance for sampling. "uncond" or "classifier" or "classifier-free". condition: A pytorch tensor. The condition for the guided sampling. Only used for "classifier" or "classifier-free" guidance type. unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. Only used for "classifier-free" guidance type. guidance_scale: A `float`. The scale for the guided sampling. classifier_fn: A classifier function. Only used for the classifier guidance. classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. Returns: A noise prediction model that accepts the noised data and the continuous time as the inputs. """ def get_model_input_time(t_continuous): """ Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time. For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N]. For continuous-time DPMs, we just use `t_continuous`. """ if noise_schedule.schedule == 'discrete': return (t_continuous - 1. / noise_schedule.total_N) * 1000. else: return t_continuous def noise_pred_fn(x, t_continuous, cond=None): if t_continuous.reshape((-1,)).shape[0] == 1: t_continuous = t_continuous.expand((x.shape[0])) t_input = get_model_input_time(t_continuous) if cond is None: output = model(x, t_input, **model_kwargs) else: output = model(x, t_input, cond, **model_kwargs) if model_type == "noise": return output elif model_type == "x_start": alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) dims = x.dim() return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims) elif model_type == "v": alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) dims = x.dim() return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x elif model_type == "score": sigma_t = noise_schedule.marginal_std(t_continuous) dims = x.dim() return -expand_dims(sigma_t, dims) * output def cond_grad_fn(x, t_input): """ Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t). """ with torch.enable_grad(): x_in = x.detach().requires_grad_(True) log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs) return torch.autograd.grad(log_prob.sum(), x_in)[0] def model_fn(x, t_continuous): """ The noise predicition model function that is used for DPM-Solver. """ if t_continuous.reshape((-1,)).shape[0] == 1: t_continuous = t_continuous.expand((x.shape[0])) if guidance_type == "uncond": return noise_pred_fn(x, t_continuous) elif guidance_type == "classifier": assert classifier_fn is not None t_input = get_model_input_time(t_continuous) cond_grad = cond_grad_fn(x, t_input) sigma_t = noise_schedule.marginal_std(t_continuous) noise = noise_pred_fn(x, t_continuous) return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad elif guidance_type == "classifier-free": if guidance_scale == 1. 
or unconditional_condition is None: return noise_pred_fn(x, t_continuous, cond=condition) else: x_in = torch.cat([x] * 2) t_in = torch.cat([t_continuous] * 2) c_in = torch.cat([unconditional_condition, condition]) noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) return noise_uncond + guidance_scale * (noise - noise_uncond) assert model_type in ["noise", "x_start", "v"] assert guidance_type in ["uncond", "classifier", "classifier-free"] return model_fn class DPM_Solver: def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.): """Construct a DPM-Solver. We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0"). If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver). If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++). In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True. The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales. Args: model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): `` def model_fn(x, t_continuous): return noise `` noise_schedule: A noise schedule object, such as NoiseScheduleVP. predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model. thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1]. max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding. [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b. """ self.model = model_fn self.noise_schedule = noise_schedule self.predict_x0 = predict_x0 self.thresholding = thresholding self.max_val = max_val def noise_prediction_fn(self, x, t): """ Return the noise prediction model. """ return self.model(x, t) def data_prediction_fn(self, x, t): """ Return the data prediction model (with thresholding). """ noise = self.noise_prediction_fn(x, t) dims = x.dim() alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims) if self.thresholding: p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims) x0 = torch.clamp(x0, -s, s) / s return x0 def model_fn(self, x, t): """ Convert the model to the noise prediction model or the data prediction model. """ if self.predict_x0: return self.data_prediction_fn(x, t) else: return self.noise_prediction_fn(x, t) def get_time_steps(self, skip_type, t_T, t_0, N, device): """Compute the intermediate time steps for sampling. Args: skip_type: A `str`. The type for the spacing of the time steps. We support three types: - 'logSNR': uniform logSNR for the time steps. - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) t_T: A `float`. The starting time of the sampling (default is T). 
t_0: A `float`. The ending time of the sampling (default is epsilon). N: A `int`. The total number of the spacing of the time steps. device: A torch device. Returns: A pytorch tensor of the time steps, with the shape (N + 1,). """ if skip_type == 'logSNR': lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device) return self.noise_schedule.inverse_lambda(logSNR_steps) elif skip_type == 'time_uniform': return torch.linspace(t_T, t_0, N + 1).to(device) elif skip_type == 'time_quadratic': t_order = 2 t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. / t_order), N + 1).pow(t_order).to(device) return t else: raise ValueError( "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): """ Get the order of each step for sampling by the singlestep DPM-Solver. We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast". Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: - If order == 1: We take `steps` of DPM-Solver-1 (i.e. DDIM). - If order == 2: - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. - If steps % 2 == 0, we use K steps of DPM-Solver-2. - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. - If order == 3: - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. ============================================ Args: order: A `int`. The max order for the solver (2 or 3). steps: A `int`. The total number of function evaluations (NFE). skip_type: A `str`. The type for the spacing of the time steps. We support three types: - 'logSNR': uniform logSNR for the time steps. - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) t_T: A `float`. The starting time of the sampling (default is T). t_0: A `float`. The ending time of the sampling (default is epsilon). device: A torch device. Returns: orders: A list of the solver order of each step. 
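            timesteps_outer: A pytorch tensor of the outer time steps between which the per-step orders are applied.

        For example (derived from the rule above): with order == 3, steps == 6 gives orders == [3, 2, 1],
        steps == 7 gives orders == [3, 3, 1], and steps == 8 gives orders == [3, 3, 2].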
""" if order == 3: K = steps // 3 + 1 if steps % 3 == 0: orders = [3, ] * (K - 2) + [2, 1] elif steps % 3 == 1: orders = [3, ] * (K - 1) + [1] else: orders = [3, ] * (K - 1) + [2] elif order == 2: if steps % 2 == 0: K = steps // 2 orders = [2, ] * K else: K = steps // 2 + 1 orders = [2, ] * (K - 1) + [1] elif order == 1: K = 1 orders = [1, ] * steps else: raise ValueError("'order' must be '1' or '2' or '3'.") if skip_type == 'logSNR': # To reproduce the results in DPM-Solver paper timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) else: timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[ torch.cumsum(torch.tensor([0, ] + orders)).to(device)] return timesteps_outer, orders def denoise_to_zero_fn(self, x, s): """ Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization. """ return self.data_prediction_fn(x, s) def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): """ DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`. Args: x: A pytorch tensor. The initial value at time `s`. s: A pytorch tensor. The starting time, with the shape (x.shape[0],). t: A pytorch tensor. The ending time, with the shape (x.shape[0],). model_s: A pytorch tensor. The model function evaluated at time `s`. If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. return_intermediate: A `bool`. If true, also return the model value at time `s`. Returns: x_t: A pytorch tensor. The approximated solution at time `t`. """ ns = self.noise_schedule dims = x.dim() lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) h = lambda_t - lambda_s log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t) sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t) alpha_t = torch.exp(log_alpha_t) if self.predict_x0: phi_1 = torch.expm1(-h) if model_s is None: model_s = self.model_fn(x, s) x_t = ( expand_dims(sigma_t / sigma_s, dims) * x - expand_dims(alpha_t * phi_1, dims) * model_s ) if return_intermediate: return x_t, {'model_s': model_s} else: return x_t else: phi_1 = torch.expm1(h) if model_s is None: model_s = self.model_fn(x, s) x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - expand_dims(sigma_t * phi_1, dims) * model_s ) if return_intermediate: return x_t, {'model_s': model_s} else: return x_t def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpm_solver'): """ Singlestep solver DPM-Solver-2 from time `s` to time `t`. Args: x: A pytorch tensor. The initial value at time `s`. s: A pytorch tensor. The starting time, with the shape (x.shape[0],). t: A pytorch tensor. The ending time, with the shape (x.shape[0],). r1: A `float`. The hyperparameter of the second-order solver. model_s: A pytorch tensor. The model function evaluated at time `s`. If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time). solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. The type slightly impacts the performance. We recommend to use 'dpm_solver' type. Returns: x_t: A pytorch tensor. The approximated solution at time `t`. 
""" if solver_type not in ['dpm_solver', 'taylor']: raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) if r1 is None: r1 = 0.5 ns = self.noise_schedule dims = x.dim() lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) h = lambda_t - lambda_s lambda_s1 = lambda_s + r1 * h s1 = ns.inverse_lambda(lambda_s1) log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff( s1), ns.marginal_log_mean_coeff(t) sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t) alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t) if self.predict_x0: phi_11 = torch.expm1(-r1 * h) phi_1 = torch.expm1(-h) if model_s is None: model_s = self.model_fn(x, s) x_s1 = ( expand_dims(sigma_s1 / sigma_s, dims) * x - expand_dims(alpha_s1 * phi_11, dims) * model_s ) model_s1 = self.model_fn(x_s1, s1) if solver_type == 'dpm_solver': x_t = ( expand_dims(sigma_t / sigma_s, dims) * x - expand_dims(alpha_t * phi_1, dims) * model_s - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s) ) elif solver_type == 'taylor': x_t = ( expand_dims(sigma_t / sigma_s, dims) * x - expand_dims(alpha_t * phi_1, dims) * model_s + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * ( model_s1 - model_s) ) else: phi_11 = torch.expm1(r1 * h) phi_1 = torch.expm1(h) if model_s is None: model_s = self.model_fn(x, s) x_s1 = ( expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x - expand_dims(sigma_s1 * phi_11, dims) * model_s ) model_s1 = self.model_fn(x_s1, s1) if solver_type == 'dpm_solver': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - expand_dims(sigma_t * phi_1, dims) * model_s - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s) ) elif solver_type == 'taylor': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - expand_dims(sigma_t * phi_1, dims) * model_s - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s) ) if return_intermediate: return x_t, {'model_s': model_s, 'model_s1': model_s1} else: return x_t def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpm_solver'): """ Singlestep solver DPM-Solver-3 from time `s` to time `t`. Args: x: A pytorch tensor. The initial value at time `s`. s: A pytorch tensor. The starting time, with the shape (x.shape[0],). t: A pytorch tensor. The ending time, with the shape (x.shape[0],). r1: A `float`. The hyperparameter of the third-order solver. r2: A `float`. The hyperparameter of the third-order solver. model_s: A pytorch tensor. The model function evaluated at time `s`. If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. The type slightly impacts the performance. We recommend to use 'dpm_solver' type. Returns: x_t: A pytorch tensor. The approximated solution at time `t`. 
""" if solver_type not in ['dpm_solver', 'taylor']: raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) if r1 is None: r1 = 1. / 3. if r2 is None: r2 = 2. / 3. ns = self.noise_schedule dims = x.dim() lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) h = lambda_t - lambda_s lambda_s1 = lambda_s + r1 * h lambda_s2 = lambda_s + r2 * h s1 = ns.inverse_lambda(lambda_s1) s2 = ns.inverse_lambda(lambda_s2) log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff( s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t) sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std( s2), ns.marginal_std(t) alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) if self.predict_x0: phi_11 = torch.expm1(-r1 * h) phi_12 = torch.expm1(-r2 * h) phi_1 = torch.expm1(-h) phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1. phi_2 = phi_1 / h + 1. phi_3 = phi_2 / h - 0.5 if model_s is None: model_s = self.model_fn(x, s) if model_s1 is None: x_s1 = ( expand_dims(sigma_s1 / sigma_s, dims) * x - expand_dims(alpha_s1 * phi_11, dims) * model_s ) model_s1 = self.model_fn(x_s1, s1) x_s2 = ( expand_dims(sigma_s2 / sigma_s, dims) * x - expand_dims(alpha_s2 * phi_12, dims) * model_s + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s) ) model_s2 = self.model_fn(x_s2, s2) if solver_type == 'dpm_solver': x_t = ( expand_dims(sigma_t / sigma_s, dims) * x - expand_dims(alpha_t * phi_1, dims) * model_s + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s) ) elif solver_type == 'taylor': D1_0 = (1. / r1) * (model_s1 - model_s) D1_1 = (1. / r2) * (model_s2 - model_s) D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) D2 = 2. * (D1_1 - D1_0) / (r2 - r1) x_t = ( expand_dims(sigma_t / sigma_s, dims) * x - expand_dims(alpha_t * phi_1, dims) * model_s + expand_dims(alpha_t * phi_2, dims) * D1 - expand_dims(alpha_t * phi_3, dims) * D2 ) else: phi_11 = torch.expm1(r1 * h) phi_12 = torch.expm1(r2 * h) phi_1 = torch.expm1(h) phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1. phi_2 = phi_1 / h - 1. phi_3 = phi_2 / h - 0.5 if model_s is None: model_s = self.model_fn(x, s) if model_s1 is None: x_s1 = ( expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x - expand_dims(sigma_s1 * phi_11, dims) * model_s ) model_s1 = self.model_fn(x_s1, s1) x_s2 = ( expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x - expand_dims(sigma_s2 * phi_12, dims) * model_s - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s) ) model_s2 = self.model_fn(x_s2, s2) if solver_type == 'dpm_solver': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - expand_dims(sigma_t * phi_1, dims) * model_s - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s) ) elif solver_type == 'taylor': D1_0 = (1. / r1) * (model_s1 - model_s) D1_1 = (1. / r2) * (model_s2 - model_s) D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) D2 = 2. 
* (D1_1 - D1_0) / (r2 - r1) x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - expand_dims(sigma_t * phi_1, dims) * model_s - expand_dims(sigma_t * phi_2, dims) * D1 - expand_dims(sigma_t * phi_3, dims) * D2 ) if return_intermediate: return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2} else: return x_t def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): """ Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. Args: x: A pytorch tensor. The initial value at time `s`. model_prev_list: A list of pytorch tensor. The previous computed model values. t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) t: A pytorch tensor. The ending time, with the shape (x.shape[0],). solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. The type slightly impacts the performance. We recommend to use 'dpm_solver' type. Returns: x_t: A pytorch tensor. The approximated solution at time `t`. """ if solver_type not in ['dpm_solver', 'taylor']: raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) ns = self.noise_schedule dims = x.dim() model_prev_1, model_prev_0 = model_prev_list t_prev_1, t_prev_0 = t_prev_list lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda( t_prev_0), ns.marginal_lambda(t) log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) alpha_t = torch.exp(log_alpha_t) h_0 = lambda_prev_0 - lambda_prev_1 h = lambda_t - lambda_prev_0 r0 = h_0 / h D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) if self.predict_x0: if solver_type == 'dpm_solver': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 ) else: if solver_type == 'dpm_solver': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 ) elif solver_type == 'taylor': x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 ) return x_t def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): """ Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`. Args: x: A pytorch tensor. The initial value at time `s`. model_prev_list: A list of pytorch tensor. The previous computed model values. t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) t: A pytorch tensor. The ending time, with the shape (x.shape[0],). solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. The type slightly impacts the performance. We recommend to use 'dpm_solver' type. Returns: x_t: A pytorch tensor. The approximated solution at time `t`. 
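        This update reuses the two previously computed model values: with h_0 = lambda_prev_0 - lambda_prev_1,
        h = lambda_t - lambda_prev_0 and r0 = h_0 / h, the first-order difference used below is
        D1_0 = (model_prev_0 - model_prev_1) / r0, so no additional model evaluation is needed for this step.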
""" ns = self.noise_schedule dims = x.dim() model_prev_2, model_prev_1, model_prev_0 = model_prev_list t_prev_2, t_prev_1, t_prev_0 = t_prev_list lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda( t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) alpha_t = torch.exp(log_alpha_t) h_1 = lambda_prev_1 - lambda_prev_2 h_0 = lambda_prev_0 - lambda_prev_1 h = lambda_t - lambda_prev_0 r0, r1 = h_0 / h, h_1 / h D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2) D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1) D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1) if self.predict_x0: x_t = ( expand_dims(sigma_t / sigma_prev_0, dims) * x - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1 - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2 ) else: x_t = ( expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1 - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2 ) return x_t def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, r2=None): """ Singlestep DPM-Solver with the order `order` from time `s` to time `t`. Args: x: A pytorch tensor. The initial value at time `s`. s: A pytorch tensor. The starting time, with the shape (x.shape[0],). t: A pytorch tensor. The ending time, with the shape (x.shape[0],). order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. The type slightly impacts the performance. We recommend to use 'dpm_solver' type. r1: A `float`. The hyperparameter of the second-order or third-order solver. r2: A `float`. The hyperparameter of the third-order solver. Returns: x_t: A pytorch tensor. The approximated solution at time `t`. """ if order == 1: return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) elif order == 2: return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1) elif order == 3: return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2) else: raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'): """ Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`. Args: x: A pytorch tensor. The initial value at time `s`. model_prev_list: A list of pytorch tensor. The previous computed model values. t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) t: A pytorch tensor. The ending time, with the shape (x.shape[0],). order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. 
solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. The type slightly impacts the performance. We recommend to use 'dpm_solver' type. Returns: x_t: A pytorch tensor. The approximated solution at time `t`. """ if order == 1: return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1]) elif order == 2: return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) elif order == 3: return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) else: raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpm_solver'): """ The adaptive step size solver based on singlestep DPM-Solver. Args: x: A pytorch tensor. The initial value at time `t_T`. order: A `int`. The (higher) order of the solver. We only support order == 2 or 3. t_T: A `float`. The starting time of the sampling (default is T). t_0: A `float`. The ending time of the sampling (default is epsilon). h_init: A `float`. The initial step size (for logSNR). atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1]. rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05. theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1]. t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the current time and `t_0` is less than `t_err`. The default setting is 1e-5. solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. The type slightly impacts the performance. We recommend to use 'dpm_solver' type. Returns: x_0: A pytorch tensor. The approximated solution at time `t_0`. [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021. """ ns = self.noise_schedule s = t_T * torch.ones((x.shape[0],)).to(x) lambda_s = ns.marginal_lambda(s) lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x)) h = h_init * torch.ones_like(s).to(x) x_prev = x nfe = 0 if order == 2: r1 = 0.5 lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True) higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs) elif order == 3: r1, r2 = 1. / 3., 2. / 3. 
lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type) higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs) else: raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order)) while torch.abs((s - t_0)).mean() > t_err: t = ns.inverse_lambda(lambda_s + h) x_lower, lower_noise_kwargs = lower_update(x, s, t) x_higher = higher_update(x, s, t, **lower_noise_kwargs) delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))) norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)) E = norm_fn((x_higher - x_lower) / delta).max() if torch.all(E <= 1.): x = x_higher s = t x_prev = x_lower lambda_s = ns.marginal_lambda(s) h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s) nfe += order print('adaptive solver nfe', nfe) return x def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform', method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', atol=0.0078, rtol=0.05, ): """ Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`. ===================================================== We support the following algorithms for both noise prediction model and data prediction model: - 'singlestep': Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver. We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps). The total number of function evaluations (NFE) == `steps`. Given a fixed NFE == `steps`, the sampling procedure is: - If `order` == 1: - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM). - If `order` == 2: - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling. - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2. - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. - If `order` == 3: - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1. - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2. - 'multistep': Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`. We initialize the first `order` values by lower order multistep solvers. Given a fixed NFE == `steps`, the sampling procedure is: Denote K = steps. - If `order` == 1: - We use K steps of DPM-Solver-1 (i.e. DDIM). - If `order` == 2: - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2. - If `order` == 3: - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3. - 'singlestep_fixed': Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3). We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE. - 'adaptive': Adaptive step size DPM-Solver (i.e. 
"DPM-Solver-12" and "DPM-Solver-23" in the paper). We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`. You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs (NFE) and the sample quality. - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2. - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3. ===================================================== Some advices for choosing the algorithm: - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs: Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`. e.g. >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False) >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3, skip_type='time_uniform', method='singlestep') - For **guided sampling with large guidance scale** by DPMs: Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`. e.g. >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True) >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2, skip_type='time_uniform', method='multistep') We support three types of `skip_type`: - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images** - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**. - 'time_quadratic': quadratic time for the time steps. ===================================================== Args: x: A pytorch tensor. The initial value at time `t_start` e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution. steps: A `int`. The total number of function evaluations (NFE). t_start: A `float`. The starting time of the sampling. If `T` is None, we use self.noise_schedule.T (default is 1.0). t_end: A `float`. The ending time of the sampling. If `t_end` is None, we use 1. / self.noise_schedule.total_N. e.g. if total_N == 1000, we have `t_end` == 1e-3. For discrete-time DPMs: - We recommend `t_end` == 1. / self.noise_schedule.total_N. For continuous-time DPMs: - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15. order: A `int`. The order of DPM-Solver. skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'. method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'. denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step. Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1). This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID for diffusion models sampling by diffusion SDEs for low-resolutional images (such as CIFAR-10). However, we observed that such trick does not matter for high-resolutional images. As it needs an additional NFE, we do not recommend it for high-resolutional images. lower_order_final: A `bool`. Whether to use lower order solvers at the final steps. Only valid for `method=multistep` and `steps < 15`. We empirically find that this trick is a key to stabilizing the sampling by DPM-Solver with very few steps (especially for steps <= 10). So we recommend to set it to be `True`. solver_type: A `str`. The taylor expansion type for the solver. 
`dpm_solver` or `taylor`. We recommend `dpm_solver`. atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. Returns: x_end: A pytorch tensor. The approximated solution at time `t_end`. """ t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end t_T = self.noise_schedule.T if t_start is None else t_start device = x.device if method == 'adaptive': with torch.no_grad(): x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type) elif method == 'multistep': assert steps >= order timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) assert timesteps.shape[0] - 1 == steps with torch.no_grad(): vec_t = timesteps[0].expand((x.shape[0])) model_prev_list = [self.model_fn(x, vec_t)] t_prev_list = [vec_t] # Init the first `order` values by lower order multistep DPM-Solver. for init_order in tqdm(range(1, order), desc="DPM init order"): vec_t = timesteps[init_order].expand(x.shape[0]) x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, solver_type=solver_type) model_prev_list.append(self.model_fn(x, vec_t)) t_prev_list.append(vec_t) # Compute the remaining values by `order`-th order multistep DPM-Solver. for step in tqdm(range(order, steps + 1), desc="DPM multistep"): vec_t = timesteps[step].expand(x.shape[0]) if lower_order_final and steps < 15: step_order = min(order, steps + 1 - step) else: step_order = order x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, solver_type=solver_type) for i in range(order - 1): t_prev_list[i] = t_prev_list[i + 1] model_prev_list[i] = model_prev_list[i + 1] t_prev_list[-1] = vec_t # We do not need to evaluate the final model value. if step < steps: model_prev_list[-1] = self.model_fn(x, vec_t) elif method in ['singlestep', 'singlestep_fixed']: if method == 'singlestep': timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device) elif method == 'singlestep_fixed': K = steps // order orders = [order, ] * K timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) for i, order in enumerate(orders): t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1] timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), N=order, device=device) lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0]) h = lambda_inner[-1] - lambda_inner[0] r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) if denoise_to_zero: x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) return x ############################################################# # other utility functions ############################################################# def interpolate_fn(x, xp, yp): """ A piecewise linear function y = f(x), using xp and yp as keypoints. We implement f(x) in a differentiable way (i.e. applicable for autograd). The function f(x) is well-defined for all x-axis. 
(For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) Args: x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. yp: PyTorch tensor with shape [C, K]. Returns: The function values f(x), with shape [N, C]. """ N, K = x.shape[0], xp.shape[1] all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2) sorted_all_x, x_indices = torch.sort(all_x, dim=2) x_idx = torch.argmin(x_indices, dim=2) cand_start_idx = x_idx - 1 start_idx = torch.where( torch.eq(x_idx, 0), torch.tensor(1, device=x.device), torch.where( torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, ), ) end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1) start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2) end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2) start_idx2 = torch.where( torch.eq(x_idx, 0), torch.tensor(0, device=x.device), torch.where( torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, ), ) y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1) start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2) end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2) cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x) return cand def expand_dims(v, dims): """ Expand the tensor `v` to the dim `dims`. Args: `v`: a PyTorch tensor with shape [N]. `dim`: a `int`. Returns: a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. """ return v[(...,) + (None,) * (dims - 1)]
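# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): wires NoiseScheduleVP,
# model_wrapper and DPM_Solver together for a discrete-time DPM with 1000 steps.
# `dummy_noise_model` is a hypothetical stand-in that only illustrates the
# expected call signature `model(x, t_input, **model_kwargs)`.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    betas = torch.linspace(1e-4, 2e-2, 1000)

    def dummy_noise_model(x, t_input):
        # Stand-in for a trained epsilon-prediction network.
        return torch.zeros_like(x)

    ns = NoiseScheduleVP('discrete', betas=betas)
    model_fn = model_wrapper(dummy_noise_model, ns, model_type="noise", guidance_type="uncond")
    dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True)
    x_T = torch.randn((2, 3, 8, 8))
    # 10 NFE, multistep DPM-Solver++ of order 2 with uniform time spacing.
    x_0 = dpm_solver.sample(x_T, steps=10, order=2, skip_type='time_uniform', method='multistep')
    print(x_0.shape)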
from .sampler import DPMSolverSampler
"""SAMPLING ONLY.""" import torch from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver MODEL_TYPES = { "eps": "noise", "v": "v" } class DPMSolverSampler(object): def __init__(self, model, **kwargs): super().__init__() self.model = model to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): cbs = conditioning[list(conditioning.keys())[0]].shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") # sampling C, H, W = shape size = (batch_size, C, H, W) print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') device = self.model.betas.device if x_T is None: img = torch.randn(size, device=device) else: img = x_T ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) model_fn = model_wrapper( lambda x, t, c: self.model.apply_model(x, t, c), ns, model_type=MODEL_TYPES[self.model.parameterization], guidance_type="classifier-free", condition=conditioning, unconditional_condition=unconditional_conditioning, guidance_scale=unconditional_guidance_scale, ) dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True) return x.to(device), None