python_code
stringlengths 0
108k
|
---|
from .loader import load
from .service import Service
__all__ = ["Service", "load"]
|
from __future__ import annotations
import os
import sys
import typing as t
import logging
import importlib
from typing import TYPE_CHECKING
import fs
from simple_di import inject
from simple_di import Provide
from ..bento import Bento
from ..models import ModelStore
from .service import on_import_svc
from .service import on_load_bento
from ...exceptions import NotFound
from ...exceptions import BentoMLException
from ...exceptions import ImportServiceError
from ..bento.bento import BENTO_YAML_FILENAME
from ..bento.bento import BENTO_PROJECT_DIR_NAME
from ..bento.bento import DEFAULT_BENTO_BUILD_FILE
from ..configuration import BENTOML_VERSION
from ..bento.build_config import BentoBuildConfig
from ..configuration.containers import BentoMLContainer
if TYPE_CHECKING:
from ..bento import BentoStore
from .service import Service
logger = logging.getLogger(__name__)
@inject
def import_service(
    svc_import_path: str,
    *,
    working_dir: t.Optional[str] = None,
    change_global_cwd: bool = False,
    model_store: ModelStore = Provide[BentoMLContainer.model_store],
) -> Service:
    """Import a Service instance from source code, by providing the svc_import_path
    which represents the module where the Service instance is created and optionally
    what attribute can be used to access this Service instance in that module

    Example usage:
        # When multiple service defined in the same module
        import_service("fraud_detector:svc_a")
        import_service("fraud_detector:svc_b")

        # Find svc by Python module name or file path
        import_service("fraud_detector:svc")
        import_service("fraud_detector.py:svc")
        import_service("foo.bar.fraud_detector:svc")
        import_service("./def/abc/fraud_detector.py:svc")

        # When there's only one Service instance in the target module, the attributes
        # part in the svc_import_path can be omitted
        import_service("fraud_detector.py")
        import_service("fraud_detector")

    Args:
        svc_import_path: ``"<module>[:<attribute>]"`` import target.
        working_dir: directory the module is resolved from; defaults to the
            current working directory.
        change_global_cwd: when True, chdir into ``working_dir`` so user code
            can read files using relative paths.
        model_store: model store made active while the service module imports.

    Returns:
        The imported ``bentoml.Service`` instance.

    Raises:
        ImportServiceError: if the target cannot be resolved, imported, or does
            not point at a Service instance.
    """
    from bentoml import Service

    prev_cwd = None
    sys_path_modified = False
    try:
        if working_dir is not None:
            working_dir = os.path.realpath(os.path.expanduser(working_dir))
            if change_global_cwd:
                # Set cwd(current working directory) to the Bento's project directory,
                # which allows user code to read files using relative path
                prev_cwd = os.getcwd()
                os.chdir(working_dir)
        else:
            working_dir = os.getcwd()

        if working_dir not in sys.path:
            sys.path.insert(0, working_dir)
            sys_path_modified = True

        logger.debug(
            'Importing service "%s" from working dir: "%s"',
            svc_import_path,
            working_dir,
        )

        import_path, _, attrs_str = svc_import_path.partition(":")
        if not import_path:
            raise ImportServiceError(
                f'Invalid import target "{svc_import_path}", must format as '
                '"<module>:<attribute>" or "<module>'
            )

        if os.path.exists(import_path):
            import_path = os.path.realpath(import_path)
            # Importing from a module file path:
            if not import_path.startswith(working_dir):
                raise ImportServiceError(
                    f'Module "{import_path}" not found in working directory "{working_dir}"'
                )

            file_name, ext = os.path.splitext(import_path)
            if ext != ".py":
                raise ImportServiceError(
                    f'Invalid module extension "{ext}" in target "{svc_import_path}",'
                    ' the only extension acceptable here is ".py"'
                )

            # move up until no longer in a python package or in the working dir
            module_name_parts: t.List[str] = []
            path = file_name
            while True:
                path, name = os.path.split(path)
                module_name_parts.append(name)
                if (
                    not os.path.exists(os.path.join(path, "__init__.py"))
                    or path == working_dir
                ):
                    break
            module_name = ".".join(module_name_parts[::-1])
        else:
            # Importing by module name:
            module_name = import_path

        # Import the service using the Bento's own model store
        BentoMLContainer.model_store.set(model_store)
        try:
            module = importlib.import_module(module_name, package=working_dir)
        except ImportError as e:
            # Chain the original ImportError so the real cause is preserved
            # in the traceback instead of being masked.
            raise ImportServiceError(
                f'{e} happens when importing "{module_name}" in '
                f'current path: {repr(sys.path)}. working dir: "{working_dir}", '
                f'current dir: "{os.getcwd()}"'
            ) from e
        finally:
            # Reset to default local model store
            BentoMLContainer.model_store.reset()

        if attrs_str:
            instance = module
            try:
                for attr_str in attrs_str.split("."):
                    instance = getattr(instance, attr_str)
            except AttributeError as e:
                raise ImportServiceError(
                    f'Attribute "{attrs_str}" not found in module "{module_name}".'
                ) from e
        else:
            instances = [
                (k, v) for k, v in module.__dict__.items() if isinstance(v, Service)
            ]
            if len(instances) == 1:
                attrs_str = instances[0][0]
                instance = instances[0][1]
            elif not instances:
                # BUGFIX: the zero-instance case previously fell through to the
                # "Multiple Service instances found" error, which was misleading.
                raise ImportServiceError(
                    f'No Service instances found in module "{module_name}"'
                )
            else:
                raise ImportServiceError(
                    f'Multiple Service instances found in module "{module_name}", use '
                    '"<module>:<svc_variable_name>" to specify the service instance or '
                    "define only one service instance per python module/file"
                )

        assert isinstance(
            instance, Service
        ), f'import target "{module_name}:{attrs_str}" is not a bentoml.Service instance'

        on_import_svc(
            svc=instance,
            working_dir=working_dir,
            import_str=f"{module_name}:{attrs_str}",
        )
        return instance
    except ImportServiceError:
        if prev_cwd and change_global_cwd:
            # Reset to previous cwd
            os.chdir(prev_cwd)
        if sys_path_modified and working_dir:
            # Undo changes to sys.path
            sys.path.remove(working_dir)
        raise
@inject
def load_bento(
    bento_tag: str,
    bento_store: "BentoStore" = Provide[BentoMLContainer.bento_store],
    change_global_cwd: bool = False,
) -> "Service":
    """Load a Service instance from a Bento in the local bento store.

    Example usage:
        load_bento("FraudDetector:latest")
        load_bento("FraudDetector:20210709_DE14C9")
    """
    bento = bento_store.get(bento_tag)
    store_path = bento._fs.getsyspath("/")
    logger.debug(
        'Loading bento "%s" found in local store: %s',
        bento.tag,
        store_path,
    )
    # Version drift only matters when a Bento comes off disk, so the check
    # lives here rather than in validation.
    if bento.info.bentoml_version != BENTOML_VERSION:
        logger.warning(
            f'Bento "{bento.tag}" was built with BentoML version {bento.info.bentoml_version}, which does not match the current BentoML version {BENTOML_VERSION}'
        )
    return _load_bento(bento, change_global_cwd)
def load_bento_dir(path: str, change_global_cwd: bool = False) -> "Service":
    """Load a Service instance from a Bento directory on disk.

    Example usage:
        load_bento_dir("~/bentoml/bentos/iris_classifier/4tht2icroji6zput3suqi5nl2")
    """
    bento = Bento.from_fs(fs.open_fs(path))
    logger.debug(
        'Loading bento "%s" from directory: %s',
        bento.tag,
        path,
    )
    return _load_bento(bento, change_global_cwd)
def _load_bento(bento: Bento, change_global_cwd: bool) -> "Service":
    """Import the service defined by *bento*, wiring in its bundled project dir
    and model store, then run the post-load hook."""
    # The Bento's bundled user project directory acts as the import root.
    project_dir = bento._fs.getsyspath(BENTO_PROJECT_DIR_NAME)
    # Models packaged under "{base_dir}/models/" back the service's model store.
    packaged_models = ModelStore(bento._fs.getsyspath("models"))
    svc = import_service(
        bento.info.service,
        working_dir=project_dir,
        change_global_cwd=change_global_cwd,
        model_store=packaged_models,
    )
    on_load_bento(svc, bento)
    return svc
def load(
    bento_identifier: str,
    working_dir: t.Optional[str] = None,
    change_global_cwd: bool = False,
) -> "Service":
    """Load a Service instance by the bento_identifier

    A bento_identifier:str can be provided in three different forms:

    * Tag pointing to a Bento in local Bento store under `BENTOML_HOME/bentos`
    * File path to a Bento directory
    * "import_str" for loading a service instance from the `working_dir`

    Example load from Bento usage:

    .. code-block:: python

        # load from local bento store
        load("FraudDetector:latest")
        load("FraudDetector:4tht2icroji6zput")

        # load from bento directory
        load("~/bentoml/bentos/iris_classifier/4tht2icroji6zput")

    Example load from working directory by "import_str" usage:

    .. code-block:: python

        # When multiple service defined in the same module
        load("fraud_detector:svc_a")
        load("fraud_detector:svc_b")

        # Find svc by Python module name or file path
        load("fraud_detector:svc")
        load("fraud_detector.py:svc")
        load("foo.bar.fraud_detector:svc")
        load("./def/abc/fraud_detector.py:svc")

        # When there's only one Service instance in the target module, the attributes
        # part in the svc_import_path can be omitted
        load("fraud_detector.py")
        load("fraud_detector")

    Args:
        bento_identifier: tag, Bento directory path, or import string.
        working_dir: directory import strings are resolved from.
        change_global_cwd: chdir into the resolved working dir while importing.

    Returns:
        The loaded ``bentoml.Service`` instance.

    Raises:
        BentoMLException: if the identifier cannot be resolved by any strategy.
    """
    if os.path.isdir(os.path.expanduser(bento_identifier)):
        bento_path = os.path.abspath(os.path.expanduser(bento_identifier))

        if os.path.isfile(
            os.path.expanduser(os.path.join(bento_path, BENTO_YAML_FILENAME))
        ):
            # Loading from path to a built Bento
            try:
                svc = load_bento_dir(bento_path, change_global_cwd=change_global_cwd)
            except ImportServiceError as e:
                raise BentoMLException(
                    f"Failed loading Bento from directory {bento_path}: {e}"
                ) from e
            logger.info("Service loaded from Bento directory: %s", svc)
        elif os.path.isfile(
            os.path.expanduser(os.path.join(bento_path, DEFAULT_BENTO_BUILD_FILE))
        ):
            # Loading from path to a project directory containing bentofile.yaml
            try:
                with open(
                    os.path.join(bento_path, DEFAULT_BENTO_BUILD_FILE),
                    "r",
                    encoding="utf-8",
                ) as f:
                    build_config = BentoBuildConfig.from_yaml(f)
                assert (
                    build_config.service
                ), '"service" field in "bentofile.yaml" is required for loading the service, e.g. "service: my_service.py:svc"'
                svc = import_service(
                    build_config.service,
                    working_dir=working_dir,
                    change_global_cwd=change_global_cwd,
                )
            except ImportServiceError as e:
                raise BentoMLException(
                    f"Failed loading Bento from directory {bento_path}: {e}"
                ) from e
            logger.debug(f"'{svc.name}' loaded from '{bento_path}': {svc}")
        else:
            raise BentoMLException(
                f"Failed loading service from path {bento_path}. When loading from a path, it must be either a Bento containing bento.yaml or a project directory containing bentofile.yaml"
            )
    else:
        try:
            # Loading from service definition file, e.g. "my_service.py:svc"
            svc = import_service(
                bento_identifier,
                working_dir=working_dir,
                change_global_cwd=change_global_cwd,
            )
            logger.debug(f"'{svc.name}' imported from source: {svc}")
        except ImportServiceError as e1:
            try:
                # Loading from local bento store by tag, e.g. "iris_classifier:latest"
                svc = load_bento(bento_identifier, change_global_cwd=change_global_cwd)
                logger.debug(f"'{svc.name}' loaded from Bento store: {svc}")
            except (NotFound, ImportServiceError) as e2:
                # BUGFIX: e1 comes from import_service (python module path) and
                # e2 from the local bento store; the original message had the
                # two errors attributed to the wrong strategies.
                raise BentoMLException(
                    f"Failed to load bento or import service "
                    f"'{bento_identifier}'. If you are attempting to "
                    f"import bento in local store: `{e2}`, or if you are importing by "
                    f"python module path: `{e1}`"
                ) from e2
    return svc
|
from __future__ import annotations
import re
import typing as t
import inspect
from typing import Optional
import yaml
from ..types import is_compatible_type
from ..context import InferenceApiContext as Context
from ...exceptions import InvalidArgument
from ..io_descriptors import IODescriptor
# Endpoint names reserved for BentoML's own infrastructure routes (docs UI,
# metrics, health/liveness/readiness probes). User-defined inference APIs may
# not use these names or routes (enforced by InferenceAPI._validate_name /
# _validate_route below).
RESERVED_API_NAMES = [
    "index",
    "swagger",
    "docs",
    "metrics",
    "healthz",
    "livez",
    "readyz",
]
class InferenceAPI:
    """Descriptor of a single inference endpoint on a Service.

    Holds the user callback together with its input/output IO descriptors and
    validates, at construction time, that the callback's signature is
    compatible with the declared input type (including an optional inference
    context parameter).
    """

    def __init__(
        self,
        user_defined_callback: t.Callable[..., t.Any],
        input_descriptor: IODescriptor[t.Any],
        output_descriptor: IODescriptor[t.Any],
        name: Optional[str],
        doc: Optional[str] = None,
        route: Optional[str] = None,
    ):
        """Validate and store the API definition.

        Raises:
            ValueError: when the callback signature does not match the input type.
            TypeError: when a resolved annotation is incompatible with the input type.
            InvalidArgument: when the name or route is invalid or reserved.
        """
        # Use user_defined_callback function variable if name not specified
        name = user_defined_callback.__name__ if name is None else name
        # Use user_defined_callback function docstring `__doc__` if doc not specified
        doc = user_defined_callback.__doc__ if doc is None else doc
        # Use API name as route if route not specified
        route = name if route is None else route

        InferenceAPI._validate_name(name)
        InferenceAPI._validate_route(route)

        self.name = name
        # needs_ctx / ctx_param track whether the callback accepts an
        # InferenceApiContext argument, and under which parameter name.
        self.needs_ctx = False
        self.ctx_param = None
        self.func = user_defined_callback

        input_type = input_descriptor.input_type()
        # A dict input type means the callback takes multiple named inputs.
        self.multi_input = isinstance(input_type, dict)
        sig = inspect.signature(user_defined_callback)

        if len(sig.parameters) == 0:
            raise ValueError("Expected API function to take parameters.")

        if isinstance(input_type, dict):
            # note: in python 3.6 kwarg order was not guaranteed to be preserved,
            # though it is in practice.
            for key in input_type:
                if key not in sig.parameters:
                    # NOTE(review): when key is not "context"/"ctx", the
                    # sig.parameters[key] lookup below raises KeyError instead
                    # of reaching the intended ValueError — confirm whether the
                    # loop was meant to iterate sig.parameters instead.
                    if (
                        key in ["context", "ctx"]
                        or sig.parameters[key].annotation == Context
                    ):
                        if self.needs_ctx:
                            raise ValueError(
                                f"API function has two context parameters: '{self.ctx_param}' and '{key}'; it should only have one."
                            )
                        self.needs_ctx = True
                        self.ctx_param = key
                        continue
                    raise ValueError(
                        f"API function has extra parameter with name '{key}'."
                    )
                annotation: t.Type[t.Any] = sig.parameters[key].annotation
                if (
                    isinstance(annotation, t.Type)
                    and annotation != inspect.Signature.empty
                ):
                    # if type annotations have been successfully resolved
                    if not is_compatible_type(input_type[key], annotation):
                        raise TypeError(
                            f"Expected type of argument '{key}' to be '{input_type[key]}', got '{sig.parameters[key].annotation}'"
                        )
            # The callback must take exactly one parameter per declared input,
            # plus one more if a context parameter was detected.
            expected_args = len(input_type) + (1 if self.needs_ctx else 0)
            if len(sig.parameters) != expected_args:
                raise ValueError(
                    f"expected API function to have arguments ({', '.join(input_type.keys())}, [context]), got ({', '.join(sig.parameters.keys())})"
                )
        else:
            # Single-input case: first parameter carries the input; an optional
            # second parameter must be the inference context.
            param_iter = iter(sig.parameters)
            first_arg = next(param_iter)
            annotation = sig.parameters[first_arg].annotation
            if isinstance(annotation, t.Type) and annotation != inspect.Signature.empty:
                if not is_compatible_type(input_type, annotation):
                    raise TypeError(
                        f"Expected type of argument '{first_arg}' to be '{input_type}', got '{sig.parameters[first_arg].annotation}'"
                    )
            if len(sig.parameters) > 2:
                raise ValueError("API function should only take one or two arguments")
            elif len(sig.parameters) == 2:
                self.needs_ctx = True
                second_arg = next(param_iter)
                annotation = sig.parameters[second_arg].annotation
                if (
                    isinstance(annotation, t.Type)
                    and annotation != inspect.Signature.empty
                ):
                    if not annotation == Context:
                        raise TypeError(
                            f"Expected type of argument '{second_arg}' to be '{input_type}', got '{sig.parameters[second_arg].annotation}'"
                        )

        self.input = input_descriptor
        self.output = output_descriptor
        self.doc = doc
        self.route = route

    def __str__(self):
        """Short human-readable summary, e.g. ``InferenceAPI(JSON → JSON)``."""
        return f"{self.__class__.__name__}({str(self.input)} → {str(self.output)})"

    @staticmethod
    def _validate_name(api_name: str):
        """Reject names that are not Python identifiers or are reserved."""
        if not api_name.isidentifier():
            raise InvalidArgument(
                "Invalid API name: '{}', a valid identifier may only contain letters,"
                " numbers, underscores and not starting with a number.".format(api_name)
            )
        if api_name in RESERVED_API_NAMES:
            raise InvalidArgument(
                "Reserved API name: '{}' is reserved for infra endpoints".format(
                    api_name
                )
            )

    @staticmethod
    def _validate_route(route: str):
        """Reject routes with illegal URL characters or reserved names."""
        if re.findall(
            r"[?#]+|^(//)|^:", route
        ):  # contains '?' or '#' OR start with '//' OR start with ':'
            # https://tools.ietf.org/html/rfc3986#page-22
            raise InvalidArgument(
                "The path {} contains illegal url characters".format(route)
            )
        if route in RESERVED_API_NAMES:
            raise InvalidArgument(
                "Reserved API route: '{}' is reserved for infra endpoints".format(route)
            )
def _InferenceAPI_dumper(dumper: yaml.Dumper, api: InferenceAPI) -> yaml.Node:
return dumper.represent_dict(
{
"route": api.route,
"doc": api.doc,
"input": api.input.__class__.__name__,
"output": api.output.__class__.__name__,
}
)
# Register the representer so PyYAML can serialize InferenceAPI objects
# directly (e.g. when dumping a service's API listing to YAML).
yaml.add_representer(InferenceAPI, _InferenceAPI_dumper)
|
from __future__ import annotations
import typing as t
import logging
from typing import TYPE_CHECKING
import numpy as np
import bentoml
from bentoml import Tag
from bentoml.exceptions import NotFound
from bentoml.exceptions import InvalidArgument
from bentoml.exceptions import MissingDependencyException
from bentoml._internal.models.model import ModelContext
from ..utils.pkg import get_pkg_version
if TYPE_CHECKING:
from bentoml.types import ModelSignature
from bentoml.types import ModelSignatureDict
from .. import external_typing as ext
# lightgbm is an optional dependency of this framework module; fail early with
# an actionable install message if it is missing.
try:
    import lightgbm as lgb  # type: ignore (missing type stubs for lightgbm)
except ImportError:  # pragma: no cover
    raise MissingDependencyException(
        """lightgbm is required in order to use module `bentoml.lightgbm`, install
        lightgbm with `pip install lightgbm`. For more information, refer to
        https://github.com/microsoft/LightGBM/tree/master/python-package
        """
    )

# Module identifier recorded in saved model metadata and checked on load.
MODULE_NAME = "bentoml.lightgbm"
# Filename of the serialized booster inside the model directory.
MODEL_FILENAME = "saved_model.ubj"
# Metadata schema version for models saved by this module.
API_VERSION = "v1"

logger = logging.getLogger(__name__)
def get(tag_like: str | Tag) -> bentoml.Model:
    """
    Get the BentoML model with the given tag.

    Args:
        tag_like (``str`` ``|`` :obj:`~bentoml.Tag`):
            The tag of the model to retrieve from the model store.
    Returns:
        :obj:`~bentoml.Model`: A BentoML :obj:`~bentoml.Model` with the matching tag.
    Example:

    .. code-block:: python

       import bentoml
       # target model must be from the BentoML model store
       model = bentoml.lightgbm.get("my_lightgbm_model:latest")
    """
    model = bentoml.models.get(tag_like)
    # Guard clause: only models saved by this module are loadable here.
    if model.info.module in (MODULE_NAME, __name__):
        return model
    raise NotFound(
        f"Model {model.tag} was saved with module {model.info.module}, not loading with {MODULE_NAME}."
    )
def load_model(bento_model: str | Tag | bentoml.Model) -> lgb.basic.Booster:  # type: ignore (incomplete ligthgbm type stubs)
    """
    Load the LightGBM model with the given tag from the local BentoML model store.

    Args:
        bento_model (``str`` ``|`` :obj:`~bentoml.Tag` ``|`` :obj:`~bentoml.Model`):
            Either the tag of the model to get from the store, or a BentoML `~bentoml.Model`
            instance to load the model from.
    Returns:
        :obj:`~lightgbm.basic.Booster`: The LightGBM model loaded from the model store or BentoML :obj:`~bentoml.Model`.
    Example:

    .. code-block:: python

        import bentoml
        gbm = bentoml.lightgbm.load("my_lightgbm_model:latest")
    """  # noqa
    # Resolve tags/strings into a bentoml.Model first.
    if not isinstance(bento_model, bentoml.Model):
        bento_model = get(bento_model)
    assert isinstance(bento_model, bentoml.Model)

    if bento_model.info.module not in (MODULE_NAME, __name__):
        raise NotFound(
            f"Model {bento_model.tag} was saved with module {bento_model.info.module}, not loading with {MODULE_NAME}."
        )

    # Reconstruct the booster from the serialized file inside the model dir.
    return lgb.basic.Booster(model_file=bento_model.path_of(MODEL_FILENAME))  # type: ignore (incomplete ligthgbm type stubs)
def save_model(
    name: str,
    model: lgb.basic.Booster,  # type: ignore (incomplete ligthgbm type stubs)
    *,
    signatures: dict[str, ModelSignatureDict] | None = None,
    labels: dict[str, str] | None = None,
    custom_objects: dict[str, t.Any] | None = None,
    metadata: dict[str, t.Any] | None = None,
) -> bentoml.Model:
    """
    Save a LightGBM model instance to the BentoML model store.

    Args:
        name (``str``):
            The name to give to the model in the BentoML store. This must be a valid
            :obj:`~bentoml.Tag` name.
        model (:obj:`~lgb.basic.Booster`):
            The LightGBM model (booster) to be saved.
        signatures (``dict[str, ModelSignatureDict]``, optional):
            Signatures of predict methods to be used. If not provided, the signatures default to
            ``{"predict": {"batchable": False}}``. See :obj:`~bentoml.types.ModelSignature` for more
            details.
        labels (``dict[str, str]``, optional):
            A default set of management labels to be associated with the model. An example is
            ``{"training-set": "data-1"}``.
        custom_objects (``dict[str, Any]``, optional):
            Custom objects to be saved with the model. An example is
            ``{"my-normalizer": normalizer}``.
            Custom objects are currently serialized with cloudpickle, but this implementation is
            subject to change.
        metadata (``dict[str, Any]``, optional):
            Metadata to be associated with the model. An example is ``{"max_depth": 2}``.
            Metadata is intended for display in model management UI and therefore must be a default
            Python type, such as ``str`` or ``int``.
    Returns:
        :obj:`~bentoml.Model`: the saved model entry, whose tag has the format
        `name:version` where `name` is the user-defined model's name, and `version` is
        generated by BentoML.
    Example:

    .. code-block:: python

        import bentoml
        import lightgbm as lgb
        import pandas as pd

        # load a dataset
        df_train = pd.read_csv("regression.train", header=None, sep="\t")
        df_test = pd.read_csv("regression.test", header=None, sep="\t")
        y_train = df_train[0]
        y_test = df_test[0]
        X_train = df_train.drop(0, axis=1)
        X_test = df_test.drop(0, axis=1)

        # create dataset for lightgbm
        lgb_train = lgb.Dataset(X_train, y_train)
        lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)

        # specify your configurations as a dict
        params = {
            "boosting_type": "gbdt",
            "objective": "regression",
            "metric": {"l2", "l1"},
            "num_leaves": 31,
            "learning_rate": 0.05,
        }

        # train
        gbm = lgb.train(
            params, lgb_train, num_boost_round=20, valid_sets=lgb_eval
        )

        # save the booster to BentoML modelstore:
        bento_model = bentoml.lightgbm.save_model("my_lightgbm_model", gbm)
    """  # noqa: LN001
    # Ensure that `model` is actually the Booster object, and not for example one of the scikit-learn wrapper objects.
    if not isinstance(model, lgb.basic.Booster):  # type: ignore (incomplete ligthgbm type stubs)
        try:
            # Work around a LightGBM issue (https://github.com/microsoft/LightGBM/issues/3014)
            # 'model.booster_' checks that the model has been fitted and will error otherwise.
            if not hasattr(model, "fitted_"):  # type: ignore (incomplete ligthgbm type stubs)
                model.fitted_ = True
            model = model.booster_  # type: ignore (incomplete ligthgbm type stubs)
        except AttributeError:
            logger.error(
                "Unable to obtain a lightgbm.basic.Booster from the specified model."
            )
            # Re-raise as-is, preserving the original traceback.
            raise

    if not isinstance(model, lgb.basic.Booster):  # type: ignore (incomplete ligthgbm type stubs)
        raise TypeError(f"Given model ({model}) is not a lightgbm.basic.Booster.")

    context: ModelContext = ModelContext(
        framework_name="lightgbm",
        framework_versions={"lightgbm": get_pkg_version("lightgbm")},
    )

    if signatures is None:
        logger.info(
            'Using default model signature `{"predict": {"batchable": False}}` for LightGBM model'
        )
        signatures = {
            "predict": {"batchable": False},
        }

    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        api_version=API_VERSION,
        signatures=signatures,
        labels=labels,
        custom_objects=custom_objects,
        metadata=metadata,
        context=context,
    ) as bento_model:
        model.save_model(bento_model.path_of(MODEL_FILENAME))
        return bento_model
def get_runnable(bento_model: bentoml.Model) -> t.Type[bentoml.Runnable]:
    """
    Private API: use :obj:`~bentoml.Model.to_runnable` instead.
    """

    class LightGBMRunnable(bentoml.Runnable):
        SUPPORT_NVIDIA_GPU = (
            False  # LightGBM only supports GPU during training, not for inference.
        )
        SUPPORT_CPU_MULTI_THREADING = True

        def __init__(self):
            super().__init__()
            self.model = load_model(bento_model)

            # Resolve each signature method on the loaded booster up front so
            # a missing method fails at runner construction, not per request.
            self.predict_fns: dict[str, t.Callable[..., t.Any]] = {}
            for method_name in bento_model.info.signatures:
                try:
                    self.predict_fns[method_name] = getattr(self.model, method_name)  # type: ignore (incomplete ligthgbm type stubs)
                except AttributeError:
                    raise InvalidArgument(
                        f"No method with name {method_name} found for LightGBM model of type {self.model.__class__}"
                    )

    # The helper function binds method_name/options per iteration, avoiding the
    # late-binding closure pitfall of defining methods directly in the loop.
    def add_runnable_method(method_name: str, options: ModelSignature):
        def _run(
            self: LightGBMRunnable,
            input_data: ext.NpNDArray | ext.PdDataFrame,
        ) -> ext.NpNDArray:
            res = self.predict_fns[method_name](input_data)
            # Normalize the framework's return value to an ndarray.
            return np.asarray(res)  # type: ignore (unknown ndarray types)

        LightGBMRunnable.add_method(
            _run,
            name=method_name,
            batchable=options.batchable,
            batch_dim=options.batch_dim,
            input_spec=options.input_spec,
            output_spec=options.output_spec,
        )

    # Expose one runnable method per saved model signature.
    for method_name, options in bento_model.info.signatures.items():
        add_runnable_method(method_name, options)

    return LightGBMRunnable
|
from __future__ import annotations
import os
import typing as t
import logging
import importlib
import importlib.util
from typing import TYPE_CHECKING
import attr
import bentoml
from bentoml import Tag
from bentoml.models import Model
from bentoml.models import ModelContext
from bentoml.models import ModelOptions
from bentoml.exceptions import NotFound
from bentoml.exceptions import BentoMLException
from bentoml.exceptions import MissingDependencyException
from ..types import LazyType
from ..utils.pkg import get_pkg_version
if TYPE_CHECKING:
from bentoml.types import ModelSignature
from bentoml.types import ModelSignatureDict
from ..external_typing import transformers as ext
MODULE_NAME = "bentoml.transformers"
API_VERSION = "v1"
logger = logging.getLogger(__name__)
def _check_flax_supported() -> None:  # pragma: no cover
    """Log whether the installed transformers/Jax/Flax combination can use Flax."""
    # transformers < 4.x has no Flax support at all.
    if not get_pkg_version("transformers").startswith("4"):
        logger.warning(
            "Detected transformers version: "
            f"{get_pkg_version('transformers')}, which "
            "doesn't have supports for Flax. "
            "Update `transformers` to 4.x and "
            "above to have Flax supported."
        )
        return

    # Both jax and flax must be importable for Flax pipelines to work.
    if (
        importlib.util.find_spec("jax") is None
        or importlib.util.find_spec("flax") is None
    ):
        logger.warning(
            "No versions of Flax or Jax are found under "
            "the current machine. In order to use "
            "Flax with transformers 4.x and above, "
            "refers to https://github.com/google/flax#quick-install"
        )
        return

    jax_version = get_pkg_version("jax")
    flax_version = get_pkg_version("flax")
    logger.info(
        f"Jax version {jax_version}, "
        f"Flax version {flax_version} available."
    )
# transformers is an optional dependency of this framework module; verify it is
# installed (and log Flax availability) before the rest of the module loads.
try:
    import transformers

    _check_flax_supported()
except ImportError:  # pragma: no cover
    raise MissingDependencyException(
        """\
        transformers is required in order to use module `bentoml.transformers`.
        Instruction: Install transformers with `pip install transformers`.
        """
    )
@attr.define
class TransformersOptions(ModelOptions):
    """Options for the Transformers model."""

    # Pipeline task name (e.g. "text-generation"); validated against the tasks
    # transformers knows about via `transformers.pipelines.check_task`.
    task: str = attr.field(
        validator=[
            attr.validators.instance_of(str),
            lambda instance, attribute, value: transformers.pipelines.check_task(value),  # type: ignore
        ]
    )

    # Extra keyword arguments stored with the model; forwarded to
    # `transformers.pipeline()` when the model is loaded (see load_model).
    kwargs: t.Dict[str, t.Any] = attr.field(factory=dict)
def get(tag_like: str | Tag) -> Model:
    """Fetch a model from the BentoML store, ensuring it was saved by this module."""
    model = bentoml.models.get(tag_like)
    if model.info.module in (MODULE_NAME, __name__):
        return model
    raise NotFound(
        f"Model {model.tag} was saved with module {model.info.module}, not loading with {MODULE_NAME}."
    )
def load_model(
    bento_model: str | Tag | Model,
    **kwargs: t.Any,
) -> ext.TransformersPipeline:
    """
    Load the Transformers model from BentoML local modelstore with given name.

    Args:
        bento_model (``str`` ``|`` :obj:`~bentoml.Tag` ``|`` :obj:`~bentoml.Model`):
            Either the tag of the model to get from the store, or a BentoML `~bentoml.Model`
            instance to load the model from.
        kwargs (:code:`Any`):
            Additional keyword arguments to pass to the model.
    Returns:
        ``Pipeline``:
            The Transformers pipeline loaded from the model store.
    Raises:
        BentoMLException: if the model was not saved with `bentoml.transformers`.
    Example:

    .. code-block:: python

        import bentoml
        pipeline = bentoml.transformers.load_model('my_model:latest')
    """  # noqa
    if not isinstance(bento_model, Model):
        bento_model = get(bento_model)

    if bento_model.info.module not in (MODULE_NAME, __name__):
        raise BentoMLException(
            f"Model {bento_model.tag} was saved with module {bento_model.info.module}, not loading with {MODULE_NAME}."
        )

    pipeline_task: str = bento_model.info.options.task  # type: ignore
    # BUGFIX: copy the stored kwargs before merging in caller overrides; the
    # previous in-place `.update()` mutated `bento_model.info.options.kwargs`,
    # leaking per-call arguments into the cached model metadata.
    pipeline_kwargs: t.Dict[str, t.Any] = dict(bento_model.info.options.kwargs)  # type: ignore
    pipeline_kwargs.update(kwargs)
    if len(pipeline_kwargs) > 0:
        logger.info(
            f"Loading '{pipeline_task}' pipeline '{bento_model.tag}' with kwargs {pipeline_kwargs}."
        )
    return transformers.pipeline(task=pipeline_task, model=bento_model.path, **pipeline_kwargs)  # type: ignore
def save_model(
    name: str,
    pipeline: ext.TransformersPipeline,
    *,
    signatures: dict[str, ModelSignatureDict | ModelSignature] | None = None,
    labels: dict[str, str] | None = None,
    custom_objects: dict[str, t.Any] | None = None,
    metadata: dict[str, t.Any] | None = None,
) -> bentoml.Model:
    """
    Save a model instance to BentoML modelstore.

    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        pipeline (:code:`Pipeline`):
            Instance of the Transformers pipeline to be saved.
        signatures (:code: `Dict[str, bool | BatchDimType | AnyType | tuple[AnyType]]`)
            Methods to expose for running inference on the target model. Signatures are
            used for creating Runner instances when serving model with bentoml.Service
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (``dict[str, Any]``, optional):
            Custom objects to be saved with the model. An example is
            ``{"my-normalizer": normalizer}``.
            Custom objects are currently serialized with cloudpickle, but this implementation is
            subject to change.
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.
    Returns:
        :obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is
        the user-defined model's name, and a generated `version`.
    Examples:

    .. code-block:: python

        import bentoml
        from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2")
        generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer)
        bento_model = bentoml.transformers.save_model("text-generation-pipeline", generator)
    """  # noqa
    # Only full Pipeline objects are accepted: a pipeline bundles the model,
    # tokenizer/config, and task metadata needed to reconstruct it on load.
    if not isinstance(
        pipeline,
        LazyType["ext.TransformersPipeline"]("transformers.pipelines.base.Pipeline"),
    ):
        raise BentoMLException(
            "`pipeline` must be an instance of `transformers.pipelines.base.Pipeline`. "
            "To save other Transformers types like models, tokenizers, configs, feature "
            "extractors, construct a pipeline with the model, tokenizer, config, or feature "
            "extractor specified as arguments, then call save_model with the pipeline. "
            "Refer to https://huggingface.co/docs/transformers/main_classes/pipelines "
            "for more information on pipelines. If transformers doesn't provide a task you "
            "need, refers to the custom pipeline section to create your own pipelines."
            """
            ```python
            import bentoml
            from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
            tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
            model = AutoModelForCausalLM.from_pretrained("distilgpt2")
            generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer)
            bentoml.transformers.save_model("text-generation-pipeline", generator)
            ```
            """
        )

    context = ModelContext(
        framework_name="transformers",
        framework_versions={"transformers": get_pkg_version("transformers")},
    )
    # Record the pipeline task so load_model can rebuild the same pipeline type.
    options = TransformersOptions(task=pipeline.task)

    if signatures is None:
        # Pipelines are invoked via __call__, hence the default signature.
        signatures = {
            "__call__": {"batchable": False},
        }
        logger.info(
            f"Using the default model signature for Transformers ({signatures}) for model {name}."
        )

    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        api_version=API_VERSION,
        labels=labels,
        context=context,
        options=options,
        signatures=signatures,
        custom_objects=custom_objects,
        metadata=metadata,
    ) as bento_model:
        # Serialize the model weights, tokenizer and config into the model dir.
        pipeline.save_pretrained(bento_model.path)
        return bento_model
def get_runnable(
    bento_model: bentoml.Model,
) -> t.Type[bentoml.Runnable]:
    """
    Private API: use :obj:`~bentoml.Model.to_runnable` instead.
    """

    class TransformersRunnable(bentoml.Runnable):
        SUPPORT_NVIDIA_GPU = True  # type: ignore
        SUPPORT_CPU_MULTI_THREADING = True  # type: ignore

        def __init__(self):
            super().__init__()

            # Pin the pipeline to a GPU device when one is visible to this
            # process; otherwise fall back to the default (CPU) placement.
            available_gpus = os.getenv("NVIDIA_VISIBLE_DEVICES")
            if available_gpus is not None and available_gpus != "":
                # assign GPU resources
                kwargs = {
                    "device": available_gpus,
                }
            else:
                # assign CPU resources
                kwargs = {}

            self.pipeline = load_model(bento_model, **kwargs)

            # Resolve each signature method on the pipeline once at init time.
            self.predict_fns: dict[str, t.Callable[..., t.Any]] = {}
            for method_name in bento_model.info.signatures:
                self.predict_fns[method_name] = getattr(self.pipeline, method_name)

    # The helper binds method_name/options per iteration, avoiding the classic
    # late-binding closure pitfall of defining methods directly in the loop.
    def add_runnable_method(method_name: str, options: ModelSignature):
        def _run(self: TransformersRunnable, *args: t.Any, **kwargs: t.Any) -> t.Any:
            return getattr(self.pipeline, method_name)(*args, **kwargs)

        TransformersRunnable.add_method(
            _run,
            name=method_name,
            batchable=options.batchable,
            batch_dim=options.batch_dim,
            input_spec=options.input_spec,
            output_spec=options.output_spec,
        )

    # Expose one runnable method per saved model signature.
    for method_name, options in bento_model.info.signatures.items():
        add_runnable_method(method_name, options)

    return TransformersRunnable
|
from __future__ import annotations
import os
import typing as t
import logging
import functools
from typing import TYPE_CHECKING
import attr
import bentoml
from bentoml import Tag
from bentoml import Runnable
from bentoml.models import ModelContext
from bentoml.models import ModelOptions
from bentoml.exceptions import NotFound
from bentoml.exceptions import MissingDependencyException
from ..types import LazyType
from ..models.model import ModelSignature
from ..runner.utils import Params
from ..utils.tensorflow import get_tf_version
logger = logging.getLogger(__name__)
if TYPE_CHECKING: # pragma: no cover
from .. import external_typing as ext
from ..models.model import ModelSignatureDict
from ..external_typing import tensorflow as tf_ext
KerasArgType = t.Union[t.List[t.Union[int, float]], ext.NpNDArray, tf_ext.Tensor]
try:
import tensorflow as tf
import tensorflow.keras as keras
except ImportError: # pragma: no cover
raise MissingDependencyException(
"""\
`tensorflow` is required to use `bentoml.keras`, since
we will use Tensorflow as Keras backend.\n
Instruction: Refers to https://www.tensorflow.org/install for
more information of your use case.
"""
)
MODULE_NAME = "bentoml.keras"
API_VERSION = "v1"
@attr.define
class KerasOptions(ModelOptions):
    """Options for the Keras model."""

    # Whether the optimizer state is saved with the model; forwarded to
    # keras Model.save(include_optimizer=...) in save_model below.
    include_optimizer: bool
    # Per-method keyword arguments bound to runner methods via
    # functools.partial in get_runnable.
    partial_kwargs: t.Dict[str, t.Any] = attr.field(factory=dict)
def get(tag_like: str | Tag) -> bentoml.Model:
    """Fetch a model saved by this module from the local BentoML store.

    Raises :obj:`NotFound` when the stored entry was produced by a
    different framework module.
    """
    model = bentoml.models.get(tag_like)
    if model.info.module in (MODULE_NAME, __name__):
        return model
    raise NotFound(
        f"Model {model.tag} was saved with module {model.info.module}, not loading with {MODULE_NAME}."
    )
def load_model(
    bento_model: str | Tag | bentoml.Model,
    device_name: str = "/device:CPU:0",
) -> "tf_ext.KerasModel":
    """
    Load the Keras model stored under ``bento_model`` back into memory.

    Args:
        bento_model (``str`` ``|`` :obj:`~bentoml.Tag` ``|`` :obj:`~bentoml.Model`):
            Tag of the model in the store, or an already-resolved BentoML
            :obj:`~bentoml.Model` instance.
        device_name (``str``):
            TensorFlow device id to place the model on; the format must be
            accepted by `tf.device <https://www.tensorflow.org/api_docs/python/tf/device>`_.

    Returns:
        :obj:`keras.Model`: the user's Keras model instance from the BentoML model store.

    Examples:

    .. code-block:: python

        import bentoml

        # load a model back into memory:
        loaded = bentoml.keras.load_model("keras_model")
    """  # noqa
    if not isinstance(bento_model, bentoml.Model):
        bento_model = get(bento_model)

    if bento_model.info.module in (MODULE_NAME, __name__):
        # Custom layers/objects captured at save time are handed back to Keras
        # so deserialization can resolve them.
        with tf.device(device_name):
            return keras.models.load_model(
                bento_model.path,
                custom_objects=bento_model.custom_objects,
            )
    raise NotFound(
        f"Model {bento_model.tag} was saved with module {bento_model.info.module}, not loading with {MODULE_NAME}."
    )
def save_model(
    name: str,
    model: "tf_ext.KerasModel",
    *,
    tf_signatures: "tf_ext.ConcreteFunction" | None = None,
    tf_save_options: "tf_ext.SaveOptions" | None = None,
    include_optimizer: bool = False,
    signatures: t.Dict[str, ModelSignature]
    | t.Dict[str, ModelSignatureDict]
    | None = None,
    labels: t.Optional[t.Dict[str, str]] = None,
    custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
    metadata: t.Optional[t.Dict[str, t.Any]] = None,
) -> bentoml.Model:
    """
    Save a Keras model instance to the BentoML model store.

    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        model (:obj:`tensorflow.keras.Model` | :obj:`tensorflow.keras.engine.sequential.Sequential`):
            Instance of the Keras model to be saved to BentoML modelstore.
        tf_signatures (:code:`Union[Callable[..., Any], dict]`, `optional`, default to :code:`None`):
            Refers to `Signatures explanation <https://www.tensorflow.org/api_docs/python/tf/saved_model/save>`_
            from Tensorflow documentation for more information.
        tf_save_options (`tf.saved_model.SaveOptions`, `optional`, default to :code:`None`):
            :obj:`tf.saved_model.SaveOptions` object that specifies options for saving.
        include_optimizer (:code:`bool`, `optional`, default to :code:`False`):
            Whether the optimizer state is saved together with the model weights;
            recorded in the model's :obj:`KerasOptions` as well.
        signatures (:code:`Dict[str, bool | BatchDimType | AnyType | tuple[AnyType]]`):
            Methods to expose for running inference on the target model. Signatures are
            used for creating Runner instances when serving model with bentoml.Service.
            Defaults to ``{"predict": {"batchable": False}}`` when omitted.
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Dictionary of Keras custom objects, if specified.
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.

    Returns:
        :obj:`~bentoml.Model`: the saved model entry; its ``tag`` has the format
        `name:version` where `name` is the user-defined model's name, and
        `version` is generated by BentoML.

    Examples:

    .. code-block:: python

        import bentoml
        import tensorflow as tf
        import tensorflow.keras as keras

        def custom_activation(x):
            return tf.nn.tanh(x) ** 2

        class CustomLayer(keras.layers.Layer):
            def __init__(self, units=32, **kwargs):
                super(CustomLayer, self).__init__(**kwargs)
                self.units = tf.Variable(units, name="units")

            def call(self, inputs, training=False):
                if training:
                    return inputs * self.units
                else:
                    return inputs

            def get_config(self):
                config = super(CustomLayer, self).get_config()
                config.update({"units": self.units.numpy()})
                return config

        def KerasSequentialModel() -> keras.models.Model:
            net = keras.models.Sequential(
                (
                    keras.layers.Dense(
                        units=1,
                        input_shape=(5,),
                        use_bias=False,
                        kernel_initializer=keras.initializers.Ones(),
                    ),
                )
            )
            opt = keras.optimizers.Adam(0.002, 0.5)
            net.compile(optimizer=opt, loss="binary_crossentropy", metrics=["accuracy"])
            return net

        model = KerasSequentialModel()

        # `save` a given model and retrieve corresponding tag:
        bento_model = bentoml.keras.save_model("keras_model", model)

        # `save` a given model with custom objects definition:
        custom_objects = {
            "CustomLayer": CustomLayer,
            "custom_activation": custom_activation,
        }
        custom_bento_model = bentoml.keras.save_model(
            "custom_obj_keras", model, custom_objects=custom_objects
        )
    """  # noqa
    # LazyType avoids importing keras internals eagerly just for the type check.
    if not isinstance(
        model,
        (
            LazyType("tensorflow.keras.Model"),
            LazyType("tensorflow.keras.sequential", "Sequential"),
        ),
    ):
        raise TypeError(
            f"Given model ({model}) is not a keras.model.Model or keras.engine.sequential.Sequential."
        )

    context = ModelContext(
        framework_name="keras", framework_versions={"tensorflow": get_tf_version()}
    )

    if signatures is None:
        signatures = {
            "predict": {
                "batchable": False,
            }
        }
        logger.info(f"Using the default model signature {signatures} for Keras models.")

    options = KerasOptions(include_optimizer=include_optimizer)

    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        api_version=API_VERSION,
        options=options,
        context=context,
        labels=labels,
        custom_objects=custom_objects,
        metadata=metadata,
        signatures=signatures,
    ) as bento_model:
        # Keras writes the model (SavedModel format) directly into the
        # bento model's directory.
        model.save(
            bento_model.path,
            signatures=tf_signatures,
            options=tf_save_options,
            include_optimizer=include_optimizer,
        )
        return bento_model
def get_runnable(
    bento_model: bentoml.Model,
):
    """
    Private API: use :obj:`~bentoml.Model.to_runnable` instead.
    """
    # Per-method kwargs recorded at save time (KerasOptions.partial_kwargs).
    partial_kwargs: t.Dict[str, t.Any] = bento_model.info.options.partial_kwargs  # type: ignore

    class KerasRunnable(Runnable):
        SUPPORT_NVIDIA_GPU = True
        SUPPORT_CPU_MULTI_THREADING = True

        def __init__(self):
            super().__init__()
            if len(tf.config.list_physical_devices("GPU")) > 0:
                # In Multi-GPU scenarios, the visible cuda devices will be set for each Runner worker
                # by the runner's Scheduling Strategy. So that the Runnable implementation only needs
                # to find the first GPU device visible to current process.
                self.device_name = "/device:GPU:0"
            else:
                self.device_name = "/device:CPU:0"
                if "BENTOML_NUM_THREAD" in os.environ:
                    num_threads = int(os.environ["BENTOML_NUM_THREAD"])
                    tf.config.threading.set_inter_op_parallelism_threads(num_threads)
                    tf.config.threading.set_intra_op_parallelism_threads(num_threads)

            self.model = load_model(bento_model, device_name=self.device_name)
            self.methods_cache: t.Dict[str, t.Callable[..., t.Any]] = {}

    def _gen_run_method(runnable_self: KerasRunnable, method_name: str):
        raw_method = getattr(runnable_self.model, method_name)
        method_partial_kwargs = partial_kwargs.get(method_name)
        if method_partial_kwargs:
            raw_method = functools.partial(raw_method, **method_partial_kwargs)

        def _mapping(item: "KerasArgType") -> "tf_ext.TensorLike":
            # Coerce plain Python lists / numpy arrays into tensors.
            if not LazyType["tf_ext.TensorLike"]("tensorflow.Tensor").isinstance(item):
                return t.cast("tf_ext.TensorLike", tf.convert_to_tensor(item))
            else:
                return item

        def _run_method(
            runnable_self: KerasRunnable, *args: "KerasArgType"
        ) -> "ext.NpNDArray":
            params = Params["KerasArgType"](*args)
            with tf.device(runnable_self.device_name):
                params = params.map(_mapping)
                # BUG FIX: unpack the positional arguments rather than passing
                # the Params.args tuple itself to the model method — mirrors the
                # equivalent TensorFlow runnable implementation in this codebase.
                res: "tf_ext.EagerTensor" | "ext.NpNDArray" = raw_method(*params.args)
                if LazyType["tf_ext.EagerTensor"](
                    "tensorflow.python.framework.ops._EagerTensorBase"
                ).isinstance(res):
                    return t.cast("ext.NpNDArray", res.numpy())
                return res

        return _run_method

    def add_run_method(method_name: str, options: ModelSignature):
        def run_method(
            runnable_self: KerasRunnable,
            *args: "KerasArgType",
        ) -> "ext.NpNDArray":
            # Build the bound run method lazily and memoize it per signature name.
            _run_method = runnable_self.methods_cache.get(method_name)
            if not _run_method:
                _run_method = _gen_run_method(runnable_self, method_name)
                runnable_self.methods_cache[method_name] = _run_method
            return _run_method(runnable_self, *args)

        KerasRunnable.add_method(
            run_method,
            name=method_name,
            batchable=options.batchable,
            batch_dim=options.batch_dim,
            input_spec=options.input_spec,
            output_spec=options.output_spec,
        )

    for method_name, options in bento_model.info.signatures.items():
        add_run_method(method_name, options)

    return KerasRunnable
|
from __future__ import annotations
import typing as t
import logging
from typing import TYPE_CHECKING
import bentoml
from bentoml import Tag
from bentoml.models import Model
from bentoml.models import ModelContext
from bentoml.exceptions import NotFound
from bentoml.exceptions import BentoMLException
from bentoml.exceptions import MissingDependencyException
from ..models import PKL_EXT
from ..models import SAVE_NAMESPACE
from ..models.model import ModelSignature
if TYPE_CHECKING:
from .. import external_typing as ext
ModelType = t.Any
MODULE_NAME = "bentoml.picklable_model"
API_VERSION = "v1"
try:
import cloudpickle # type: ignore
except ImportError: # pragma: no cover
raise MissingDependencyException(
f"""cloudpickle is required in order to use the module `{MODULE_NAME}`, install
cloudpickle with `pip install cloudpickle`.
"""
)
logger = logging.getLogger(__name__)
def get(tag_like: str | Tag) -> Model:
    """Look up a picklable model in the local store and validate that it was
    saved by this module."""
    model = bentoml.models.get(tag_like)
    saved_module = model.info.module
    if saved_module not in (MODULE_NAME, __name__):
        raise NotFound(
            f"Model {model.tag} was saved with module {model.info.module}, not loading with {MODULE_NAME}."
        )
    return model
def load_model(bento_model: str | Tag | Model) -> ModelType:
    """
    Load the picklable model with the given tag from the local BentoML model store.

    Args:
        bento_model (``str`` ``|`` :obj:`~bentoml.Tag` ``|`` :obj:`~bentoml.Model`):
            Tag of the model in the store, or an already-resolved BentoML
            :obj:`~bentoml.Model` instance to load the model from.

    Returns:
        ``object``:
            The pickled object restored from the model store with ``cloudpickle``.

    Example:

    .. code-block:: python

        import bentoml

        picklable_model = bentoml.picklable_model.load_model('my_model:latest')
    """  # noqa
    if not isinstance(bento_model, Model):
        bento_model = get(bento_model)

    saved_module = bento_model.info.module
    if saved_module not in (MODULE_NAME, __name__):
        raise BentoMLException(
            f"Model {bento_model.tag} was saved with module {bento_model.info.module}, not loading with {MODULE_NAME}."
        )

    pickle_path = bento_model.path_of(f"{SAVE_NAMESPACE}{PKL_EXT}")
    # cloudpickle detects the pickle protocol version automatically.
    with open(pickle_path, "rb") as pickled:
        return cloudpickle.load(pickled)
def save_model(
    name: str,
    model: ModelType,
    *,
    signatures: dict[str, ModelSignature] | None = None,
    labels: t.Dict[str, str] | None = None,
    custom_objects: t.Dict[str, t.Any] | None = None,
    metadata: t.Dict[str, t.Any] | None = None,
) -> bentoml.Model:
    """
    Save a picklable model instance to the BentoML model store.

    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        model:
            Instance of model to be saved; it must be serializable with ``cloudpickle``.
        signatures (:code:`Dict[str, ModelSignatureDict]`):
            Methods to expose for running inference on the target model. Signatures are
            used for creating Runner instances when serving model with bentoml.Service.
            Defaults to ``{"__call__": {"batchable": False}}`` when omitted.
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.

    Returns:
        :obj:`~bentoml.Model`: the saved model entry; its ``tag`` has the format
        `name:version` where `name` is the user-defined model's name, and
        `version` is generated by BentoML.

    Examples:

    .. code-block:: python

        import bentoml

        bento_model = bentoml.picklable_model.save_model('picklable_pyobj', model)
    """  # noqa
    context = ModelContext(
        framework_name="cloudpickle",
        framework_versions={"cloudpickle": cloudpickle.__version__},
    )

    if signatures is None:
        logger.info(
            'Using default model signature `{"__call__": {"batchable": False}}` for picklable model'
        )
        signatures = {"__call__": ModelSignature(batchable=False)}

    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        api_version=API_VERSION,
        labels=labels,
        custom_objects=custom_objects,
        metadata=metadata,
        context=context,
        signatures=signatures,
    ) as bento_model:
        # The pickled bytes live at {SAVE_NAMESPACE}{PKL_EXT} inside the model
        # directory; load_model reads the same path back.
        with open(bento_model.path_of(f"{SAVE_NAMESPACE}{PKL_EXT}"), "wb") as f:
            cloudpickle.dump(model, f)
        return bento_model
def get_runnable(bento_model: Model):
    """
    Private API: use :obj:`~bentoml.Model.to_runnable` instead.
    """

    class PicklableRunnable(bentoml.Runnable):
        SUPPORT_NVIDIA_GPU = False  # type: ignore
        SUPPORT_CPU_MULTI_THREADING = False  # type: ignore

        def __init__(self):
            super().__init__()
            self.model = load_model(bento_model)

    def _bind(method_name: str):
        # Factory so each generated runner captures its own method_name
        # (avoids the late-binding closure pitfall).
        def _invoke(
            self: PicklableRunnable,
            *args: ext.NpNDArray | ext.PdDataFrame,
            **kwargs: ext.NpNDArray | ext.PdDataFrame,
        ) -> ext.NpNDArray:
            assert isinstance(method_name, str), repr(method_name)
            return getattr(self.model, method_name)(*args, **kwargs)

        return _invoke

    for method_name, options in bento_model.info.signatures.items():
        assert isinstance(method_name, str), repr(method_name)
        PicklableRunnable.add_method(
            _bind(method_name),
            name=method_name,
            batchable=options.batchable,
            batch_dim=options.batch_dim,
            input_spec=options.input_spec,
            output_spec=options.output_spec,
        )

    return PicklableRunnable
|
from __future__ import annotations
import typing as t
from typing import TYPE_CHECKING
import bentoml
from .torchscript import get
from .torchscript import load_model
from .torchscript import save_model as script_save_model
from .torchscript import get_runnable
from ...exceptions import MissingDependencyException
if TYPE_CHECKING:
from ..models.model import ModelSignaturesType
_IMPORT_ERROR = f"""\
`pytorch_lightning` and `torch` is required in order to use module `{__name__}`\n
Refers to https://pytorch.org/get-started/locally/ to setup PyTorch correctly.
Then run `pip install pytorch_lightning`
"""
try:
import pytorch_lightning as pl
except ImportError: # pragma: no cover
raise MissingDependencyException(_IMPORT_ERROR)
MODULE_NAME = "bentoml.pytorch_lightning"
__all__ = ["save_model", "load_model", "get_runnable", "get"]
def save_model(
    name: str,
    model: pl.LightningModule,
    *,
    signatures: ModelSignaturesType | None = None,
    labels: t.Dict[str, str] | None = None,
    custom_objects: t.Dict[str, t.Any] | None = None,
    metadata: t.Dict[str, t.Any] | None = None,
) -> bentoml.Model:
    """
    Save a PyTorch Lightning model instance to the BentoML model store.

    The LightningModule is first exported to TorchScript, then persisted
    through the ``torchscript`` framework module's save implementation.

    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        model (`pl.LightningModule`):
            Instance of model to be saved
        signatures (:code:`ModelSignaturesType`, `optional`, default to :code:`None`):
            Methods to expose for running inference on the target model; forwarded
            to the torchscript save implementation.
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.

    Returns:
        :obj:`~bentoml.Model`: the saved model entry; its ``tag`` has the format
        `name:version` where `name` is the user-defined model's name, and
        `version` is generated by BentoML.

    Examples:

    .. code-block:: python

        import bentoml
        import torch
        import pytorch_lightning as pl

        class LitClassifier(pl.LightningModule):

            def __init__(self, hidden_dim: int = 128, learning_rate: float = 0.0001):
                super().__init__()
                self.save_hyperparameters()

                self.l1 = torch.nn.Linear(28 * 28, self.hparams.hidden_dim)
                self.l2 = torch.nn.Linear(self.hparams.hidden_dim, 10)

            def forward(self, x):
                x = x.view(x.size(0), -1)
                x = torch.relu(self.l1(x))
                x = torch.relu(self.l2(x))
                return x

            def training_step(self, batch, batch_idx):
                x, y = batch
                y_hat = self(x)
                loss = F.cross_entropy(y_hat, y)
                return loss

            def validation_step(self, batch, batch_idx):
                x, y = batch
                y_hat = self(x)
                loss = F.cross_entropy(y_hat, y)
                self.log("valid_loss", loss)

            def test_step(self, batch, batch_idx):
                x, y = batch
                y_hat = self(x)
                loss = F.cross_entropy(y_hat, y)
                self.log("test_loss", loss)

            def configure_optimizers(self):
                return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)

        bento_model = bentoml.pytorch_lightning.save_model("lit_classifier", LitClassifier())
    """
    if not isinstance(model, pl.LightningModule):
        raise TypeError(
            f"`model` must be an instance of `pl.LightningModule`, got {type(model)}"
        )

    # Export to TorchScript; to_torchscript may return a dict when the module
    # defines multiple example inputs — that case is not supported here.
    script_module = model.to_torchscript()
    assert not isinstance(
        script_module, dict
    ), "Saving a dict of pytorch_lightning Module into one BentoModel is not supported"
    return script_save_model(
        name,
        script_module,
        signatures=signatures,
        labels=labels,
        custom_objects=custom_objects,
        metadata=metadata,
        # record that this model came through the pytorch_lightning module
        _include_pytorch_lightning_version=True,
    )
|
from __future__ import annotations
import typing as t
import logging
from typing import TYPE_CHECKING
from pathlib import Path
import cloudpickle
import bentoml
from bentoml import Tag
from ..types import LazyType
from ..models import Model
from ..utils.pkg import get_pkg_version
from ...exceptions import NotFound
from ...exceptions import BentoMLException
from ..models.model import ModelContext
from .common.pytorch import torch
from .common.pytorch import PyTorchTensorContainer
__all__ = ["load_model", "save_model", "get_runnable", "get", "PyTorchTensorContainer"]
MODULE_NAME = "bentoml.pytorch"
MODEL_FILENAME = "saved_model.pt"
API_VERSION = "v1"
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from ..models.model import ModelSignaturesType
def get(tag_like: str | Tag) -> Model:
    """Retrieve a PyTorch model from the local BentoML store by tag.

    Raises :obj:`NotFound` if the entry belongs to another framework module.
    """
    model = bentoml.models.get(tag_like)
    if model.info.module in (MODULE_NAME, __name__):
        return model
    raise NotFound(
        f"Model {model.tag} was saved with module {model.info.module}, not loading with {MODULE_NAME}."
    )
def load_model(
    bentoml_model: str | Tag | Model,
    device_id: t.Optional[str] = "cpu",
) -> torch.nn.Module:
    """
    Load a model from a BentoML Model with given name.

    Args:
        bentoml_model (:code:`Union[str, Tag, Model]`):
            Tag of a saved model in the BentoML local model store, or an
            already-resolved :obj:`~bentoml.Model` instance.
        device_id (:code:`str`, `optional`, default to :code:`cpu`):
            Optional devices to put the given model on. Refers to `device attributes <https://pytorch.org/docs/stable/tensor_attributes.html#torch.torch.device>`_.

    Returns:
        :obj:`torch.nn.Module`: an instance of :code:`torch.nn.Module` from BentoML modelstore.

    Examples:

    .. code-block:: python

        import bentoml
        model = bentoml.pytorch.load_model('lit_classifier:latest', device_id="cuda:0")
    """
    if isinstance(bentoml_model, (str, Tag)):
        bentoml_model = get(bentoml_model)

    if bentoml_model.info.module not in (MODULE_NAME, __name__):
        raise BentoMLException(
            f"Model {bentoml_model.tag} was saved with module {bentoml_model.info.module}, not loading with {MODULE_NAME}."
        )
    weight_file = bentoml_model.path_of(MODEL_FILENAME)
    with Path(weight_file).open("rb") as file:
        # NOTE: torch.load unpickles arbitrary objects — only load models from
        # a trusted model store.
        model: "torch.nn.Module" = torch.load(file, map_location=device_id)
    return model
def save_model(
    name: str,
    model: "torch.nn.Module",
    *,
    signatures: ModelSignaturesType | None = None,
    labels: t.Dict[str, str] | None = None,
    custom_objects: t.Dict[str, t.Any] | None = None,
    metadata: t.Dict[str, t.Any] | None = None,
) -> bentoml.Model:
    """
    Save a PyTorch model instance to the BentoML model store.

    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        model (:code:`torch.nn.Module`):
            Instance of model to be saved
        signatures (:code:`ModelSignaturesType`, `optional`, default to :code:`None`):
            A dictionary of method names and their corresponding signatures.
            Defaults to ``{"__call__": {"batchable": False}}`` when omitted.
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.

    Returns:
        :obj:`~bentoml.Model`: the saved model entry; its ``tag`` has the format
        `name:version` where `name` is the user-defined model's name, and
        `version` is generated by BentoML.

    Examples:

    .. code-block:: python

        import torch
        import bentoml

        class NGramLanguageModeler(nn.Module):

            def __init__(self, vocab_size, embedding_dim, context_size):
                super(NGramLanguageModeler, self).__init__()
                self.embeddings = nn.Embedding(vocab_size, embedding_dim)
                self.linear1 = nn.Linear(context_size * embedding_dim, 128)
                self.linear2 = nn.Linear(128, vocab_size)

            def forward(self, inputs):
                embeds = self.embeddings(inputs).view((1, -1))
                out = F.relu(self.linear1(embeds))
                out = self.linear2(out)
                log_probs = F.log_softmax(out, dim=1)
                return log_probs

        bento_model = bentoml.pytorch.save_model("ngrams", NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE))
        # example tag: ngrams:20201012_DE43A2

    Integration with Torch Hub and BentoML:

    .. code-block:: python

        import torch
        import bentoml

        resnet50 = torch.hub.load("pytorch/vision", "resnet50", pretrained=True)
        ...
        # trained a custom resnet50

        bento_model = bentoml.pytorch.save_model("resnet50", resnet50)
    """
    # LazyType avoids importing torch eagerly just for the type check.
    if not LazyType("torch.nn.Module").isinstance(model):
        raise TypeError(f"Given model ({model}) is not a torch.nn.Module.")

    context: ModelContext = ModelContext(
        framework_name="torch",
        framework_versions={"torch": get_pkg_version("torch")},
    )

    if signatures is None:
        signatures = {"__call__": {"batchable": False}}
        logger.info(
            f"Using the default model signature ({signatures}) for model {name}."
        )

    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        api_version=API_VERSION,
        labels=labels,
        signatures=signatures,
        custom_objects=custom_objects,
        options=None,
        context=context,
        metadata=metadata,
    ) as bento_model:
        weight_file = bento_model.path_of(MODEL_FILENAME)
        with open(weight_file, "wb") as file:
            # cloudpickle can serialize objects (e.g. closures) that the
            # default pickler cannot.
            torch.save(model, file, pickle_module=cloudpickle)  # type: ignore
        return bento_model
def get_runnable(bento_model: Model):
    """
    Private API: use :obj:`~bentoml.Model.to_runnable` instead.
    """
    from .common.pytorch import partial_class
    from .common.pytorch import PytorchModelRunnable
    from .common.pytorch import make_pytorch_runnable_method

    # Register one runnable method per saved signature.
    for name, signature in bento_model.info.signatures.items():
        PytorchModelRunnable.add_method(
            make_pytorch_runnable_method(name),
            name=name,
            batchable=signature.batchable,
            batch_dim=signature.batch_dim,
            input_spec=signature.input_spec,
            output_spec=signature.output_spec,
        )

    # Bind the model and loader so callers can instantiate without arguments.
    return partial_class(
        PytorchModelRunnable,
        bento_model=bento_model,
        loader=load_model,
    )
|
from __future__ import annotations
import pickle
import typing as t
import logging
import functools
import itertools
import contextlib
from typing import TYPE_CHECKING
import attr
import bentoml
from bentoml import Tag
from bentoml import Runnable
from bentoml.models import ModelContext
from bentoml.models import ModelOptions
from bentoml.exceptions import NotFound
from bentoml.exceptions import MissingDependencyException
from ..types import LazyType
from ..models.model import ModelSignature
from ..runner.utils import Params
from ..runner.container import Payload
from ..runner.container import DataContainer
from ..runner.container import DataContainerRegistry
from ..utils.tensorflow import get_tf_version
from ..utils.tensorflow import hook_loaded_model
logger = logging.getLogger(__name__)
try:
import tensorflow as tf # type: ignore
except ImportError: # pragma: no cover
raise MissingDependencyException(
"""\
`tensorflow` is required in order to use `bentoml.tensorflow`.
Instruction: `pip install tensorflow`
"""
)
if TYPE_CHECKING:
from .. import external_typing as ext
from ..models.model import ModelSignatureDict
from ..external_typing import tensorflow as tf_ext
TFArgType = t.Union[t.List[t.Union[int, float]], ext.NpNDArray, tf_ext.Tensor]
MODULE_NAME = "bentoml.tensorflow"
API_VERSION = "v1"
@attr.define
class TensorflowOptions(ModelOptions):
    """Options for the TensorFlow model."""

    # Per-method keyword arguments bound to runner methods via
    # functools.partial in get_runnable.
    partial_kwargs: t.Dict[str, t.Any] = attr.field(factory=dict)
def get(tag_like: str | Tag) -> bentoml.Model:
    """Fetch a TensorFlow model from the local BentoML store by tag,
    validating that it was saved with this module."""
    model = bentoml.models.get(tag_like)
    owner = model.info.module
    if owner not in (MODULE_NAME, __name__):
        raise NotFound(
            f"Model {model.tag} was saved with module {model.info.module}, not loading with {MODULE_NAME}."
        )
    return model
def load_model(
    bento_model: str | Tag | bentoml.Model,
    device_name: str = "/device:CPU:0",
) -> "tf_ext.AutoTrackable" | "tf_ext.Module":
    """
    Load a tensorflow model from BentoML local modelstore with given name.

    Args:
        bento_model (``str`` ``|`` :obj:`~bentoml.Tag` ``|`` :obj:`~bentoml.Model`):
            Either the tag of the model to get from the store, or a BentoML `~bentoml.Model`
            instance to load the model from.
        device_name (``str`` | ``None``):
            The device id to load the model on. The device id format should be compatible with `tf.device <https://www.tensorflow.org/api_docs/python/tf/device>`_

    Returns:
        :obj:`SavedModel`: an instance of :obj:`SavedModel` format from BentoML modelstore.

    Examples:

    .. code-block:: python

        import bentoml

        # load a model back into memory
        model = bentoml.tensorflow.load_model("my_tensorflow_model")
    """  # noqa: LN001
    if not isinstance(bento_model, bentoml.Model):
        bento_model = get(bento_model)

    if bento_model.info.module not in (MODULE_NAME, __name__):
        raise NotFound(
            f"Model {bento_model.tag} was saved with module {bento_model.info.module}, not loading with {MODULE_NAME}."
        )

    if "GPU" in device_name:
        physical_devices = tf.config.list_physical_devices("GPU")
        if physical_devices:
            # Grow GPU memory on demand instead of pre-allocating it all.
            tf.config.experimental.set_memory_growth(physical_devices[0], True)
        else:
            # BUG FIX: previously physical_devices[0] raised IndexError when a
            # GPU device was requested but none is visible to this process.
            logger.warning(
                "Requested device %s but no GPU is visible to this process; "
                "skipping memory-growth configuration.",
                device_name,
            )

    with tf.device(device_name):
        tf_model: "tf_ext.AutoTrackable" = tf.saved_model.load(bento_model.path)  # type: ignore
        return hook_loaded_model(tf_model, MODULE_NAME)
def save_model(
    name: str,
    model: t.Union["tf_ext.KerasModel", "tf_ext.Module"],
    *,
    tf_signatures: "tf_ext.ConcreteFunction" | None = None,
    tf_save_options: "tf_ext.SaveOptions" | None = None,
    signatures: t.Dict[str, ModelSignature]
    | t.Dict[str, ModelSignatureDict]
    | None = None,
    labels: t.Dict[str, str] | None = None,
    custom_objects: t.Dict[str, t.Any] | None = None,
    metadata: t.Dict[str, t.Any] | None = None,
) -> bentoml.Model:
    """
    Save a TensorFlow model instance to the BentoML model store.

    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        model (``keras.Model`` | ``tf.Module``):
            Instance of model to be saved
        tf_signatures (:code:`Union[Callable[..., Any], dict]`, `optional`, default to :code:`None`):
            Refers to `Signatures explanation <https://www.tensorflow.org/api_docs/python/tf/saved_model/save>`_
            from Tensorflow documentation for more information.
        tf_save_options (`tf.saved_model.SaveOptions`, `optional`, default to :code:`None`):
            :obj:`tf.saved_model.SaveOptions` object that specifies options for saving.
        signatures (:code:`Dict[str, bool | BatchDimType | AnyType | tuple[AnyType]]`):
            Methods to expose for running inference on the target model. Signatures are
            used for creating Runner instances when serving model with bentoml.Service.
            Defaults to ``{"__call__": {"batchable": False}}`` when omitted.
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.

    Raises:
        ValueError: If :obj:`obj` is not trackable.

    Returns:
        :obj:`~bentoml.Model`: the saved model entry; its ``tag`` has the format
        `name:version` where `name` is the user-defined model's name, and
        `version` is generated by BentoML.

    Examples:

    .. code-block:: python

        import tensorflow as tf
        import numpy as np
        import bentoml

        class NativeModel(tf.Module):
            def __init__(self):
                super().__init__()
                self.weights = np.asfarray([[1.0], [1.0], [1.0], [1.0], [1.0]])
                self.dense = lambda inputs: tf.matmul(inputs, self.weights)

            @tf.function(
                input_signature=[tf.TensorSpec(shape=[1, 5], dtype=tf.float64, name="inputs")]
            )
            def __call__(self, inputs):
                return self.dense(inputs)

        # then save the given model to BentoML modelstore:
        model = NativeModel()
        bento_model = bentoml.tensorflow.save_model("native_toy", model)

    .. note::

       :code:`bentoml.tensorflow.save_model` API also support saving `RaggedTensor <https://www.tensorflow.org/guide/ragged_tensor>`_ model and Keras model. If you choose to save a Keras model
       with :code:`bentoml.tensorflow.save_model`, then the model will be saved under a :obj:`SavedModel` format instead of :obj:`.h5`.
    """  # noqa
    context = ModelContext(
        framework_name="tensorflow",
        framework_versions={"tensorflow": get_tf_version()},
    )

    # will add signatures inference from tf_signatures later
    if signatures is None:
        signatures = {
            "__call__": {
                "batchable": False,
            }
        }
        logger.info(
            f"Using the default model signature {signatures} for TensorFlow models."
        )

    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        api_version=API_VERSION,
        options=TensorflowOptions(),
        context=context,
        labels=labels,
        custom_objects=custom_objects,
        metadata=metadata,
        signatures=signatures,  # type: ignore
    ) as bento_model:
        # The SavedModel is written directly into the bento model's directory.
        tf.saved_model.save(
            model,
            bento_model.path,
            signatures=tf_signatures,
            options=tf_save_options,
        )
        return bento_model
def get_runnable(
    bento_model: bentoml.Model,
):
    """
    Private API: use :obj:`~bentoml.Model.to_runnable` instead.
    """
    # Per-method kwargs recorded at save time (TensorflowOptions.partial_kwargs).
    partial_kwargs: t.Dict[str, t.Any] = bento_model.info.options.partial_kwargs

    class TensorflowRunnable(Runnable):
        SUPPORT_NVIDIA_GPU = True
        SUPPORT_CPU_MULTI_THREADING = True

        def __init__(self):
            super().__init__()
            if len(tf.config.list_physical_devices("GPU")) > 0:
                # In Multi-GPU scenarios, the visible cuda devices will be set for each Runner worker
                # by the runner's Scheduling Strategy. So that the Runnable implementation only needs
                # to find the first GPU device visible to current process.
                self.device_name = "/device:GPU:0"
            else:
                self.device_name = "/device:CPU:0"

            self.model = load_model(bento_model, device_name=self.device_name)
            self.methods_cache: t.Dict[str, t.Callable[..., t.Any]] = {}
            # Keep the tf.device(...) context entered for the whole lifetime of
            # this runnable; it is closed in __del__.
            self.session_stack = contextlib.ExitStack()
            self.session_stack.enter_context(tf.device(self.device_name))

        def __del__(self):
            try:
                self.session_stack.close()
            except RuntimeError:
                # NOTE(review): RuntimeError from closing the device context is
                # deliberately swallowed here — presumably it can fire during
                # interpreter/worker teardown; confirm the intended condition.
                pass

    def _gen_run_method(runnable_self: TensorflowRunnable, method_name: str):
        # Resolve the model method once and pre-bind any saved partial kwargs.
        raw_method = getattr(runnable_self.model, method_name)
        method_partial_kwargs = partial_kwargs.get(method_name)
        if method_partial_kwargs:
            raw_method = functools.partial(raw_method, **method_partial_kwargs)

        def _mapping(item: "TFArgType") -> "tf_ext.TensorLike":
            # Coerce plain Python lists / numpy arrays into tensors.
            if not LazyType["tf_ext.TensorLike"]("tensorflow.Tensor").isinstance(item):
                return t.cast("tf_ext.TensorLike", tf.convert_to_tensor(item))
            else:
                return item

        def _run_method(
            _runnable_self: TensorflowRunnable,
            *args: "TFArgType",
            **kwargs: "TFArgType",
        ) -> "ext.NpNDArray":
            params = Params["TFArgType"](*args, **kwargs)
            params = params.map(_mapping)
            res = raw_method(*params.args, **params.kwargs)
            # assumes the model method returns an eager tensor — TODO confirm
            # for models whose exported functions return structures.
            return t.cast("ext.NpNDArray", res.numpy())

        return _run_method

    def add_run_method(method_name: str, options: ModelSignature):
        def run_method(
            runnable_self: TensorflowRunnable, *args: "TFArgType", **kwargs: "TFArgType"
        ) -> "ext.NpNDArray":
            # Lazily build and memoize the bound run method per signature name.
            _run_method = runnable_self.methods_cache.get(
                method_name
            )
            if not _run_method:
                _run_method = _gen_run_method(runnable_self, method_name)
                runnable_self.methods_cache[method_name] = _run_method
            return _run_method(runnable_self, *args, **kwargs)

        TensorflowRunnable.add_method(
            run_method,
            name=method_name,
            batchable=options.batchable,
            batch_dim=options.batch_dim,
            input_spec=options.input_spec,
            output_spec=options.output_spec,
        )

    for method_name, options in bento_model.info.signatures.items():
        add_run_method(method_name, options)

    return TensorflowRunnable
class TensorflowTensorContainer(
    DataContainer["tf_ext.EagerTensor", "tf_ext.EagerTensor"]
):
    """Payload/batch conversion for TensorFlow eager tensors.

    Tensors travel between processes as pickled blobs; batches are merged
    with ``tf.concat`` and split back with ``tf.split``.
    """

    @classmethod
    def batches_to_batch(
        cls, batches: t.Sequence["tf_ext.EagerTensor"], batch_dim: int = 0
    ) -> t.Tuple["tf_ext.EagerTensor", list[int]]:
        # Concatenate along the batch axis and record cumulative offsets so
        # that ``batch_to_batches`` can undo the merge later.
        merged: "tf_ext.EagerTensor" = tf.concat(batches, axis=batch_dim)
        # TODO: fix typing mismatch @larme
        offsets: list[int] = [0]
        offsets += list(
            itertools.accumulate(piece.shape[batch_dim] for piece in batches)
        )  # type: ignore
        return merged, offsets

    @classmethod
    def batch_to_batches(
        cls, batch: "tf_ext.EagerTensor", indices: t.Sequence[int], batch_dim: int = 0
    ) -> t.List["tf_ext.EagerTensor"]:
        # Convert cumulative offsets back into per-chunk sizes for tf.split.
        sizes = [after - before for before, after in zip(indices, indices[1:])]
        return tf.split(batch, sizes, axis=batch_dim)  # type: ignore

    @classmethod
    def to_payload(
        cls,
        batch: "tf_ext.EagerTensor",
        batch_dim: int = 0,
    ) -> Payload:
        return cls.create_payload(
            pickle.dumps(batch),
            batch_size=batch.shape[batch_dim],
        )

    @classmethod
    def from_payload(
        cls,
        payload: Payload,
    ) -> "tf_ext.EagerTensor":
        return pickle.loads(payload.data)

    @classmethod
    def batch_to_payloads(
        cls,
        batch: "tf_ext.EagerTensor",
        indices: t.Sequence[int],
        batch_dim: int = 0,
    ) -> t.List[Payload]:
        return [
            cls.to_payload(chunk)
            for chunk in cls.batch_to_batches(batch, indices, batch_dim)
        ]

    @classmethod
    def from_batch_payloads(
        cls,
        payloads: t.Sequence[Payload],
        batch_dim: int = 0,
    ) -> t.Tuple["tf_ext.EagerTensor", t.List[int]]:
        return cls.batches_to_batch(
            [cls.from_payload(p) for p in payloads], batch_dim
        )
# Register the container for TensorFlow eager tensors on both the input and
# output side.  ``_EagerTensorBase`` is the common (private) base class of
# eager tensors; LazyType defers importing TensorFlow until the type check
# is actually performed.
DataContainerRegistry.register_container(
    LazyType("tensorflow.python.framework.ops", "_EagerTensorBase"),
    LazyType("tensorflow.python.framework.ops", "_EagerTensorBase"),
    TensorflowTensorContainer,
)
|
from __future__ import annotations
import typing as t
import logging
from typing import TYPE_CHECKING
import bentoml
from bentoml import Tag
from ..utils.pkg import get_pkg_version
from ...exceptions import NotFound
from ...exceptions import BentoMLException
from ...exceptions import MissingDependencyException
from ..models.model import Model
from ..models.model import ModelContext
if TYPE_CHECKING:
from ..models.model import ModelSignaturesType
_PL_IMPORT_ERROR = f"""\
`torch` is required in order to use module `{__name__}`\n
Refers to https://pytorch.org/get-started/locally/ to setup PyTorch correctly.
Then run `pip install torch`
"""
try:
import torch
except ImportError: # pragma: no cover
raise MissingDependencyException(_PL_IMPORT_ERROR)
logger = logging.getLogger(__name__)
MODULE_NAME = "bentoml.torchscript"
MODEL_FILENAME = "saved_model.pt"
API_VERSION = "v1"
def get(tag_like: str | Tag) -> Model:
    """Fetch a saved torchscript model from the local model store by tag.

    Raises ``NotFound`` when the stored model was saved by a different
    framework module.
    """
    found = bentoml.models.get(tag_like)
    if found.info.module in (MODULE_NAME, __name__):
        return found
    raise NotFound(
        f"Model {found.tag} was saved with module {found.info.module}, not loading with {MODULE_NAME}."
    )
def load_model(
    bentoml_model: str | Tag | Model,
    device_id: t.Optional[str] = "cpu",
) -> torch.ScriptModule:
    """
    Load a TorchScript model from the local BentoML model store.

    Args:
        bentoml_model: Tag (or tag string) of a saved model, or a
            :obj:`~bentoml.Model` instance to load directly.
        device_id: Device to map the model onto, e.g. ``"cpu"`` or
            ``"cuda:0"``.  Passed straight to ``torch.jit.load``.

    Returns:
        :obj:`torch.ScriptModule`: the loaded TorchScript module.

    Example:

    .. code-block:: python

        import bentoml
        lit = bentoml.torchscript.load_model('lit_classifier:latest', device_id="cuda:0")
    """
    if isinstance(bentoml_model, (str, Tag)):
        bentoml_model = get(bentoml_model)
    saved_module = bentoml_model.info.module
    if saved_module not in (MODULE_NAME, __name__):
        raise BentoMLException(
            f"Model {bentoml_model} was saved with module {saved_module}, not loading with {MODULE_NAME}."
        )
    weight_file = bentoml_model.path_of(MODEL_FILENAME)
    loaded: torch.ScriptModule = torch.jit.load(weight_file, map_location=device_id)  # type: ignore[reportPrivateImportUsage]
    return loaded
def save_model(
    name: str,
    model: torch.ScriptModule,
    *,
    signatures: ModelSignaturesType | None = None,
    labels: t.Dict[str, str] | None = None,
    custom_objects: t.Dict[str, t.Any] | None = None,
    metadata: t.Dict[str, t.Any] | None = None,
    _include_pytorch_lightning_version: bool = False,
) -> bentoml.Model:
    """
    Save a TorchScript module to the BentoML model store.

    Args:
        name: Name for the saved model; must pass the Python identifier check.
        model: The ``torch.ScriptModule`` instance to save.
        signatures: Method names mapped to their inference signatures.
            Defaults to ``{"__call__": {"batchable": False}}``.
        labels: User-defined labels for managing models, e.g. ``team=nlp``.
        custom_objects: Extra python objects saved alongside the model,
            e.g. a tokenizer instance or a preprocessor function.
        metadata: Custom metadata for the model.
        _include_pytorch_lightning_version: Internal flag; when set, the
            installed ``pytorch_lightning`` version is recorded as well.

    Returns:
        :obj:`~bentoml.Model`: the created model entry, whose tag has the
        format ``name:version``.
    """
    # Reject anything that is not a (jit) ScriptModule early.
    if not isinstance(model, (torch.ScriptModule, torch.jit.ScriptModule)):
        raise TypeError(f"Given model ({model}) is not a torch.ScriptModule.")

    # Record the framework versions used to produce this model.
    framework_versions = {"torch": get_pkg_version("torch")}
    if _include_pytorch_lightning_version:
        framework_versions["pytorch_lightning"] = get_pkg_version("pytorch_lightning")
    context: ModelContext = ModelContext(
        framework_name="torchscript",
        framework_versions=framework_versions,
    )

    if signatures is None:
        signatures = {"__call__": {"batchable": False}}
        logger.info(
            f"Using the default model signature ({signatures}) for model {name}."
        )

    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        api_version=API_VERSION,
        labels=labels,
        signatures=signatures,
        custom_objects=custom_objects,
        options=None,
        context=context,
        metadata=metadata,
    ) as bento_model:
        # Serialize the scripted module into the model directory.
        torch.jit.save(model, bento_model.path_of(MODEL_FILENAME))  # type: ignore
    return bento_model
def get_runnable(bento_model: Model):
    """
    Private API: use :obj:`~bentoml.Model.to_runnable` instead.

    Attaches one run method per saved signature to the shared pytorch
    runnable, then pre-binds the model and loader into its constructor.
    """
    from .common.pytorch import partial_class
    from .common.pytorch import PytorchModelRunnable
    from .common.pytorch import make_pytorch_runnable_method

    for sig_name, sig in bento_model.info.signatures.items():
        PytorchModelRunnable.add_method(
            make_pytorch_runnable_method(sig_name),
            name=sig_name,
            batchable=sig.batchable,
            batch_dim=sig.batch_dim,
            input_spec=sig.input_spec,
            output_spec=sig.output_spec,
        )
    return partial_class(
        PytorchModelRunnable, bento_model=bento_model, loader=load_model
    )
|
from __future__ import annotations
import os
import typing as t
import logging
from typing import TYPE_CHECKING
import numpy as np
import bentoml
from bentoml import Tag
from bentoml.exceptions import NotFound
from bentoml.exceptions import InvalidArgument
from bentoml.exceptions import MissingDependencyException
from bentoml._internal.models.model import ModelContext
from ..utils.pkg import get_pkg_version
if TYPE_CHECKING:
from bentoml.types import ModelSignature
from bentoml.types import ModelSignatureDict
from .. import external_typing as ext
try:
import xgboost as xgb
except ImportError: # pragma: no cover
raise MissingDependencyException(
"""xgboost is required in order to use module `bentoml.xgboost`, install
xgboost with `pip install xgboost`. For more information, refers to
https://xgboost.readthedocs.io/en/latest/install.html
"""
)
MODULE_NAME = "bentoml.xgboost"
MODEL_FILENAME = "saved_model.ubj"
API_VERSION = "v1"
logger = logging.getLogger(__name__)
def get(tag_like: str | Tag) -> bentoml.Model:
    """
    Get the BentoML model with the given tag.

    Args:
        tag_like: Tag (or tag string) of the model to fetch from the store.

    Returns:
        :obj:`~bentoml.Model`: the matching model entry.

    Raises:
        NotFound: if the stored model was saved by a different framework
            module.

    Example:

    .. code-block:: python

        import bentoml
        # target model must be from the BentoML model store
        model = bentoml.xgboost.get("my_xgboost_model")
    """
    found = bentoml.models.get(tag_like)
    if found.info.module in (MODULE_NAME, __name__):
        return found
    raise NotFound(
        f"Model {found.tag} was saved with module {found.info.module}, not loading with {MODULE_NAME}."
    )
def load_model(bento_model: str | Tag | bentoml.Model) -> xgb.core.Booster:
    """
    Load the XGBoost booster stored under the given tag or model entry.

    Args:
        bento_model: Tag (or tag string) of the model to get from the store,
            or a :obj:`~bentoml.Model` instance to load from directly.

    Returns:
        :obj:`~xgboost.core.Booster`: the loaded booster.

    Example:

    .. code-block:: python

        import bentoml
        # target model must be from the BentoML model store
        booster = bentoml.xgboost.load_model("my_xgboost_model")
    """  # noqa: LN001
    if not isinstance(bento_model, bentoml.Model):
        bento_model = get(bento_model)
    assert isinstance(bento_model, bentoml.Model)
    saved_module = bento_model.info.module
    if saved_module not in (MODULE_NAME, __name__):
        raise NotFound(
            f"Model {bento_model.tag} was saved with module {saved_module}, not loading with {MODULE_NAME}."
        )
    return xgb.core.Booster(model_file=bento_model.path_of(MODEL_FILENAME))
def save_model(
    name: str,
    model: xgb.core.Booster,
    *,
    signatures: dict[str, ModelSignatureDict] | None = None,
    labels: dict[str, str] | None = None,
    custom_objects: dict[str, t.Any] | None = None,
    metadata: dict[str, t.Any] | None = None,
) -> bentoml.Model:
    """
    Save an XGBoost model instance to the BentoML model store.

    Args:
        name (``str``):
            The name to give to the model in the BentoML store. This must be a valid
            :obj:`~bentoml.Tag` name.
        model (:obj:`~xgboost.core.Booster`):
            The XGBoost model to be saved.
        signatures (``dict[str, ModelSignatureDict]``, optional):
            Signatures of predict methods to be used. If not provided, the signatures default to
            ``{"predict": {"batchable": False}}``. See :obj:`~bentoml.types.ModelSignature` for more
            details.
        labels (``dict[str, str]``, optional):
            A default set of management labels to be associated with the model. An example is
            ``{"training-set": "data-1"}``.
        custom_objects (``dict[str, Any]``, optional):
            Custom objects to be saved with the model. An example is
            ``{"my-normalizer": normalizer}``.
            Custom objects are currently serialized with cloudpickle, but this implementation is
            subject to change.
        metadata (``dict[str, Any]``, optional):
            Metadata to be associated with the model. An example is ``{"max_depth": 2}``.
            Metadata is intended for display in model management UI and therefore must be a default
            Python type, such as ``str`` or ``int``.

    Returns:
        :obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is the
        user-defined model's name, and a generated `version` by BentoML.

    Example:

    .. code-block:: python

        import xgboost as xgb
        import bentoml

        # read in data
        dtrain = xgb.DMatrix('demo/data/agaricus.txt.train')
        dtest = xgb.DMatrix('demo/data/agaricus.txt.test')
        # specify parameters via map
        param = dict(max_depth=2, eta=1, objective='binary:logistic')
        num_round = 2
        bst = xgb.train(param, dtrain, num_round)
        ...

        # `save` the booster to BentoML modelstore:
        bento_model = bentoml.xgboost.save_model("my_xgboost_model", bst)
    """  # noqa: LN001
    # NOTE: the previous docstring example passed ``booster_params=param``,
    # but ``save_model`` accepts no such keyword — the example would raise
    # TypeError if copied verbatim.
    if not isinstance(model, xgb.core.Booster):
        raise TypeError(f"Given model ({model}) is not a xgboost.core.Booster.")

    context: ModelContext = ModelContext(
        framework_name="xgboost",
        framework_versions={"xgboost": get_pkg_version("xgboost")},
    )

    if signatures is None:
        logger.info(
            'Using default model signature `{"predict": {"batchable": False}}` for XGBoost model'
        )
        signatures = {
            "predict": {"batchable": False},
        }

    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        api_version=API_VERSION,
        signatures=signatures,
        labels=labels,
        custom_objects=custom_objects,
        metadata=metadata,
        context=context,
    ) as bento_model:
        # Serialize the booster into the model directory (UBJSON format).
        model.save_model(bento_model.path_of(MODEL_FILENAME))  # type: ignore (incomplete XGBoost types)
        return bento_model
def get_runnable(bento_model: bentoml.Model) -> t.Type[bentoml.Runnable]:
    """
    Private API: use :obj:`~bentoml.Model.to_runnable` instead.
    """

    class XGBoostRunnable(bentoml.Runnable):
        SUPPORT_NVIDIA_GPU = True
        SUPPORT_CPU_MULTI_THREADING = True

        def __init__(self):
            super().__init__()
            self.model = load_model(bento_model)

            # Select the predictor backend from the environment: GPU when
            # CUDA devices are visible, otherwise CPU with OMP_NUM_THREADS
            # worker threads (minimum of one).
            if os.getenv("NVIDIA_VISIBLE_DEVICES"):
                self.model.set_param({"predictor": "gpu_predictor", "gpu_id": 0})  # type: ignore (incomplete XGBoost types)
            else:
                raw_nthreads = os.getenv("OMP_NUM_THREADS")
                nthreads = max(int(raw_nthreads), 1) if raw_nthreads else 1
                self.model.set_param({"predictor": "cpu_predictor", "nthread": nthreads})  # type: ignore (incomplete XGBoost types)

            # Resolve each signature name to a bound booster method up front.
            self.predict_fns: dict[str, t.Callable[..., t.Any]] = {}
            for method_name in bento_model.info.signatures:
                try:
                    self.predict_fns[method_name] = getattr(self.model, method_name)
                except AttributeError:
                    raise InvalidArgument(
                        f"No method with name {method_name} found for XGBoost model of type {self.model.__class__}"
                    )

    def _attach(method_name: str, options: ModelSignature) -> None:
        def _run(
            self: XGBoostRunnable,
            input_data: ext.NpNDArray
            | ext.PdDataFrame,  # TODO: add support for DMatrix
        ) -> ext.NpNDArray:
            dmatrix = xgb.DMatrix(input_data)
            res = self.predict_fns[method_name](dmatrix)
            return np.asarray(res)  # type: ignore (incomplete np types)

        XGBoostRunnable.add_method(
            _run,
            name=method_name,
            batchable=options.batchable,
            batch_dim=options.batch_dim,
            input_spec=options.input_spec,
            output_spec=options.output_spec,
        )

    for method_name, options in bento_model.info.signatures.items():
        _attach(method_name, options)
    return XGBoostRunnable
|
from __future__ import annotations
import typing as t
import logging
from typing import TYPE_CHECKING
import bentoml
from bentoml import Tag
from bentoml.models import Model
from bentoml.models import ModelContext
from bentoml.exceptions import NotFound
from bentoml.exceptions import BentoMLException
from bentoml.exceptions import MissingDependencyException
from ..types import LazyType
from ..utils.pkg import get_pkg_version
if TYPE_CHECKING:
from sklearn.base import BaseEstimator
from sklearn.pipeline import Pipeline
from bentoml.types import ModelSignature
from bentoml._internal.models.model import ModelSignaturesType
from .. import external_typing as ext
SklearnModel: t.TypeAlias = BaseEstimator | Pipeline
# Resolve joblib: prefer the standalone package, fall back to the (old)
# sklearn-vendored copy before giving up with an install hint.
try:
    import joblib
    from joblib import parallel_backend
except ImportError:  # pragma: no cover
    try:
        from sklearn.utils._joblib import joblib
        from sklearn.utils._joblib import parallel_backend
    except ImportError:
        # NOTE: the PyPI package is `scikit-learn`; `pip install sklearn` is a
        # deprecated dummy package and must not be recommended.
        raise MissingDependencyException(
            """sklearn is required in order to use the module `bentoml.sklearn`, install
        sklearn with `pip install scikit-learn`. For more information, refer to
        https://scikit-learn.org/stable/install.html
        """
        )
MODULE_NAME = "bentoml.sklearn"
MODEL_FILENAME = "saved_model.pkl"
API_VERSION = "v1"
logger = logging.getLogger(__name__)
def get(tag_like: str | Tag) -> Model:
    """Fetch a saved sklearn model from the local model store by tag.

    Raises ``NotFound`` when the stored model was saved by a different
    framework module.
    """
    found = bentoml.models.get(tag_like)
    if found.info.module in (MODULE_NAME, __name__):
        return found
    raise NotFound(
        f"Model {found.tag} was saved with module {found.info.module}, not loading with {MODULE_NAME}."
    )
def load_model(
    bento_model: str | Tag | Model,
) -> SklearnModel:
    """
    Load the scikit-learn model with the given tag from the local BentoML model store.

    Args:
        bento_model: Tag (or tag string) of the model to get from the store,
            or a :obj:`~bentoml.Model` instance to load from directly.

    Returns:
        ``BaseEstimator`` ``|`` ``Pipeline``: the deserialized scikit-learn
        estimator or pipeline.

    Example:

    .. code-block:: python

        import bentoml
        sklearn = bentoml.sklearn.load_model('my_model:latest')
    """  # noqa
    if not isinstance(bento_model, Model):
        bento_model = get(bento_model)
    saved_module = bento_model.info.module
    if saved_module not in (MODULE_NAME, __name__):
        raise BentoMLException(
            f"Model {bento_model.tag} was saved with module {saved_module}, not loading with {MODULE_NAME}."
        )
    return joblib.load(bento_model.path_of(MODEL_FILENAME))
def save_model(
    name: str,
    model: SklearnModel,
    *,
    signatures: ModelSignaturesType | None = None,
    labels: t.Dict[str, str] | None = None,
    custom_objects: t.Dict[str, t.Any] | None = None,
    metadata: t.Dict[str, t.Any] | None = None,
) -> bentoml.Model:
    """
    Save a scikit-learn estimator or pipeline to the BentoML model store.

    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        model (:code:`Union[BaseEstimator, Pipeline]`):
            Instance of model to be saved.
        signatures (:code:`Dict[str, ModelSignatureDict]`, `optional`):
            Methods to expose for running inference on the target model.
            Signatures are used for creating Runner instances when serving the
            model with bentoml.Service.  Defaults to
            ``{"predict": {"batchable": False}}``.
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.

    Returns:
        :obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is
        the user-defined model's name, and a generated `version`.

    Examples:

    .. code-block:: python

        import bentoml
        from sklearn.datasets import load_iris
        from sklearn.neighbors import KNeighborsClassifier

        model = KNeighborsClassifier()
        iris = load_iris()
        X = iris.data[:, :4]
        Y = iris.target
        model.fit(X, Y)

        bento_model = bentoml.sklearn.save_model('kneighbors', model)
    """  # noqa
    # Accept either a bare estimator or a full pipeline; both types are
    # checked lazily so sklearn is only imported when needed.
    is_estimator = LazyType("sklearn.base.BaseEstimator").isinstance(model)
    is_pipeline = LazyType("sklearn.pipeline.Pipeline").isinstance(model)
    if not (is_estimator or is_pipeline):
        raise TypeError(
            f"Given model ({model}) is not a sklearn.base.BaseEstimator or sklearn.pipeline.Pipeline."
        )

    context = ModelContext(
        framework_name="sklearn",
        framework_versions={"scikit-learn": get_pkg_version("scikit-learn")},
    )

    if signatures is None:
        signatures = {"predict": {"batchable": False}}
        logger.info(
            f"Using the default model signature for sklearn ({signatures}) for model {name}."
        )

    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        api_version=API_VERSION,
        labels=labels,
        custom_objects=custom_objects,
        metadata=metadata,
        context=context,
        signatures=signatures,
    ) as bento_model:
        # Serialize the estimator/pipeline into the model directory.
        joblib.dump(model, bento_model.path_of(MODEL_FILENAME))
        return bento_model
def get_runnable(bento_model: Model):
    """
    Private API: use :obj:`~bentoml.Model.to_runnable` instead.
    """

    class SklearnRunnable(bentoml.Runnable):
        SUPPORT_NVIDIA_GPU = False  # type: ignore
        SUPPORT_CPU_MULTI_THREADING = True  # type: ignore

        def __init__(self):
            super().__init__()
            self.model = load_model(bento_model)

    def _attach(method_name: str, options: ModelSignature) -> None:
        def _run(
            self: SklearnRunnable, input_data: ext.NpNDArray | ext.PdDataFrame
        ) -> ext.NpNDArray:
            # TODO: set inner_max_num_threads and n_jobs param here base on strategy env vars
            with parallel_backend(backend="loky"):
                return getattr(self.model, method_name)(input_data)

        SklearnRunnable.add_method(
            _run,
            name=method_name,
            batchable=options.batchable,
            batch_dim=options.batch_dim,
            input_spec=options.input_spec,
            output_spec=options.output_spec,
        )

    for method_name, options in bento_model.info.signatures.items():
        _attach(method_name, options)
    return SklearnRunnable
|
from __future__ import annotations
import pickle
import typing as t
import logging
import functools
import itertools
import contextlib
from typing import TYPE_CHECKING
from simple_di import inject
from simple_di import Provide
import bentoml
from ...types import LazyType
from ....exceptions import MissingDependencyException
from ...models.model import Model
from ...runner.utils import Params
from ...runner.container import Payload
from ...runner.container import DataContainer
from ...runner.container import DataContainerRegistry
from ...configuration.containers import DeploymentContainer
try:
import torch
except ImportError: # pragma: no cover
raise MissingDependencyException(
"""\
torch is required in order to use module `bentoml.pytorch`,
`bentoml.torchscript` and `bentoml.pytorch_lightning`.
Instruction: Refers to https://pytorch.org/get-started/locally/
to setup PyTorch correctly. """ # noqa
)
if TYPE_CHECKING:
import pytorch_lightning as pl
from ... import external_typing as ext
ModelType = t.Union[torch.nn.Module, torch.ScriptModule, pl.LightningModule]
logger = logging.getLogger(__name__)
def partial_class(cls: type, *args: t.Any, **kwargs: t.Any) -> type:
    """Return a subclass of ``cls`` whose ``__init__`` is pre-bound with
    ``args``/``kwargs``.

    Pre-bound positional arguments are applied first (immediately after
    ``self``); call-time keyword arguments override pre-bound ones, matching
    ``functools.partial`` semantics.
    """

    class NewClass(cls):
        def __init__(self, *inner_args: t.Any, **inner_kwargs: t.Any) -> None:
            # Bind ``self`` explicitly BEFORE the pre-bound positionals.  The
            # previous form, ``functools.partial(cls.__init__, *args,
            # **kwargs)(self, ...)``, inserted ``self`` after ``args``, so any
            # pre-bound positional argument was silently passed as ``self``.
            functools.partial(cls.__init__, self, *args, **kwargs)(
                *inner_args, **inner_kwargs
            )

    return NewClass
class PytorchModelRunnable(bentoml.Runnable):
    # Runnable shared by the pytorch / torchscript / pytorch_lightning
    # framework modules; the concrete run methods are attached afterwards via
    # ``make_pytorch_runnable_method``.
    SUPPORT_NVIDIA_GPU = True
    SUPPORT_CPU_MULTI_THREADING = True

    def __init__(
        self,
        bento_model: Model,
        loader: t.Callable[..., torch.nn.Module],
    ):
        super().__init__()
        # if torch.cuda.device_count():
        if torch.cuda.is_available():
            self.device_id = "cuda"
            # NOTE: process-wide side effect — every tensor constructed after
            # this point defaults to a CUDA float tensor.
            torch.set_default_tensor_type(
                "torch.cuda.FloatTensor"
            )  # initially torch.FloatTensor
        else:
            self.device_id = "cpu"
        # ``loader`` is the framework module's load_model; it is responsible
        # for placing the model on ``self.device_id``.
        self.model: ModelType = loader(bento_model, device_id=self.device_id)
        self.model.train(False)  # to turn off dropout and batchnorm
        # Keep a no-grad (or inference-mode, when available) context open for
        # the lifetime of the runnable so inference never tracks gradients.
        self._no_grad_context = contextlib.ExitStack()
        if hasattr(torch, "inference_mode"):  # pytorch>=1.9
            self._no_grad_context.enter_context(torch.inference_mode())
        else:
            self._no_grad_context.enter_context(torch.no_grad())

    def __del__(self):
        self._no_grad_context.close()
        # no need for now because our worker process will quit and return the gpu memory
        # if self.device_id == "cuda":
        #     torch.cuda.empty_cache()
def make_pytorch_runnable_method(method_name: str) -> t.Callable[..., torch.Tensor]:
    """Create a Runnable run method that forwards to ``model.<method_name>``."""

    def _run(
        self: PytorchModelRunnable,
        *args: ext.NpNDArray | torch.Tensor,
        **kwargs: ext.NpNDArray | torch.Tensor,
    ) -> torch.Tensor:
        params = Params[t.Union["ext.NpNDArray", torch.Tensor]](*args, **kwargs)

        def _mapping(item: t.Union["ext.NpNDArray", torch.Tensor]) -> torch.Tensor:
            # Convert numpy inputs into tensors; move tensors onto the
            # runnable's device.
            if LazyType["ext.NpNDArray"]("numpy.ndarray").isinstance(item):
                # NOTE(review): ``torch.Tensor(data, device=...)`` is the
                # legacy constructor; passing array data together with a
                # non-CPU device may raise — confirm the CUDA path.
                return torch.Tensor(item, device=self.device_id)
            else:
                return item.to(self.device_id)  # type: ignore # the overhead is trivial if it is already on the right device

        params = params.map(_mapping)
        # Dispatch to the named method on the loaded model.
        return getattr(self.model, method_name)(*params.args, **params.kwargs)

    return _run
class PyTorchTensorContainer(DataContainer[torch.Tensor, torch.Tensor]):
    """Payload/batch conversion for ``torch.Tensor``.

    When a plasma shared-memory store is configured (injected via
    ``DeploymentContainer.plasma_db``), payloads are exchanged through
    plasma object ids; otherwise tensors are converted to numpy arrays and
    pickled.
    """

    @classmethod
    def batches_to_batch(
        cls,
        batches: t.Sequence[torch.Tensor],
        batch_dim: int = 0,
    ) -> t.Tuple[torch.Tensor, list[int]]:
        # Concatenate along the batch axis; ``indices`` holds the cumulative
        # offsets needed to split the merged batch back apart.
        batch = torch.cat(tuple(batches), dim=batch_dim)
        indices = list(
            itertools.accumulate(subbatch.shape[batch_dim] for subbatch in batches)
        )
        indices = [0] + indices
        return batch, indices

    @classmethod
    def batch_to_batches(
        cls,
        batch: torch.Tensor,
        indices: t.Sequence[int],
        batch_dim: int = 0,
    ) -> t.List[torch.Tensor]:
        # Convert cumulative offsets into per-chunk sizes for torch.split.
        sizes = [indices[i] - indices[i - 1] for i in range(1, len(indices))]
        output: list[torch.Tensor] = torch.split(batch, sizes, dim=batch_dim)
        return output

    @classmethod
    @inject
    def to_payload(  # pylint: disable=arguments-differ
        cls,
        batch: torch.Tensor,
        batch_dim: int = 0,
        plasma_db: "ext.PlasmaClient" | None = Provide[DeploymentContainer.plasma_db],
    ) -> Payload:
        # NOTE(review): ``.numpy()`` requires a CPU tensor — a CUDA tensor
        # would raise here; confirm callers always pass CPU tensors.
        batch = batch.numpy()
        if plasma_db:
            # Shared-memory path: store the array in plasma and ship only the
            # 20-byte object id.
            return cls.create_payload(
                plasma_db.put(batch).binary(),
                batch_size=batch.shape[batch_dim],
                meta={"plasma": True},
            )
        return cls.create_payload(
            pickle.dumps(batch),
            batch_size=batch.shape[batch_dim],
            meta={"plasma": False},
        )

    @classmethod
    @inject
    def from_payload(  # pylint: disable=arguments-differ
        cls,
        payload: Payload,
        plasma_db: "ext.PlasmaClient" | None = Provide[DeploymentContainer.plasma_db],
    ) -> torch.Tensor:
        # The ``plasma`` meta flag (set in to_payload) selects the decode path.
        if payload.meta.get("plasma"):
            import pyarrow.plasma as plasma

            assert plasma_db
            ret = plasma_db.get(plasma.ObjectID(payload.data))
        else:
            ret = pickle.loads(payload.data)
        # NOTE(review): ``torch.Tensor(ndarray)`` always yields float32 —
        # integer dtypes are not preserved on this path; confirm intended.
        return torch.Tensor(ret)

    @classmethod
    @inject
    def batch_to_payloads(  # pylint: disable=arguments-differ
        cls,
        batch: torch.Tensor,
        indices: t.Sequence[int],
        batch_dim: int = 0,
        plasma_db: "ext.PlasmaClient" | None = Provide[DeploymentContainer.plasma_db],
    ) -> t.List[Payload]:
        # Split the merged batch and serialize each chunk independently.
        batches = cls.batch_to_batches(batch, indices, batch_dim)
        payloads = [cls.to_payload(i, batch_dim=batch_dim) for i in batches]
        return payloads

    @classmethod
    @inject
    def from_batch_payloads(  # pylint: disable=arguments-differ
        cls,
        payloads: t.Sequence[Payload],
        batch_dim: int = 0,
        plasma_db: "ext.PlasmaClient" | None = Provide[DeploymentContainer.plasma_db],
    ) -> t.Tuple[torch.Tensor, list[int]]:
        # ``plasma_db`` is forwarded positionally into from_payload's
        # ``plasma_db`` parameter.
        batches = [cls.from_payload(payload, plasma_db) for payload in payloads]
        return cls.batches_to_batch(batches, batch_dim)
# Register the container for torch.Tensor on both the input and output side.
# LazyType defers importing torch until the type check is actually performed.
DataContainerRegistry.register_container(
    LazyType("torch", "Tensor"),
    LazyType("torch", "Tensor"),
    PyTorchTensorContainer,
)
|
# pylint: disable=redefined-outer-name # pragma: no cover
from __future__ import annotations
import os
import re
import sys
import json
import time
import socket
import typing as t
import urllib
import logging
import contextlib
import subprocess
import urllib.error
import urllib.request
from typing import TYPE_CHECKING
from contextlib import contextmanager
from .._internal.tag import Tag
from .._internal.utils import reserve_free_port
from .._internal.utils import cached_contextmanager
from .._internal.utils.platform import kill_subprocess_tree
logger = logging.getLogger("bentoml.tests")
if TYPE_CHECKING:
from aiohttp.typedefs import LooseHeaders
from starlette.datastructures import Headers
from starlette.datastructures import FormData
async def parse_multipart_form(headers: "Headers", body: bytes) -> "FormData":
"""
parse starlette forms from headers and body
"""
from starlette.formparsers import MultiPartParser
async def async_bytesio(bytes_: bytes) -> t.AsyncGenerator[bytes, None]:
yield bytes_
yield b""
return
parser = MultiPartParser(headers=headers, stream=async_bytesio(body))
return await parser.parse()
async def async_request(
    method: str,
    url: str,
    headers: t.Optional["LooseHeaders"] = None,
    data: t.Any = None,
    timeout: t.Optional[int] = None,
) -> t.Tuple[int, "Headers", bytes]:
    """
    A HTTP client with async API.
    """
    import aiohttp
    from starlette.datastructures import Headers

    async with aiohttp.ClientSession() as session:
        async with session.request(
            method, url, data=data, headers=headers, timeout=timeout
        ) as resp:
            body = await resp.read()
            header_map = t.cast(t.Mapping[str, str], resp.headers)
            return resp.status, Headers(header_map), body
def _wait_until_api_server_ready(
    host_url: str,
    timeout: float,
    check_interval: float = 1,
    popen: t.Optional["subprocess.Popen[t.Any]"] = None,
) -> bool:
    """
    Poll ``http://{host_url}/readyz`` until it answers 200, the server
    process exits, or ``timeout`` seconds elapse.

    Args:
        host_url: ``host:port`` of the API server (no scheme).
        timeout: maximum number of seconds to wait.
        check_interval: seconds to sleep between polls.
        popen: optional server process; waiting stops early if it exits.

    Returns:
        ``True`` when ``/readyz`` returned 200; ``False`` on timeout or when
        ``popen`` terminated first.
    """
    start_time = time.time()
    # Bypass any configured HTTP proxy — the server under test is local.
    proxy_handler = urllib.request.ProxyHandler({})
    opener = urllib.request.build_opener(proxy_handler)
    logger.info("Waiting for host %s to be ready..", host_url)
    while time.time() - start_time < timeout:
        try:
            if popen and popen.poll() is not None:
                # Server process died; no point in waiting any longer.
                return False
            elif opener.open(f"http://{host_url}/readyz", timeout=1).status == 200:
                return True
            else:
                time.sleep(check_interval)
        except (
            ConnectionError,
            urllib.error.URLError,
            socket.timeout,
        ) as e:
            # Server not accepting connections yet: log once (lazy %-style so
            # formatting is skipped when the level is disabled) and retry.
            # (Previously the message lacked a space after "[{e}]" and the
            # same exception was logged twice, once at INFO and once at ERROR.)
            logger.info("[%s] retrying to connect to the host %s...", e, host_url)
            time.sleep(check_interval)
    else:
        # while/else: reached only when the loop ran out of time without
        # returning.
        logger.info(
            "Timed out waiting %s seconds for Server %s to be ready", timeout, host_url
        )
        return False
@cached_contextmanager("{project_path}")
def bentoml_build(project_path: str) -> t.Generator["Tag", None, None]:
"""
Build a BentoML project.
"""
logger.info(f"Building bento: {project_path}")
output = subprocess.check_output(
["bentoml", "build", project_path], stderr=subprocess.STDOUT
)
match = re.search(
r'Bento\(tag="([A-Za-z0-9\-_\.]+:[a-z0-9]+)"\)',
output.decode(),
)
assert match, f"Build failed. The details:\n {output.decode()}"
tag = Tag.from_taglike(match[1])
yield tag
logger.info(f"Deleting bento: {tag}")
subprocess.call(["bentoml", "delete", "-y", str(tag)])
@cached_contextmanager("{bento_tag}, {image_tag}")
def bentoml_containerize(
bento_tag: t.Union[str, "Tag"],
image_tag: t.Optional[str] = None,
) -> t.Generator[str, None, None]:
"""
Build the docker image from a saved bento, yield the docker image tag
"""
bento_tag = Tag.from_taglike(bento_tag)
if image_tag is None:
image_tag = bento_tag.name
logger.info(f"Building bento server docker image: {bento_tag}")
subprocess.check_call(["bentoml", "containerize", str(bento_tag), "-t", image_tag])
yield image_tag
logger.info(f"Removing bento server docker image: {image_tag}")
subprocess.call(["docker", "rmi", image_tag])
@cached_contextmanager("{image_tag}, {config_file}")
def run_bento_server_in_docker(
image_tag: str,
config_file: t.Optional[str] = None,
timeout: float = 40,
):
"""
Launch a bentoml service container from a docker image, yield the host URL
"""
container_name = f"bentoml-test-{image_tag}-{hash(config_file)}"
with reserve_free_port() as port:
pass
cmd = [
"docker",
"run",
"--rm",
"--name",
container_name,
"--publish",
f"{port}:3000",
"--env",
"BENTOML_LOG_STDOUT=true",
"--env",
"BENTOML_LOG_STDERR=true",
]
if config_file is not None:
cmd.extend(["--env", "BENTOML_CONFIG=/home/bentoml/bentoml_config.yml"])
cmd.extend(
["-v", f"{os.path.abspath(config_file)}:/home/bentoml/bentoml_config.yml"]
)
cmd.append(image_tag)
logger.info(f"Running API server docker image: {cmd}")
with subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
encoding="utf-8",
) as proc:
try:
host_url = f"127.0.0.1:{port}"
if _wait_until_api_server_ready(host_url, timeout, popen=proc):
yield host_url
else:
raise RuntimeError(
f"API server {host_url} failed to start within {timeout} seconds"
)
finally:
subprocess.call(["docker", "stop", container_name])
time.sleep(1)
@contextmanager
def run_bento_server(
    bento: str,
    workdir: t.Optional[str] = None,
    config_file: t.Optional[str] = None,
    dev_server: bool = False,
    timeout: float = 90,
):
    """
    Launch a bentoml service directly by the bentoml CLI, yields the host URL.
    """
    effective_workdir = "./" if workdir is None else workdir
    env = os.environ.copy()
    if config_file is not None:
        env["BENTOML_CONFIG"] = os.path.abspath(config_file)
    # Reserve a port number; the socket is released when the with-block exits
    # so the server can bind it.
    with reserve_free_port() as port:
        cmd = [sys.executable, "-m", "bentoml", "serve"]
        if not dev_server:
            cmd.append("--production")
        if port:
            cmd.extend(["--port", f"{port}"])
        cmd.append(bento)
        cmd.extend(["--working-dir", effective_workdir])
    logger.info(f"Running command: `{cmd}`")
    proc = subprocess.Popen(
        cmd,
        stderr=subprocess.STDOUT,
        env=env,
        encoding="utf-8",
    )
    try:
        host_url = f"127.0.0.1:{port}"
        _wait_until_api_server_ready(host_url, timeout=timeout)
        yield host_url
    finally:
        kill_subprocess_tree(proc)
        proc.communicate()
@contextmanager
def run_bento_server_distributed(
    bento_tag: t.Union[str, "Tag"],
    config_file: t.Optional[str] = None,
    timeout: float = 90,
):
    """
    Launch a bentoml service directly by the bentoml CLI, yields the host URL.

    Each runner and the API server are started as separate local processes,
    mimicking a distributed deployment.
    """
    my_env = os.environ.copy()
    if config_file is not None:
        my_env["BENTOML_CONFIG"] = os.path.abspath(config_file)
    import yaml
    import bentoml
    # read the bento's metadata to discover its runners
    bento_service = bentoml.bentos.get(bento_tag)
    path = bento_service.path
    with open(os.path.join(path, "bento.yaml"), "r", encoding="utf-8") as f:
        bentofile = yaml.safe_load(f)
    runner_map = {}
    processes: t.List[subprocess.Popen[str]] = []
    for runner in bentofile["runners"]:
        # reserve then immediately release a free port; the runner process
        # binds it via --bind below
        with reserve_free_port() as port:
            bind = f"tcp://127.0.0.1:{port}"
        runner_map[runner["name"]] = bind
        cmd = [
            sys.executable,
            "-m",
            "bentoml._internal.server.cli.runner",
            str(bento_tag),
            "--bind",
            bind,
            "--working-dir",
            path,
            "--runner-name",
            runner["name"],
        ]
        logger.info(f"Running command: `{cmd}`")
        processes.append(
            subprocess.Popen(
                cmd,
                encoding="utf-8",
                stderr=subprocess.STDOUT,
                env=my_env,
            )
        )
    with reserve_free_port() as server_port:
        bind = f"tcp://127.0.0.1:{server_port}"
        # tell the API server where each runner listens
        my_env["BENTOML_RUNNER_MAP"] = json.dumps(runner_map)
        cmd = [
            sys.executable,
            "-m",
            "bentoml._internal.server.cli.api_server",
            str(bento_tag),
            "--bind",
            bind,
            "--working-dir",
            path,
        ]
        logger.info(f"Running command: `{cmd}`")
        processes.append(
            subprocess.Popen(
                cmd,
                stderr=subprocess.STDOUT,
                encoding="utf-8",
                env=my_env,
            )
        )
    try:
        host_url = f"127.0.0.1:{server_port}"
        _wait_until_api_server_ready(host_url, timeout=timeout)
        yield host_url
    finally:
        # kill every runner/server process tree first, then reap them
        for p in processes:
            kill_subprocess_tree(p)
        for p in processes:
            p.communicate()
@cached_contextmanager("{bento}, {project_path}, {config_file}, {deployment_mode}")
def host_bento(
bento: t.Union[str, Tag, None] = None,
project_path: str = ".",
config_file: str | None = None,
deployment_mode: str = "standalone",
clean_context: contextlib.ExitStack | None = None,
) -> t.Generator[str, None, None]:
"""
Host a bentoml service, yields the host URL.
Args:
bento: a beoto tag or `module_path:service`
project_path: the path to the project directory
config_file: the path to the config file
deployment_mode: the deployment mode, one of `standalone`, `docker` or `distributed`
clean_context: a contextlib.ExitStack to clean up the intermediate files,
like docker image and bentos. If None, it will be created. Used for reusing
those files in the same test session.
"""
import bentoml
if clean_context is None:
clean_context = contextlib.ExitStack()
clean_on_exit = True
else:
clean_on_exit = False
try:
logger.info(
f"starting bento server {bento} at {project_path} "
f"with config file {config_file} "
f"in {deployment_mode} mode..."
)
if bento is None or not bentoml.list(bento):
bento_tag = clean_context.enter_context(bentoml_build(project_path))
else:
bento_tag = bentoml.get(bento).tag
if deployment_mode == "docker":
image_tag = clean_context.enter_context(bentoml_containerize(bento_tag))
with run_bento_server_in_docker(
image_tag,
config_file,
) as host:
yield host
elif deployment_mode == "standalone":
with run_bento_server(
str(bento_tag),
config_file=config_file,
workdir=project_path,
) as host:
yield host
elif deployment_mode == "distributed":
with run_bento_server_distributed(
str(bento_tag),
config_file=config_file,
) as host:
yield host
else:
raise ValueError(f"Unknown deployment mode: {deployment_mode}")
finally:
logger.info("shutting down bento server...")
if clean_on_exit:
logger.info("Cleaning up...")
clean_context.close()
|
import typing as t
import logging
from typing import TYPE_CHECKING
logger = logging.getLogger("bentoml.tests")
if TYPE_CHECKING:
from aiohttp.typedefs import LooseHeaders
from starlette.datastructures import Headers
from starlette.datastructures import FormData
async def parse_multipart_form(headers: "Headers", body: bytes) -> "FormData":
    """
    Parse a multipart form with starlette, given the request headers and the
    raw body bytes.
    """
    from starlette.formparsers import MultiPartParser

    async def body_stream(raw: bytes) -> t.AsyncGenerator[bytes, None]:
        # starlette's parser expects an async byte stream terminated by b""
        yield raw
        yield b""

    parser = MultiPartParser(headers=headers, stream=body_stream(body))
    return await parser.parse()
async def async_request(
    method: str,
    url: str,
    headers: t.Union[None, t.Tuple[t.Tuple[str, str], ...], "LooseHeaders"] = None,
    data: t.Any = None,
    timeout: t.Optional[int] = None,
    assert_status: t.Union[int, t.Callable[[int], bool], None] = None,
    assert_data: t.Union[bytes, t.Callable[[bytes], bool], None] = None,
    assert_headers: t.Optional[t.Callable[[t.Any], bool]] = None,
) -> t.Tuple[int, "Headers", bytes]:
    """
    Minimal aiohttp request helper for tests.

    Each ``assert_*`` argument may be either a literal expected value or a
    predicate; when given, the response is checked against it.
    Returns ``(status, headers, body)``.
    """
    import aiohttp
    from starlette.datastructures import Headers

    async with aiohttp.ClientSession() as sess:
        async with sess.request(
            method, url, data=data, headers=headers, timeout=timeout
        ) as r:
            r_body = await r.read()
            if assert_status is not None:
                ok = assert_status(r.status) if callable(assert_status) else r.status == assert_status
                assert ok, f"{r.status} {repr(r_body)}"
            if assert_data is not None:
                ok = assert_data(r_body) if callable(assert_data) else r_body == assert_data
                assert ok, r_body
            if assert_headers is not None:
                assert assert_headers(r.headers), repr(r.headers)
            raw_headers = t.cast(t.Mapping[str, str], r.headers)
            return r.status, Headers(raw_headers), r_body
|
import torch
import colossalai
from colossalai.core import global_context as gpc
from colossalai.trainer import Trainer, hooks
from colossalai.utils import MultiTimer
from colossalai.logging import disable_existing_loggers, get_dist_logger
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import nn
from baobei.autoregressive import AutoregressiveWrapper
from baobei.baobei import baobei_model
def Trainer():
    """
    Train the baobei model with ColossalAI.

    Hyper-parameters (epochs, lr, gradient_accumulation, clip_grad_norm,
    seed, ...) are read from ``gpc.config``. The historical public name
    ``Trainer`` is kept so existing callers are unaffected.
    """
    # BUG FIX: this function shadows the module-level import of the
    # `colossalai.trainer.Trainer` class, so the bare call `Trainer(engine=...)`
    # below used to recurse into this very function and fail. Re-import the
    # class under a private alias and construct that instead.
    from colossalai.trainer import Trainer as _ColossalaiTrainer

    assert torch.cuda.is_available()
    assert hasattr(gpc.config, "epochs"), "Please provide epochs in your configuration"
    assert hasattr(gpc.config, "lr"), "Please provide LEARNING_RATE in your configuration"
    assert hasattr(gpc.config, "gradient_accumulation"), "Please provide gradient_accumulation in your configuration"
    assert hasattr(gpc.config, "clip_grad_norm"), "Please provide clip_grad_norm in your configuration"
    assert hasattr(gpc.config, "seed"), "Please provide seed in your configuration"
    disable_existing_loggers()
    parser = colossalai.get_default_parser()
    parser.add_argument(
        '--use_trainer',
        action='store_true',
        help='whether to use trainer'
    )
    # parsed for CLI compatibility; the flag itself is currently unused
    args = parser.parse_args()
    colossalai.launch_from_torch(
        config = '',
        seed = gpc.config.seed
    )
    # Colossal logger
    logger = get_dist_logger()
    logger.info("Initialized environment", ranks=[0])
    model = baobei_model()
    model = AutoregressiveWrapper(model)
    # build dataloaders
    train_dataloader, eval_dataloader = build_dataloaders()
    # Loss Function: shift inputs/labels by one token and apply cross entropy
    class loss_function(nn.Module):
        def __init__(self):
            super().__init__()
        def forward(self, x_inp, x_labels):
            x_inp, x_labels = x_inp[:, :-1], x_labels[:, 1:]
            loss = F.cross_entropy(rearrange(x_inp, "b c n -> b n c"), x_labels)
            return loss
    loss_fn = loss_function()
    # optimizer function
    # NOTE(review): the assertions above check lower-case config keys, but the
    # weight decay is read as upper-case WEIGHT_DECAY — confirm the config key.
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr = gpc.config.lr,
        weight_decay=gpc.config.WEIGHT_DECAY
    )
    # initialize model, optimizer, criterion, and data loaders
    engine, train_dataloader, _, _ = colossalai.initialize(
        model,
        optimizer,
        loss_fn,
        train_dataloader = train_dataloader
    )
    def batch_data_process_func(batch_data):
        # maps an HF-style batch dict to (data, labels); currently unused
        data = batch_data["input_ids"]
        labels = batch_data["labels"]
        return data, labels
    # Time session with ColossalAI
    timer = MultiTimer()
    # trainer — use the aliased ColossalAI class, NOT this function
    trainer = _ColossalaiTrainer(
        engine = engine,
        timer = timer,
        logger = logger
    )
    hook_list = [
        hooks.LogMetricByStepHook(),
        hooks.LossHook(),
        hooks.LogMetricByEpochHook(logger)
    ]
    trainer.fit(
        train_dataloader = train_dataloader,
        epochs = gpc.config.epochs,
        hooks = hook_list,
        display_progress = True
    )
|
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import nn
# helper function
def exists(val):
    """Return True when *val* is not None (falsy values still count as existing)."""
    if val is None:
        return False
    return True
def eval_decorator(fn):
    """
    Decorator that runs ``fn`` with the model in eval mode, then restores the
    model's previous training/eval state.

    Assumes the wrapped callable takes the model as its first argument.
    """
    from functools import wraps

    # IMPROVEMENT: preserve the wrapped function's __name__/__doc__ so
    # introspection and error messages stay meaningful.
    @wraps(fn)
    def inner(model, *args, **kwargs):
        was_training = model.training
        model.eval()
        out = fn(model, *args, **kwargs)
        model.train(was_training)
        return out
    return inner
# top k filtering
def top_k(logits, thres=0.9):
    """
    Keep only the top-k logits (k = a (1 - thres) fraction of the vocab) and
    set everything else to -inf, so a downstream softmax samples only from
    the k most likely tokens.

    Operates on the last dimension, so (vocab,), (batch, vocab) and
    higher-rank inputs are all supported.
    """
    # ROBUSTNESS: clamp k to at least 1 — for tiny vocabularies or aggressive
    # thresholds int(...) can be 0, which would leave every logit at -inf and
    # make the subsequent softmax produce NaNs.
    k = max(1, int((1 - thres) * logits.shape[-1]))
    val, ind = torch.topk(logits, k)  # topk works on the last dim by default
    probs = torch.full_like(logits, float("-inf"))
    # BUG FIX: scatter along the last dim; the original hard-coded dim=1,
    # which only matched topk's indices for 2-D inputs.
    probs.scatter_(-1, ind, val)
    return probs
class AutoregressiveWrapper(nn.Module):
    """
    Wraps a token-level network for autoregressive use: adds stochastic
    `generate` sampling on top of the wrapped net's raw logits.
    """
    def __init__(self, net, max_seq_len=2048, pad_value=0):
        # net: module mapping token ids (b, n) -> logits (b, n, vocab)
        # pad_value: id used to blank out tokens after an EOS during generation
        super().__init__()
        self.max_seq_len = max_seq_len
        self.pad_value = pad_value
        self.net = net
    @torch.no_grad()
    @eval_decorator
    def generate(
        self,
        start_tokens,
        seq_len,
        eos_token=None,
        temperature=1.0,
        filter_thres=0.9,
        **kwargs
    ):
        # Sample up to `seq_len` new tokens one at a time, feeding the growing
        # sequence back into the net; returns only the generated suffix.
        b, t, device = *start_tokens.shape, start_tokens.device
        out = start_tokens
        for _ in range(seq_len):
            # only the logits of the last position matter for the next token
            logits = self.net(out, **kwargs)[:, -1, :]
            # restrict sampling to the top-(1 - filter_thres) fraction of logits
            filtered_logits = top_k(logits, thres=filter_thres)
            probs = F.softmax(filtered_logits / temperature, dim=-1)
            sample = torch.multinomial(probs, 1)
            out = torch.cat((out, sample), dim=-1)
            if exists(eos_token):
                is_eos_token = out == eos_token
                # stop once every sequence in the batch contains an EOS
                if is_eos_token.any(dim=-1).all():
                    # mask out everything after the eos tokens
                    shifted_is_eos_tokens = F.pad(is_eos_token, (1, -1))
                    mask = shifted_is_eos_tokens.float().cumsum(dim=-1) >= 1
                    out = out.masked_fill(mask, self.pad_value)
                    break
        # drop the prompt, keep only newly generated tokens
        out = out[:, t:]
        return out
    def forward(self, x, **kwargs):
        # pass-through to the wrapped net; returns raw logits
        logits = self.net(x, **kwargs)
        return logits
|
import torch
import torch.nn.functional as F
import colossalai
from einops import rearrange
from torch import einsum, nn
from math import log2, floor
import bitsandbytes as bnb
from colossalai.core import global_context as gpc
def exists(val):
    """Return True when *val* is not None."""
    return False if val is None else True
# normalization
class RMSNorm(nn.Module):
    """Root-mean-square layer norm with a learned per-dimension gain ``g``."""

    def __init__(self, dim, eps = 1e-8):
        super().__init__()
        self.scale = dim ** -0.5
        self.eps = eps
        self.g = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        # L2 norm over the feature dim, rescaled by 1/sqrt(dim) ≈ RMS
        rms = torch.norm(x, dim = -1, keepdim = True) * self.scale
        return x / rms.clamp(min = self.eps) * self.g
# AliBi
class AlibiPositionalBias(nn.Module):
    """
    ALiBi attention bias (https://arxiv.org/abs/2108.12409): a per-head linear
    distance penalty added to attention logits instead of positional embeddings.
    """
    def __init__(self, heads):
        super().__init__()
        self.heads = heads
        slopes = torch.Tensor(self._get_slopes(heads))
        slopes = rearrange(slopes, 'h -> h 1 1')
        # non-persistent buffers: follow the module's device/dtype but are
        # excluded from the state dict
        self.register_buffer('slopes', slopes, persistent = False)
        self.register_buffer('bias', None, persistent = False)
    def get_bias(self, i, j, device):
        # pairwise -|j - i| distance matrix, shaped (1, i, j)
        i_arange = torch.arange(i, device = device)
        j_arange = torch.arange(j, device = device)
        bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
        return bias
    @staticmethod
    def _get_slopes(heads):
        # geometric slope schedule from the ALiBi paper; exact for powers of
        # two, interpolated from the nearest power of two otherwise
        def get_slopes_power_of_2(n):
            start = (2**(-2**-(log2(n)-3)))
            ratio = start
            return [start*ratio**i for i in range(n)]
        if log2(heads).is_integer():
            return get_slopes_power_of_2(heads)
        closest_power_of_2 = 2 ** floor(log2(heads))
        return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
    def forward(self, qk_sim):
        # qk_sim: attention logits shaped (..., h, i, j)
        h, i, j, device = *qk_sim.shape[-3:], qk_sim.device
        # reuse the cached bias when it covers the requested extent
        if exists(self.bias) and self.bias.shape[-1] >= j:
            return self.bias[..., :i, :j]
        bias = self.get_bias(i, j, device)
        bias = bias * self.slopes
        # pad with zero-bias heads if h exceeds the number of sloped heads
        num_heads_unalibied = h - bias.shape[0]
        bias = F.pad(bias, (0, 0, 0, 0, 0, num_heads_unalibied))
        # cache for subsequent calls with equal-or-shorter sequences
        self.register_buffer('bias', bias, persistent = False)
        return bias
# residual
class Residual(nn.Module):
    """Skip connection: the module computes ``fn(x) + x``."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        out = self.fn(x)
        return out + x
# classic Noam Shazeer paper, use GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class GEGLU(nn.Module):
    """Split the last dim in half and gate the first half with GELU of the second."""

    def forward(self, x):
        value, gate = x.chunk(2, dim = -1)
        return F.gelu(gate) * value
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
    """
    PaLM-style block: a single fused projection produces attention queries,
    a shared (single-head) key/value, and the feedforward input; attention
    and feedforward outputs are summed in parallel.
    """
    def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
        super().__init__()
        self.norm = RMSNorm(dim)
        attn_inner_dim = dim_head * heads
        ff_inner_dim = dim * ff_mult
        # split sizes for the fused projection: (q, shared kv, ff-in for GEGLU)
        self.fused_dims = (attn_inner_dim, dim_head, (ff_inner_dim * 2))
        self.heads = heads
        self.scale = dim_head**-0.5
        self.alibi_pos_biases = AlibiPositionalBias(heads = self.heads)
        self.fused_attn_ff_proj = colossalai.nn.Linear(dim, sum(self.fused_dims), bias=False)
        self.attn_out = colossalai.nn.Linear(attn_inner_dim, dim, bias=False)
        self.ff_out = nn.Sequential(
            GEGLU(),
            colossalai.nn.Linear(ff_inner_dim, dim, bias=False)
        )
        # for caching causal mask
        self.register_buffer("mask", None, persistent=False)
    def get_mask(self, n, device):
        # return (and cache) an (n, n) upper-triangular boolean causal mask
        if self.mask is not None and self.mask.shape[-1] >= n:
            return self.mask[:n, :n]
        mask = torch.triu(torch.ones((n, n), device=device, dtype=torch.bool), 1)
        self.register_buffer("mask", mask, persistent=False)
        return mask
    def forward(self, x):
        """
        einstein notation
        b - batch
        h - heads
        n, i, j - sequence length (base sequence length, source, target)
        d - feature dimension
        """
        n, device, h = x.shape[1], x.device, self.heads
        # pre layernorm
        x = self.norm(x)
        # attention queries, keys or values (shared key / values is a personal discovery of mine), and feedforward inner
        q, kv, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
        # split heads
        # they use multi-query single-key-value attention, yet another Noam Shazeer paper
        # they found no performance loss past a certain scale, and more efficient decoding obviously
        # https://arxiv.org/abs/1911.02150
        q = rearrange(q, "b n (h d) -> b h n d", h = h)
        # scale
        q = q * self.scale
        # similarity — note kv has no head axis (shared across heads)
        sim = einsum("b h i d, b j d -> b h i j", q, kv)
        # add the alibi bias
        sim = sim + self.alibi_pos_biases(sim)
        # causal mask
        causal_mask = self.get_mask(n, device)
        sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
        # attention
        attn = sim.softmax(dim=-1)
        out = einsum("b h i j, b j d -> b h i d", attn, kv)
        # merge heads
        out = rearrange(out, "b h n d -> b n (h d)")
        # parallel residual branches: attention output + feedforward output
        merge_heads = self.attn_out(out) + self.ff_out(ff)
        return merge_heads
def baobei(*, dim, num_tokens, depth, dim_head=64, heads=8, ff_mult=4):
    """
    Build the full decoder: token embedding, ``depth`` residual parallel
    transformer blocks, a final RMSNorm, and a projection to vocab logits.

    Keyword-only args: model width ``dim``, vocab size ``num_tokens``, layer
    count ``depth``, per-head width ``dim_head``, head count ``heads``, and
    feedforward expansion factor ``ff_mult``.
    """
    net = nn.Sequential(
        bnb.nn.StableEmbedding(num_tokens, dim),
        *[Residual(ParallelTransformerBlock(dim, dim_head, heads, ff_mult)) for _ in range(depth)],
        RMSNorm(dim),
        colossalai.nn.Linear(dim, num_tokens, bias=False)
    )
    # they used embedding weight tied projection out to logits, not common, but works
    net[-1].weight = net[0].weight
    nn.init.normal_(net[0].weight, std=0.02)
    return net
def baobei_model():
    """Build the baobei network from hyper-parameters stored in ``gpc.config``."""
    cfg = gpc.config
    return baobei(
        num_tokens = cfg.num_tokens,
        dim = cfg.dim,
        depth = cfg.depth,
        dim_head = cfg.dim_head,
        heads = cfg.heads,
    )
# For testing functionality of the model
if __name__ == "__main__":
    # quick GPU smoke test: build a model and run one forward pass
    palm = baobei(
        num_tokens = 100000,
        dim = 512,
        depth = 4,
        heads = 2,
        dim_head = 256,
    ).cuda()
    tokens = torch.randint(0, 20000, (1, 2048)).cuda()
    logits = palm(tokens)  # shape (1, 2048, 100000): one logit row per token
    n_params_torch = sum(p.numel() for p in palm.parameters() if p.requires_grad)
    print(f"Number of parameters in torch model: {n_params_torch}")
|
import haiku as hk
import jax
import jax.numpy as jnp
from jax import einsum, numpy
from einops import rearrange
# helper functions
def exists(val):
    """Return True when *val* is not None."""
    if val is None:
        return False
    return True
def cast_tuple(val, num = 1):
    """Return *val* unchanged if it is already a tuple, else repeat it *num* times."""
    if isinstance(val, tuple):
        return val
    return (val,) * num
def default(val, d):
    """Return *val* when it is not None, otherwise the fallback *d*."""
    return d if val is None else val
# positional embedding
class RotaryEmbedding(hk.Module):
    """
    Rotary positional embedding (RoPE, https://arxiv.org/abs/2104.09864).

    Precomputes inverse frequencies at construction; calling the module
    returns the (1, 1, max_seq_len, dim) angle table for positions starting
    at ``offset``.
    """

    def __init__(self, dim):
        super().__init__()
        # BUG FIX: the original used torch idioms `jax.arange(...).float()` —
        # jax has no top-level `arange`, and jax arrays have no `.float()`
        # method; use jnp.arange(...).astype(...) instead.
        self.inv_freq = 1. / (10000 ** (jnp.arange(0, dim, 2).astype(jnp.float32) / dim))

    def __call__(self, max_seq_len, *, offset = 0):
        seq = jnp.arange(max_seq_len) + offset
        # BUG FIX: `.type_as(...)` is a torch method; cast with .astype. Also
        # use jnp.einsum — `from jax import einsum` is not a public jax API.
        freqs = jnp.einsum('i , j -> i j', seq.astype(self.inv_freq.dtype), self.inv_freq)
        emb = jnp.concatenate((freqs, freqs), axis = -1)
        return rearrange(emb, 'n d -> 1 1 n d')
def jax_unstack(x, axis = 0):
    """Bring *axis* to the front so the array can be unpacked/iterated along it."""
    moved = jnp.moveaxis(x, axis, 0)
    return moved
def rotate_half(x):
    """RoPE helper: view the last dim as two halves (h1, h2) and return concat(-h2, h1)."""
    paired = rearrange(x, '... (j d) -> ... j d', j = 2)
    first, second = jax_unstack(paired, axis = -2)
    return jnp.concatenate((-second, first), axis = -1)
def apply_rotary_pos_emb(t, freqs):
    """Rotate the first rot_dim features of *t* by the angles in *freqs*; the remaining features pass through untouched."""
    rot_dim = freqs.shape[-1]
    t_rot, t_pass = t[..., :rot_dim], t[..., rot_dim:]
    t_rot = t_rot * jnp.cos(freqs) + rotate_half(t_rot) * jnp.sin(freqs)
    return jnp.concatenate((t_rot, t_pass), axis = -1)
# attention
class Attention(hk.Module):
    """
    Multi-head (optionally causal, optionally cross-) attention with rotary
    embedding support.

    Args:
        dim: output dimension.
        context_dim: cross-attention context dim (defaults to ``dim``).
        dim_head: per-head feature dimension.
        heads: number of heads.
        causal: apply a causal mask when True.
        dropout: dropout rate on the attention weights.
    """

    def __init__(
        self,
        dim,
        *,
        context_dim = None,
        dim_head = 64,
        heads = 8,
        causal = False,
        dropout = 0.
    ):
        super().__init__()
        context_dim = default(context_dim, dim)
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.causal = causal
        inner_dim = dim_head * heads
        self.dropout = dropout
        self.to_q = hk.Linear(inner_dim, with_bias = False)
        self.to_k = hk.Linear(inner_dim, with_bias = False)
        self.to_v = hk.Linear(inner_dim, with_bias = False)
        self.to_out = hk.Linear(dim)

    def __call__(self, x, context = None, pos_emb = None):
        h, scale = self.heads, self.scale
        # self-attention unless a cross-attention context is supplied
        kv_input = default(context, x)
        q, k, v = self.to_q(x), self.to_k(kv_input), self.to_v(kv_input)
        # split heads
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
        # scale
        q = q * scale
        # apply relative positional encoding (rotary embeddings)
        if exists(pos_emb):
            q_pos_emb, k_pos_emb = cast_tuple(pos_emb, num = 2)
            q = apply_rotary_pos_emb(q, q_pos_emb)
            k = apply_rotary_pos_emb(k, k_pos_emb)
        # derive query key similarities (jnp.einsum: `from jax import einsum`
        # is not a public jax API)
        sim = jnp.einsum('b h i d, b h j d -> b h i j', q, k)
        # masking
        mask_value = -jnp.finfo(sim.dtype).max
        if self.causal:
            i, j = sim.shape[-2:]
            # BUG FIX: the original `jnp.ones(i, j, dtype=numpy.bool).triu(...)`
            # was torch-style — jnp.ones takes a shape tuple, `numpy.bool` is
            # removed, and jax arrays have no .triu() method.
            causal_mask = jnp.triu(jnp.ones((i, j), dtype = bool), j - i + 1)
            # BUG FIX: fill the masked (future) positions with mask_value; the
            # original `where(mask, sim, mask_value)` kept the future and
            # masked the past/diagonal instead.
            sim = jnp.where(causal_mask, mask_value, sim)
        # attention
        attn = jax.nn.softmax(sim, axis = -1)
        attn = hk.dropout(rng = hk.next_rng_key(), rate = self.dropout, x = attn)
        # aggregate
        out = jnp.einsum('b h i j, b h j d -> b h i d', attn, v)
        # merge heads
        out = rearrange(out, 'b h n d -> b n (h d)')
        # combine heads linear out
        return self.to_out(out)
|
import flax.linen as nn
import jax
import jax.numpy as jnp
from jax.numpy import einsum
import numpy as np
from typing import Callable
from einops import rearrange, repeat, reduce
def cast_tuple(val, length = 1):
    """Return *val* unchanged if it is already a tuple, else repeat it *length* times."""
    if isinstance(val, tuple):
        return val
    return (val,) * length
# cross embed layer
class CrossEmbedLayer(nn.Module):
    """Embed the input with several parallel convs of different kernel sizes and concatenate the resulting feature maps channel-wise."""
    dim: int
    kernel_sizes: int
    stride: int = 2

    @nn.compact
    def __call__(self, x):
        sorted_kernels = sorted(self.kernel_sizes)
        num_scales = len(sorted_kernels)
        # halve the channel budget for each successive scale; the last scale
        # absorbs the remainder so the total is exactly self.dim
        dim_scales = [int(self.dim / (2 ** i)) for i in range(1, num_scales)]
        dim_scales.append(self.dim - sum(dim_scales))
        feature_maps = []
        for kernel, dim_scale in zip(sorted_kernels, dim_scales):
            conv = nn.Conv(
                dim_scale,
                kernel_size=(kernel, kernel),
                strides=(self.stride, self.stride),
                padding='SAME',
            )
            feature_maps.append(conv(x))
        return jnp.concatenate(tuple(feature_maps), axis=-1)
# dynamic positional bias
class DynamicPositionBias(nn.Module):
    """Small MLP mapping relative-position offsets to a scalar attention bias."""
    dim: int

    @nn.compact
    def __call__(self, x):
        # three Dense -> LayerNorm -> ReLU stages, then a scalar head
        for _ in range(3):
            x = nn.Dense(self.dim)(x)
            x = nn.LayerNorm(epsilon = 1e-5, use_bias = False)(x)
            x = nn.relu(x)
        x = nn.Dense(1)(x)
        # drop the trailing singleton dimension
        return rearrange(x, '... () -> ...')
# transformer classes
class LayerNorm(nn.Module): # layernorm, but done in the channel dimension #1
    """Layer norm over the last (channel) dim with learned scale ``g`` and bias ``b``."""
    dim: int
    eps: float = 1e-5

    @nn.compact
    def __call__(self, x):
        g = self.param('g', nn.initializers.ones, [1, 1, 1, self.dim])
        b = self.param('b', nn.initializers.zeros, [1, 1, 1, self.dim])
        var = jnp.var(x, axis = -1, keepdims = True)
        mean = jnp.mean(x, axis = -1, keepdims = True)
        normed = (x - mean) / jnp.sqrt(var + self.eps)
        return normed * g + b
class MLP(nn.Module):
    """Pre-norm feedforward block built from 1x1 convolutions with GELU and dropout."""
    dim: int
    mult: int = 4
    dropout: float = 0.0

    @nn.compact
    def __call__(self, x):
        hidden = self.dim * self.mult
        x = LayerNorm(self.dim)(x)
        x = nn.Conv(hidden, kernel_size=(1, 1), strides=(1, 1))(x)
        x = nn.gelu(x)
        x = nn.Dropout(rate=self.dropout, deterministic=False)(x)
        return nn.Conv(self.dim, kernel_size=(1, 1), strides=(1, 1))(x)
class Attention(nn.Module):
    """
    Windowed multi-head self-attention with a dynamic relative position bias.

    attn_type 'short' attends within contiguous local windows; 'long' attends
    across a strided (dilated) grid of the same window size.
    """
    dim: int
    attn_type: str
    window_size: int
    dim_head: int = 32
    dropout: float = 0.0
    @nn.compact
    def __call__(self, x):
        assert self.attn_type in {'short', 'long'}, 'attention type must be one of local or distant'
        heads = self.dim // self.dim_head
        scale = self.dim_head ** -0.5
        inner_dim = self.dim_head * heads
        attn_type = self.attn_type
        window_size = self.window_size
        norm = LayerNorm(self.dim)
        to_qkv = nn.Conv(inner_dim * 3, kernel_size=(1,1), strides=(1,1), use_bias=False)
        to_out = nn.Conv(self.dim, kernel_size=(1,1), strides=(1,1))
        # positions
        dpb = DynamicPositionBias(self.dim // 4)
        # calculate and store indices for retrieving bias
        pos = jnp.arange(window_size)
        grid = jnp.stack(jnp.meshgrid(pos, pos, indexing='ij'))
        grid = rearrange(grid, 'c i j -> (i j) c')
        # pairwise 2-D offsets between all window positions, shifted to be >= 0
        rel_pos = grid[:, None] - grid[None, :]
        rel_pos += window_size - 1
        # flatten the 2-D offset to a single index into the dpb bias table
        rel_pos_indices = jnp.sum(rel_pos * jnp.array([2 * window_size - 1, 1]), axis=-1)
        _, height, width, _ = x.shape
        wsz = window_size
        # prenorm
        x = norm(x)
        # rearrange for short or long distance attention
        if attn_type == 'short':
            x = rearrange(x, 'b (h s1) (w s2) d -> (b h w) s1 s2 d', s1=wsz, s2=wsz)
        elif attn_type == 'long':
            x = rearrange(x, 'b (l1 h) (l2 w) d -> (b h w) l1 l2 d', l1=wsz, l2=wsz)
        # queries / keys / values
        qkv = to_qkv(x)
        q, k, v = jnp.split(qkv, 3, axis=-1)
        # split heads
        q, k, v = map(lambda t: rearrange(t, 'b x y (h d) -> b h (x y) d', h=heads), (q, k, v))
        q = q * scale
        sim = einsum('b h i d, b h j d -> b h i j', q, k)
        # add dynamic positional bias computed over all (2*wsz+1)^2 offsets
        pos = jnp.arange(-wsz, wsz + 1)
        rel_pos = jnp.stack(jnp.meshgrid(pos, pos, indexing='ij'))
        rel_pos = rearrange(rel_pos, 'c i j -> (i j) c')
        biases = dpb(rel_pos)
        rel_pos_bias = biases[rel_pos_indices]
        sim = sim + rel_pos_bias
        # attend
        # NOTE(review): self.dropout is declared but never applied to `attn`
        # here — confirm whether attention dropout was intended.
        attn = nn.softmax(sim, axis = -1)
        # merge heads
        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h (x y) d -> b x y (h d) ', x=wsz, y=wsz)
        out = to_out(out)
        # rearrange back for long or short distance attention
        if self.attn_type == 'short':
            out = rearrange(out, '(b h w) s1 s2 d -> b (h s1) (w s2) d', h=height // wsz, w=width // wsz)
        elif self.attn_type == 'long':
            out = rearrange(out, '(b h w) l1 l2 d -> b (l1 h) (l2 w) d', h=height // wsz, w=width // wsz)
        return out
class Transformer(nn.Module):
    """``depth`` blocks of alternating short (local) and long (global) windowed attention, each followed by an MLP, all with residual connections."""
    dim: int
    local_window_size: int
    global_window_size: int
    depth: int = 4
    dim_head: int = 32
    attn_dropout: float = 0.0
    ff_dropout: float = 0.0

    @nn.compact
    def __call__(self, x):
        blocks = []
        for _ in range(self.depth):
            short_attn = Attention(self.dim, attn_type='short', window_size=self.local_window_size, dim_head=self.dim_head, dropout=self.attn_dropout)
            short_ff = MLP(self.dim, dropout=self.ff_dropout)
            long_attn = Attention(self.dim, attn_type='long', window_size=self.global_window_size, dim_head=self.dim_head, dropout=self.attn_dropout)
            long_ff = MLP(self.dim, dropout=self.ff_dropout)
            blocks.append((short_attn, short_ff, long_attn, long_ff))
        for short_attn, short_ff, long_attn, long_ff in blocks:
            x = short_attn(x) + x
            x = short_ff(x) + x
            x = long_attn(x) + x
            x = long_ff(x) + x
        return x
class CrossFormer(nn.Module):
    """
    CrossFormer image classifier: four stages of cross-scale embedding plus
    short/long windowed-attention transformers, then mean pooling and a
    linear classification head.
    """
    dim: tuple = (64, 128, 256, 512)
    depth: tuple = (2, 2, 8, 2)
    global_window_size: tuple = (8, 4, 2, 1)
    local_window_size: int = 7
    cross_embed_kernel_sizes: tuple = ((4, 8, 16, 32), (2, 4), (2, 4), (2, 4))
    cross_embed_strides: tuple = (4, 2, 2, 2)
    num_classes: int = 1000
    attn_dropout: float = 0.0
    ff_dropout: float = 0.0
    @nn.compact
    def __call__(self, x, **kwargs):
        # normalize every per-stage hyper-parameter to a 4-tuple
        dim = cast_tuple(self.dim, 4)
        depth = cast_tuple(self.depth, 4)
        global_window_size = cast_tuple(self.global_window_size, 4)
        local_window_size = cast_tuple(self.local_window_size, 4)
        cross_embed_kernel_sizes = cast_tuple(self.cross_embed_kernel_sizes, 4)
        cross_embed_strides = cast_tuple(self.cross_embed_strides, 4)
        assert len(dim) == 4
        assert len(depth) == 4
        assert len(global_window_size) == 4
        assert len(local_window_size) == 4
        assert len(cross_embed_kernel_sizes) == 4
        assert len(cross_embed_strides) == 4
        # layers — one (embed, transformer) pair per stage
        crossformer_layers = []
        for dim_out, layers, global_wsz, local_wsz, cel_kernel_sizes, cel_stride in zip(dim, depth,
                                                                                        global_window_size, local_window_size,
                                                                                        cross_embed_kernel_sizes, cross_embed_strides):
            crossformer_layers.append([
                CrossEmbedLayer(dim_out, cel_kernel_sizes, stride=cel_stride),
                Transformer(dim_out, local_window_size=local_wsz, global_window_size=global_wsz, depth=layers,
                            attn_dropout=self.attn_dropout, ff_dropout=self.ff_dropout)
            ])
        # final logits
        to_logits = nn.Sequential([
            nn.Dense(self.num_classes)
        ])
        for cel, transformer in crossformer_layers:
            x = cel(x)
            x = transformer(x)
        # global average pool over the spatial dims, then classify
        x = reduce(x, 'b h w c -> b c', 'mean')
        x = to_logits(x)
        return x
if __name__ == '__main__':
    import numpy as np
    # smoke test: init the model on a random image and count its parameters
    key = jax.random.PRNGKey(0)
    img = jax.random.normal(key, (1, 224, 224, 3))
    v = CrossFormer(
        num_classes = 1000,                 # number of output classes
        dim = (64, 128, 256, 512),          # dimension at each stage
        depth = (2, 2, 8, 2),               # depth of transformer at each stage
        global_window_size = (8, 4, 2, 1),  # global window sizes at each stage
        local_window_size = 7,              # local window size (held constant at 7 for all stages in the paper)
    )
    init_rngs = {
        'params': jax.random.PRNGKey(1),
        'dropout': jax.random.PRNGKey(2),
        'emb_dropout': jax.random.PRNGKey(3),
    }
    params = v.init(init_rngs, img)
    output = v.apply(params, img, rngs=init_rngs)
    print(output.shape)
    leaf_sizes = jax.tree_map(lambda x: np.prod(x.shape), params)
    n_params_flax = sum(jax.tree_leaves(leaf_sizes))
    print(f"Number of parameters in Flax model: {n_params_flax}")
|
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
from typing import List
class Adan(Optimizer):
    """
    Implements a pytorch variant of Adan
    Adan was proposed in
    Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models[J]. arXiv preprint arXiv:2208.06677, 2022.
    https://arxiv.org/abs/2208.06677
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float, float], optional): coefficients used for computing
            running averages of gradient and its norm. (default: (0.98, 0.92, 0.99))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): decoupled weight decay (L2 penalty) (default: 0)
        max_grad_norm (float, optional): value used to clip
            global grad norm (default: 0.0 no clip)
        no_prox (bool): how to perform the decoupled weight decay (default: False)
        foreach (bool): if True would use torch._foreach implementation. It's faster but uses
            slightly more memory. (default: True)
    """
    def __init__(self, params, lr=1e-3, betas=(0.98, 0.92, 0.99), eps=1e-8,
                 weight_decay=0.0, max_grad_norm=0.0, no_prox=False, foreach: bool=True):
        # validate hyper-parameters before handing them to the base Optimizer
        if not 0.0 <= max_grad_norm:
            raise ValueError("Invalid Max grad norm: {}".format(max_grad_norm))
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= betas[2] < 1.0:
            raise ValueError("Invalid beta parameter at index 2: {}".format(betas[2]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm, no_prox=no_prox, foreach=foreach)
        super().__init__(params, defaults)
    def __setstate__(self, state):
        # keep checkpoints from older versions loadable: ensure `no_prox` exists
        super(Adan, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('no_prox', False)
    @torch.no_grad()
    def restart_opt(self):
        # reset step counter and all moment buffers (e.g. between training phases)
        for group in self.param_groups:
            group['step'] = 0
            for p in group['params']:
                if p.requires_grad:
                    state = self.state[p]
                    # State initialization
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    # Exponential moving average of gradient difference
                    state['exp_avg_diff'] = torch.zeros_like(p)
    @torch.no_grad()
    def step(self, closure=None):
        """
        Performs a single optimization step.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        # optional global gradient clipping: scale all grads by a single factor
        if self.defaults['max_grad_norm'] > 0:
            device = self.param_groups[0]['params'][0].device
            global_grad_norm = torch.zeros(1, device=device)
            max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device)
            for group in self.param_groups:
                for p in group['params']:
                    if p.grad is not None:
                        grad = p.grad
                        global_grad_norm.add_(grad.pow(2).sum())
            global_grad_norm = torch.sqrt(global_grad_norm)
            # NOTE(review): `group` here is the last group from the loop above,
            # so that group's `eps` is used for all groups — confirm intended.
            clip_global_grad_norm = torch.clamp(max_grad_norm / (global_grad_norm + group['eps']), max=1.0)
        else:
            clip_global_grad_norm = 1.0
        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            exp_avg_diffs = []
            pre_grads = []
            beta1, beta2, beta3 = group['betas']
            # assume same step across group now to simplify things
            # per parameter step can be easily support by making it tensor, or pass list into kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1
            bias_correction1 = 1.0 - beta1 ** group['step']
            bias_correction2 = 1.0 - beta2 ** group['step']
            bias_correction3 = 1.0 - beta3 ** group['step']
            # gather per-parameter tensors for the batched update functions
            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                grads.append(p.grad)
                state = self.state[p]
                if len(state) == 0:
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    state['exp_avg_diff'] = torch.zeros_like(p)
                if 'pre_grad' not in state or group['step'] == 1:
                    # at first step grad wouldn't be clipped by `clip_global_grad_norm`
                    # this is only to simplify implementation
                    state['pre_grad'] = p.grad
                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                exp_avg_diffs.append(state['exp_avg_diff'])
                pre_grads.append(state['pre_grad'])
            kwargs = dict(
                params=params_with_grad,
                grads=grads,
                exp_avgs=exp_avgs,
                exp_avg_sqs=exp_avg_sqs,
                exp_avg_diffs=exp_avg_diffs,
                pre_grads=pre_grads,
                beta1=beta1,
                beta2=beta2,
                beta3=beta3,
                bias_correction1=bias_correction1,
                bias_correction2=bias_correction2,
                bias_correction3_sqrt=math.sqrt(bias_correction3),
                lr=group['lr'],
                weight_decay=group['weight_decay'],
                eps=group['eps'],
                no_prox=group['no_prox'],
                clip_global_grad_norm=clip_global_grad_norm,
            )
            if group["foreach"]:
                copy_grads = _multi_tensor_adan(**kwargs)
            else:
                copy_grads = _single_tensor_adan(**kwargs)
            # remember this step's (clipped) grads for the next step's diff term
            for p, copy_grad in zip(params_with_grad, copy_grads):
                self.state[p]['pre_grad'] = copy_grad
        return loss
def _single_tensor_adan(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
exp_avg_diffs: List[Tensor],
pre_grads: List[Tensor],
*,
beta1: float,
beta2: float,
beta3: float,
bias_correction1: float,
bias_correction2: float,
bias_correction3_sqrt: float,
lr: float,
weight_decay: float,
eps: float,
no_prox: bool,
clip_global_grad_norm: Tensor,
):
copy_grads = []
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
exp_avg_diff = exp_avg_diffs[i]
pre_grad = pre_grads[i]
grad = grad.mul_(clip_global_grad_norm)
copy_grads.append(grad.clone())
diff = grad - pre_grad
update = grad + beta2 * diff
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t
exp_avg_diff.mul_(beta2).add_(diff, alpha=1 - beta2) # diff_t
exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1 - beta3) # n_t
denom = ((exp_avg_sq).sqrt() / bias_correction3_sqrt).add_(eps)
update = ((exp_avg / bias_correction1 + beta2 * exp_avg_diff / bias_correction2)).div_(denom)
if no_prox:
param.mul_(1 - lr * weight_decay)
param.add_(update, alpha=-lr)
else:
param.add_(update, alpha=-lr)
param.div_(1 + lr * weight_decay)
return copy_grads
def _multi_tensor_adan(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    exp_avg_diffs: List[Tensor],
    pre_grads: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    beta3: float,
    bias_correction1: float,
    bias_correction2: float,
    bias_correction3_sqrt: float,
    lr: float,
    weight_decay: float,
    eps: float,
    no_prox: bool,
    clip_global_grad_norm: Tensor,
):
    """Fused (torch._foreach) Adan update over all parameter tensors at once.

    Equivalent to ``_single_tensor_adan`` up to float rounding (see the NOTE
    comments below), but issues one batched kernel per elementwise op instead
    of a Python loop per parameter. Returns a clone of each (possibly clipped)
    gradient; the caller stores these as ``pre_grad`` for the next step.
    """
    # Skip the in-place scaling when no clipping applies; the clip factor is
    # the plain float 1.0 when max_grad_norm == 0, a tensor < 1.0 otherwise.
    if clip_global_grad_norm<1.0:
        torch._foreach_mul_(grads, clip_global_grad_norm.item())
    copy_grads = [g.clone() for g in grads]
    # diff_t = g_t - g_{t-1}
    diff = torch._foreach_sub(grads, pre_grads)
    # NOTE: line below while looking identical gives different result, due to float precision errors.
    # using mul+add produces identical results to single-tensor, using add+alpha doesn't
    # On cuda this difference doesn't matter due to its' own precision non-determinism
    # update = torch._foreach_add(grads, torch._foreach_mul(diff, beta2))
    update = torch._foreach_add(grads, diff, alpha=beta2)
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) # m_t
    torch._foreach_mul_(exp_avg_diffs, beta2)
    torch._foreach_add_(exp_avg_diffs, diff, alpha=1 - beta2) # diff_t
    torch._foreach_mul_(exp_avg_sqs, beta3)
    torch._foreach_addcmul_(exp_avg_sqs, update, update, value=1 - beta3) # n_t
    # denom = sqrt(n_t) / sqrt(bias_correction3) + eps
    denom = torch._foreach_sqrt(exp_avg_sqs)
    torch._foreach_div_(denom, bias_correction3_sqrt)
    torch._foreach_add_(denom, eps)
    # update = (m_t / bc1 + beta2 * diff_t / bc2) / denom
    update = torch._foreach_div(exp_avgs, bias_correction1)
    # NOTE: same issue as above. beta2 * diff / bias_correction2 != diff * (beta2 / bias_correction2)
    # using faster version by default.
    # torch._foreach_add_(update, torch._foreach_div(torch._foreach_mul(exp_avg_diffs, beta2), bias_correction2))
    torch._foreach_add_(update, torch._foreach_mul(exp_avg_diffs, beta2 / bias_correction2))
    torch._foreach_div_(update, denom)
    if no_prox:
        # Decoupled weight decay applied before the step (AdamW-style).
        torch._foreach_mul_(params, 1 - lr * weight_decay)
        torch._foreach_add_(params, update, alpha=-lr)
    else:
        # Proximal form: take the step, then shrink by 1/(1 + lr*wd).
        torch._foreach_add_(params, update, alpha=-lr)
        torch._foreach_div_(params, 1 + lr * weight_decay)
    return copy_grads
|
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
from typing import List
class Adan(Optimizer):
    """
    Implements a pytorch variant of Adan
    Adan was proposed in
    Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models[J]. arXiv preprint arXiv:2208.06677, 2022.
    https://arxiv.org/abs/2208.06677
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float, float], optional): coefficients used for computing
            running averages of gradient and its norm. (default: (0.98, 0.92, 0.99))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): decoupled weight decay (L2 penalty) (default: 0)
        max_grad_norm (float, optional): value used to clip
            global grad norm (default: 0.0 no clip)
        no_prox (bool): how to perform the decoupled weight decay (default: False)
        foreach (bool): if True would use torch._foreach implementation. It's faster but uses
            slightly more memory. (default: True)
    """
    def __init__(self, params, lr=1e-3, betas=(0.98, 0.92, 0.99), eps=1e-8,
                 weight_decay=0.0, max_grad_norm=0.0, no_prox=False, foreach: bool = True):
        # Fail fast on invalid hyper-parameters rather than mid-training.
        if not 0.0 <= max_grad_norm:
            raise ValueError("Invalid Max grad norm: {}".format(max_grad_norm))
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= betas[2] < 1.0:
            raise ValueError("Invalid beta parameter at index 2: {}".format(betas[2]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm, no_prox=no_prox, foreach=foreach)
        super().__init__(params, defaults)
    def __setstate__(self, state):
        # Keep checkpoints saved before `no_prox` existed loadable.
        super(Adan, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('no_prox', False)
    @torch.no_grad()
    def restart_opt(self):
        """Reset the step counter and zero all moment buffers."""
        for group in self.param_groups:
            group['step'] = 0
            for p in group['params']:
                if p.requires_grad:
                    state = self.state[p]
                    # State initialization
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    # Exponential moving average of gradient difference
                    state['exp_avg_diff'] = torch.zeros_like(p)
    @torch.no_grad()
    def step(self, closure=None):
        """
        Performs a single optimization step.

        Arguments:
            closure (callable, optional): re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        # Optional global gradient clipping: scale every grad by
        # min(1, max_grad_norm / (||g||_2 + eps)).
        if self.defaults['max_grad_norm'] > 0:
            device = self.param_groups[0]['params'][0].device
            global_grad_norm = torch.zeros(1, device=device)
            max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device)
            for group in self.param_groups:
                for p in group['params']:
                    if p.grad is not None:
                        grad = p.grad
                        global_grad_norm.add_(grad.pow(2).sum())
            global_grad_norm = torch.sqrt(global_grad_norm)
            # NOTE(review): `group['eps']` here is the eps of the *last* param
            # group left over from the loop above — this matches the upstream
            # Adan implementation, but confirm intent for multi-group configs.
            clip_global_grad_norm = torch.clamp(max_grad_norm / (global_grad_norm + group['eps']), max=1.0)
        else:
            clip_global_grad_norm = 1.0
        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            exp_avg_diffs = []
            pre_grads = []
            beta1, beta2, beta3 = group['betas']
            # assume same step across group now to simplify things
            # per parameter step can be easily support by making it tensor, or pass list into kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1
            bias_correction1 = 1.0 - beta1 ** group['step']
            bias_correction2 = 1.0 - beta2 ** group['step']
            bias_correction3 = 1.0 - beta3 ** group['step']
            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                grads.append(p.grad)
                state = self.state[p]
                # Lazy state init on first encounter of a parameter.
                if len(state) == 0:
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    state['exp_avg_diff'] = torch.zeros_like(p)
                if 'pre_grad' not in state or group['step'] == 1:
                    # at first step grad wouldn't be clipped by `clip_global_grad_norm`
                    # this is only to simplify implementation
                    state['pre_grad'] = p.grad
                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                exp_avg_diffs.append(state['exp_avg_diff'])
                pre_grads.append(state['pre_grad'])
            kwargs = dict(
                params=params_with_grad,
                grads=grads,
                exp_avgs=exp_avgs,
                exp_avg_sqs=exp_avg_sqs,
                exp_avg_diffs=exp_avg_diffs,
                pre_grads=pre_grads,
                beta1=beta1,
                beta2=beta2,
                beta3=beta3,
                bias_correction1=bias_correction1,
                bias_correction2=bias_correction2,
                bias_correction3_sqrt=math.sqrt(bias_correction3),
                lr=group['lr'],
                weight_decay=group['weight_decay'],
                eps=group['eps'],
                no_prox=group['no_prox'],
                clip_global_grad_norm=clip_global_grad_norm,
            )
            # Fused multi-tensor path vs simple per-parameter loop.
            if group["foreach"]:
                copy_grads = _multi_tensor_adan(**kwargs)
            else:
                copy_grads = _single_tensor_adan(**kwargs)
            # Remember this step's (clipped) gradients for the next diff_t.
            for p, copy_grad in zip(params_with_grad, copy_grads):
                self.state[p]['pre_grad'] = copy_grad
        return loss
def _single_tensor_adan(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
exp_avg_diffs: List[Tensor],
pre_grads: List[Tensor],
*,
beta1: float,
beta2: float,
beta3: float,
bias_correction1: float,
bias_correction2: float,
bias_correction3_sqrt: float,
lr: float,
weight_decay: float,
eps: float,
no_prox: bool,
clip_global_grad_norm: Tensor,
):
copy_grads = []
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
exp_avg_diff = exp_avg_diffs[i]
pre_grad = pre_grads[i]
grad = grad.mul_(clip_global_grad_norm)
copy_grads.append(grad.clone())
diff = grad - pre_grad
update = grad + beta2 * diff
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t
exp_avg_diff.mul_(beta2).add_(diff, alpha=1 - beta2) # diff_t
exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1 - beta3) # n_t
denom = ((exp_avg_sq).sqrt() / bias_correction3_sqrt).add_(eps)
update = ((exp_avg / bias_correction1 + beta2 * exp_avg_diff / bias_correction2)).div_(denom)
if no_prox:
param.mul_(1 - lr * weight_decay)
param.add_(update, alpha=-lr)
else:
param.add_(update, alpha=-lr)
param.div_(1 + lr * weight_decay)
return copy_grads
def _multi_tensor_adan(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    exp_avg_diffs: List[Tensor],
    pre_grads: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    beta3: float,
    bias_correction1: float,
    bias_correction2: float,
    bias_correction3_sqrt: float,
    lr: float,
    weight_decay: float,
    eps: float,
    no_prox: bool,
    clip_global_grad_norm: Tensor,
):
    """Fused (torch._foreach) Adan update over all parameter tensors at once.

    Equivalent to ``_single_tensor_adan`` up to float rounding (see the NOTE
    comments below), but issues one batched kernel per elementwise op instead
    of a Python loop per parameter. Returns a clone of each (possibly clipped)
    gradient; the caller stores these as ``pre_grad`` for the next step.
    """
    # Skip the in-place scaling when no clipping applies; the clip factor is
    # the plain float 1.0 when max_grad_norm == 0, a tensor < 1.0 otherwise.
    if clip_global_grad_norm<1.0:
        torch._foreach_mul_(grads, clip_global_grad_norm.item())
    copy_grads = [g.clone() for g in grads]
    # diff_t = g_t - g_{t-1}
    diff = torch._foreach_sub(grads, pre_grads)
    # NOTE: line below while looking identical gives different result, due to float precision errors.
    # using mul+add produces identical results to single-tensor, using add+alpha doesn't
    # On cuda this difference doesn't matter due to its' own precision non-determinism
    # update = torch._foreach_add(grads, torch._foreach_mul(diff, beta2))
    update = torch._foreach_add(grads, diff, alpha=beta2)
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) # m_t
    torch._foreach_mul_(exp_avg_diffs, beta2)
    torch._foreach_add_(exp_avg_diffs, diff, alpha=1 - beta2) # diff_t
    torch._foreach_mul_(exp_avg_sqs, beta3)
    torch._foreach_addcmul_(exp_avg_sqs, update, update, value=1 - beta3) # n_t
    # denom = sqrt(n_t) / sqrt(bias_correction3) + eps
    denom = torch._foreach_sqrt(exp_avg_sqs)
    torch._foreach_div_(denom, bias_correction3_sqrt)
    torch._foreach_add_(denom, eps)
    # update = (m_t / bc1 + beta2 * diff_t / bc2) / denom
    update = torch._foreach_div(exp_avgs, bias_correction1)
    # NOTE: same issue as above. beta2 * diff / bias_correction2 != diff * (beta2 / bias_correction2)
    # using faster version by default.
    # torch._foreach_add_(update, torch._foreach_div(torch._foreach_mul(exp_avg_diffs, beta2), bias_correction2))
    torch._foreach_add_(update, torch._foreach_mul(exp_avg_diffs, beta2 / bias_correction2))
    torch._foreach_div_(update, denom)
    if no_prox:
        # Decoupled weight decay applied before the step (AdamW-style).
        torch._foreach_mul_(params, 1 - lr * weight_decay)
        torch._foreach_add_(params, update, alpha=-lr)
    else:
        # Proximal form: take the step, then shrink by 1/(1 + lr*wd).
        torch._foreach_add_(params, update, alpha=-lr)
        torch._foreach_div_(params, 1 + lr * weight_decay)
    return copy_grads
|
import sys
import math
import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
sys.path.append('utils')
from proj_adaptive_softmax import ProjectedAdaptiveLogSoftmax
from log_uniform_sampler import LogUniformSampler, sample_logits
class PositionalEmbedding(nn.Module):
    """Sinusoidal positional embedding (Transformer-XL style).

    Produces ``[sin(pos * inv_freq), cos(pos * inv_freq)]`` of width ``demb``
    for each position in ``pos_seq``, shaped ``(len, 1, demb)`` and optionally
    expanded along the batch dimension.
    """

    def __init__(self, demb):
        super(PositionalEmbedding, self).__init__()
        self.demb = demb
        # Geometric frequency schedule, one frequency per pair of channels.
        inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
        self.register_buffer('inv_freq', inv_freq)

    def forward(self, pos_seq, bsz=None):
        # Outer product: (len,) x (demb/2,) -> (len, demb/2) phase angles.
        angles = torch.ger(pos_seq, self.inv_freq)
        emb = torch.cat([angles.sin(), angles.cos()], dim=-1)
        emb = emb[:, None, :]
        if bsz is None:
            return emb
        return emb.expand(-1, bsz, -1)
class PositionwiseFF(nn.Module):
    """Position-wise two-layer feed-forward sublayer with residual + LayerNorm.

    ``pre_lnorm`` selects where LayerNorm is applied: before the FF net (with
    the residual added to the raw input) or after the residual sum.
    """

    def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
        super(PositionwiseFF, self).__init__()
        self.d_model = d_model
        self.d_inner = d_inner
        self.dropout = dropout
        # d_model -> d_inner -> d_model with ReLU and dropout on both stages.
        self.CoreNet = nn.Sequential(
            nn.Linear(d_model, d_inner), nn.ReLU(inplace=True),
            nn.Dropout(dropout),
            nn.Linear(d_inner, d_model),
            nn.Dropout(dropout),
        )
        self.layer_norm = nn.LayerNorm(d_model)
        self.pre_lnorm = pre_lnorm

    def forward(self, inp):
        if self.pre_lnorm:
            # Normalize first, run the FF net, add residual on the raw input.
            return self.CoreNet(self.layer_norm(inp)) + inp
        # Run the FF net first, then normalize the residual sum.
        return self.layer_norm(inp + self.CoreNet(inp))
class MultiHeadAttn(nn.Module):
    """Multi-head attention with absolute positions (used by attn_type 2/3).

    Queries are computed from the current segment ``h`` only; keys/values are
    computed from ``h`` optionally prefixed with cached memory ``mems``. A
    residual connection and LayerNorm wrap the sublayer; ``pre_lnorm`` chooses
    pre- vs post-normalization.
    """
    def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
                 pre_lnorm=False):
        super(MultiHeadAttn, self).__init__()
        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        # Separate projections: queries from h; keys+values (fused) from [mems; h].
        self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
        self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)  # dropout on attention probabilities
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
        self.layer_norm = nn.LayerNorm(d_model)
        self.scale = 1 / (d_head ** 0.5)  # 1/sqrt(d_head) score scaling
        self.pre_lnorm = pre_lnorm
    def forward(self, h, attn_mask=None, mems=None):
        ##### multihead attention
        # [hlen x bsz x n_head x d_head]
        if mems is not None:
            # Keys/values attend over cached memory plus the current segment.
            c = torch.cat([mems, h], 0)
        else:
            c = h
        if self.pre_lnorm:
            ##### layer normalization
            # NOTE(review): with pre_lnorm, queries below use the raw h while
            # keys/values use layer_norm(c) — asymmetric; confirm intended.
            c = self.layer_norm(c)
        head_q = self.q_net(h)
        head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
        head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
        head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
        head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
        # [qlen x klen x bsz x n_head]
        attn_score = torch.einsum('ibnd,jbnd->ijbn', (head_q, head_k))
        attn_score.mul_(self.scale)
        if attn_mask is not None and attn_mask.any().item():
            # Nonzero mask entries are excluded with -inf before the softmax.
            if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)  # normalize over the key axis
        attn_prob = self.dropatt(attn_prob)
        # [qlen x klen x bsz x n_head] + [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head]
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, head_v))
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            output = h + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(h + attn_out)
        return output
class RelMultiHeadAttn(nn.Module):
    """Base class for relative-position multi-head attention.

    Holds the fused QKV projection plus the shift helpers that convert
    position-indexed score grids into relative-position scores. Subclasses
    implement ``forward``.
    """
    def __init__(self, n_head, d_model, d_head, dropout, dropatt=0,
                 tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False):
        super(RelMultiHeadAttn, self).__init__()
        self.n_head = n_head
        self.d_model = d_model
        self.d_head = d_head
        self.dropout = dropout
        # Fused projection producing queries, keys and values in one matmul.
        self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
        self.drop = nn.Dropout(dropout)
        self.dropatt = nn.Dropout(dropatt)
        self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
        self.layer_norm = nn.LayerNorm(d_model)
        self.scale = 1 / (d_head ** 0.5)  # 1/sqrt(d_head) score scaling
        self.pre_lnorm = pre_lnorm
    def _parallelogram_mask(self, h, w, left=False):
        # Byte mask selecting a parallelogram-shaped region of an (h, w) grid;
        # companion of `_shift` below.
        mask = torch.ones((h, w)).byte()
        m = min(h, w)
        mask[:m,:m] = torch.triu(mask[:m,:m])
        mask[-m:,-m:] = torch.tril(mask[-m:,-m:])
        if left:
            return mask
        else:
            return mask.flip(0)
    def _shift(self, x, qlen, klen, mask, left=False):
        # Realign rows of x to relative positions via zero padding followed by
        # a masked select with the parallelogram mask.
        if qlen > 1:
            zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)),
                                   device=x.device, dtype=x.dtype)
        else:
            zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
        if left:
            mask = mask.flip(1)
            x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
        else:
            x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
        x = x_padded.masked_select(mask[:,:,None,None]) \
            .view(qlen, klen, x.size(2), x.size(3))
        return x
    def _rel_shift(self, x, zero_triu=False):
        # Standard Transformer-XL relative shift: prepend one zero column,
        # reshape so the pad rotates each row by its row index, drop the pad.
        zero_pad = torch.zeros((x.size(0), 1, *x.size()[2:]),
                               device=x.device, dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=1)
        x_padded = x_padded.view(x.size(1) + 1, x.size(0), *x.size()[2:])
        x = x_padded[1:].view_as(x)
        if zero_triu:
            # Optionally zero the upper triangle of the shifted scores.
            ones = torch.ones((x.size(0), x.size(1)))
            x = x * torch.tril(ones, x.size(1) - x.size(0))[:,:,None,None]
        return x
    def forward(self, w, r, attn_mask=None, mems=None):
        raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
    """Transformer-XL attention (attn_type 0): sinusoidal relative position
    embeddings ``r`` projected by ``r_net``, plus learnable global content and
    position biases ``r_w_bias`` / ``r_r_bias`` shared across positions.
    """
    def __init__(self, *args, **kwargs):
        super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
        # Projects the relative position embeddings to per-head key space.
        self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
    def forward(self, w, r, r_w_bias, r_r_bias, attn_mask=None, mems=None):
        qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
        if mems is not None:
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            # Queries only for the current segment; keys/values span memory too.
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            r_head_k = self.r_net(r)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)           # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)           # qlen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)           # qlen x bsz x n_head x d_head
        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)                # qlen x n_head x d_head
        #### compute attention score
        # Content-based term: (query + content bias) . key
        rw_head_q = w_head_q + r_w_bias                                         # qlen x bsz x n_head x d_head
        AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))             # qlen x klen x bsz x n_head
        # Position-based term: (query + position bias) . relative key,
        # realigned with the relative-shift trick.
        rr_head_q = w_head_q + r_r_bias
        BD = torch.einsum('ibnd,jnd->ijbn', (rr_head_q, r_head_k))              # qlen x klen x bsz x n_head
        BD = self._rel_shift(BD)
        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)
        #### compute attention probability
        if attn_mask is not None and attn_mask.any().item():
            # Masked fill in float to avoid fp16 -inf issues, then cast back.
            if attn_mask.dim() == 2:
                attn_score = attn_score.float().masked_fill(
                    attn_mask[None,:,:,None], -float('inf')).type_as(attn_score)
            elif attn_mask.dim() == 3:
                attn_score = attn_score.float().masked_fill(
                    attn_mask[:,:,:,None], -float('inf')).type_as(attn_score)
        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)  # normalize over the key axis
        attn_prob = self.dropatt(attn_prob)
        #### compute attention vector
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            output = w + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(w + attn_out)
        return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
    """Relative attention with per-layer learnable embeddings (attn_type 1):
    Shaw-style learnable relative key embeddings ``r_emb`` and position biases
    ``r_bias`` instead of sinusoidal embeddings.
    """
    def __init__(self, *args, **kwargs):
        super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
    def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
        # r_emb: [klen, n_head, d_head], used for term B
        # r_w_bias: [n_head, d_head], used for term C
        # r_bias: [klen, n_head], used for term D
        qlen, bsz = w.size(0), w.size(1)
        if mems is not None:
            cat = torch.cat([mems, w], 0)
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(cat))
            else:
                w_heads = self.qkv_net(cat)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
            # Queries only for the current segment; keys/values span memory too.
            w_head_q = w_head_q[-qlen:]
        else:
            if self.pre_lnorm:
                w_heads = self.qkv_net(self.layer_norm(w))
            else:
                w_heads = self.qkv_net(w)
            w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
        klen = w_head_k.size(0)
        w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
        if klen > r_emb.size(0):
            # Key length exceeds the learned table: pad by repeating the
            # entry for the most distant position.
            r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1)
            r_emb = torch.cat([r_emb_pad, r_emb], 0)
            r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1)
            r_bias = torch.cat([r_bias_pad, r_bias], 0)
        else:
            # Keep only the klen most recent relative positions.
            r_emb = r_emb[-klen:]
            r_bias = r_bias[-klen:]
        #### compute attention score
        rw_head_q = w_head_q + r_w_bias[None]                                   # qlen x bsz x n_head x d_head
        # Terms A+C: content score with global content bias.
        AC = torch.einsum('ibnd,jbnd->ijbn', (rw_head_q, w_head_k))             # qlen x klen x bsz x n_head
        # Term B: query . learnable relative key embedding.
        B_ = torch.einsum('ibnd,jnd->ijbn', (w_head_q, r_emb))                  # qlen x klen x bsz x n_head
        # Term D: learnable per-position bias.
        D_ = r_bias[None, :, None]                                              # 1    x klen x 1   x n_head
        BD = self._rel_shift(B_ + D_)
        # [qlen x klen x bsz x n_head]
        attn_score = AC + BD
        attn_score.mul_(self.scale)
        #### compute attention probability
        if attn_mask is not None and attn_mask.any().item():
            if attn_mask.dim() == 2:
                attn_score.masked_fill_(attn_mask[None,:,:,None], -float('inf'))
            elif attn_mask.dim() == 3:
                attn_score.masked_fill_(attn_mask[:,:,:,None], -float('inf'))
        # [qlen x klen x bsz x n_head]
        attn_prob = F.softmax(attn_score, dim=1)  # normalize over the key axis
        attn_prob = self.dropatt(attn_prob)
        #### compute attention vector
        attn_vec = torch.einsum('ijbn,jbnd->ibnd', (attn_prob, w_head_v))
        # [qlen x bsz x n_head x d_head]
        attn_vec = attn_vec.contiguous().view(
            attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
        ##### linear projection
        attn_out = self.o_net(attn_vec)
        attn_out = self.drop(attn_out)
        if self.pre_lnorm:
            ##### residual connection
            output = w + attn_out
        else:
            ##### residual connection + layer normalization
            output = self.layer_norm(w + attn_out)
        return output
class DecoderLayer(nn.Module):
    """Decoder layer for absolute-position attention: MultiHeadAttn followed
    by a position-wise feed-forward sublayer."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
        super(DecoderLayer, self).__init__()
        self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))

    def forward(self, dec_inp, dec_attn_mask=None, mems=None):
        attn_out = self.dec_attn(dec_inp, attn_mask=dec_attn_mask, mems=mems)
        return self.pos_ff(attn_out)
class RelLearnableDecoderLayer(nn.Module):
    """Decoder layer for learnable relative-position attention (attn_type 1):
    RelLearnableMultiHeadAttn followed by a position-wise feed-forward."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout,
                 **kwargs):
        super(RelLearnableDecoderLayer, self).__init__()
        self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout,
                                                  **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))

    def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None):
        attn_out = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias,
                                 attn_mask=dec_attn_mask,
                                 mems=mems)
        return self.pos_ff(attn_out)
class RelPartialLearnableDecoderLayer(nn.Module):
    """Decoder layer for Transformer-XL attention (attn_type 0):
    RelPartialLearnableMultiHeadAttn followed by a position-wise feed-forward."""

    def __init__(self, n_head, d_model, d_head, d_inner, dropout,
                 **kwargs):
        super(RelPartialLearnableDecoderLayer, self).__init__()
        self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model,
                                                         d_head, dropout, **kwargs)
        self.pos_ff = PositionwiseFF(d_model, d_inner, dropout,
                                     pre_lnorm=kwargs.get('pre_lnorm'))

    def forward(self, dec_inp, r, r_w_bias, r_r_bias, dec_attn_mask=None, mems=None):
        attn_out = self.dec_attn(dec_inp, r, r_w_bias, r_r_bias,
                                 attn_mask=dec_attn_mask,
                                 mems=mems)
        return self.pos_ff(attn_out)
class AdaptiveEmbedding(nn.Module):
    """Adaptive input embedding.

    The vocabulary is split at ``cutoffs``; with ``div_val > 1`` each
    successive cluster uses an embedding width shrunk by ``div_val**i`` plus
    its own projection back to ``d_proj``. With ``div_val == 1`` a single
    full-size table is used (projected only when ``d_proj != d_embed``).
    Outputs are scaled by ``sqrt(d_proj)``.

    NOTE(review): the projection parameters are created with
    ``torch.Tensor(...)`` and never initialized here — presumably an external
    init routine fills them; verify before standalone use.
    """
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 sample_softmax=False):
        super(AdaptiveEmbedding, self).__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.cutoffs = cutoffs + [n_token]
        self.div_val = div_val
        self.d_proj = d_proj
        self.emb_scale = d_proj ** 0.5  # sqrt(d) scaling applied to outputs
        self.cutoff_ends = [0] + self.cutoffs
        self.emb_layers = nn.ModuleList()
        self.emb_projs = nn.ParameterList()
        if div_val == 1:
            # Single full-vocabulary table; sparse grads when sampled softmax is on.
            self.emb_layers.append(
                nn.Embedding(n_token, d_embed, sparse=sample_softmax>0)
            )
            if d_proj != d_embed:
                self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
        else:
            # One shrinking table + projection per vocabulary cluster.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
                d_emb_i = d_embed // (div_val ** i)
                self.emb_layers.append(nn.Embedding(r_idx-l_idx, d_emb_i))
                self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))
    def forward(self, inp):
        """Embed integer token ids ``inp`` -> tensor of shape ``(*inp.shape, d_proj)``."""
        if self.div_val == 1:
            embed = self.emb_layers[0](inp)
            if self.d_proj != self.d_embed:
                embed = F.linear(embed, self.emb_projs[0])
        else:
            param = next(self.parameters())
            inp_flat = inp.view(-1)
            emb_flat = torch.zeros([inp_flat.size(0), self.d_proj],
                                   dtype=param.dtype, device=param.device)
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
                # BUGFIX: squeeze only dim 1 so a cluster containing exactly
                # one token still yields a 1-D index tensor; a bare .squeeze()
                # would return a 0-dim tensor and break index_select /
                # index_copy_, which require a 1-D index.
                indices_i = mask_i.nonzero().squeeze(1)
                if indices_i.numel() == 0:
                    continue
                inp_i = inp_flat.index_select(0, indices_i) - l_idx
                emb_i = self.emb_layers[i](inp_i)
                emb_i = F.linear(emb_i, self.emb_projs[i])
                emb_flat.index_copy_(0, indices_i, emb_i)
            embed = emb_flat.view(*inp.size(), self.d_proj)
        embed.mul_(self.emb_scale)
        return embed
class MemTransformerLM(nn.Module):
def __init__(self, n_token, n_layer, n_head, d_model, d_head, d_inner,
dropout, dropatt, tie_weight=True, d_embed=None,
div_val=1, tie_projs=[False], pre_lnorm=False,
tgt_len=None, ext_len=None, mem_len=None,
cutoffs=[], adapt_inp=False,
same_length=False, attn_type=0, clamp_len=-1,
sample_softmax=-1):
super(MemTransformerLM, self).__init__()
self.n_token = n_token
d_embed = d_model if d_embed is None else d_embed
self.d_embed = d_embed
self.d_model = d_model
self.n_head = n_head
self.d_head = d_head
self.word_emb = AdaptiveEmbedding(n_token, d_embed, d_model, cutoffs,
div_val=div_val)
self.drop = nn.Dropout(dropout)
self.n_layer = n_layer
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
self.max_klen = tgt_len + ext_len + mem_len
self.attn_type = attn_type
self.layers = nn.ModuleList()
if attn_type == 0: # the default attention
for i in range(n_layer):
self.layers.append(
RelPartialLearnableDecoderLayer(
n_head, d_model, d_head, d_inner, dropout,
tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len,
dropatt=dropatt, pre_lnorm=pre_lnorm)
)
elif attn_type == 1: # learnable embeddings
for i in range(n_layer):
self.layers.append(
RelLearnableDecoderLayer(
n_head, d_model, d_head, d_inner, dropout,
tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len,
dropatt=dropatt, pre_lnorm=pre_lnorm)
)
elif attn_type in [2, 3]: # absolute embeddings
for i in range(n_layer):
self.layers.append(
DecoderLayer(
n_head, d_model, d_head, d_inner, dropout,
dropatt=dropatt, pre_lnorm=pre_lnorm)
)
self.sample_softmax = sample_softmax
# use sampled softmax
if sample_softmax > 0:
self.out_layer = nn.Linear(d_model, n_token)
if tie_weight:
self.out_layer.weight = self.word_emb.weight
self.tie_weight = tie_weight
self.sampler = LogUniformSampler(n_token, sample_softmax)
# use adaptive softmax (including standard softmax)
else:
self.crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_model,
cutoffs, div_val=div_val)
if tie_weight:
for i in range(len(self.crit.out_layers)):
self.crit.out_layers[i].weight = self.word_emb.emb_layers[i].weight
if tie_projs:
for i, tie_proj in enumerate(tie_projs):
if tie_proj and div_val == 1 and d_model != d_embed:
self.crit.out_projs[i] = self.word_emb.emb_projs[0]
elif tie_proj and div_val != 1:
self.crit.out_projs[i] = self.word_emb.emb_projs[i]
self.same_length = same_length
self.clamp_len = clamp_len
self._create_params()
def backward_compatible(self):
self.sample_softmax = -1
def _create_params(self):
if self.attn_type == 0: # default attention
self.pos_emb = PositionalEmbedding(self.d_model)
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
elif self.attn_type == 1: # learnable
self.r_emb = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.Tensor(
self.n_layer, self.n_head, self.d_head))
self.r_bias = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head))
elif self.attn_type == 2: # absolute standard
self.pos_emb = PositionalEmbedding(self.d_model)
elif self.attn_type == 3: # absolute deeper SA
self.r_emb = nn.Parameter(torch.Tensor(
self.n_layer, self.max_klen, self.n_head, self.d_head))
def reset_length(self, tgt_len, ext_len, mem_len):
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
def init_mems(self):
if self.mem_len > 0:
mems = []
param = next(self.parameters())
for i in range(self.n_layer+1):
empty = torch.empty(0, dtype=param.dtype, device=param.device)
mems.append(empty)
return mems
else:
return None
def _update_mems(self, hids, mems, qlen, mlen):
# does not deal with None
if mems is None: return None
# mems is not None
assert len(hids) == len(mems), 'len(hids) != len(mems)'
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
with torch.no_grad():
new_mems = []
end_idx = mlen + max(0, qlen - 0 - self.ext_len)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
return new_mems
def _forward(self, dec_inp, mems=None):
qlen, bsz = dec_inp.size()
word_emb = self.word_emb(dec_inp)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + qlen
if self.same_length:
all_ones = word_emb.new_ones(qlen, klen)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (torch.triu(all_ones, 1+mlen)
+ torch.tril(all_ones, -mask_shift_len)).byte()[:, :, None] # -1
else:
dec_attn_mask = torch.triu(
word_emb.new_ones(qlen, klen), diagonal=1+mlen).byte()[:,:,None]
hids = []
if self.attn_type == 0: # default
pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb)
pos_emb = self.drop(pos_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, pos_emb, self.r_w_bias,
self.r_r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i)
hids.append(core_out)
elif self.attn_type == 1: # learnable
core_out = self.drop(word_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
if self.clamp_len > 0:
r_emb = self.r_emb[i][-self.clamp_len :]
r_bias = self.r_bias[i][-self.clamp_len :]
else:
r_emb, r_bias = self.r_emb[i], self.r_bias[i]
mems_i = None if mems is None else mems[i]
core_out = layer(core_out, r_emb, self.r_w_bias[i],
r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i)
hids.append(core_out)
elif self.attn_type == 2: # absolute
pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device,
dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb + pos_emb[-qlen:])
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
if mems_i is not None and i == 0:
mems_i += pos_emb[:mlen]
core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
mems=mems_i)
hids.append(core_out)
elif self.attn_type == 3:
core_out = self.drop(word_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
if mems_i is not None and mlen > 0:
cur_emb = self.r_emb[i][:-qlen]
cur_size = cur_emb.size(0)
if cur_size < mlen:
cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1)
cur_emb = torch.cat([cur_emb_pad, cur_emb], 0)
else:
cur_emb = cur_emb[-mlen:]
mems_i += cur_emb.view(mlen, 1, -1)
core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1)
core_out = layer(core_out, dec_attn_mask=dec_attn_mask,
mems=mems_i)
hids.append(core_out)
core_out = self.drop(core_out)
new_mems = self._update_mems(hids, mems, mlen, qlen)
return core_out, new_mems
def forward(self, data, target, *mems):
    """Compute per-token LM loss and the updated memory.

    data, target: LongTensors; target.size(0) defines tgt_len and the loss is
        reshaped to (tgt_len, -1) — presumably (seq_len, batch) layout, as the
        training script wraps the model in DataParallel with dim=1 (TODO confirm).
    *mems: memory tensors from the previous call (may be empty on the first call).
    Returns [loss] or [loss] + new_mems.
    """
    # nn.DataParallel does not allow size(0) tensors to be broadcasted.
    # So, have to initialize size(0) mems inside the model forward.
    # Moreover, have to return new_mems to allow nn.DataParallel to piece
    # them together.
    if not mems: mems = self.init_mems()
    tgt_len = target.size(0)
    hidden, new_mems = self._forward(data, mems=mems)
    # Only the final tgt_len positions are scored; earlier positions are context.
    pred_hid = hidden[-tgt_len:]
    if self.sample_softmax > 0 and self.training:
        # Sampled-softmax path (training only); requires tied in/out weights.
        assert self.tie_weight
        logit = sample_logits(self.word_emb,
            self.out_layer.bias, target, pred_hid, self.sampler)
        # Column 0 of the sampled logits holds the true-target score.
        loss = -F.log_softmax(logit, -1)[:, :, 0]
    else:
        # Full (possibly adaptive) softmax criterion; crit returns per-token loss.
        loss = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target.view(-1))
        loss = loss.view(tgt_len, -1)
    if new_mems is None:
        return [loss]
    else:
        return [loss] + new_mems
if __name__ == '__main__':
    # Smoke test: build small MemTransformerLM variants and run a few batches
    # of random data through them to exercise the forward pass and memory flow.
    import argparse

    parser = argparse.ArgumentParser(description='unit test')

    parser.add_argument('--n_layer', type=int, default=4, help='')
    parser.add_argument('--n_rel_layer', type=int, default=4, help='')
    parser.add_argument('--n_head', type=int, default=2, help='')
    parser.add_argument('--d_head', type=int, default=2, help='')
    parser.add_argument('--d_model', type=int, default=200, help='')
    parser.add_argument('--d_embed', type=int, default=200, help='')
    parser.add_argument('--d_inner', type=int, default=200, help='')
    parser.add_argument('--dropout', type=float, default=0.0, help='')
    parser.add_argument('--cuda', action='store_true', help='')
    parser.add_argument('--seed', type=int, default=1111, help='')
    parser.add_argument('--multi_gpu', action='store_true', help='')

    args = parser.parse_args()

    device = torch.device("cuda" if args.cuda else "cpu")

    B = 4
    tgt_len, mem_len, ext_len = 36, 36, 0
    data_len = tgt_len * 20
    args.n_token = 10000

    import data_utils

    # Random token stream, batched by LMOrderedIterator.
    data = torch.LongTensor(data_len*B).random_(0, args.n_token).to(device)
    diter = data_utils.LMOrderedIterator(data, B, tgt_len, device=device, ext_len=ext_len)

    cutoffs = [args.n_token // 2]
    tie_projs = [False] + [True] * len(cutoffs)

    # Exercise both div_val settings and both tied/projected embedding sizes.
    for div_val in [1, 2]:
        for d_embed in [200, 100]:
            model = MemTransformerLM(args.n_token, args.n_layer, args.n_head,
                args.d_model, args.d_head, args.d_inner, args.dropout,
                dropatt=args.dropout, tie_weight=True,
                d_embed=d_embed, div_val=div_val,
                tie_projs=tie_projs, pre_lnorm=True,
                tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len,
                cutoffs=cutoffs, attn_type=0).to(device)

            print(sum(p.numel() for p in model.parameters()))

            mems = tuple()
            for idx, (inp, tgt, seqlen) in enumerate(diter):
                print('batch {}'.format(idx))
                out = model(inp, tgt, *mems)
                # out = [loss, *new_mems]; feed the memory back in.
                mems = out[1:]
|
import os, sys
import glob
from collections import Counter, OrderedDict
import numpy as np
import torch
from utils.vocabulary import Vocab
class LMOrderedIterator(object):
    """Iterate over one strictly ordered token stream in contiguous BPTT chunks."""

    def __init__(self, data, bsz, bptt, device='cpu', ext_len=None):
        """
        data -- LongTensor -- the LongTensor is strictly ordered
        """
        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = 0 if ext_len is None else ext_len
        self.device = device

        # Tokens per batch column; trim the remainder so all columns are equal.
        self.n_step = data.size(0) // bsz
        trimmed = data.narrow(0, 0, self.n_step * bsz)
        # Layout (n_step, bsz): column b is the b-th contiguous shard of the stream.
        self.data = trimmed.view(bsz, -1).t().contiguous().to(device)

        # Number of mini-batches per epoch (ceil division).
        self.n_batch = (self.n_step + self.bptt - 1) // self.bptt

    def get_batch(self, i, bptt=None):
        """Return (data, target, seq_len) for the chunk starting at row i."""
        bptt = self.bptt if bptt is None else bptt
        # The last row has no target, hence the -1.
        seq_len = min(bptt, self.data.size(0) - 1 - i)

        lo = max(0, i - self.ext_len)        # extended context on the left
        hi = i + seq_len
        inputs = self.data[lo:hi]
        labels = self.data[i + 1:i + 1 + seq_len]

        return inputs, labels, seq_len

    def get_fixlen_iter(self, start=0):
        """Yield fixed-length batches covering the whole stream."""
        for i in range(start, self.data.size(0) - 1, self.bptt):
            yield self.get_batch(i)

    def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
        """Yield batches whose length is drawn from a clipped normal around bptt."""
        max_len = self.bptt + max_deviation * std
        cur = start
        while True:
            # Occasionally halve the base length to vary context boundaries.
            base = self.bptt if np.random.random() < 0.95 else self.bptt / 2.
            span = min(max_len, max(min_len, int(np.random.normal(base, std))))
            batch = self.get_batch(cur, span)
            cur += batch[2]
            yield batch
            if cur >= self.data.size(0) - 2:
                break

    def __iter__(self):
        return self.get_fixlen_iter()
class LMShuffledIterator(object):
    """Batch iterator over a set of independent sentences (no global order).

    Each of the bsz batch columns consumes sentences from a shared stream,
    packing tokens densely up to bptt per step.
    """

    def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False):
        """
        data -- list[LongTensor] -- there is no order among the LongTensors
        """
        self.data = data

        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = ext_len if ext_len is not None else 0

        self.device = device
        self.shuffle = shuffle

    def get_sent_stream(self):
        """Yield sentences, optionally in a fresh random order per epoch."""
        # index iterator
        epoch_indices = np.random.permutation(len(self.data)) if self.shuffle \
            else np.array(range(len(self.data)))

        # sentence iterator
        for idx in epoch_indices:
            yield self.data[idx]

    def stream_iterator(self, sent_stream):
        """Pack sentences from sent_stream into (data, target, bptt) batches.

        Stops (without yielding a partial batch) once the stream is exhausted.
        """
        # streams for each data in the batch
        streams = [None] * self.bsz

        data = torch.LongTensor(self.bptt, self.bsz)
        target = torch.LongTensor(self.bptt, self.bsz)

        # Number of leading rows of `data` carried over from the previous batch
        # (extended context); 0 until the first batch has been yielded.
        n_retain = 0

        while True:
            # data   : [n_retain+bptt x bsz]
            # target : [bptt x bsz]
            # -1 marks positions left unfilled (padding sentinel).
            data[n_retain:].fill_(-1)
            target.fill_(-1)

            valid_batch = True

            for i in range(self.bsz):
                n_filled = 0
                try:
                    while n_filled < self.bptt:
                        # <=1 tokens left cannot produce an (input, target) pair.
                        if streams[i] is None or len(streams[i]) <= 1:
                            streams[i] = next(sent_stream)
                        # number of new tokens to fill in
                        n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
                        # first n_retain tokens are retained from last batch
                        data[n_retain+n_filled:n_retain+n_filled+n_new, i] = \
                            streams[i][:n_new]
                        target[n_filled:n_filled+n_new, i] = \
                            streams[i][1:n_new+1]
                        streams[i] = streams[i][n_new:]
                        n_filled += n_new
                except StopIteration:
                    # Stream exhausted mid-batch: drop the incomplete batch.
                    valid_batch = False
                    break

            if not valid_batch:
                return

            data = data.to(self.device)
            target = target.to(self.device)

            yield data, target, self.bptt

            # Carry the tail of `data` over as extended context for the next batch.
            n_retain = min(data.size(0), self.ext_len)
            if n_retain > 0:
                data[:n_retain] = data[-n_retain:]
            data.resize_(n_retain + self.bptt, data.size(1))

    def __iter__(self):
        # sent_stream is an iterator
        sent_stream = self.get_sent_stream()

        for batch in self.stream_iterator(sent_stream):
            yield batch
class LMMultiFileIterator(LMShuffledIterator):
    """LMShuffledIterator variant that streams sentences file-by-file from disk."""

    def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None,
                 shuffle=False):
        self.paths = paths
        self.vocab = vocab

        self.bsz = bsz
        self.bptt = bptt
        self.ext_len = 0 if ext_len is None else ext_len

        self.device = device
        self.shuffle = shuffle

    def get_sent_stream(self, path):
        """Encode one file with the vocab and return an iterator over its sentences."""
        sents = self.vocab.encode_file(path, add_double_eos=True)
        if self.shuffle:
            np.random.shuffle(sents)
        return iter(sents)

    def __iter__(self):
        if self.shuffle:
            # In-place shuffle of the file order for this epoch.
            np.random.shuffle(self.paths)

        for path in self.paths:
            # One sentence stream per file, packed by the inherited stream_iterator.
            yield from self.stream_iterator(self.get_sent_stream(path))
class Corpus(object):
    """Dataset wrapper: builds the vocabulary and encodes train/valid/test splits.

    Known datasets: ptb, wt2, wt103, enwik8, text8, lm1b. For lm1b the training
    set is kept as a list of shard file paths and encoded lazily per file.
    """

    def __init__(self, path, dataset, *args, **kwargs):
        self.dataset = dataset
        # Extra args/kwargs are forwarded to Vocab (special tokens, lower_case, ...).
        self.vocab = Vocab(*args, **kwargs)

        if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
            self.vocab.count_file(os.path.join(path, 'train.txt'))
            self.vocab.count_file(os.path.join(path, 'valid.txt'))
            self.vocab.count_file(os.path.join(path, 'test.txt'))
        elif self.dataset == 'wt103':
            # Vocabulary from the training split only.
            self.vocab.count_file(os.path.join(path, 'train.txt'))
        elif self.dataset == 'lm1b':
            train_path_pattern = os.path.join(
                path, '1-billion-word-language-modeling-benchmark-r13output',
                'training-monolingual.tokenized.shuffled', 'news.en-*')
            train_paths = glob.glob(train_path_pattern)
            # the vocab will load from file when build_vocab() is called

        self.vocab.build_vocab()

        if self.dataset in ['ptb', 'wt2', 'wt103']:
            self.train = self.vocab.encode_file(
                os.path.join(path, 'train.txt'), ordered=True)
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=True)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=True)
        elif self.dataset in ['enwik8', 'text8']:
            # Character-level datasets: no <eos> token appended.
            self.train = self.vocab.encode_file(
                os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
        elif self.dataset == 'lm1b':
            # Training shards stay as paths; eval splits are sentence lists.
            self.train = train_paths
            self.valid = self.vocab.encode_file(
                os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
            self.test = self.vocab.encode_file(
                os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)

    def get_iterator(self, split, *args, **kwargs):
        """Return a batch iterator for 'train', 'valid' or 'test'.

        Raises ValueError for an unknown split or dataset (previously this fell
        through and raised a confusing UnboundLocalError on `data_iter`).
        """
        if split == 'train':
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
            elif self.dataset == 'lm1b':
                kwargs['shuffle'] = True
                data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
            else:
                raise ValueError('unsupported dataset: {}'.format(self.dataset))
        elif split in ['valid', 'test']:
            data = self.valid if split == 'valid' else self.test
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
                data_iter = LMOrderedIterator(data, *args, **kwargs)
            elif self.dataset == 'lm1b':
                data_iter = LMShuffledIterator(data, *args, **kwargs)
            else:
                raise ValueError('unsupported dataset: {}'.format(self.dataset))
        else:
            raise ValueError("split must be 'train', 'valid' or 'test', got {!r}".format(split))

        return data_iter
def get_lm_corpus(datadir, dataset):
    """Return the Corpus for `dataset`, using `<datadir>/cache.pt` when present.

    On a cache miss the corpus is built from the raw files and then cached.
    """
    cache_path = os.path.join(datadir, 'cache.pt')
    if os.path.exists(cache_path):
        print('Loading cached dataset...')
        return torch.load(cache_path)

    print('Producing dataset {}...'.format(dataset))
    # Per-dataset Vocab options; enwik8/text8 (and unknown names) use defaults.
    vocab_presets = {
        'wt103': {'special': ['<eos>'], 'lower_case': False},
        'wt2': {'special': ['<eos>'], 'lower_case': False},
        'ptb': {'special': ['<eos>'], 'lower_case': True},
        'lm1b': {'special': [], 'lower_case': False,
                 'vocab_file': os.path.join(datadir, '1b_word_vocab.txt')},
    }
    kwargs = vocab_presets.get(dataset, {})

    corpus = Corpus(datadir, dataset, **kwargs)
    torch.save(corpus, cache_path)

    return corpus
if __name__ == '__main__':
    # Smoke test: build (or load) the corpus and report the vocabulary size.
    import argparse

    parser = argparse.ArgumentParser(description='unit test')
    parser.add_argument('--datadir', type=str, default='../data/text8',
                        help='location of the data corpus')
    parser.add_argument('--dataset', type=str, default='text8',
                        choices=['ptb', 'wt2', 'wt103', 'lm1b', 'enwik8', 'text8'],
                        help='dataset name')
    args = parser.parse_args()

    corpus = get_lm_corpus(args.datadir, args.dataset)
    print('Vocab size : {}'.format(len(corpus.vocab.idx2sym)))
|
# coding: utf-8
import argparse
import time
import math
import os, sys
import itertools
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from adan import Adan
from data_utils import get_lm_corpus
from mem_transformer import MemTransformerLM
from utils.exp_utils import create_exp_dir
from utils.data_parallel import BalancedDataParallel
# Command-line interface for training. Fixes to the original help strings:
# '--wd' said 'weight decayss', '--warmup_step' had a copy-pasted
# 'upper epoch limit', and '--div_val' misspelled 'dividend'/'adaptive'.
parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')

# Data / dataset
parser.add_argument('--data', type=str, default='../data/wikitext-103',
                    help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='wt103',
                    choices=['wt103', 'lm1b', 'enwik8', 'text8'],
                    help='dataset name')

# Model architecture
parser.add_argument('--n_layer', type=int, default=12,
                    help='number of total layers')
parser.add_argument('--n_head', type=int, default=10,
                    help='number of heads')
parser.add_argument('--d_head', type=int, default=50,
                    help='head dimension')
parser.add_argument('--d_embed', type=int, default=-1,
                    help='embedding dimension')
parser.add_argument('--d_model', type=int, default=500,
                    help='model dimension')
parser.add_argument('--d_inner', type=int, default=1000,
                    help='inner dimension in FF')
parser.add_argument('--dropout', type=float, default=0.0,
                    help='global dropout rate')
parser.add_argument('--dropatt', type=float, default=0.0,
                    help='attention probability dropout rate')

# Initialization
parser.add_argument('--init', default='normal', type=str,
                    help='parameter initializer to use.')
parser.add_argument('--emb_init', default='normal', type=str,
                    help='parameter initializer to use.')
parser.add_argument('--init_range', type=float, default=0.1,
                    help='parameters initialized by U(-init_range, init_range)')
parser.add_argument('--emb_init_range', type=float, default=0.01,
                    help='parameters initialized by U(-init_range, init_range)')
parser.add_argument('--init_std', type=float, default=0.02,
                    help='parameters initialized by N(0, init_std)')
parser.add_argument('--proj_init_std', type=float, default=0.01,
                    help='parameters initialized by N(0, init_std)')

# Optimization
parser.add_argument('--optim', default='adam', type=str,
                    choices=['adam', 'sgd', 'adagrad', 'adan'],
                    help='optimizer to use.')
parser.add_argument('--lr', type=float, default=0.00025,
                    help='initial learning rate (0.00025|5 for adam|sgd)')
parser.add_argument('--wd', type=float, default=0.02,
                    help='weight decay')
parser.add_argument('--mom', type=float, default=0.0,
                    help='momentum for sgd')
parser.add_argument('--scheduler', default='cosine', type=str,
                    choices=['cosine', 'inv_sqrt', 'dev_perf', 'constant'],
                    help='lr scheduler to use.')
parser.add_argument('--warmup_step', type=int, default=0,
                    help='number of steps for linear learning-rate warmup')
parser.add_argument('--decay_rate', type=float, default=0.5,
                    help='decay factor when ReduceLROnPlateau is used')
parser.add_argument('--lr_min', type=float, default=0.0,
                    help='minimum learning rate during annealing')
parser.add_argument('--clip', type=float, default=0.25,
                    help='gradient clipping')
parser.add_argument('--clip_nonemb', action='store_true',
                    help='only clip the gradient of non-embedding params')
parser.add_argument('--max_step', type=int, default=100000,
                    help='upper epoch limit')

# Batching / sequence lengths
parser.add_argument('--batch_size', type=int, default=60,
                    help='batch size')
parser.add_argument('--batch_chunk', type=int, default=1,
                    help='split batch into chunks to save memory')
parser.add_argument('--tgt_len', type=int, default=70,
                    help='number of tokens to predict')
parser.add_argument('--eval_tgt_len', type=int, default=50,
                    help='number of tokens to predict for evaluation')
parser.add_argument('--ext_len', type=int, default=0,
                    help='length of the extended context')
parser.add_argument('--mem_len', type=int, default=0,
                    help='length of the retained previous heads')

# Misc model/runtime options
parser.add_argument('--not_tied', action='store_true',
                    help='do not tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--cuda', action='store_true',
                    help='use CUDA')
parser.add_argument('--adaptive', action='store_true',
                    help='use adaptive softmax')
parser.add_argument('--div_val', type=int, default=1,
                    help='dividend value for adaptive input and softmax')
parser.add_argument('--pre_lnorm', action='store_true',
                    help='apply LayerNorm to the input instead of the output')
parser.add_argument('--varlen', action='store_true',
                    help='use variable length')
parser.add_argument('--multi_gpu', action='store_true',
                    help='use multiple GPU')
parser.add_argument('--log-interval', type=int, default=200,
                    help='report interval')
parser.add_argument('--eval-interval', type=int, default=4000,
                    help='evaluation interval')
parser.add_argument('--work_dir', default='LM-TFM', type=str,
                    help='experiment directory.')
parser.add_argument('--restart', action='store_true',
                    help='restart training from the saved checkpoint')
parser.add_argument('--restart_dir', type=str, default='',
                    help='restart dir')
parser.add_argument('--debug', action='store_true',
                    help='run in debug mode (do not create exp dir)')
parser.add_argument('--same_length', action='store_true',
                    help='use the same attn length for all tokens')
parser.add_argument('--attn_type', type=int, default=0,
                    help='attention type. 0 for ours, 1 for Shaw et al,'
                    '2 for Vaswani et al, 3 for Al Rfou et al.')
parser.add_argument('--clamp_len', type=int, default=-1,
                    help='use the same pos embeddings after clamp_len')
parser.add_argument('--eta_min', type=float, default=0.0,
                    help='min learning rate for cosine scheduler')
parser.add_argument('--gpu0_bsz', type=int, default=-1,
                    help='batch size on gpu 0')
parser.add_argument('--max_eval_steps', type=int, default=-1,
                    help='max eval steps')
parser.add_argument('--sample_softmax', type=int, default=-1,
                    help='number of samples in sampled softmax')
parser.add_argument('--patience', type=int, default=0,
                    help='patience')
parser.add_argument('--finetune_v2', action='store_true',
                    help='finetune v2')
parser.add_argument('--finetune_v3', action='store_true',
                    help='finetune v3')

# fp16 / loss scaling (requires apex)
parser.add_argument('--fp16', action='store_true',
                    help='Run in pseudo-fp16 mode (fp16 storage fp32 math).')
parser.add_argument('--static-loss-scale', type=float, default=1,
                    help='Static loss scale, positive power of 2 values can '
                    'improve fp16 convergence.')
parser.add_argument('--dynamic-loss-scale', action='store_true',
                    help='Use dynamic loss scaling. If supplied, this argument'
                    ' supersedes --static-loss-scale.')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                    help='Optimizer Betas (default: None, use opt default)')
args = parser.parse_args()
args.tied = not args.not_tied

# d_embed defaults to d_model when left at the -1 sentinel.
if args.d_embed < 0:
    args.d_embed = args.d_model

assert args.ext_len >= 0, 'extended context length must be non-negative'
assert args.batch_size % args.batch_chunk == 0

# Timestamped experiment directory; create_exp_dir also returns the logger.
args.work_dir = '{}-{}'.format(args.work_dir, args.dataset)
args.work_dir = os.path.join(args.work_dir, time.strftime('%Y%m%d-%H%M%S'))
logging = create_exp_dir(args.work_dir,
    scripts_to_save=['train.py', 'mem_transformer.py'], debug=args.debug)

# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print('WARNING: You have a CUDA device, so you should probably run with --cuda')
    else:
        torch.cuda.manual_seed_all(args.seed)

# Validate `--fp16` option
if args.fp16:
    if not args.cuda:
        print('WARNING: --fp16 requires --cuda, ignoring --fp16 option')
        args.fp16 = False
    else:
        try:
            from apex.fp16_utils import FP16_Optimizer
        # NOTE(review): bare except also swallows non-ImportError failures
        # inside apex; consider `except ImportError`.
        except:
            print('WARNING: apex not installed, ignoring --fp16 option')
            args.fp16 = False

device = torch.device('cuda' if args.cuda else 'cpu')

###############################################################################
# Load data
###############################################################################
corpus = get_lm_corpus(args.data, args.dataset)
ntokens = len(corpus.vocab)
args.n_token = ntokens

eval_batch_size = 10
tr_iter = corpus.get_iterator('train', args.batch_size, args.tgt_len,
    device=device, ext_len=args.ext_len)
va_iter = corpus.get_iterator('valid', eval_batch_size, args.eval_tgt_len,
    device=device, ext_len=args.ext_len)
te_iter = corpus.get_iterator('test', eval_batch_size, args.eval_tgt_len,
    device=device, ext_len=args.ext_len)

# adaptive softmax / embedding
# tie_projs[0] refers to the head cluster (never tied).
cutoffs, tie_projs = [], [False]
if args.adaptive:
    assert args.dataset in ['wt103', 'lm1b']
    if args.dataset == 'wt103':
        cutoffs = [20000, 40000, 200000]
        tie_projs += [True] * len(cutoffs)
    elif args.dataset == 'lm1b':
        cutoffs = [60000, 100000, 640000]
        tie_projs += [False] * len(cutoffs)
###############################################################################
# Build the model
###############################################################################
def init_weight(weight):
    """Initialize `weight` in place using the scheme selected by args.init."""
    scheme = args.init
    if scheme == 'uniform':
        nn.init.uniform_(weight, -args.init_range, args.init_range)
    elif scheme == 'normal':
        nn.init.normal_(weight, 0.0, args.init_std)
def init_bias(bias):
    """Zero-initialize a bias tensor in place."""
    nn.init.constant_(bias, 0.0)
def weights_init(m):
    """Initialize module `m` by class name; intended for model.apply(weights_init).

    Note: the elif order matters — 'AdaptiveEmbedding' must be tested before
    the generic 'Embedding' substring match.
    """
    classname = m.__class__.__name__
    if 'Linear' in classname:
        if getattr(m, 'weight', None) is not None:
            init_weight(m.weight)
        if getattr(m, 'bias', None) is not None:
            init_bias(m.bias)
    elif 'AdaptiveEmbedding' in classname:
        if hasattr(m, 'emb_projs'):
            for proj in m.emb_projs:
                if proj is not None:
                    nn.init.normal_(proj, 0.0, args.proj_init_std)
    elif 'Embedding' in classname:
        if hasattr(m, 'weight'):
            init_weight(m.weight)
    elif 'ProjectedAdaptiveLogSoftmax' in classname:
        if getattr(m, 'cluster_weight', None) is not None:
            init_weight(m.cluster_weight)
        if getattr(m, 'cluster_bias', None) is not None:
            init_bias(m.cluster_bias)
        if hasattr(m, 'out_projs'):
            for proj in m.out_projs:
                if proj is not None:
                    nn.init.normal_(proj, 0.0, args.proj_init_std)
    elif 'LayerNorm' in classname:
        # LayerNorm gains are centered at 1, not 0.
        if hasattr(m, 'weight'):
            nn.init.normal_(m.weight, 1.0, args.init_std)
        if getattr(m, 'bias', None) is not None:
            init_bias(m.bias)
    elif 'TransformerLM' in classname:
        # Top-level relative-position parameters of MemTransformerLM.
        if hasattr(m, 'r_emb'):
            init_weight(m.r_emb)
        if hasattr(m, 'r_w_bias'):
            init_weight(m.r_w_bias)
        if hasattr(m, 'r_r_bias'):
            init_weight(m.r_r_bias)
        if hasattr(m, 'r_bias'):
            init_bias(m.r_bias)
def update_dropout(m):
    """Overwrite the dropout probability of Dropout modules with args.dropout."""
    if 'Dropout' in m.__class__.__name__ and hasattr(m, 'p'):
        m.p = args.dropout
def update_dropatt(m):
    """Set the attention-dropout probability to args.dropatt where present."""
    if hasattr(m, 'dropatt'):
        m.dropatt.p = args.dropatt
# Build (or reload) the model, wrap for data parallelism, and construct the
# optimizer and learning-rate scheduler.
if args.restart:
    # Warm-start from a previously saved full-model checkpoint.
    with open(os.path.join(args.restart_dir, 'model.pt'), 'rb') as f:
        model = torch.load(f)
    if not args.fp16:
        model = model.float()
    # The checkpoint may carry different dropout settings than this run.
    model.apply(update_dropout)
    model.apply(update_dropatt)
else:
    model = MemTransformerLM(ntokens, args.n_layer, args.n_head, args.d_model,
        args.d_head, args.d_inner, args.dropout, args.dropatt,
        tie_weight=args.tied, d_embed=args.d_embed, div_val=args.div_val,
        tie_projs=tie_projs, pre_lnorm=args.pre_lnorm, tgt_len=args.tgt_len,
        ext_len=args.ext_len, mem_len=args.mem_len, cutoffs=cutoffs,
        same_length=args.same_length, attn_type=args.attn_type,
        clamp_len=args.clamp_len, sample_softmax=args.sample_softmax)
    model.apply(weights_init)
    model.word_emb.apply(weights_init) # ensure embedding init is not overridden by out_layer in case of weight sharing
args.n_all_param = sum([p.nelement() for p in model.parameters()])
args.n_nonemb_param = sum([p.nelement() for p in model.layers.parameters()])

if args.fp16:
    model = model.half()

if args.multi_gpu:
    model = model.to(device)
    if args.gpu0_bsz >= 0:
        # BalancedDataParallel lets GPU 0 take a smaller slice of each batch.
        para_model = BalancedDataParallel(args.gpu0_bsz // args.batch_chunk,
                                          model, dim=1).to(device)
    else:
        # dim=1: batches are laid out (seq_len, batch), so split on dim 1.
        para_model = nn.DataParallel(model, dim=1).to(device)
else:
    para_model = model.to(device)

#### optimizer
# When sampled softmax is on, embedding-shaped parameters are optimized
# separately (optimizer_sparse) from the dense parameters.
if args.optim.lower() == 'sgd':
    if args.sample_softmax > 0:
        dense_params, sparse_params = [], []
        for param in model.parameters():
            if param.size() == model.word_emb.weight.size():
                sparse_params.append(param)
            else:
                dense_params.append(param)
        optimizer_sparse = optim.SGD(sparse_params, lr=args.lr * 2)
        optimizer = optim.SGD(dense_params, lr=args.lr, momentum=args.mom)
    else:
        optimizer = optim.SGD(model.parameters(), lr=args.lr,
            momentum=args.mom)
elif args.optim.lower() == 'adam':
    if args.sample_softmax > 0:
        dense_params, sparse_params = [], []
        for param in model.parameters():
            if param.size() == model.word_emb.weight.size():
                sparse_params.append(param)
            else:
                dense_params.append(param)
        optimizer_sparse = optim.SparseAdam(sparse_params, lr=args.lr)
        optimizer = optim.Adam(dense_params, lr=args.lr)
    else:
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
elif args.optim.lower() == 'adan':
    if args.sample_softmax > 0:
        dense_params, sparse_params = [], []
        for param in model.parameters():
            if param.size() == model.word_emb.weight.size():
                sparse_params.append(param)
            else:
                dense_params.append(param)
        optimizer_sparse = Adan(sparse_params,betas=args.opt_betas, lr=args.lr, weight_decay= args.wd)
        optimizer = Adan(dense_params, lr=args.lr,betas=args.opt_betas, weight_decay= args.wd)
    else:
        optimizer = Adan(model.parameters(), lr=args.lr, betas=args.opt_betas, weight_decay= args.wd)
elif args.optim.lower() == 'adagrad':
    optimizer = optim.Adagrad(model.parameters(), lr=args.lr)

#### scheduler
if args.scheduler == 'cosine':
    # here we do not set eta_min to lr_min to be backward compatible
    # because in previous versions eta_min is default to 0
    # rather than the default value of lr_min 1e-6
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
        args.max_step, eta_min=args.eta_min) # should use eta_min arg
    if args.sample_softmax > 0:
        scheduler_sparse = optim.lr_scheduler.CosineAnnealingLR(optimizer_sparse,
            args.max_step, eta_min=args.eta_min) # should use eta_min arg
elif args.scheduler == 'inv_sqrt':
    # originally used for Transformer (in Attention is all you need)
    def lr_lambda(step):
        # return a multiplier instead of a learning rate
        if step == 0 and args.warmup_step == 0:
            return 1.
        else:
            return 1. / (step ** 0.5) if step > args.warmup_step \
                else step / (args.warmup_step ** 1.5)
    scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
elif args.scheduler == 'dev_perf':
    # Anneal on validation loss plateaus.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
        factor=args.decay_rate, patience=args.patience, min_lr=args.lr_min)
    if args.sample_softmax > 0:
        scheduler_sparse = optim.lr_scheduler.ReduceLROnPlateau(optimizer_sparse,
            factor=args.decay_rate, patience=args.patience, min_lr=args.lr_min)
elif args.scheduler == 'constant':
    pass

if args.cuda and args.fp16:
    # If args.dynamic_loss_scale is False, static_loss_scale will be used.
    # If args.dynamic_loss_scale is True, it will take precedence over static_loss_scale.
    optimizer = FP16_Optimizer(optimizer,
                               static_loss_scale = args.static_loss_scale,
                               dynamic_loss_scale = args.dynamic_loss_scale,
                               dynamic_loss_args = {'init_scale': 2 ** 16})

if args.restart:
    # Also restore optimizer state if it was checkpointed alongside the model.
    if os.path.exists(os.path.join(args.restart_dir, 'optimizer.pt')):
        with open(os.path.join(args.restart_dir, 'optimizer.pt'), 'rb') as f:
            opt_state_dict = torch.load(f)
            optimizer.load_state_dict(opt_state_dict)
    else:
        print('Optimizer was not saved. Start from scratch.')

# Log the full configuration and parameter counts.
logging('=' * 100)
for k, v in args.__dict__.items():
    logging('    - {} : {}'.format(k, v))
logging('=' * 100)
logging('#params = {}'.format(args.n_all_param))
logging('#non emb params = {}'.format(args.n_nonemb_param))
###############################################################################
# Training code
###############################################################################
def evaluate(eval_iter):
    """Return the mean per-token loss of the global `model` over `eval_iter`."""
    # Evaluation mode disables dropout.
    model.eval()

    # Keep the total attention span constant when switching to eval_tgt_len:
    # a memory-less model gets the slack via ext_len, otherwise via mem_len.
    slack = args.tgt_len - args.eval_tgt_len
    if args.mem_len == 0:
        model.reset_length(args.eval_tgt_len, args.ext_len + slack, args.mem_len)
    else:
        model.reset_length(args.eval_tgt_len, args.ext_len, args.mem_len + slack)

    # Accumulate token-weighted loss.
    total_len = 0
    total_loss = 0.
    with torch.no_grad():
        mems = tuple()
        for step, batch in enumerate(eval_iter):
            if 0 < args.max_eval_steps <= step:
                break
            data, target, seq_len = batch
            ret = model(data, target, *mems)
            loss, mems = ret[0], ret[1:]
            total_loss += seq_len * loss.mean().float().item()
            total_len += seq_len

    # Restore the training-time attention geometry.
    model.reset_length(args.tgt_len, args.ext_len, args.mem_len)
    model.train()

    return total_loss / total_len
def train():
    """Run one epoch of training, updating global step/loss/bookkeeping state."""
    # Turn on training mode which enables dropout.
    global train_step, train_loss, best_val_loss, eval_start_time, log_start_time
    model.train()
    # With batch chunking, each chunk keeps its own memory stream.
    if args.batch_chunk > 1:
        mems = [tuple() for _ in range(args.batch_chunk)]
    else:
        mems = tuple()
    train_iter = tr_iter.get_varlen_iter() if args.varlen else tr_iter
    for batch, (data, target, seq_len) in enumerate(train_iter):
        model.zero_grad()
        if args.batch_chunk > 1:
            # Split along the batch dimension (dim=1) to save memory;
            # gradients are accumulated across chunks before one step.
            data_chunks = torch.chunk(data, args.batch_chunk, 1)
            target_chunks = torch.chunk(target, args.batch_chunk, 1)
            for i in range(args.batch_chunk):
                data_i = data_chunks[i].contiguous()
                target_i = target_chunks[i].contiguous()
                ret = para_model(data_i, target_i, *mems[i])
                loss, mems[i] = ret[0], ret[1:]
                # Scale so the summed chunk gradients equal the full-batch mean.
                loss = loss.float().mean().type_as(loss) / args.batch_chunk
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                train_loss += loss.float().item()
        else:
            ret = para_model(data, target, *mems)
            loss, mems = ret[0], ret[1:]
            loss = loss.float().mean().type_as(loss)
            if args.fp16:
                optimizer.backward(loss)
            else:
                loss.backward()
            train_loss += loss.float().item()

        if args.fp16:
            # FP16_Optimizer clips the fp32 master copy of the gradients.
            optimizer.clip_master_grads(args.clip)
        else:
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)

        optimizer.step()
        if args.sample_softmax > 0:
            optimizer_sparse.step()

        # step-wise learning rate annealing
        train_step += 1
        if args.scheduler in ['cosine', 'constant', 'dev_perf']:
            # linear warmup stage
            if train_step < args.warmup_step:
                curr_lr = args.lr * train_step / args.warmup_step
                optimizer.param_groups[0]['lr'] = curr_lr
                if args.sample_softmax > 0:
                    # Sparse optimizer runs at twice the base learning rate.
                    optimizer_sparse.param_groups[0]['lr'] = curr_lr * 2
            else:
                if args.scheduler == 'cosine':
                    scheduler.step(train_step)
                    if args.sample_softmax > 0:
                        scheduler_sparse.step(train_step)
        elif args.scheduler == 'inv_sqrt':
            scheduler.step(train_step)

        if train_step % args.log_interval == 0:
            cur_loss = train_loss / args.log_interval
            elapsed = time.time() - log_start_time
            log_str = '| epoch {:3d} step {:>8d} | {:>6d} batches | lr {:.3g} ' \
                      '| ms/batch {:5.2f} | loss {:5.2f}'.format(
                epoch, train_step, batch+1, optimizer.param_groups[0]['lr'],
                elapsed * 1000 / args.log_interval, cur_loss)
            # Character-level datasets report bits-per-character, others perplexity.
            if args.dataset in ['enwik8', 'text8']:
                log_str += ' | bpc {:9.5f}'.format(cur_loss / math.log(2))
            else:
                log_str += ' | ppl {:9.3f}'.format(math.exp(cur_loss))
            logging(log_str)
            train_loss = 0
            log_start_time = time.time()

        if train_step % args.eval_interval == 0:
            val_loss = evaluate(va_iter)
            logging('-' * 100)
            log_str = '| Eval {:3d} at step {:>8d} | time: {:5.2f}s ' \
                      '| valid loss {:5.2f}'.format(
                train_step // args.eval_interval, train_step,
                (time.time() - eval_start_time), val_loss)
            if args.dataset in ['enwik8', 'text8']:
                log_str += ' | bpc {:9.5f}'.format(val_loss / math.log(2))
            else:
                log_str += ' | valid ppl {:9.3f}'.format(math.exp(val_loss))
            logging(log_str)
            logging('-' * 100)
            # Save the model if the validation loss is the best we've seen so far.
            if not best_val_loss or val_loss < best_val_loss:
                if not args.debug:
                    with open(os.path.join(args.work_dir, 'model.pt'), 'wb') as f:
                        torch.save(model, f)
                    with open(os.path.join(args.work_dir, 'optimizer.pt'), 'wb') as f:
                        torch.save(optimizer.state_dict(), f)
                best_val_loss = val_loss

            # dev-performance based learning rate annealing
            if args.scheduler == 'dev_perf':
                scheduler.step(val_loss)
                if args.sample_softmax > 0:
                    scheduler_sparse.step(val_loss)

            eval_start_time = time.time()

        if train_step == args.max_step:
            break
# Loop over epochs.
train_step = 0
train_loss = 0
best_val_loss = None

log_start_time = time.time()
eval_start_time = time.time()

# At any point you can hit Ctrl + C to break out of training early.
try:
    for epoch in itertools.count(start=1):
        train()
        if train_step == args.max_step:
            logging('-' * 100)
            logging('End of training')
            break
except KeyboardInterrupt:
    logging('-' * 100)
    logging('Exiting from training early')

# Load the best saved model.
with open(os.path.join(args.work_dir, 'model.pt'), 'rb') as f:
    model = torch.load(f)
para_model = model.to(device)

# Run on test data.
test_loss = evaluate(te_iter)
logging('=' * 100)
if args.dataset in ['enwik8', 'text8']:
    logging('| End of training | test loss {:5.2f} | test bpc {:9.5f}'.format(
        test_loss, test_loss / math.log(2)))
else:
    logging('| End of training | test loss {:5.2f} | test ppl {:9.3f}'.format(
        test_loss, math.exp(test_loss)))
logging('=' * 100)
|
# coding: utf-8
"""Standalone evaluation script: loads a trained checkpoint from --work_dir
and reports loss / bpc / ppl on the requested corpus split(s)."""
import argparse
import time
import math
import os, sys
import torch
from data_utils import get_lm_corpus
from mem_transformer import MemTransformerLM
from utils.exp_utils import get_logger
parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')
parser.add_argument('--data', type=str, default='../data/wikitext-103',
                    help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='wt103',
                    choices=['wt103', 'lm1b', 'enwik8', 'text8'],
                    help='dataset name')
parser.add_argument('--split', type=str, default='all',
                    choices=['all', 'valid', 'test'],
                    help='which split to evaluate')
parser.add_argument('--batch_size', type=int, default=10,
                    help='batch size')
parser.add_argument('--tgt_len', type=int, default=5,
                    help='number of tokens to predict')
parser.add_argument('--ext_len', type=int, default=0,
                    help='length of the extended context')
parser.add_argument('--mem_len', type=int, default=0,
                    help='length of the retained previous heads')
parser.add_argument('--clamp_len', type=int, default=-1,
                    help='max positional embedding index')
parser.add_argument('--cuda', action='store_true',
                    help='use CUDA')
parser.add_argument('--work_dir', type=str, required=True,
                    help='path to the work_dir')
parser.add_argument('--no_log', action='store_true',
                    help='do not log the eval result')
parser.add_argument('--same_length', action='store_true',
                    help='set same length attention with masking')
args = parser.parse_args()
assert args.ext_len >= 0, 'extended context length must be non-negative'
device = torch.device("cuda" if args.cuda else "cpu")
# Get logger
# NOTE: this `logging` callable shadows the stdlib logging module in this file.
logging = get_logger(os.path.join(args.work_dir, 'log.txt'),
                     log_=not args.no_log)
# Load dataset
corpus = get_lm_corpus(args.data, args.dataset)
ntokens = len(corpus.vocab)
va_iter = corpus.get_iterator('valid', args.batch_size, args.tgt_len,
    device=device, ext_len=args.ext_len)
te_iter = corpus.get_iterator('test', args.batch_size, args.tgt_len,
    device=device, ext_len=args.ext_len)
# Load the best saved model.
with open(os.path.join(args.work_dir, 'model.pt'), 'rb') as f:
    model = torch.load(f)
# presumably patches attributes missing from older checkpoints — see
# MemTransformerLM.backward_compatible; confirm before removing.
model.backward_compatible()
model = model.to(device)
logging('Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}'.format(
    args.batch_size, args.tgt_len, args.ext_len, args.mem_len, args.clamp_len))
# Evaluation may use different sequence/memory lengths than training.
model.reset_length(args.tgt_len, args.ext_len, args.mem_len)
if args.clamp_len > 0:
    model.clamp_len = args.clamp_len
if args.same_length:
    model.same_length = True
###############################################################################
# Evaluation code
###############################################################################
def evaluate(eval_iter):
    """Return the average per-token loss of the global `model` over
    `eval_iter`, logging wall-clock time per segment."""
    # Evaluation mode disables dropout.
    model.eval()

    total_len = 0
    total_loss = 0.
    start_time = time.time()
    with torch.no_grad():
        mems = tuple()
        for idx, (data, target, seq_len) in enumerate(eval_iter):
            out = model(data, target, *mems)
            seg_loss, mems = out[0], out[1:]
            # Weight each segment's mean loss by its length.
            total_loss += seq_len * seg_loss.mean().item()
            total_len += seq_len
        total_time = time.time() - start_time
    logging('Time : {:.2f}s, {:.2f}ms/segment'.format(
        total_time, 1000 * total_time / (idx+1)))
    return total_loss / total_len
# Run on test data.
# args.split is constrained by argparse `choices` to all/valid/test, so one
# of these branches always runs and both loss names end up defined.
if args.split == 'all':
    test_loss = evaluate(te_iter)
    valid_loss = evaluate(va_iter)
elif args.split == 'valid':
    valid_loss = evaluate(va_iter)
    test_loss = None
elif args.split == 'test':
    test_loss = evaluate(te_iter)
    valid_loss = None
def format_log(loss, split):
    """Format a one-split summary: bpc for character-level corpora,
    perplexity otherwise."""
    if args.dataset in ['enwik8', 'text8']:
        return '| {0} loss {1:5.2f} | {0} bpc {2:9.5f} '.format(
            split, loss, loss / math.log(2))
    return '| {0} loss {1:5.2f} | {0} ppl {2:9.3f} '.format(
        split, loss, math.exp(loss))
# Assemble one summary line covering whichever splits were evaluated.
log_str = ''
if valid_loss is not None:
    log_str += format_log(valid_loss, 'valid')
if test_loss is not None:
    log_str += format_log(test_loss, 'test')
logging('=' * 100)
logging(log_str)
logging('=' * 100)
|
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class AdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax (Grave et al., 2017) over externally supplied
    embedding weights.

    The vocabulary is split into a frequent-token "shortlist" plus tail
    clusters. The head softmax scores every shortlist token plus one logit
    per cluster; a tail token's log-probability is its cluster's head
    log-prob plus its log-prob inside that cluster's softmax.
    """

    def __init__(self, in_features, n_classes, cutoffs, keep_order=False):
        """
        Args:
            in_features: size of the hidden vectors fed to forward().
            n_classes: total vocabulary size.
            cutoffs: strictly increasing class-index boundaries of the
                tail clusters (each in 1..n_classes-2).
            keep_order: if True, the returned NLL keeps the input order
                instead of being grouped by cluster.
        """
        super(AdaptiveLogSoftmax, self).__init__()

        cutoffs = list(cutoffs)
        if (cutoffs != sorted(cutoffs)) \
                or (min(cutoffs) <= 0) \
                or (max(cutoffs) >= (n_classes - 1)) \
                or (len(set(cutoffs)) != len(cutoffs)) \
                or any([int(c) != c for c in cutoffs]):
            raise ValueError("cutoffs should be a sequence of unique, positive "
                             "integers sorted in an increasing order, where "
                             "each value is between 1 and n_classes-1")

        self.in_features = in_features
        self.n_classes = n_classes
        self.cutoffs = cutoffs + [n_classes]

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # Head scores shortlist tokens plus one logit per tail cluster.
        self.head_size = self.shortlist_size + self.n_clusters

        self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.in_features))
        self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.keep_order = keep_order

    def forward(self, hidden, target, weight, bias, keep_order=False):
        """Return the per-token negative log-likelihood of `target`.

        Args:
            hidden: [N x in_features] hidden states.
            target: [N] class indices.
            weight, bias: full [n_classes x in_features] / [n_classes]
                output embedding, sliced per cluster here.
            keep_order: see __init__.

        Returns:
            nll: [N] tensor; ordered by input position if keep_order,
                otherwise grouped cluster by cluster.
        """
        if hidden.size(0) != target.size(0):
            raise RuntimeError('Input and target should have the same size '
                               'in the batch dimension.')

        head_weight = torch.cat(
            [weight[:self.shortlist_size], self.cluster_weight], dim=0)
        head_bias = torch.cat(
            [bias[:self.shortlist_size], self.cluster_bias], dim=0)

        head_logit = F.linear(hidden, head_weight, bias=head_bias)
        head_logprob = F.log_softmax(head_logit, dim=1)

        nll = torch.zeros_like(target,
            dtype=hidden.dtype, device=hidden.device)

        offset = 0
        cutoff_values = [0] + self.cutoffs
        for i in range(len(cutoff_values) - 1):
            l_idx, h_idx = cutoff_values[i], cutoff_values[i + 1]

            mask_i = (target >= l_idx) & (target < h_idx)
            # squeeze(1), not squeeze(): a single match must stay 1-D for
            # index_select below.
            indices_i = mask_i.nonzero().squeeze(1)

            if indices_i.numel() == 0:
                continue

            target_i = target.index_select(0, indices_i) - l_idx
            head_logprob_i = head_logprob.index_select(0, indices_i)

            if i == 0:
                # Shortlist tokens are scored directly by the head.
                logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
            else:
                weight_i = weight[l_idx:h_idx]
                bias_i = bias[l_idx:h_idx]

                hidden_i = hidden.index_select(0, indices_i)

                tail_logit_i = F.linear(hidden_i, weight_i, bias=bias_i)
                tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                # BUG FIX: cluster i's head logit lives at column
                # shortlist_size + (i - 1). The old `[:, -i]` indexed the
                # wrong cluster whenever n_clusters != 2 * i - 1.
                logprob_i = head_logprob_i[:, self.shortlist_size + i - 1] \
                    + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)

            if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                nll.index_copy_(0, indices_i, -logprob_i)
            else:
                nll[offset:offset + logprob_i.size(0)].copy_(-logprob_i)

            offset += logprob_i.size(0)

        return nll
|
from torch.nn.parallel import DataParallel
import torch
from torch.nn.parallel._functions import Scatter
from torch.nn.parallel.parallel_apply import parallel_apply
def scatter(inputs, target_gpus, chunk_sizes, dim=0):
    r"""
    Slices tensors into approximately equal chunks and
    distributes them across given GPUs. Duplicates
    references to objects that are not tensors.
    """
    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            try:
                return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
            except Exception:
                # BUG FIX: was a bare `except:` that printed and called
                # quit(), killing the whole process and swallowing
                # SystemExit/KeyboardInterrupt. Log context and re-raise.
                print('obj', obj.size())
                print('dim', dim)
                print('chunk_sizes', chunk_sizes)
                raise
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return list(map(list, zip(*map(scatter_map, obj))))
        if isinstance(obj, dict) and len(obj) > 0:
            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
        # Non-container leaf: share the same reference with every target.
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None
def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0):
    r"""Scatter with support for kwargs dictionary"""
    scattered_args = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else []
    scattered_kw = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else []
    # Pad the shorter list so every replica gets an (args, kwargs) pair.
    if len(scattered_args) < len(scattered_kw):
        scattered_args.extend(() for _ in range(len(scattered_kw) - len(scattered_args)))
    elif len(scattered_kw) < len(scattered_args):
        scattered_kw.extend({} for _ in range(len(scattered_args) - len(scattered_kw)))
    return tuple(scattered_args), tuple(scattered_kw)
class BalancedDataParallel(DataParallel):
    """DataParallel variant that lets the caller cap GPU 0's share of the
    batch via `gpu0_bsz`; gpu0_bsz == 0 keeps GPU 0 entirely input-free
    (its replica is skipped)."""
    def __init__(self, gpu0_bsz, *args, **kwargs):
        # gpu0_bsz: number of samples (along self.dim) placed on device_ids[0].
        self.gpu0_bsz = gpu0_bsz
        super().__init__(*args, **kwargs)
    def forward(self, *inputs, **kwargs):
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        # With gpu0_bsz == 0, GPU 0 receives no chunk, so drop it from the
        # device list used for scatter / parallel_apply.
        if self.gpu0_bsz == 0:
            device_ids = self.device_ids[1:]
        else:
            device_ids = self.device_ids
        inputs, kwargs = self.scatter(inputs, kwargs, device_ids)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        # Replicate on ALL devices, then discard GPU 0's replica if unused.
        replicas = self.replicate(self.module, self.device_ids)
        if self.gpu0_bsz == 0:
            replicas = replicas[1:]
        outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
        return self.gather(outputs, self.output_device)
    def parallel_apply(self, replicas, device_ids, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, device_ids)
    def scatter(self, inputs, kwargs, device_ids):
        # Split the batch so GPU 0 gets gpu0_bsz samples and the remainder
        # is spread evenly over the other devices.
        bsz = inputs[0].size(self.dim)
        num_dev = len(self.device_ids)
        gpu0_bsz = self.gpu0_bsz
        # NOTE(review): assumes num_dev > 1 (num_dev - 1 divides by zero for
        # a single device) — confirm callers guarantee this.
        bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1)
        if gpu0_bsz < bsz_unit:
            chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1)
            delta = bsz - sum(chunk_sizes)
            # Spread the integer-division remainder over the non-GPU0 chunks.
            for i in range(delta):
                chunk_sizes[i + 1] += 1
            if gpu0_bsz == 0:
                chunk_sizes = chunk_sizes[1:]
        else:
            # gpu0_bsz at least as large as an even share: plain even split.
            return super().scatter(inputs, kwargs, device_ids)
        return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
|
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# BUG FIX: torch.version.cuda is None on CPU-only torch builds, so the old
# unconditional .split() crashed at import time. Fall back to 0.0 there.
_torch_cuda_version = torch.version.cuda
CUDA_MAJOR = int(_torch_cuda_version.split('.')[0]) if _torch_cuda_version else 0
CUDA_MINOR = int(_torch_cuda_version.split('.')[1]) if _torch_cuda_version else 0
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with optional low-rank projections per cluster.

    The vocabulary is split into a frequent-token "shortlist" plus tail
    clusters. The head softmax scores shortlist tokens plus one logit per
    cluster; a tail token's log-probability is its cluster's head log-prob
    plus its log-prob inside that cluster's softmax. With div_val > 1,
    rarer clusters use progressively smaller embedding dimensions.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 keep_order=False):
        """
        Args:
            n_token: vocabulary size.
            d_embed: embedding dimension of the head cluster.
            d_proj: dimension of the hidden states fed to forward().
            cutoffs: increasing token-index boundaries of the tail clusters.
            div_val: divisor applied to d_embed for each successive cluster.
            keep_order: if True, returned NLL keeps the input token order.
        """
        super(ProjectedAdaptiveLogSoftmax, self).__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # Head scores shortlist tokens plus one logit per tail cluster.
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(
                        nn.Parameter(torch.Tensor(d_proj, d_embed))
                    )
                else:
                    self.out_projs.append(None)

            # div_val == 1: one full-vocab output layer, sliced per cluster
            # in forward().
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
                d_emb_i = d_embed // (div_val ** i)

                # NOTE(review): torch.Tensor(...) leaves the projection
                # uninitialised — presumably the owning model's weight init
                # fills it in; confirm before standalone use.
                self.out_projs.append(
                    nn.Parameter(torch.Tensor(d_proj, d_emb_i))
                )

                self.out_layers.append(nn.Linear(d_emb_i, r_idx-l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        """Optionally project `hidden` to the cluster's embedding dim, then
        apply the linear output map."""
        if proj is None:
            logit = F.linear(hidden, weight, bias=bias)
        else:
            proj_hid = F.linear(hidden, proj.t().contiguous())
            logit = F.linear(proj_hid, weight, bias=bias)

        return logit

    def forward(self, hidden, target, keep_order=False):
        """Return the per-token NLL of `target`.

        hidden :: [len*bsz x d_proj]
        target :: [len*bsz]

        Returns a [len*bsz] tensor; ordered by input position if
        keep_order (or self.keep_order), otherwise grouped by cluster.
        """
        if hidden.size(0) != target.size(0):
            raise RuntimeError('Input and target should have the same size '
                               'in the batch dimension.')

        if self.n_clusters == 0:
            # No tail clusters: plain full softmax.
            logit = self._compute_logit(hidden, self.out_layers[0].weight,
                                        self.out_layers[0].bias, self.out_projs[0])
            nll = -F.log_softmax(logit, dim=-1) \
                    .gather(1, target.unsqueeze(1)).squeeze(1)
        else:
            # construct per-cluster weights and biases; the head (i == 0)
            # additionally gets the cluster logits appended.
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat(
                        [weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat(
                        [bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = F.log_softmax(head_logit, dim=1)

            nll = torch.zeros_like(target,
                dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                mask_i = (target >= l_idx) & (target < r_idx)
                # squeeze(1), not squeeze(): a single match must stay 1-D
                # for index_select below.
                indices_i = mask_i.nonzero().squeeze(1)

                if indices_i.numel() == 0:
                    continue

                target_i = target.index_select(0, indices_i) - l_idx
                head_logprob_i = head_logprob.index_select(0, indices_i)

                if i == 0:
                    logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    hidden_i = hidden.index_select(0, indices_i)

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                    # BUG FIX: cluster i's head logit lives at column
                    # shortlist_size + (i - 1). The old `[:, -i]` indexed the
                    # wrong cluster whenever n_clusters != 2 * i - 1.
                    logprob_i = head_logprob_i[:, self.shortlist_size + i - 1] \
                        + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)

                if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                    nll.index_copy_(0, indices_i, -logprob_i)
                else:
                    nll[offset:offset + logprob_i.size(0)].copy_(-logprob_i)

                offset += logprob_i.size(0)

        return nll
|
import torch
from torch import nn
import numpy as np
class LogUniformSampler(object):
    """Negative sampler drawing class ids from a log-uniform (Zipfian)
    distribution over [0, range_max)."""

    def __init__(self, range_max, n_sample):
        """
        Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
            `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`

        expected count can be approximated by 1 - (1 - p)^n
        and we use a numerically stable version -expm1(num_tries * log1p(-p))

        Our implementation fixes num_tries at 2 * n_sample, and the actual
        #samples will vary from run to run
        """
        with torch.no_grad():
            self.range_max = range_max
            boundaries = torch.arange(1., range_max+2., 1.).log_()
            self.dist = (boundaries[1:] - boundaries[:-1]) / boundaries[-1]

            n_tries = 2 * n_sample
            # log of expected count: log(1 - (1 - p)^n_tries), computed as
            # log(-expm1(n_tries * log1p(-p))) for numerical stability.
            self.log_q = (- (-self.dist.double().log1p_() * n_tries).expm1_()).log_().float()

        self.n_sample = n_sample

    def sample(self, labels):
        """
            labels: [b1, b2]
        Return
            true_log_probs: [b1, b2]
            samp_log_probs: [n_sample]
            neg_samples: [n_sample]
        """
        n_tries = 2 * self.n_sample

        with torch.no_grad():
            # unique() means the realized sample count varies per call.
            neg_samples = torch.multinomial(
                self.dist, n_tries, replacement=True).unique()
            device = labels.device
            neg_samples = neg_samples.to(device)
            true_log_probs = self.log_q[labels].to(device)
            samp_log_probs = self.log_q[neg_samples].to(device)
            return true_log_probs, samp_log_probs, neg_samples
def sample_logits(embedding, bias, labels, inputs, sampler):
    """Sampled-softmax logits: the true token's logit followed by the
    sampled negatives' logits.

        embedding: an nn.Embedding layer
        bias: [n_vocab]
        labels: [b1, b2]
        inputs: [b1, b2, n_emb]
        sampler: you may use a LogUniformSampler
    Return
        logits: [b1, b2, 1 + n_sample]
    """
    true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
    n_sample = neg_samples.size(0)
    b1, b2 = labels.size(0), labels.size(1)

    # Embed the true labels and the negatives with a single lookup.
    all_ids = torch.cat([labels.view(-1), neg_samples])
    all_w = embedding(all_ids)
    true_w = all_w[:-n_sample].view(b1, b2, -1)
    sample_w = all_w[-n_sample:].view(n_sample, -1)

    all_b = bias[all_ids]
    true_b = all_b[:-n_sample].view(b1, b2)
    sample_b = all_b[-n_sample:]

    hit = (labels[:, :, None] == neg_samples).detach()

    # Sampled-softmax correction: subtract each candidate's log expected count.
    true_logits = (true_w * inputs).sum(dim=-1) + true_b - true_log_probs
    sample_logits = torch.matmul(inputs, sample_w.t()) + sample_b - samp_log_probs
    # A negative that collides with the true label would leak the answer.
    sample_logits.masked_fill_(hit, -1e30)
    return torch.cat([true_logits[:, :, None], sample_logits], -1)
# class LogUniformSampler(object):
# def __init__(self, range_max, unique=False):
# """
# Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
# `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
# """
# self.range_max = range_max
# log_indices = torch.arange(1., range_max+2., 1.).log_()
# self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# self.unique = unique
# if self.unique:
# self.exclude_mask = torch.ByteTensor(range_max).fill_(0)
# def sample(self, n_sample, labels):
# pos_sample, new_labels = labels.unique(return_inverse=True)
# n_pos_sample = pos_sample.size(0)
# n_neg_sample = n_sample - n_pos_sample
# if self.unique:
# self.exclude_mask.index_fill_(0, pos_sample, 1)
# sample_dist = self.dist.clone().masked_fill_(self.exclude_mask, 0)
# self.exclude_mask.index_fill_(0, pos_sample, 0)
# else:
# sample_dist = self.dist
# neg_sample = torch.multinomial(sample_dist, n_neg_sample)
# sample = torch.cat([pos_sample, neg_sample])
# sample_prob = self.dist[sample]
# return new_labels, sample, sample_prob
if __name__ == '__main__':
    # Smoke test: draw negatives and score one random batch.
    S, B = 3, 4
    n_vocab = 10000
    n_sample = 5
    H = 32

    labels = torch.LongTensor(S, B).random_(0, n_vocab)

    # BUG FIX: the old demo called LogUniformSampler(n_vocab, unique=True)
    # (no such kwarg in the current constructor), passed a 6th positional
    # argument to the 5-parameter sample_logits, and unpacked a second
    # `out_labels` return value that sample_logits does not produce —
    # so it crashed on every code path.
    sampler = LogUniformSampler(n_vocab, n_sample)

    embedding = nn.Embedding(n_vocab, H)
    bias = torch.zeros(n_vocab)
    inputs = torch.Tensor(S, B, H).normal_()

    logits = sample_logits(embedding, bias, labels, inputs, sampler)
    print('logits', logits.detach().numpy().tolist())
    print('logits shape', logits.size())
|
import functools
import os, shutil
import numpy as np
import torch
def logging(s, log_path, print_=True, log_=True):
    """Echo *s* to stdout (if print_) and append it as one line to
    *log_path* (if log_). Shadows the stdlib logging module on purpose."""
    if print_:
        print(s)
    if not log_:
        return
    with open(log_path, 'a+') as handle:
        handle.write(s + '\n')
def get_logger(log_path, **kwargs):
    """Return the `logging` callable with *log_path* (and any default
    print_/log_ flags) pre-bound via functools.partial."""
    return functools.partial(logging, log_path=log_path, **kwargs)
def create_exp_dir(dir_path, scripts_to_save=None, debug=False):
    """Create an experiment directory (optionally snapshotting source
    scripts into `<dir_path>/scripts`) and return a logger bound to
    `<dir_path>/log.txt`.

    In debug mode nothing is created and a no-op logger is returned.
    """
    if debug:
        print('Debug Mode : no experiment dir created')
        return functools.partial(logging, log_path=None, log_=False)

    # exist_ok avoids the check-then-create race of the old exists() guard.
    os.makedirs(dir_path, exist_ok=True)
    print('Experiment dir : {}'.format(dir_path))

    if scripts_to_save is not None:
        script_path = os.path.join(dir_path, 'scripts')
        os.makedirs(script_path, exist_ok=True)
        for script in scripts_to_save:
            dst_file = os.path.join(script_path, os.path.basename(script))
            shutil.copyfile(script, dst_file)

    return get_logger(log_path=os.path.join(dir_path, 'log.txt'))
def save_checkpoint(model, optimizer, path, epoch):
    """Write epoch-tagged snapshots of the full model object and the
    optimizer state dict into *path*."""
    model_file = os.path.join(path, 'model_{}.pt'.format(epoch))
    optim_file = os.path.join(path, 'optimizer_{}.pt'.format(epoch))
    torch.save(model, model_file)
    torch.save(optimizer.state_dict(), optim_file)
|
import os
from collections import Counter, OrderedDict
import torch
class Vocab(object):
    """Symbol vocabulary: counts tokens, builds sym<->index maps, and
    encodes tokenized text into torch.LongTensors."""

    def __init__(self, special=None, min_freq=0, max_size=None, lower_case=True,
                 delimiter=None, vocab_file=None):
        """
        Args:
            special: symbols (e.g. '<eos>') always placed at the front of
                the vocab. None (the default) means no specials; the old
                `special=[]` default was a mutable-default-argument hazard.
            min_freq: drop symbols with a lower count when building from data.
            max_size: keep at most this many counted symbols (None = all).
            lower_case: lower-case each line before splitting.
            delimiter: separator for str.split; '' means character-level.
            vocab_file: if set, build_vocab() loads symbols from this file
                (one symbol per line) instead of using the counts.
        """
        self.counter = Counter()
        self.special = special if special is not None else []
        self.min_freq = min_freq
        self.max_size = max_size
        self.lower_case = lower_case
        self.delimiter = delimiter
        self.vocab_file = vocab_file

    def tokenize(self, line, add_eos=False, add_double_eos=False):
        """Split one line into symbols, optionally adding sentence markers."""
        line = line.strip()
        # convert to lower case
        if self.lower_case:
            line = line.lower()

        # empty delimiter '' will evaluate False and means char-level split
        if self.delimiter == '':
            symbols = line
        else:
            symbols = line.split(self.delimiter)

        if add_double_eos:  # lm1b
            return ['<S>'] + symbols + ['<S>']
        elif add_eos:
            return symbols + ['<eos>']
        else:
            return symbols

    def count_file(self, path, verbose=False, add_eos=False):
        """Tokenize a file, update the counter, and return its sentences."""
        if verbose: print('counting file {} ...'.format(path))
        assert os.path.exists(path)

        sents = []
        with open(path, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print('    line {}'.format(idx))
                symbols = self.tokenize(line, add_eos=add_eos)
                self.counter.update(symbols)
                sents.append(symbols)

        return sents

    def count_sents(self, sents, verbose=False):
        """
            sents : a list of sentences, each a list of tokenized symbols
        """
        if verbose: print('counting {} sents ...'.format(len(sents)))
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                print('    line {}'.format(idx))
            self.counter.update(symbols)

    def _build_from_file(self, vocab_file):
        """Load symbols (first whitespace field of each line) from a file."""
        self.idx2sym = []
        self.sym2idx = OrderedDict()

        with open(vocab_file, 'r', encoding='utf-8') as f:
            for line in f:
                symb = line.strip().split()[0]
                self.add_symbol(symb)
        # NOTE(review): assumes the file contains a literal '<UNK>' entry;
        # a file using lowercase '<unk>' raises KeyError here — confirm
        # against the corpora in use.
        self.unk_idx = self.sym2idx['<UNK>']

    def build_vocab(self):
        """Build idx2sym / sym2idx, either from vocab_file or from counts."""
        if self.vocab_file:
            print('building vocab from {}'.format(self.vocab_file))
            self._build_from_file(self.vocab_file)
            print('final vocab size {}'.format(len(self)))
        else:
            print('building vocab with min_freq={}, max_size={}'.format(
                self.min_freq, self.max_size))
            self.idx2sym = []
            self.sym2idx = OrderedDict()

            # Specials come first so their indices are stable.
            for sym in self.special:
                self.add_special(sym)

            for sym, cnt in self.counter.most_common(self.max_size):
                if cnt < self.min_freq: break
                self.add_symbol(sym)

            print('final vocab size {} from {} unique tokens'.format(
                len(self), len(self.counter)))

    def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
                    add_double_eos=False):
        """Encode a file; one LongTensor per line, concatenated if ordered."""
        if verbose: print('encoding file {} ...'.format(path))
        assert os.path.exists(path)
        encoded = []
        with open(path, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print('    line {}'.format(idx))
                symbols = self.tokenize(line, add_eos=add_eos,
                                        add_double_eos=add_double_eos)
                encoded.append(self.convert_to_tensor(symbols))

        if ordered:
            encoded = torch.cat(encoded)

        return encoded

    def encode_sents(self, sents, ordered=False, verbose=False):
        """Encode pre-tokenized sentences; concatenated if ordered."""
        if verbose: print('encoding {} sents ...'.format(len(sents)))
        encoded = []
        for idx, symbols in enumerate(sents):
            if verbose and idx > 0 and idx % 500000 == 0:
                print('    line {}'.format(idx))
            encoded.append(self.convert_to_tensor(symbols))

        if ordered:
            encoded = torch.cat(encoded)

        return encoded

    def add_special(self, sym):
        """Register a special symbol and expose e.g. self.eos_idx for '<eos>'."""
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1
            setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])

    def add_symbol(self, sym):
        """Register a regular symbol (no-op if already present)."""
        if sym not in self.sym2idx:
            self.idx2sym.append(sym)
            self.sym2idx[sym] = len(self.idx2sym) - 1

    def get_sym(self, idx):
        """Map an index back to its symbol."""
        assert 0 <= idx < len(self), 'Index {} out of range'.format(idx)
        return self.idx2sym[idx]

    def get_idx(self, sym):
        """Map a symbol to its index, falling back to unk_idx when unknown."""
        if sym in self.sym2idx:
            return self.sym2idx[sym]
        # print('encounter unk {}'.format(sym))
        assert '<eos>' not in sym
        assert hasattr(self, 'unk_idx')
        # sym is known absent, so return unk_idx directly (the old
        # `self.sym2idx.get(sym, self.unk_idx)` was a redundant 2nd lookup).
        return self.unk_idx

    def get_symbols(self, indices):
        return [self.get_sym(idx) for idx in indices]

    def get_indices(self, symbols):
        return [self.get_idx(sym) for sym in symbols]

    def convert_to_tensor(self, symbols):
        """Encode a list of symbols as a 1-D LongTensor of indices."""
        return torch.LongTensor(self.get_indices(symbols))

    def convert_to_sent(self, indices, exclude=None):
        """Decode indices to a space-joined string, optionally skipping some."""
        if exclude is None:
            return ' '.join([self.get_sym(idx) for idx in indices])
        else:
            return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])

    def __len__(self):
        return len(self.idx2sym)
|
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import Any, List
import torch
import torch.distributed as dist
import torch.optim
from fairseq.dataclass import FairseqDataclass
from fairseq.optim import FairseqOptimizer, register_optimizer
from omegaconf import II, OmegaConf
logger = logging.getLogger(__name__)
@dataclass
class FairseqAdanConfig(FairseqDataclass):
    """fairseq config dataclass holding the Adan hyper-parameters."""
    # (beta1, beta2, beta3): EMA factors for the gradient, the gradient
    # difference, and the squared update respectively (see Adan.step).
    adan_betas: Any = field(
        default=(0.98, 0.92, 0.99), metadata={"help": "betas for Adan optimizer"}
    )
    adan_eps: float = field(
        default=1e-8, metadata={"help": "epsilon for Adam optimizer"}
    )
    weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
    # If True, decoupled weight decay is applied before the update instead
    # of the proximal shrink after it (see Adan.step).
    no_prox: bool = field(
        default=False, metadata={"help": "wether to perform prox operator"}
    )
    fp16_adan_stats: bool = field(
        default=False, metadata={"help": "use FP16 stats (with automatic scaling)"}
    )
    # TODO common vars below in parent
    tpu: bool = II("common.tpu")
    lr: List[float] = II("optimization.lr")
@register_optimizer("adan", dataclass=FairseqAdanConfig)
class FairseqAdan(FairseqOptimizer):
    """
    Adan optimizer for fairseq.

    Wraps the pure-Python `Adan` below; the fused-CUDA branch is currently
    disabled (fused_adan_cls is hard-coded to None).
    """
    def __init__(self, cfg: FairseqAdanConfig, params):
        super().__init__(cfg)
        # No fused kernel wired up, so use_fused_adan is always False and
        # the elif branch below is effectively dead.
        fused_adan_cls = None
        use_fused_adan = (
            fused_adan_cls is not None
            and torch.cuda.is_available()
        )
        if getattr(cfg, "tpu", False):
            if self.cfg.fp16_adan_stats:
                raise NotImplementedError("--fp16-adam-stats is only supported on GPU")
            # on TPUs we use the Adam defined here, since it
            # automatically casts gradients to FP32
            self._optimizer = Adan(params, **self.optimizer_config)
        elif use_fused_adan:
            raise NotImplementedError("--fp16-adam-stats is only supported on GPU")
        else:
            if self.cfg.fp16_adan_stats:
                raise NotImplementedError(
                    "--fp16-adam-stats is only supported with FusedAdanV1"
                )
            self._optimizer = Adan(params, **self.optimizer_config)
    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        # NOTE(review): eval() on the betas string mirrors fairseq's Adam
        # handling, but it executes config-supplied text — trusted configs only.
        return {
            "lr": self.cfg.lr[0]
            if isinstance(self.cfg.lr, Collection)
            else self.cfg.lr,
            "betas": eval(self.cfg.adan_betas)
            if isinstance(self.cfg.adan_betas, str)
            else OmegaConf.to_container(self.cfg.adan_betas),
            "eps": self.cfg.adan_eps,
            "weight_decay": self.cfg.weight_decay,
        }
    def average_params(self):
        """Reduce Params is only used during BMUF distributed training."""
        state_dict = self.optimizer.state_dict()
        total_gpus = float(dist.get_world_size())
        # Average the three Adan EMA buffers across all workers.
        for _, value in state_dict["state"].items():
            value["exp_avg"] /= total_gpus
            value["exp_avg_sq"] /= total_gpus
            value['exp_avg_diff'] /= total_gpus
            dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM)
            dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM)
            dist.all_reduce(value["exp_avg_diff"], op=dist.ReduceOp.SUM)
class Adan(torch.optim.Optimizer):
    r"""Implements Adan algorithm.
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.98, 0.92, 0.99))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        no_prox (bool, optional): apply decoupled weight decay before the
            update instead of the proximal shrink after it (default: False)
    """
    def __init__(self, params, lr=1e-3, betas=(0.98, 0.92, 0.99), eps=1e-8,
                 weight_decay=0.0, no_prox = False):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, no_prox = no_prox)
        super(Adan, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(Adan, self).__setstate__(state)
        for group in self.param_groups:
            # Checkpoints saved before no_prox existed lack the key.
            group.setdefault('no_prox', False)
    @property
    def supports_memory_efficient_fp16(self):
        # fairseq hook: FP16/BF16 params are fine; step() casts to FP32.
        return True
    @property
    def supports_flat_params(self):
        return True
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            beta1, beta2, beta3 = group['betas']
            # assume same step across group now to simplify things
            # per parameter step can be easily support by making it tensor, or pass list into kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1
            bias_correction1 = 1.0 - beta1 ** group['step']
            bias_correction2 = 1.0 - beta2 ** group['step']
            bias_correction3 = 1.0 - beta3 ** group['step']
            for p in group['params']:
                if p.grad is None:
                    continue
                # All state and arithmetic is kept in FP32 even for
                # FP16/BF16 parameters.
                p_data_fp32 = p.data
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()
                state = self.state[p]
                if len(state) == 0:
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_diff'] = torch.zeros_like(p_data_fp32)
                else:
                    state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
                    state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
                    state['exp_avg_diff'] = state['exp_avg_diff'].to(p_data_fp32)
                grad = p.grad.data
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError(
                        "Adan does not support sparse gradients, please consider SparseAdam instead"
                    )
                # First step (or fresh state): no previous gradient yet, so
                # the gradient-difference term below is zero.
                if 'pre_grad' not in state or group['step'] == 1:
                    state['pre_grad'] = grad
                # Clone before any in-place ops so pre_grad stays pristine.
                copy_grad = grad.clone()
                exp_avg, exp_avg_sq, exp_avg_diff = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_diff']
                diff = grad - state['pre_grad']
                update = grad+beta2*diff
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)  # m_t: EMA of gradients
                exp_avg_diff.mul_(beta2).add_(diff, alpha=1 - beta2)  # diff_t: EMA of gradient differences
                exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1 - beta3)  # v_t: EMA of squared combined update
                denom = ((exp_avg_sq).sqrt() / math.sqrt(bias_correction3)).add_(group['eps'])
                # Bias-corrected Adan direction: (m_t + beta2 * diff_t) / sqrt(v_t).
                update = ((exp_avg/bias_correction1+beta2*exp_avg_diff/bias_correction2) ).div_(denom)
                if group['no_prox']:
                    # Decoupled (AdamW-style) decay applied before the update.
                    p_data_fp32.mul_(1 - group['lr'] * group['weight_decay'])
                    p_data_fp32.add_(update, alpha=-group['lr'])
                else:
                    # Proximal variant: update first, then shrink.
                    p_data_fp32.add_(update, alpha=-group['lr'])
                    p_data_fp32.div_(1 + group['lr'] * group['weight_decay'])
                state['pre_grad'] = copy_grad
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p.data.copy_(p_data_fp32)
        return loss
|
import os
from fairseq.models.roberta import RobertaModel
import argparse
from scipy.stats import pearsonr
from sklearn.metrics import matthews_corrcoef
def get_acc(model_path, data_path, bin_path, task='rte'):
    """Evaluate a fine-tuned RoBERTa checkpoint on a GLUE dev set.

    Args:
        model_path: directory containing ``checkpoint_best.pt``.
        data_path: directory holding the raw TSV dev file(s).
        bin_path: path to the fairseq-binarized data for this task.
        task: GLUE task name; selects the TSV column layout and the metric.

    Returns:
        list with one metric per dev file: Matthews correlation for CoLA,
        Pearson r for STS-B, plain accuracy otherwise. MNLI yields two
        entries (mismatched, then matched); all other tasks yield one.
    """
    acc_list = []
    roberta = RobertaModel.from_pretrained(
        model_path,
        checkpoint_file='checkpoint_best.pt',
        data_name_or_path=bin_path  # e.g. 'RTE-bin'
    )
    # Map a predicted label index back to its string form in the label dictionary.
    label_fn = lambda label: roberta.task.label_dictionary.string(
        [label + roberta.task.label_dictionary.nspecial]
    )
    roberta.cuda()
    roberta.eval()
    if 'mnli' not in task:
        dev_files = ['dev.tsv']
    else:
        dev_files = ['dev_mismatched.tsv', 'dev_matched.tsv']
    for dev_file in dev_files:
        # Bug fix: reset the counters for every dev file. Previously they were
        # initialized once outside this loop, so the MNLI "matched" score also
        # counted the mismatched file's samples.
        gold, pred = [], []
        ncorrect, nsamples = 0, 0
        with open(os.path.join(data_path, dev_file)) as fin:
            fin.readline()  # skip the TSV header row
            for line in fin:
                tokens = line.strip().split('\t')
                # Column layout differs per GLUE task.
                if 'rte' in task or 'qnli' in task:
                    sent1, sent2, target = tokens[1], tokens[2], tokens[3]
                    tokens = roberta.encode(sent1, sent2)
                elif 'qqp' in task:
                    sent1, sent2, target = tokens[3], tokens[4], tokens[5]
                    tokens = roberta.encode(sent1, sent2)
                elif 'mnli' in task:
                    sent1, sent2, target = tokens[8], tokens[9], tokens[11]
                    tokens = roberta.encode(sent1, sent2)
                elif 'mrpc' in task:
                    sent1, sent2, target = tokens[3], tokens[4], tokens[0]
                    tokens = roberta.encode(sent1, sent2)
                elif 'sts_b' in task:
                    sent1, sent2, target = tokens[7], tokens[8], float(tokens[9])
                    tokens = roberta.encode(sent1, sent2)
                elif 'sst_2' in task:
                    sent, target = tokens[0], tokens[1]
                    tokens = roberta.encode(sent)
                elif 'cola' in task:
                    sent, target = tokens[3], tokens[1]
                    tokens = roberta.encode(sent)
                if 'sts_b' not in task:
                    # Classification tasks: argmax over the classification head.
                    prediction = roberta.predict('sentence_classification_head', tokens).argmax().item()
                    prediction_label = label_fn(prediction)
                    ncorrect += int(prediction_label == target)
                    nsamples += 1
                    if 'cola' in task:
                        # CoLA is scored with Matthews corr., which needs ints.
                        target = int(target)
                        prediction_label = int(prediction_label)
                        pred.append(prediction_label)
                        gold.append(target)
                else:
                    # STS-B is a regression task; head output is scaled to [0, 5].
                    features = roberta.extract_features(tokens)
                    predictions = 5.0 * roberta.model.classification_heads['sentence_classification_head'](features)
                    gold.append(target)
                    pred.append(predictions.item())
        if 'cola' in task:
            out = matthews_corrcoef(gold, pred)
        elif 'sts_b' in task:
            out = pearsonr(gold, pred)[0]
        else:
            out = float(ncorrect) / float(nsamples)
        acc_list.append(out)
    return acc_list
# ---- command-line interface ------------------------------------------------
parser = argparse.ArgumentParser(description='GLUE test for acc')
parser.add_argument('--avg_num', type=int, default=1,
                    help='number of try')
parser.add_argument('--pre_path', type=str, default='./baseline/checkpoint_20_1000000.pt',
                    help='path to pre-trained model')
parser.add_argument('--data_path', type=str, default='./GLUE/glue_data/STS-B',
                    help='path to data')
parser.add_argument('--bin_path', type=str, default='./GLUE/STS-B-bin',
                    help='path to -bin data')
parser.add_argument('--finetune_path', type=str, default='./bert-fintune/adam/STS-B/',
                    help='path to finetuned model')
parser.add_argument('--task', type=str, default='sts_b',
                    help='task of finetune')
parser.add_argument('--inference', action='store_true', default=False,
                    help='inference only')
args = parser.parse_args()

# Accumulate the metric(s) over args.avg_num independent fine-tune/eval runs.
# acc_avg2 is only ever non-zero for MNLI, which reports a second (matched) score.
acc_avg = 0.0
acc_avg2 = 0.0
for _ in range(args.avg_num):
    if not args.inference:
        # Fine-tune first, then evaluate the checkpoint written by the run.
        val = os.system(' fairseq-hydra-train --config-dir ./fairseq/examples/roberta/config/finetuning \
            --config-name {} \
            task.data={} checkpoint.restore_file={} \
            checkpoint.save_dir={}'.format(args.task, args.bin_path, args.pre_path, args.finetune_path))
    run_metrics = get_acc(args.finetune_path, args.data_path, args.bin_path, args.task)
    acc_avg += run_metrics[0]
    if len(run_metrics) > 1:
        acc_avg2 += run_metrics[1]

if acc_avg2 > 0:
    print('Mismatched Accuracy1:{}, Matched Accuracy1:{}'.format(float(acc_avg)/float(args.avg_num), float(acc_avg2)/float(args.avg_num)))
else:
    print('AVG Accuracy1:{}'.format(float(acc_avg)/float(args.avg_num)))
|
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
from typing import List
class Adan(Optimizer):
    """
    Implements a pytorch variant of Adan.
    Adan was proposed in
    Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models[J]. arXiv preprint arXiv:2208.06677, 2022.
    https://arxiv.org/abs/2208.06677
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float, float], optional): coefficients used for computing
            running averages of gradient and its norm. (default: (0.98, 0.92, 0.99))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): decoupled weight decay (L2 penalty) (default: 0)
        max_grad_norm (float, optional): value used to clip
            global grad norm (default: 0.0 no clip)
        no_prox (bool): how to perform the decoupled weight decay (default: False)
        foreach (bool): if True would use torch._foreach implementation. It's faster but uses
            slightly more memory. (default: True)
    """
    def __init__(self, params, lr=1e-3, betas=(0.98, 0.92, 0.99), eps=1e-8,
                 weight_decay=0.0, max_grad_norm=0.0, no_prox=False, foreach: bool = True):
        # Fail fast on invalid hyper-parameters.
        if not 0.0 <= max_grad_norm:
            raise ValueError("Invalid Max grad norm: {}".format(max_grad_norm))
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= betas[2] < 1.0:
            raise ValueError("Invalid beta parameter at index 2: {}".format(betas[2]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm, no_prox=no_prox, foreach=foreach)
        super().__init__(params, defaults)

    def __setstate__(self, state):
        """Restore state; backfill 'no_prox' for checkpoints saved before it existed."""
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('no_prox', False)

    @torch.no_grad()
    def restart_opt(self):
        """Reset the step counter and all moving averages, as if freshly constructed."""
        for group in self.param_groups:
            group['step'] = 0
            for p in group['params']:
                if p.requires_grad:
                    state = self.state[p]
                    # State initialization
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    # Exponential moving average of gradient difference
                    state['exp_avg_diff'] = torch.zeros_like(p)

    @torch.no_grad()
    def step(self, closure=None):
        """
        Performs a single optimization step.

        Arguments:
            closure (callable, optional): re-evaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        # Optional global gradient-norm clipping, computed over ALL groups at once.
        if self.defaults['max_grad_norm'] > 0:
            device = self.param_groups[0]['params'][0].device
            global_grad_norm = torch.zeros(1, device=device)
            max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device)
            for group in self.param_groups:
                for p in group['params']:
                    if p.grad is not None:
                        grad = p.grad
                        global_grad_norm.add_(grad.pow(2).sum())
            global_grad_norm = torch.sqrt(global_grad_norm)
            # Bug fix: use the constructor-level eps here. The original read
            # group['eps'] through the loop variable leaked from the loop above,
            # silently picking the *last* group's eps when groups override it.
            clip_global_grad_norm = torch.clamp(max_grad_norm / (global_grad_norm + self.defaults['eps']), max=1.0)
        else:
            clip_global_grad_norm = 1.0
        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            exp_avg_diffs = []
            pre_grads = []
            beta1, beta2, beta3 = group['betas']
            # assume same step across group now to simplify things
            # per parameter step can be easily support by making it tensor, or pass list into kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1
            bias_correction1 = 1.0 - beta1 ** group['step']
            bias_correction2 = 1.0 - beta2 ** group['step']
            bias_correction3 = 1.0 - beta3 ** group['step']
            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                grads.append(p.grad)
                state = self.state[p]
                if len(state) == 0:
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    state['exp_avg_diff'] = torch.zeros_like(p)
                if 'pre_grad' not in state or group['step'] == 1:
                    # at first step grad wouldn't be clipped by `clip_global_grad_norm`
                    # this is only to simplify implementation
                    state['pre_grad'] = p.grad
                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                exp_avg_diffs.append(state['exp_avg_diff'])
                pre_grads.append(state['pre_grad'])
            kwargs = dict(
                params=params_with_grad,
                grads=grads,
                exp_avgs=exp_avgs,
                exp_avg_sqs=exp_avg_sqs,
                exp_avg_diffs=exp_avg_diffs,
                pre_grads=pre_grads,
                beta1=beta1,
                beta2=beta2,
                beta3=beta3,
                bias_correction1=bias_correction1,
                bias_correction2=bias_correction2,
                bias_correction3_sqrt=math.sqrt(bias_correction3),
                lr=group['lr'],
                weight_decay=group['weight_decay'],
                eps=group['eps'],
                no_prox=group['no_prox'],
                clip_global_grad_norm=clip_global_grad_norm,
            )
            # Dispatch to the fused multi-tensor kernel or the reference loop.
            if group["foreach"]:
                copy_grads = _multi_tensor_adan(**kwargs)
            else:
                copy_grads = _single_tensor_adan(**kwargs)
            # Remember this step's (clipped) gradients for the next diff term.
            for p, copy_grad in zip(params_with_grad, copy_grads):
                self.state[p]['pre_grad'] = copy_grad
        return loss
def _single_tensor_adan(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
exp_avg_diffs: List[Tensor],
pre_grads: List[Tensor],
*,
beta1: float,
beta2: float,
beta3: float,
bias_correction1: float,
bias_correction2: float,
bias_correction3_sqrt: float,
lr: float,
weight_decay: float,
eps: float,
no_prox: bool,
clip_global_grad_norm: Tensor,
):
copy_grads = []
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
exp_avg_diff = exp_avg_diffs[i]
pre_grad = pre_grads[i]
grad = grad.mul_(clip_global_grad_norm)
copy_grads.append(grad.clone())
diff = grad - pre_grad
update = grad + beta2 * diff
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t
exp_avg_diff.mul_(beta2).add_(diff, alpha=1 - beta2) # diff_t
exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1 - beta3) # n_t
denom = ((exp_avg_sq).sqrt() / bias_correction3_sqrt).add_(eps)
update = ((exp_avg / bias_correction1 + beta2 * exp_avg_diff / bias_correction2)).div_(denom)
if no_prox:
param.mul_(1 - lr * weight_decay)
param.add_(update, alpha=-lr)
else:
param.add_(update, alpha=-lr)
param.div_(1 + lr * weight_decay)
return copy_grads
def _multi_tensor_adan(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    exp_avg_diffs: List[Tensor],
    pre_grads: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    beta3: float,
    bias_correction1: float,
    bias_correction2: float,
    bias_correction3_sqrt: float,
    lr: float,
    weight_decay: float,
    eps: float,
    no_prox: bool,
    clip_global_grad_norm: Tensor,
):
    """Fused Adan update using torch._foreach_* ops over whole tensor lists.

    Semantically mirrors `_single_tensor_adan` (params and the EMA buffers are
    mutated in place) but batches each elementwise op across all tensors for
    speed. Returns clones of the clipped gradients for the caller to store as
    next step's `pre_grad`. NOTE: statement order here is deliberate — see the
    inline comments about float-precision-sensitive rewrites.
    """
    # Skip the multiply entirely when no clipping is needed (clip factor is
    # clamped to max=1.0 by the caller, so ==1.0 means "no clip").
    if clip_global_grad_norm<1.0:
        torch._foreach_mul_(grads, clip_global_grad_norm.item())
    copy_grads = [g.clone() for g in grads]
    # Gradient differences g_t - g_{t-1}, one per parameter.
    diff = torch._foreach_sub(grads, pre_grads)
    # NOTE: line below while looking identical gives different result, due to float precision errors.
    # using mul+add produces identical results to single-tensor, using add+alpha doesn't
    # On cuda this difference doesn't matter due to its' own precision non-determinism
    # update = torch._foreach_add(grads, torch._foreach_mul(diff, beta2))
    update = torch._foreach_add(grads, diff, alpha=beta2)
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)  # m_t
    torch._foreach_mul_(exp_avg_diffs, beta2)
    torch._foreach_add_(exp_avg_diffs, diff, alpha=1 - beta2)  # diff_t
    torch._foreach_mul_(exp_avg_sqs, beta3)
    torch._foreach_addcmul_(exp_avg_sqs, update, update, value=1 - beta3)  # n_t
    # denom = sqrt(n_t)/sqrt(bias_correction3) + eps
    denom = torch._foreach_sqrt(exp_avg_sqs)
    torch._foreach_div_(denom, bias_correction3_sqrt)
    torch._foreach_add_(denom, eps)
    # update = (m_t/bc1 + beta2*diff_t/bc2) / denom
    update = torch._foreach_div(exp_avgs, bias_correction1)
    # NOTE: same issue as above. beta2 * diff / bias_correction2 != diff * (beta2 / bias_correction2)
    # using faster version by default.
    # torch._foreach_add_(update, torch._foreach_div(torch._foreach_mul(exp_avg_diffs, beta2), bias_correction2))
    torch._foreach_add_(update, torch._foreach_mul(exp_avg_diffs, beta2 / bias_correction2))
    torch._foreach_div_(update, denom)
    if no_prox:
        # AdamW-style decoupled decay before the step.
        torch._foreach_mul_(params, 1 - lr * weight_decay)
        torch._foreach_add_(params, update, alpha=-lr)
    else:
        # Proximal form: step first, then shrink.
        torch._foreach_add_(params, update, alpha=-lr)
        torch._foreach_div_(params, 1 + lr * weight_decay)
    return copy_grads
|
#!/usr/bin/env python3
""" ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import time
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import torch
import torch.nn as nn
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint,\
convert_splitbn_model, model_parameters
from timm.utils import *
from timm.loss import *
#from timm.optim import create_optimizer
from timm.scheduler import create_scheduler
from optim_factory import create_optimizer
from timm.utils import ApexScaler, NativeScaler
#import timm.optim.optim_factory as optim_factory
# Optional NVIDIA Apex support (AMP, its DDP wrapper, and SyncBN conversion).
try:
    from apex import amp
    from apex.parallel import DistributedDataParallel as ApexDDP
    from apex.parallel import convert_syncbn_model
    has_apex = True
except ImportError:
    has_apex = False
# Native (torch.cuda.amp) mixed precision: present when autocast exists.
has_native_amp = False
try:
    if getattr(torch.cuda.amp, 'autocast') is not None:
        has_native_amp = True
except AttributeError:
    pass
# Optional Weights & Biases logging.
try:
    import wandb
    has_wandb = True
except ImportError:
    has_wandb = False
# Let cuDNN benchmark and pick the fastest conv algorithms for fixed shapes.
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('train')
# A minimal first-pass parser that only extracts --config; the named YAML file
# supplies key/value pairs that override the main parser's defaults below.
config_parser = argparse.ArgumentParser(description='Training Config', add_help=False)
config_parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
                           help='YAML config file specifying default arguments')
parser = config_parser
# Main argument parser. Fixes vs. the previous version: several help strings
# disagreed with the real defaults (--checkpoint-hist, --sched, --min-lr) or
# had unbalanced parentheses; a stray trailing comma after the --aa call built
# a throwaway tuple; "bathes" typo. Flags, types and defaults are unchanged.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset / Model parameters
parser.add_argument('--data-dir', metavar='DIR',
                    help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
                    help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--train-split', metavar='NAME', default='train',
                    help='dataset train split (default: train)')
parser.add_argument('--val-split', metavar='NAME', default='validation',
                    help='dataset validation split (default: validation)')
parser.add_argument('--model', default='resnet50', type=str, metavar='MODEL',
                    help='Name of model to train (default: "resnet50")')
parser.add_argument('--pretrained', action='store_true', default=False,
                    help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
                    help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default=None, type=str, metavar='PATH',
                    help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
                    help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=None, metavar='N',
                    help='number of label classes (Model default if None)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
                    help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--img-size', type=int, default=None, metavar='N',
                    help='Image patch size (default: None => model default)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
                    metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
                    metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
                    help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
                    help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=128, metavar='N',
                    help='input batch size for training (default: 128)')
parser.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N',
                    help='validation batch size override (default: None)')
# Optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
                    help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                    help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                    help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=2e-5,
                    help='weight decay (default: 2e-5)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
                    help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--clip-mode', type=str, default='norm',
                    help='Gradient clipping mode. One of ("norm", "value", "agc")')
parser.add_argument('--max-grad-norm', type=float, default=0.0,
                    help='Max grad norm (same as clip gradient norm, default: 0.0, no clipping)')
parser.add_argument('--bias-decay', action='store_true', default=False,
                    help='Perform the weight decay on bias term (default=False)')
parser.add_argument('--no-prox', action='store_true', default=False,
                    help='Perform the weight decay update like AdamW (default=False)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
                    help='learning rate (default: 0.05)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
                    help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
                    help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
                    help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
                    help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT',
                    help='amount to decay each learning rate cycle (default: 0.5)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
                    help='learning rate cycle limit, cycles enabled if > 1')
parser.add_argument('--lr-k-decay', type=float, default=1.0,
                    help='learning rate k-decay for cosine/poly (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
                    help='warmup learning rate (default: 0.0001)')
parser.add_argument('--min-lr', type=float, default=1e-6, metavar='LR',
                    help='lower lr bound for cyclic schedulers that hit 0 (default: 1e-6)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
                    help='number of epochs to train (default: 300)')
parser.add_argument('--epoch-repeats', type=float, default=0., metavar='N',
                    help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=100, metavar='N',
                    help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N',
                    help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
                    help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                    help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
                    help='LR decay rate (default: 0.1)')
# Augmentation & regularization parameters
parser.add_argument('--no-aug', action='store_true', default=False,
                    help='Disable all training augmentation, override other train aug args')
parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
                    help='Random resize scale (default: 0.08 1.0)')
parser.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO',
                    help='Random resize aspect ratio (default: 0.75 1.33)')
parser.add_argument('--hflip', type=float, default=0.5,
                    help='Horizontal flip training aug probability')
parser.add_argument('--vflip', type=float, default=0.,
                    help='Vertical flip training aug probability')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
                    help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default=None, metavar='NAME',
                    help='Use AutoAugment policy. "v0" or "original". (default: None)')
parser.add_argument('--aug-repeats', type=int, default=0,
                    help='Number of augmentation repetitions (distributed training only) (default: 0)')
parser.add_argument('--aug-splits', type=int, default=0,
                    help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
parser.add_argument('--jsd-loss', action='store_true', default=False,
                    help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
parser.add_argument('--bce-loss', action='store_true', default=False,
                    help='Enable BCE loss w/ Mixup/CutMix use.')
parser.add_argument('--reprob', type=float, default=0., metavar='PCT',
                    help='Random erase prob (default: 0.)')
parser.add_argument('--remode', type=str, default='pixel',
                    help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
                    help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
                    help='Do not random erase first (clean) augmentation split')
parser.add_argument('--mixup', type=float, default=0.0,
                    help='mixup alpha, mixup enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix', type=float, default=0.0,
                    help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
                    help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
                    help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
                    help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
                    help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
                    help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
                    help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='random',
                    help='Training interpolation (random, bilinear, bicubic default: "random")')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
                    help='Dropout rate (default: 0.)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
                    help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
                    help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
                    help='Drop block rate (default: None)')
parser.add_argument('--bn-momentum', type=float, default=None,
                    help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
                    help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
                    help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='reduce',
                    help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
                    help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
                    help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
                    help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
                    help='decay factor for model weights moving average (default: 0.9998)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
                    help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
                    help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
                    help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('--checkpoint-hist', type=int, default=2, metavar='N',
                    help='number of checkpoints to keep (default: 2)')
parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
                    help='how many training processes to use (default: 4)')
parser.add_argument('--save-images', action='store_true', default=False,
                    help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
                    help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
                    help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
                    help='Use Native Torch AMP mixed precision')
parser.add_argument('--channels-last', action='store_true', default=False,
                    help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
                    help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
                    help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
                    help='path to output folder (default: none, current dir)')
parser.add_argument('--experiment', default='', type=str, metavar='NAME',
                    help='name of train experiment, name of sub-folder for output')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
                    help='Best metric (default: "top1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
                    help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
                    help='use the multi-epochs-loader to save time at the beginning of every epoch')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
                    help='convert model torchscript for inference')
parser.add_argument('--log-wandb', action='store_true', default=False,
                    help='log training and validation metrics to wandb')
def _parse_args():
    """Two-stage CLI parsing: an optional YAML config file (via --config)
    overrides the main parser's defaults, then the remaining CLI arguments
    override both. Returns (args, yaml-serialized args text)."""
    config_args, remaining_argv = config_parser.parse_known_args()
    if config_args.config:
        with open(config_args.config, 'r') as cfg_file:
            parser.set_defaults(**yaml.safe_load(cfg_file))
    # Defaults now reflect the config file (if any); CLI flags still win.
    args = parser.parse_args(remaining_argv)
    # Serialize the resolved arguments so they can be saved with the run output.
    args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
    return args, args_text
def main():
    """Top-level training driver.

    Parses args, sets up (optionally distributed) device state, builds the
    model, optimizer, AMP scaler, datasets/loaders, loss functions and
    checkpoint saver, then runs the train/validate loop over all epochs.
    """
    setup_default_logging()
    args, args_text = _parse_args()
    if args.log_wandb:
        if has_wandb:
            wandb.init(project=args.experiment, config=args)
        else:
            _logger.warning("You've requested to log metrics to wandb but package not found. "
                            "Metrics not being logged to wandb, try `pip install wandb`")
    args.prefetcher = not args.no_prefetcher
    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        # WORLD_SIZE is set by torch.distributed launchers (torchrun etc.)
        args.distributed = int(os.environ['WORLD_SIZE']) > 1
    args.device = 'cuda:0'
    args.world_size = 1
    args.rank = 0  # global rank
    if args.distributed:
        args.device = 'cuda:%d' % args.local_rank
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        args.world_size = torch.distributed.get_world_size()
        args.rank = torch.distributed.get_rank()
        _logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
                     % (args.rank, args.world_size))
    else:
        _logger.info('Training with a single process on 1 GPUs.')
    assert args.rank >= 0
    # resolve AMP arguments based on PyTorch / Apex availability
    use_amp = None
    if args.amp:
        # `--amp` chooses native amp before apex (APEX ver not actively maintained)
        if has_native_amp:
            args.native_amp = True
        elif has_apex:
            args.apex_amp = True
    if args.apex_amp and has_apex:
        use_amp = 'apex'
    elif args.native_amp and has_native_amp:
        use_amp = 'native'
    elif args.apex_amp or args.native_amp:
        _logger.warning("Neither APEX or native Torch AMP is available, using float32. "
                        "Install NVIDA apex or upgrade to PyTorch 1.6")
    # seed differs per rank so data augmentation is decorrelated across processes
    random_seed(args.seed, args.rank)
    model = create_model(
        args.model,
        pretrained=args.pretrained,
        num_classes=args.num_classes,
        drop_rate=args.drop,
        drop_connect_rate=args.drop_connect,  # DEPRECATED, use drop_path
        drop_path_rate=args.drop_path,
        drop_block_rate=args.drop_block,
        global_pool=args.gp,
        bn_momentum=args.bn_momentum,
        bn_eps=args.bn_eps,
        scriptable=args.torchscript,
        checkpoint_path=args.initial_checkpoint)
    if args.num_classes is None:
        assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
        args.num_classes = model.num_classes  # FIXME handle model default vs config num_classes more elegantly
    if args.local_rank == 0:
        _logger.info(
            f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}')
    data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)
    # setup augmentation batch splits for contrastive loss or split bn
    num_aug_splits = 0
    if args.aug_splits > 0:
        assert args.aug_splits > 1, 'A split of 1 makes no sense'
        num_aug_splits = args.aug_splits
    # enable split bn (separate bn stats per batch-portion)
    if args.split_bn:
        assert num_aug_splits > 1 or args.resplit
        model = convert_splitbn_model(model, max(num_aug_splits, 2))
    # move model to GPU, enable channels last layout if set
    model.cuda()
    if args.channels_last:
        model = model.to(memory_format=torch.channels_last)
    # setup synchronized BatchNorm for distributed training
    if args.distributed and args.sync_bn:
        assert not args.split_bn
        if has_apex and use_amp == 'apex':
            # Apex SyncBN preferred unless native amp is activated
            model = convert_syncbn_model(model)
        else:
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        if args.local_rank == 0:
            _logger.info(
                'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
                'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
    if args.torchscript:
        assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
        assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model'
        model = torch.jit.script(model)
    opt_lower = args.opt.lower()
    if opt_lower == 'adan':
        # Adan-specific kwargs are forwarded via args.opt_args (see optimizer_kwargs)
        args.opt_args = {'max_grad_norm': args.max_grad_norm, 'no_prox': args.no_prox}
    optimizer = create_optimizer(args, model, filter_bias_and_bn = not args.bias_decay)
    print(optimizer)
    # setup automatic mixed-precision (AMP) loss scaling and op casting
    amp_autocast = suppress  # do nothing
    loss_scaler = None
    if use_amp == 'apex':
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
        loss_scaler = ApexScaler()
        if args.local_rank == 0:
            _logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
    elif use_amp == 'native':
        amp_autocast = torch.cuda.amp.autocast
        loss_scaler = NativeScaler()
        if args.local_rank == 0:
            _logger.info('Using native Torch AMP. Training in mixed precision.')
    else:
        if args.local_rank == 0:
            _logger.info('AMP not enabled. Training in float32.')
    # optionally resume from a checkpoint
    resume_epoch = None
    if args.experiment:
        # auto-resume: if the experiment dir already has a last checkpoint and
        # --resume wasn't given explicitly, pick it up automatically
        output_dir = get_outdir(args.output if args.output else './output/train', args.experiment)
        resume_path = os.path.join(output_dir, "last.pth.tar")
        print(resume_path, os.path.exists(resume_path))
        if os.path.exists(resume_path) and not args.resume: args.resume = resume_path
    if args.resume:
        resume_epoch = resume_checkpoint(
            model, args.resume,
            optimizer=None if args.no_resume_opt else optimizer,
            loss_scaler=None if args.no_resume_opt else loss_scaler,
            log_info=args.local_rank == 0)
    # setup exponential moving average of model weights, SWA could be used here too
    model_ema = None
    if args.model_ema:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
        model_ema = ModelEmaV2(
            model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None)
        if args.resume:
            load_checkpoint(model_ema.module, args.resume, use_ema=True)
    # setup distributed training
    if args.distributed:
        if has_apex and use_amp == 'apex':
            # Apex DDP preferred unless native amp is activated
            if args.local_rank == 0:
                _logger.info("Using NVIDIA APEX DistributedDataParallel.")
            model = ApexDDP(model, delay_allreduce=True)
        else:
            if args.local_rank == 0:
                _logger.info("Using native Torch DistributedDataParallel.")
            model = NativeDDP(model, device_ids=[args.local_rank])  # can use device str in Torch >= 1.1
        # NOTE: EMA model does not need to be wrapped by DDP
    # setup learning rate schedule and starting epoch
    lr_scheduler, num_epochs = create_scheduler(args, optimizer)
    start_epoch = 0
    if args.start_epoch is not None:
        # a specified start_epoch will always override the resume epoch
        start_epoch = args.start_epoch
    elif resume_epoch is not None:
        start_epoch = resume_epoch
    if lr_scheduler is not None and start_epoch > 0:
        lr_scheduler.step(start_epoch)
    if args.local_rank == 0:
        _logger.info('Scheduled epochs: {}'.format(num_epochs))
    # create the train and eval datasets
    dataset_train = create_dataset(
        args.dataset,
        root=args.data_dir, split=args.train_split, is_training=True,
        batch_size=args.batch_size, repeats=args.epoch_repeats)
    dataset_eval = create_dataset(
        args.dataset, root=args.data_dir, split=args.val_split, is_training=False, batch_size=args.batch_size)
    # setup mixup / cutmix
    collate_fn = None
    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        mixup_args = dict(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.num_classes)
        if args.prefetcher:
            assert not num_aug_splits  # collate conflict (need to support deinterleaving in collate mixup)
            collate_fn = FastCollateMixup(**mixup_args)
        else:
            mixup_fn = Mixup(**mixup_args)
    # wrap dataset in AugMix helper
    if num_aug_splits > 1:
        dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
    # create data loaders w/ augmentation pipeline
    train_interpolation = args.train_interpolation
    if args.no_aug or not train_interpolation:
        train_interpolation = data_config['interpolation']
    loader_train = create_loader(
        dataset_train,
        input_size=data_config['input_size'],
        batch_size=args.batch_size,
        is_training=True,
        use_prefetcher=args.prefetcher,
        no_aug=args.no_aug,
        re_prob=args.reprob,
        re_mode=args.remode,
        re_count=args.recount,
        re_split=args.resplit,
        scale=args.scale,
        ratio=args.ratio,
        hflip=args.hflip,
        vflip=args.vflip,
        color_jitter=args.color_jitter,
        auto_augment=args.aa,
        num_aug_repeats=args.aug_repeats,
        num_aug_splits=num_aug_splits,
        interpolation=train_interpolation,
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        distributed=args.distributed,
        collate_fn=collate_fn,
        pin_memory=args.pin_mem,
        use_multi_epochs_loader=args.use_multi_epochs_loader
    )
    loader_eval = create_loader(
        dataset_eval,
        input_size=data_config['input_size'],
        batch_size=args.validation_batch_size or args.batch_size,
        is_training=False,
        use_prefetcher=args.prefetcher,
        interpolation=data_config['interpolation'],
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        distributed=args.distributed,
        crop_pct=data_config['crop_pct'],
        pin_memory=args.pin_mem,
    )
    # setup loss function
    if args.jsd_loss:
        assert num_aug_splits > 1  # JSD only valid with aug splits set
        train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing)
    elif mixup_active:
        # smoothing is handled with mixup target transform which outputs sparse, soft targets
        if args.bce_loss:
            train_loss_fn = nn.BCEWithLogitsLoss()
        else:
            train_loss_fn = SoftTargetCrossEntropy()
    elif args.smoothing:
        if args.bce_loss:
            train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing)
        else:
            train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        train_loss_fn = nn.CrossEntropyLoss()
    train_loss_fn = train_loss_fn.cuda()
    validate_loss_fn = nn.CrossEntropyLoss().cuda()
    # setup checkpoint saver and eval metric tracking
    eval_metric = args.eval_metric
    best_metric = None
    best_epoch = None
    saver = None
    output_dir = None
    if args.rank == 0:
        # only the rank-0 process writes checkpoints / summaries
        if args.experiment:
            exp_name = args.experiment
        else:
            exp_name = '-'.join([
                datetime.now().strftime("%Y%m%d-%H%M%S"),
                safe_model_name(args.model),
                str(data_config['input_size'][-1])
            ])
        output_dir = get_outdir(args.output if args.output else './output/train', exp_name)
        decreasing = True if eval_metric == 'loss' else False
        saver = CheckpointSaver(
            model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,
            checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist)
        with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
            f.write(args_text)
    try:
        for epoch in range(start_epoch, num_epochs):
            if args.distributed and hasattr(loader_train.sampler, 'set_epoch'):
                loader_train.sampler.set_epoch(epoch)
            train_metrics = train_one_epoch(
                epoch, model, loader_train, optimizer, train_loss_fn, args,
                lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
                amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn)
            if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
                if args.local_rank == 0:
                    _logger.info("Distributing BatchNorm running means and vars")
                distribute_bn(model, args.world_size, args.dist_bn == 'reduce')
            eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast)
            if model_ema is not None and not args.model_ema_force_cpu:
                # EMA weights are evaluated too; their metrics drive checkpointing
                if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
                    distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
                ema_eval_metrics = validate(
                    model_ema.module, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)')
                eval_metrics = ema_eval_metrics
            if lr_scheduler is not None:
                # step LR for next epoch
                lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
            if output_dir is not None:
                update_summary(
                    epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
                    write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb)
            if saver is not None:
                # save proper checkpoint with eval metric
                save_metric = eval_metrics[eval_metric]
                best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric)
    except KeyboardInterrupt:
        pass
    if best_metric is not None:
        _logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
def train_one_epoch(
        epoch, model, loader, optimizer, loss_fn, args,
        lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress,
        loss_scaler=None, model_ema=None, mixup_fn=None):
    """Run one training epoch.

    Handles AMP autocast + loss scaling, optional gradient clipping, EMA
    weight updates, periodic logging/recovery checkpoints, and per-update LR
    scheduler stepping. Returns an OrderedDict with the average 'loss'.
    """
    # disable mixup after the configured epoch, in whichever place it lives
    if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
        if args.prefetcher and loader.mixup_enabled:
            loader.mixup_enabled = False
        elif mixup_fn is not None:
            mixup_fn.mixup_enabled = False
    # second-order optimizers (e.g. Adahessian) need create_graph=True on backward
    second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()
    losses_m = AverageMeter()
    model.train()
    end = time.time()
    last_idx = len(loader) - 1
    num_updates = epoch * len(loader)
    for batch_idx, (input, target) in enumerate(loader):
        last_batch = batch_idx == last_idx
        data_time_m.update(time.time() - end)
        if not args.prefetcher:
            # prefetcher moves data to GPU and applies mixup itself
            input, target = input.cuda(), target.cuda()
            if mixup_fn is not None:
                input, target = mixup_fn(input, target)
        if args.channels_last:
            input = input.contiguous(memory_format=torch.channels_last)
        with amp_autocast():
            output = model(input)
            loss = loss_fn(output, target)
        if not args.distributed:
            # distributed case updates the meter with the all-reduced loss below
            losses_m.update(loss.item(), input.size(0))
        optimizer.zero_grad()
        if loss_scaler is not None:
            # scaler handles backward, unscale, clip and step in one call
            loss_scaler(
                loss, optimizer,
                clip_grad=args.clip_grad, clip_mode=args.clip_mode,
                parameters=model_parameters(model, exclude_head='agc' in args.clip_mode),
                create_graph=second_order)
        else:
            loss.backward(create_graph=second_order)
            if args.clip_grad is not None:
                dispatch_clip_grad(
                    model_parameters(model, exclude_head='agc' in args.clip_mode),
                    value=args.clip_grad, mode=args.clip_mode)
            optimizer.step()
        if model_ema is not None:
            model_ema.update(model)
        torch.cuda.synchronize()
        num_updates += 1
        batch_time_m.update(time.time() - end)
        if last_batch or batch_idx % args.log_interval == 0:
            lrl = [param_group['lr'] for param_group in optimizer.param_groups]
            lr = sum(lrl) / len(lrl)
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                losses_m.update(reduced_loss.item(), input.size(0))
            if args.local_rank == 0:
                _logger.info(
                    'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
                    'Loss: {loss.val:#.4g} ({loss.avg:#.3g}) '
                    'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
                    '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
                    'LR: {lr:.3e} '
                    'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
                        epoch,
                        batch_idx, len(loader),
                        100. * batch_idx / last_idx,
                        loss=losses_m,
                        batch_time=batch_time_m,
                        rate=input.size(0) * args.world_size / batch_time_m.val,
                        rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
                        lr=lr,
                        data_time=data_time_m))
                if args.save_images and output_dir:
                    torchvision.utils.save_image(
                        input,
                        os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
                        padding=0,
                        normalize=True)
        if saver is not None and args.recovery_interval and (
                last_batch or (batch_idx + 1) % args.recovery_interval == 0):
            saver.save_recovery(epoch, batch_idx=batch_idx)
        if lr_scheduler is not None:
            lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
        end = time.time()
        # end for
    if hasattr(optimizer, 'sync_lookahead'):
        optimizer.sync_lookahead()
    return OrderedDict([('loss', losses_m.avg)])
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
    """Evaluate ``model`` over ``loader``.

    Supports AMP autocast, channels-last inputs, TTA reduction (``args.tta``)
    and distributed metric all-reduce. Returns an OrderedDict with average
    'loss', 'top1' and 'top5'. ``log_suffix`` tags log lines (e.g. ' (EMA)').
    """
    batch_time_m = AverageMeter()
    losses_m = AverageMeter()
    top1_m = AverageMeter()
    top5_m = AverageMeter()
    model.eval()
    end = time.time()
    last_idx = len(loader) - 1
    with torch.no_grad():
        for batch_idx, (input, target) in enumerate(loader):
            last_batch = batch_idx == last_idx
            if not args.prefetcher:
                input = input.cuda()
                target = target.cuda()
            if args.channels_last:
                input = input.contiguous(memory_format=torch.channels_last)
            with amp_autocast():
                output = model(input)
                if isinstance(output, (tuple, list)):
                    # some models return (logits, aux); keep the main logits
                    output = output[0]
                # augmentation reduction
                reduce_factor = args.tta
                if reduce_factor > 1:
                    # average predictions over each group of TTA replicas
                    output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
                    target = target[0:target.size(0):reduce_factor]
                loss = loss_fn(output, target)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                acc1 = reduce_tensor(acc1, args.world_size)
                acc5 = reduce_tensor(acc5, args.world_size)
            else:
                reduced_loss = loss.data
            torch.cuda.synchronize()
            losses_m.update(reduced_loss.item(), input.size(0))
            top1_m.update(acc1.item(), output.size(0))
            top5_m.update(acc5.item(), output.size(0))
            batch_time_m.update(time.time() - end)
            end = time.time()
            if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
                log_name = 'Test' + log_suffix
                _logger.info(
                    '{0}: [{1:>4d}/{2}] '
                    'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                    'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
                    'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
                    'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
                        log_name, batch_idx, last_idx, batch_time=batch_time_m,
                        loss=losses_m, top1=top1_m, top5=top5_m))
    metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
    return metrics
# Standard entry-point guard: run training only when executed as a script.
if __name__ == '__main__':
    main()
|
import torch
class SAM(torch.optim.Optimizer):
    """Sharpness-Aware Minimization (SAM) optimizer wrapper.

    Wraps a base optimizer class and performs the two-phase SAM update:
    first ascend to a nearby loss maximum within an L2 ball of radius ``rho``
    around the current weights (``first_step``), then restore the original
    weights and apply the base optimizer's update using the gradients
    computed at the perturbed point (``second_step``). With ``adaptive=True``
    the perturbation is scaled element-wise by ``p**2`` (ASAM variant).
    """
    def __init__(self, params, base_optimizer, rho=0.05, adaptive=False, **kwargs):
        # ``base_optimizer`` is a class (e.g. torch.optim.SGD); it is
        # instantiated here over this wrapper's param_groups and shares them.
        assert rho >= 0.0, f"Invalid rho, should be non-negative: {rho}"
        defaults = dict(rho=rho, adaptive=adaptive, **kwargs)
        super(SAM, self).__init__(params, defaults)
        self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
        # alias so per-group settings (lr etc.) stay consistent between wrappers
        self.param_groups = self.base_optimizer.param_groups
    @torch.no_grad()
    def first_step(self, zero_grad=False):
        """Perturb weights to w + e(w), stashing the originals in state."""
        grad_norm = self._grad_norm()
        for group in self.param_groups:
            # small epsilon guards against division by a zero gradient norm
            scale = group["rho"] / (grad_norm + 1e-12)
            for p in group["params"]:
                if p.grad is None: continue
                self.state[p]["old_p"] = p.data.clone()
                e_w = (torch.pow(p, 2) if group["adaptive"] else 1.0) * p.grad * scale.to(p)
                p.add_(e_w)  # climb to the local maximum "w + e(w)"
        if zero_grad: self.zero_grad()
    @torch.no_grad()
    def second_step(self, zero_grad=False):
        """Restore original weights, then run the base optimizer's step."""
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None: continue
                p.data = self.state[p]["old_p"]  # get back to "w" from "w + e(w)"
        self.base_optimizer.step()  # do the actual "sharpness-aware" update
        if zero_grad: self.zero_grad()
    @torch.no_grad()
    def step(self, closure=None):
        """Full SAM step; ``closure`` must perform a forward-backward pass."""
        assert closure is not None, "Sharpness Aware Minimization requires closure, but it was not provided"
        closure = torch.enable_grad()(closure)  # the closure should do a full forward-backward pass
        self.first_step(zero_grad=True)
        closure()
        self.second_step()
    def _grad_norm(self):
        # Global L2 norm of all gradients (element-wise |p|-scaled when
        # adaptive), gathered on one device in case of model parallelism.
        shared_device = self.param_groups[0]["params"][0].device  # put everything on the same device, in case of model parallelism
        norm = torch.norm(
            torch.stack([
                ((torch.abs(p) if group["adaptive"] else 1.0) * p.grad).norm(p=2).to(shared_device)
                for group in self.param_groups for p in group["params"]
                if p.grad is not None
            ]),
            p=2
        )
        return norm
    def load_state_dict(self, state_dict):
        """Load state, then re-alias the base optimizer's param_groups."""
        super().load_state_dict(state_dict)
        self.base_optimizer.param_groups = self.param_groups
|
""" Optimizer Factory w/ Custom Weight Decay
Hacked together by / Copyright 2021 Ross Wightman
"""
import json
from itertools import islice
from typing import Optional, Callable, Tuple
import torch
import torch.nn as nn
import torch.optim as optim
from timm.models.helpers import group_parameters
from timm.optim.adabelief import AdaBelief
from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.lamb import Lamb
from timm.optim.lars import Lars
from timm.optim.lookahead import Lookahead
from timm.optim.madgrad import MADGRAD
from timm.optim.nadam import Nadam
from timm.optim.nvnovograd import NvNovoGrad
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP
from adan import Adan
from sam import SAM
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def param_groups_weight_decay(
        model: nn.Module,
        weight_decay=1e-5,
        no_weight_decay_list=()
):
    """Split trainable parameters into two groups: one without weight decay
    (1-D params such as biases/norm scales, ``.bias``-suffixed names, and any
    explicitly listed name) and one with ``weight_decay`` applied.
    """
    skip = set(no_weight_decay_list)
    decayed = []
    undecayed = []
    for name, p in model.named_parameters():
        if not p.requires_grad:
            continue
        exempt = p.ndim <= 1 or name.endswith(".bias") or name in skip
        (undecayed if exempt else decayed).append(p)
    return [
        {'params': undecayed, 'weight_decay': 0.},
        {'params': decayed, 'weight_decay': weight_decay},
    ]
def _group(it, size):
    """Yield successive tuples of at most ``size`` items from ``it``.

    The final chunk may be shorter; an empty input yields nothing.
    """
    src = iter(it)
    while True:
        chunk = tuple(islice(src, size))
        if not chunk:
            return
        yield chunk
def _layer_map(model, layers_per_group=12, num_groups=None):
    """Build a parameter-name -> layer-group-index map for layer-wise decay.

    Trunk parameter names are chunked into groups of ``layers_per_group``
    (or into exactly ``num_groups`` groups when given); classifier/head
    parameters all share the final index. If the model declares no
    classifier prefix in ``pretrained_cfg``, every parameter counts as head.
    """
    def _in_head(n, hp):
        if not hp:
            return True
        if isinstance(hp, (tuple, list)):
            return any(n.startswith(prefix) for prefix in hp)
        return n.startswith(hp)
    head_prefix = getattr(model, 'pretrained_cfg', {}).get('classifier', None)
    names_trunk = []
    names_head = []
    for n, _ in model.named_parameters():
        (names_head if _in_head(n, head_prefix) else names_trunk).append(n)
    # group non-head layers; ceil-divide when a fixed group count is requested
    num_trunk_layers = len(names_trunk)
    if num_groups is not None:
        layers_per_group = -(num_trunk_layers // -num_groups)
    trunk_groups = list(_group(names_trunk, layers_per_group))
    layer_map = {n: i for i, grp in enumerate(trunk_groups) for n in grp}
    layer_map.update({n: len(trunk_groups) for n in names_head})
    return layer_map
def param_groups_layer_decay(
        model: nn.Module,
        weight_decay: float = 0.05,
        no_weight_decay_list: Tuple[str, ...] = (),
        layer_decay: float = .75,
        end_layer_decay: Optional[float] = None,  # NOTE(review): currently unused — confirm intent
):
    """
    Parameter groups for layer-wise lr decay & weight decay
    Based on BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58

    Groups parameters by (layer index, decay/no-decay); each group carries an
    ``lr_scale`` that decays geometrically from the last layer backwards
    (layer ``i`` gets ``layer_decay ** (layer_max - i)``). 1-D parameters and
    names in ``no_weight_decay_list`` get zero weight decay.
    """
    no_weight_decay_list = set(no_weight_decay_list)
    param_group_names = {}  # NOTE for debugging
    param_groups = {}
    if hasattr(model, 'group_matcher'):
        # FIXME interface needs more work
        layer_map = group_parameters(model, model.group_matcher(coarse=False), reverse=True)
    else:
        # fallback
        layer_map = _layer_map(model)
    num_layers = max(layer_map.values()) + 1
    layer_max = num_layers - 1
    layer_scales = list(layer_decay ** (layer_max - i) for i in range(num_layers))
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # no decay: all 1D parameters and model specific ones
        if param.ndim == 1 or name in no_weight_decay_list:
            g_decay = "no_decay"
            this_decay = 0.
        else:
            g_decay = "decay"
            this_decay = weight_decay
        # unknown names fall into the last (head) layer bucket
        layer_id = layer_map.get(name, layer_max)
        group_name = "layer_%d_%s" % (layer_id, g_decay)
        if group_name not in param_groups:
            this_scale = layer_scales[layer_id]
            param_group_names[group_name] = {
                "lr_scale": this_scale,
                "weight_decay": this_decay,
                "param_names": [],
            }
            param_groups[group_name] = {
                "lr_scale": this_scale,
                "weight_decay": this_decay,
                "params": [],
            }
        param_group_names[group_name]["param_names"].append(name)
        param_groups[group_name]["params"].append(param)
    # FIXME temporary output to debug new feature
    print("parameter groups: \n%s" % json.dumps(param_group_names, indent=2))
    return list(param_groups.values())
def optimizer_kwargs(cfg):
    """ cfg/argparse to kwargs helper
    Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn.
    """
    kwargs = {
        'opt': cfg.opt,
        'lr': cfg.lr,
        'weight_decay': cfg.weight_decay,
        'momentum': cfg.momentum,
    }
    # optional attrs are forwarded only when explicitly set on the cfg
    eps = getattr(cfg, 'opt_eps', None)
    if eps is not None:
        kwargs['eps'] = eps
    betas = getattr(cfg, 'opt_betas', None)
    if betas is not None:
        kwargs['betas'] = betas
    decay = getattr(cfg, 'layer_decay', None)
    if decay is not None:
        kwargs['layer_decay'] = decay
    extra = getattr(cfg, 'opt_args', None)
    if extra is not None:
        kwargs.update(extra)
    return kwargs
def create_optimizer(args, model, filter_bias_and_bn=True):
    """ Legacy optimizer factory for backwards compatibility.
    NOTE: Use create_optimizer_v2 for new code.
    """
    kwargs = optimizer_kwargs(cfg=args)
    return create_optimizer_v2(model, filter_bias_and_bn=filter_bias_and_bn, **kwargs)
def create_optimizer_v2(
        model_or_params,
        opt: str = 'sgd',
        lr: Optional[float] = None,
        weight_decay: float = 0.,
        momentum: float = 0.9,
        filter_bias_and_bn: bool = True,
        layer_decay: Optional[float] = None,
        param_group_fn: Optional[Callable] = None,
        **kwargs):
    """ Create an optimizer.

    TODO currently the model is passed in and all parameters are selected for optimization.
    For more general use an interface that allows selection of parameters to optimize and lr groups, one of:
      * a filter fn interface that further breaks params into groups in a weight_decay compatible fashion
      * expose the parameters interface and leave it up to caller

    Args:
        model_or_params (nn.Module): model containing parameters to optimize
        opt: name of optimizer to create (optionally prefixed, e.g. 'lookahead_sgd')
        lr: initial learning rate
        weight_decay: weight decay to apply in optimizer
        momentum: momentum for momentum based optimizers (others may use betas via kwargs)
        filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay
        layer_decay: if set, build per-layer lr-scaled param groups
        param_group_fn: optional callable producing param groups from the model
        **kwargs: extra optimizer specific kwargs to pass through

    Returns:
        Optimizer

    Raises:
        ValueError: if ``opt`` does not name a known optimizer.
    """
    if isinstance(model_or_params, nn.Module):
        # a model was passed in, extract parameters and add weight decays to appropriate layers
        no_weight_decay = {}
        if hasattr(model_or_params, 'no_weight_decay'):
            no_weight_decay = model_or_params.no_weight_decay()
        if param_group_fn:
            parameters = param_group_fn(model_or_params)
        elif layer_decay is not None:
            parameters = param_groups_layer_decay(
                model_or_params,
                weight_decay=weight_decay,
                layer_decay=layer_decay,
                no_weight_decay_list=no_weight_decay)
            weight_decay = 0.
        elif weight_decay and filter_bias_and_bn:
            parameters = param_groups_weight_decay(model_or_params, weight_decay, no_weight_decay)
            weight_decay = 0.
        else:
            parameters = model_or_params.parameters()
    else:
        # iterable of parameters or param groups passed in
        parameters = model_or_params
    opt_lower = opt.lower()
    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
    if 'fused' in opt_lower:
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
    opt_args = dict(weight_decay=weight_decay, **kwargs)
    if lr is not None:
        opt_args.setdefault('lr', lr)
    # basic SGD & related
    if opt_lower == 'sgd' or opt_lower == 'nesterov':
        # NOTE 'sgd' refers to SGD + nesterov momentum for legacy / backwards compat reasons
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'sam':
        opt_args.pop('eps', None)
        optimizer = SAM(parameters, optim.SGD, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'adan':
        optimizer = Adan(parameters, **opt_args)
    elif opt_lower == 'momentum':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args)
    elif opt_lower == 'sgdp':
        optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args)
    # adaptive
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'adamp':
        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
    elif opt_lower == 'nadam':
        try:
            # NOTE PyTorch >= 1.10 should have native NAdam
            # FIX: was `optim.Nadam` which does not exist (the class is NAdam),
            # so the native path always fell through to the timm implementation.
            optimizer = optim.NAdam(parameters, **opt_args)
        except AttributeError:
            optimizer = Nadam(parameters, **opt_args)
    elif opt_lower == 'radam':
        optimizer = RAdam(parameters, **opt_args)
    elif opt_lower == 'adamax':
        optimizer = optim.Adamax(parameters, **opt_args)
    elif opt_lower == 'adabelief':
        optimizer = AdaBelief(parameters, rectify=False, **opt_args)
    elif opt_lower == 'radabelief':
        optimizer = AdaBelief(parameters, rectify=True, **opt_args)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == 'adagrad':
        opt_args.setdefault('eps', 1e-8)
        optimizer = optim.Adagrad(parameters, **opt_args)
    elif opt_lower == 'adafactor':
        optimizer = Adafactor(parameters, **opt_args)
    elif opt_lower == 'lamb':
        optimizer = Lamb(parameters, **opt_args)
    elif opt_lower == 'lambc':
        optimizer = Lamb(parameters, trust_clip=True, **opt_args)
    elif opt_lower == 'larc':
        optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args)
    elif opt_lower == 'lars':
        optimizer = Lars(parameters, momentum=momentum, **opt_args)
    elif opt_lower == 'nlarc':
        optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args)
    elif opt_lower == 'nlars':
        optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'madgrad':
        optimizer = MADGRAD(parameters, momentum=momentum, **opt_args)
    elif opt_lower == 'madgradw':
        optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args)
    elif opt_lower == 'novograd' or opt_lower == 'nvnovograd':
        optimizer = NvNovoGrad(parameters, **opt_args)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args)
    elif opt_lower == 'rmsproptf':
        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args)
    # second order
    elif opt_lower == 'adahessian':
        optimizer = Adahessian(parameters, **opt_args)
    # NVIDIA fused optimizers, require APEX to be installed
    elif opt_lower == 'fusedsgd':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'fusedmomentum':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args)
    elif opt_lower == 'fusedadam':
        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
    elif opt_lower == 'fusedadamw':
        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
    elif opt_lower == 'fusedlamb':
        optimizer = FusedLAMB(parameters, **opt_args)
    elif opt_lower == 'fusednovograd':
        opt_args.setdefault('betas', (0.95, 0.98))
        optimizer = FusedNovoGrad(parameters, **opt_args)
    else:
        # FIX: was `assert False and "Invalid optimizer"` (always-False condition,
        # message never shown; assert stripped under -O) followed by a bare
        # `raise ValueError` with no message.
        raise ValueError(f'Invalid optimizer: {opt}')
    if len(opt_split) > 1:
        if opt_split[0] == 'lookahead':
            optimizer = Lookahead(optimizer)
    return optimizer
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import argparse
import datetime
import json
import numpy as np
import os
import time
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from adan import Adan
from timm.models import resume_checkpoint
import timm
#assert timm.__version__ == "0.3.2" # version check
import timm.optim.optim_factory as optim_factory
from timm.utils import *
import util.misc as misc
from util.misc import NativeScalerWithGradNormCount as NativeScaler
import models_mae
from engine_pretrain import train_one_epoch
def get_args_parser():
    """Build the command-line parser for MAE pre-training.

    Covers model, optimizer (AdamW by default, Adan with ``--use-adan``),
    dataset, logging, and distributed-training options; defaults follow the
    original MAE recipe.

    Returns:
        argparse.ArgumentParser: the configured parser (``add_help=False`` so
        it can be composed into a parent parser).
    """
    parser = argparse.ArgumentParser('MAE pre-training', add_help=False)
    parser.add_argument('--batch_size', default=64, type=int,
                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
    parser.add_argument('--epochs', default=400, type=int)
    parser.add_argument('--accum_iter', default=1, type=int,
                        help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')

    # Model parameters
    parser.add_argument('--model', default='mae_vit_large_patch16', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--input_size', default=224, type=int,
                        help='images input size')
    parser.add_argument('--mask_ratio', default=0.75, type=float,
                        help='Masking ratio (percentage of removed patches).')
    parser.add_argument('--norm_pix_loss', action='store_true',
                        help='Use (per-patch) normalized pixels as targets for computing loss')
    parser.set_defaults(norm_pix_loss=False)

    # Optimizer parameters
    parser.add_argument('--weight_decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    parser.add_argument('--lr', type=float, default=None, metavar='LR',
                        help='learning rate (absolute lr)')
    parser.add_argument('--blr', type=float, default=1e-3, metavar='LR',
                        help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
    parser.add_argument('--min_lr', type=float, default=0., metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0')
    parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N',
                        help='epochs to warmup LR')
    parser.add_argument('--use-adan', action='store_true', default=False,
                        help='whether to use Adan')
    parser.add_argument('--max-grad-norm', type=float, default=0.0,
                        help='max grad norm (default: 0.0 for no clip)')
    parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: None, use opt default)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--bias-decay', action='store_true', default=False,
                        help='whether to decay bias term')

    # Dataset parameters
    parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')
    parser.add_argument('--output_dir', default=None,
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default='./pretrain_dir/',
                        help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default=None,
                        help='resume from checkpoint')
    parser.add_argument('--no-resume-opt', action='store_true', default=False,
                        help='prevent resume of optimizer state when resuming model')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=True)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')
    return parser
def main(args):
    """Run MAE pre-training.

    Sets up (distributed) ImageNet-style data loading, builds the MAE model
    and optimizer (AdamW by default, Adan with ``--use-adan``), optionally
    auto-resumes from ``<output_dir>/last.pth.tar``, and trains for
    ``args.epochs`` epochs, writing TensorBoard events and a ``log.txt``.

    Args:
        args: parsed namespace from :func:`get_args_parser`.
    """
    misc.init_distributed_mode(args)
    print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    print("{}".format(args).replace(', ', ',\n'))
    device = torch.device(args.device)
    # fix the seed for reproducibility (offset by rank so workers differ)
    seed = args.seed + misc.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    # simple augmentation
    transform_train = transforms.Compose([
            transforms.RandomResizedCrop(args.input_size, scale=(0.2, 1.0), interpolation=3),  # 3 is bicubic
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    dataset_train = datasets.ImageFolder(os.path.join(args.data_path, 'train'), transform=transform_train)
    print(dataset_train)
    if True:  # args.distributed:
        num_tasks = misc.get_world_size()
        global_rank = misc.get_rank()
        sampler_train = torch.utils.data.DistributedSampler(
            dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
        )
        print("Sampler_train = %s" % str(sampler_train))
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
    if misc.is_main_process() and args.log_dir is not None:
        # timestamped run directory so repeated runs do not overwrite logs
        TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.datetime.now())
        args.log_dir = args.log_dir + 'mae-' + TIMESTAMP
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = SummaryWriter(log_dir=args.log_dir)
    else:
        log_writer = None
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )
    # define the model
    model = models_mae.__dict__[args.model](norm_pix_loss=args.norm_pix_loss)
    model.to(device)
    model_without_ddp = model
    print("Model = %s" % str(model_without_ddp))
    eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
    if args.lr is None:  # only base_lr is specified
        args.lr = args.blr * eff_batch_size / 256
    print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
    print("actual lr: %.2e" % args.lr)
    print("accumulate grad iterations: %d" % args.accum_iter)
    print("effective batch size: %d" % eff_batch_size)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        model_without_ddp = model.module
    # following timm: set wd as 0 for bias and norm layers
    param_groups = optim_factory.add_weight_decay(model_without_ddp, args.weight_decay)
    if args.use_adan:
        if args.bias_decay:
            param = model_without_ddp.parameters()
        else:
            param = param_groups
            # decay is already encoded per-group, so zero the global value
            args.weight_decay = 0.0
        # BUGFIX: --opt-betas/--opt-eps default to None ("use opt default"),
        # but Adan.__init__ cannot accept None; substitute Adan's own defaults.
        optimizer = Adan(param, weight_decay=args.weight_decay, lr=args.lr,
                         betas=tuple(args.opt_betas) if args.opt_betas is not None else (0.98, 0.92, 0.99),
                         eps=args.opt_eps if args.opt_eps is not None else 1e-8,
                         max_grad_norm=args.max_grad_norm)
    else:
        optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))
    print(optimizer)
    loss_scaler = NativeScaler()
    resume_epoch = None
    # auto-resume from the newest checkpoint, but only when a save dir is set
    # (BUGFIX: os.path.join(None, ...) raised TypeError for the default
    # output_dir=None)
    if not args.resume and args.output_dir is not None:
        resume_path = os.path.join(args.output_dir, "last.pth.tar")
        print(resume_path, os.path.isfile(resume_path))
        if os.path.isfile(resume_path):
            args.resume = resume_path
    if args.resume:
        resume_epoch = resume_checkpoint(
            model_without_ddp, args.resume,
            optimizer=None if args.no_resume_opt else optimizer,
            loss_scaler=None if args.no_resume_opt else loss_scaler,
            log_info=misc.is_main_process())
    if resume_epoch is not None:
        args.start_epoch = resume_epoch
    print(f"Start training for {args.epochs} epochs")
    saver = None
    if misc.is_main_process() and args.output_dir is not None:
        saver = CheckpointSaver(
            model=model, optimizer=optimizer, args=args, amp_scaler=loss_scaler,
            checkpoint_dir=args.output_dir, recovery_dir=args.output_dir, decreasing=True, max_history=2)
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            data_loader_train.sampler.set_epoch(epoch)
        train_stats = train_one_epoch(
            model, data_loader_train,
            optimizer, device, epoch, loss_scaler,
            log_writer=log_writer,
            args=args
        )
        if saver is not None:
            # save checkpoint ranked by training loss (decreasing=True above)
            saver.save_checkpoint(epoch, train_stats['loss'])
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     'epoch': epoch,}
        if args.output_dir and misc.is_main_process():
            if log_writer is not None:
                log_writer.flush()
            with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
                f.write(json.dumps(log_stats) + "\n")
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    # Parse CLI options, make sure the checkpoint directory exists, then train.
    cli_args = get_args_parser().parse_args()
    if cli_args.output_dir:
        Path(cli_args.output_dir).mkdir(parents=True, exist_ok=True)
    main(cli_args)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# MoCo v3: https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
import argparse
import datetime
import json
import numpy as np
import os
import time
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import timm
assert timm.__version__ == "0.3.2" # version check
from timm.models.layers import trunc_normal_
import util.misc as misc
from util.pos_embed import interpolate_pos_embed
from util.misc import NativeScalerWithGradNormCount as NativeScaler
from util.lars import LARS
from util.crop import RandomResizedCrop
import models_vit
from engine_finetune import train_one_epoch, evaluate
def get_args_parser():
    """Build the command-line parser for MAE linear probing.

    Covers model, optimizer (LARS), finetuning-checkpoint, dataset, and
    distributed-training options; defaults follow the MAE linear-probe recipe.

    Returns:
        argparse.ArgumentParser: the configured parser (``add_help=False`` so
        it can be composed into a parent parser).
    """
    parser = argparse.ArgumentParser('MAE linear probing for image classification', add_help=False)
    parser.add_argument('--batch_size', default=512, type=int,
                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
    parser.add_argument('--epochs', default=90, type=int)
    parser.add_argument('--accum_iter', default=1, type=int,
                        help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')

    # Model parameters
    parser.add_argument('--model', default='vit_large_patch16', type=str, metavar='MODEL',
                        help='Name of model to train')

    # Optimizer parameters
    parser.add_argument('--weight_decay', type=float, default=0,
                        help='weight decay (default: 0 for linear probe following MoCo v1)')
    parser.add_argument('--lr', type=float, default=None, metavar='LR',
                        help='learning rate (absolute lr)')
    parser.add_argument('--blr', type=float, default=0.1, metavar='LR',
                        help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
    parser.add_argument('--min_lr', type=float, default=0., metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0')
    parser.add_argument('--warmup_epochs', type=int, default=10, metavar='N',
                        help='epochs to warmup LR')

    # * Finetuning params
    parser.add_argument('--finetune', default='',
                        help='finetune from checkpoint')
    parser.add_argument('--global_pool', action='store_true')
    parser.set_defaults(global_pool=False)
    parser.add_argument('--cls_token', action='store_false', dest='global_pool',
                        help='Use class token instead of global pool for classification')

    # Dataset parameters
    parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')
    parser.add_argument('--nb_classes', default=1000, type=int,
                        help='number of the classification types')
    parser.add_argument('--output_dir', default='./output_dir',
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default='./output_dir',
                        help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='',
                        help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true',
                        help='Perform evaluation only')
    parser.add_argument('--dist_eval', action='store_true', default=False,
                        help='Enabling distributed evaluation (recommended during training for faster monitor)')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=True)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')
    return parser
def main(args):
    """Run MAE linear probing (train a linear classifier on frozen features).

    Loads a pre-trained MAE ViT backbone (``--finetune``), freezes everything
    except a freshly initialized BatchNorm+Linear head, and trains that head
    with LARS, evaluating on the validation split after every epoch.

    Args:
        args: parsed namespace from :func:`get_args_parser`.
    """
    misc.init_distributed_mode(args)
    print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    print("{}".format(args).replace(', ', ',\n'))
    device = torch.device(args.device)
    # fix the seed for reproducibility
    seed = args.seed + misc.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    # linear probe: weak augmentation
    transform_train = transforms.Compose([
            RandomResizedCrop(224, interpolation=3),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    transform_val = transforms.Compose([
            transforms.Resize(256, interpolation=3),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
    dataset_train = datasets.ImageFolder(os.path.join(args.data_path, 'train'), transform=transform_train)
    dataset_val = datasets.ImageFolder(os.path.join(args.data_path, 'val'), transform=transform_val)
    print(dataset_train)
    print(dataset_val)
    if True:  # args.distributed:
        num_tasks = misc.get_world_size()
        global_rank = misc.get_rank()
        sampler_train = torch.utils.data.DistributedSampler(
            dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
        )
        print("Sampler_train = %s" % str(sampler_train))
        if args.dist_eval:
            if len(dataset_val) % num_tasks != 0:
                print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                      'This will slightly alter validation results as extra duplicate entries are added to achieve '
                      'equal num of samples per-process.')
            sampler_val = torch.utils.data.DistributedSampler(
                dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True)  # shuffle=True to reduce monitor bias
        else:
            sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    # only rank 0 writes TensorBoard logs, and only when actually training
    if global_rank == 0 and args.log_dir is not None and not args.eval:
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = SummaryWriter(log_dir=args.log_dir)
    else:
        log_writer = None
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, sampler=sampler_val,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=False
    )
    model = models_vit.__dict__[args.model](
        num_classes=args.nb_classes,
        global_pool=args.global_pool,
    )
    if args.finetune and not args.eval:
        checkpoint = torch.load(args.finetune, map_location='cpu')
        print("Load pre-trained checkpoint from: %s" % args.finetune)
        checkpoint_model = checkpoint['model']
        state_dict = model.state_dict()
        # drop the pre-training head if its shape mismatches the new task
        for k in ['head.weight', 'head.bias']:
            if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
                print(f"Removing key {k} from pretrained checkpoint")
                del checkpoint_model[k]
        # interpolate position embedding
        interpolate_pos_embed(model, checkpoint_model)
        # load pre-trained model
        msg = model.load_state_dict(checkpoint_model, strict=False)
        print(msg)
        # only the (new) head / fc_norm weights may be missing from the checkpoint
        if args.global_pool:
            assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
        else:
            assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
        # manually initialize fc layer: following MoCo v3
        trunc_normal_(model.head.weight, std=0.01)
    # for linear prob only
    # hack: revise model's head with BN
    model.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.head.in_features, affine=False, eps=1e-6), model.head)
    # freeze all but the head
    for _, p in model.named_parameters():
        p.requires_grad = False
    for _, p in model.head.named_parameters():
        p.requires_grad = True
    model.to(device)
    model_without_ddp = model
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("Model = %s" % str(model_without_ddp))
    print('number of params (M): %.2f' % (n_parameters / 1.e6))
    # linear lr scaling rule: absolute_lr = base_lr * total_batch_size / 256
    eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
    if args.lr is None:  # only base_lr is specified
        args.lr = args.blr * eff_batch_size / 256
    print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
    print("actual lr: %.2e" % args.lr)
    print("accumulate grad iterations: %d" % args.accum_iter)
    print("effective batch size: %d" % eff_batch_size)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    # optimize only the head parameters (everything else is frozen above)
    optimizer = LARS(model_without_ddp.head.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    print(optimizer)
    loss_scaler = NativeScaler()
    criterion = torch.nn.CrossEntropyLoss()
    print("criterion = %s" % str(criterion))
    misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
    if args.eval:
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        exit(0)
    print(f"Start training for {args.epochs} epochs")
    start_time = time.time()
    max_accuracy = 0.0
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            data_loader_train.sampler.set_epoch(epoch)
        train_stats = train_one_epoch(
            model, criterion, data_loader_train,
            optimizer, device, epoch, loss_scaler,
            max_norm=None,
            log_writer=log_writer,
            args=args
        )
        if args.output_dir:
            misc.save_model(
                args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
                loss_scaler=loss_scaler, epoch=epoch)
        # evaluate after every epoch and track the best top-1 accuracy
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        max_accuracy = max(max_accuracy, test_stats["acc1"])
        print(f'Max accuracy: {max_accuracy:.2f}%')
        if log_writer is not None:
            log_writer.add_scalar('perf/test_acc1', test_stats['acc1'], epoch)
            log_writer.add_scalar('perf/test_acc5', test_stats['acc5'], epoch)
            log_writer.add_scalar('perf/test_loss', test_stats['loss'], epoch)
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                        **{f'test_{k}': v for k, v in test_stats.items()},
                        'epoch': epoch,
                        'n_parameters': n_parameters}
        if args.output_dir and misc.is_main_process():
            if log_writer is not None:
                log_writer.flush()
            with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
                f.write(json.dumps(log_stats) + "\n")
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    # Entry point: read options, create the save directory, run linear probing.
    probe_args = get_args_parser().parse_args()
    if probe_args.output_dir:
        Path(probe_args.output_dir).mkdir(parents=True, exist_ok=True)
    main(probe_args)
|
# Copyright 2022 Garena Online Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
from typing import List
class Adan(Optimizer):
    """
    Implements a pytorch variant of Adan.

    Adan was proposed in
    Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models[J]. arXiv preprint arXiv:2208.06677, 2022.
    https://arxiv.org/abs/2208.06677

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float, float], optional): coefficients used for computing
            running averages of gradient and its norm. (default: (0.98, 0.92, 0.99))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): decoupled weight decay (L2 penalty) (default: 0)
        max_grad_norm (float, optional): value used to clip
            global grad norm (default: 0.0 no clip)
        no_prox (bool): how to perform the decoupled weight decay (default: False)
        foreach (bool): if True would use torch._foreach implementation. It's faster but uses
            slightly more memory. (default: True)
    """
    def __init__(self, params, lr=1e-3, betas=(0.98, 0.92, 0.99), eps=1e-8,
                 weight_decay=0.0, max_grad_norm=0.0, no_prox=False, foreach: bool=True):
        # Validate hyper-parameters up front so a bad config fails fast.
        if not 0.0 <= max_grad_norm:
            raise ValueError("Invalid Max grad norm: {}".format(max_grad_norm))
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= betas[2] < 1.0:
            raise ValueError("Invalid beta parameter at index 2: {}".format(betas[2]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm, no_prox=no_prox, foreach=foreach)
        super().__init__(params, defaults)
    def __setstate__(self, state):
        # Keep checkpoints from older versions loadable: older pickles may
        # predate the 'no_prox' option.
        super(Adan, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('no_prox', False)
    @torch.no_grad()
    def restart_opt(self):
        """Reset the step counter and all moment buffers to zero (warm restart)."""
        for group in self.param_groups:
            group['step'] = 0
            for p in group['params']:
                if p.requires_grad:
                    state = self.state[p]
                    # State initialization
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    # Exponential moving average of gradient difference
                    state['exp_avg_diff'] = torch.zeros_like(p)
    @torch.no_grad()
    def step(self, closure=None):
        """
        Performs a single optimization step.

        Args:
            closure (callable, optional): re-evaluates the model and returns
                the loss; run with grad enabled if provided.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        # Optional global gradient clipping: compute one norm over all params.
        if self.defaults['max_grad_norm'] > 0:
            device = self.param_groups[0]['params'][0].device
            global_grad_norm = torch.zeros(1, device=device)
            max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device)
            for group in self.param_groups:
                for p in group['params']:
                    if p.grad is not None:
                        grad = p.grad
                        global_grad_norm.add_(grad.pow(2).sum())
            global_grad_norm = torch.sqrt(global_grad_norm)
            # NOTE(review): `group` here is the *last* param group from the loop
            # above, so its eps is used for every group's clipping factor.
            clip_global_grad_norm = torch.clamp(max_grad_norm / (global_grad_norm + group['eps']), max=1.0)
        else:
            clip_global_grad_norm = 1.0
        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            exp_avg_diffs = []
            pre_grads = []
            beta1, beta2, beta3 = group['betas']
            # assume same step across group now to simplify things
            # per parameter step can be easily support by making it tensor, or pass list into kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1
            bias_correction1 = 1.0 - beta1 ** group['step']
            bias_correction2 = 1.0 - beta2 ** group['step']
            bias_correction3 = 1.0 - beta3 ** group['step']
            # Gather per-parameter tensors, lazily creating state buffers.
            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                grads.append(p.grad)
                state = self.state[p]
                if len(state) == 0:
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    state['exp_avg_diff'] = torch.zeros_like(p)
                if 'pre_grad' not in state or group['step'] == 1:
                    # at first step grad wouldn't be clipped by `clip_global_grad_norm`
                    # this is only to simplify implementation
                    state['pre_grad'] = p.grad
                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                exp_avg_diffs.append(state['exp_avg_diff'])
                pre_grads.append(state['pre_grad'])
            kwargs = dict(
                params=params_with_grad,
                grads=grads,
                exp_avgs=exp_avgs,
                exp_avg_sqs=exp_avg_sqs,
                exp_avg_diffs=exp_avg_diffs,
                pre_grads=pre_grads,
                beta1=beta1,
                beta2=beta2,
                beta3=beta3,
                bias_correction1=bias_correction1,
                bias_correction2=bias_correction2,
                bias_correction3_sqrt=math.sqrt(bias_correction3),
                lr=group['lr'],
                weight_decay=group['weight_decay'],
                eps=group['eps'],
                no_prox=group['no_prox'],
                clip_global_grad_norm=clip_global_grad_norm,
            )
            # Dispatch to the batched (_foreach) or per-tensor implementation.
            if group["foreach"]:
                copy_grads = _multi_tensor_adan(**kwargs)
            else:
                copy_grads = _single_tensor_adan(**kwargs)
            # Remember this step's (clipped) gradients for the next difference term.
            for p, copy_grad in zip(params_with_grad, copy_grads):
                self.state[p]['pre_grad'] = copy_grad
        return loss
def _single_tensor_adan(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    exp_avg_diffs: List[Tensor],
    pre_grads: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    beta3: float,
    bias_correction1: float,
    bias_correction2: float,
    bias_correction3_sqrt: float,
    lr: float,
    weight_decay: float,
    eps: float,
    no_prox: bool,
    clip_global_grad_norm: Tensor,
):
    """Reference (per-parameter loop) Adan update.

    Mutates ``params`` and the moment buffers in place and returns clones of
    the clipped gradients, which the caller stores as each parameter's
    ``pre_grad`` for the next step's gradient-difference term.
    """
    copy_grads = []
    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        exp_avg_diff = exp_avg_diffs[i]
        pre_grad = pre_grads[i]
        # Scale the gradient in place by the global clipping factor.
        grad = grad.mul_(clip_global_grad_norm)
        copy_grads.append(grad.clone())
        diff = grad - pre_grad
        update = grad + beta2 * diff
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)  # m_t
        exp_avg_diff.mul_(beta2).add_(diff, alpha=1 - beta2)  # diff_t
        exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1 - beta3)  # n_t
        # Bias-corrected second-moment denominator.
        denom = ((exp_avg_sq).sqrt() / bias_correction3_sqrt).add_(eps)
        update = ((exp_avg / bias_correction1 + beta2 * exp_avg_diff / bias_correction2)).div_(denom)
        if no_prox:
            # Decoupled decay applied before the step (AdamW-style).
            param.mul_(1 - lr * weight_decay)
            param.add_(update, alpha=-lr)
        else:
            # Proximal form: take the step first, then shrink the parameter.
            param.add_(update, alpha=-lr)
            param.div_(1 + lr * weight_decay)
    return copy_grads
def _multi_tensor_adan(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    exp_avg_diffs: List[Tensor],
    pre_grads: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    beta3: float,
    bias_correction1: float,
    bias_correction2: float,
    bias_correction3_sqrt: float,
    lr: float,
    weight_decay: float,
    eps: float,
    no_prox: bool,
    clip_global_grad_norm: Tensor,
):
    """Batched (torch._foreach) Adan update — mirrors ``_single_tensor_adan``.

    Mutates ``params`` and the moment buffers in place and returns clones of
    the clipped gradients for the caller to store as ``pre_grad``.
    """
    # Only pay for the foreach-mul when clipping actually scales the grads
    # (a factor of exactly 1.0 would be a no-op anyway).
    if clip_global_grad_norm<1.0:
        torch._foreach_mul_(grads, clip_global_grad_norm.item())
    copy_grads = [g.clone() for g in grads]
    diff = torch._foreach_sub(grads, pre_grads)
    # NOTE: line below while looking identical gives different result, due to float precision errors.
    # using mul+add produces identical results to single-tensor, using add+alpha doesn't
    # On cuda this difference doesn't matter due to its' own precision non-determinism
    # update = torch._foreach_add(grads, torch._foreach_mul(diff, beta2))
    update = torch._foreach_add(grads, diff, alpha=beta2)
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)  # m_t
    torch._foreach_mul_(exp_avg_diffs, beta2)
    torch._foreach_add_(exp_avg_diffs, diff, alpha=1 - beta2)  # diff_t
    torch._foreach_mul_(exp_avg_sqs, beta3)
    torch._foreach_addcmul_(exp_avg_sqs, update, update, value=1 - beta3)  # n_t
    # Bias-corrected second-moment denominator.
    denom = torch._foreach_sqrt(exp_avg_sqs)
    torch._foreach_div_(denom, bias_correction3_sqrt)
    torch._foreach_add_(denom, eps)
    update = torch._foreach_div(exp_avgs, bias_correction1)
    # NOTE: same issue as above. beta2 * diff / bias_correction2 != diff * (beta2 / bias_correction2)
    # using faster version by default.
    # torch._foreach_add_(update, torch._foreach_div(torch._foreach_mul(exp_avg_diffs, beta2), bias_correction2))
    torch._foreach_add_(update, torch._foreach_mul(exp_avg_diffs, beta2 / bias_correction2))
    torch._foreach_div_(update, denom)
    if no_prox:
        # Decoupled decay applied before the step (AdamW-style).
        torch._foreach_mul_(params, 1 - lr * weight_decay)
        torch._foreach_add_(params, update, alpha=-lr)
    else:
        # Proximal form: take the step first, then shrink the parameters.
        torch._foreach_add_(params, update, alpha=-lr)
        torch._foreach_div_(params, 1 + lr * weight_decay)
    return copy_grads
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
from functools import partial
import torch
import torch.nn as nn
import timm.models.vision_transformer
class VisionTransformer(timm.models.vision_transformer.VisionTransformer):
    """Vision Transformer with optional global average pooling over patch tokens."""
    def __init__(self, global_pool=False, **kwargs):
        super(VisionTransformer, self).__init__(**kwargs)
        self.global_pool = global_pool
        if self.global_pool:
            # Pooling happens before normalization, so replace the pre-head
            # norm with one applied to the pooled features.
            self.fc_norm = kwargs['norm_layer'](kwargs['embed_dim'])
            del self.norm  # remove the original norm
    def forward_features(self, x):
        batch_size = x.shape[0]
        x = self.patch_embed(x)
        # Prepend one class token per sample, then add positional embeddings.
        class_tokens = self.cls_token.expand(batch_size, -1, -1)
        x = torch.cat((class_tokens, x), dim=1)
        x = self.pos_drop(x + self.pos_embed)
        for block in self.blocks:
            x = block(x)
        if self.global_pool:
            # Average over patch tokens (class token excluded), then normalize.
            pooled = x[:, 1:, :].mean(dim=1)
            return self.fc_norm(pooled)
        # Default: normalized class-token embedding.
        return self.norm(x)[:, 0]
def vit_base_patch16(**kwargs):
    """ViT-Base with 16x16 patches (embed dim 768, depth 12, 12 heads)."""
    return VisionTransformer(
        patch_size=16, embed_dim=768, depth=12, num_heads=12,
        mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
def vit_large_patch16(**kwargs):
    """ViT-Large with 16x16 patches (embed dim 1024, depth 24, 16 heads)."""
    return VisionTransformer(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16,
        mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
def vit_huge_patch14(**kwargs):
    """ViT-Huge with 14x14 patches (embed dim 1280, depth 32, 16 heads)."""
    return VisionTransformer(
        patch_size=14, embed_dim=1280, depth=32, num_heads=16,
        mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import math
import sys
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy
import util.misc as misc
import util.lr_sched as lr_sched
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
                    mixup_fn: Optional[Mixup] = None, log_writer=None,
                    args=None):
    """Fine-tune ``model`` for a single epoch over ``data_loader``.

    Runs a mixed-precision (CUDA AMP) forward/backward with gradient
    accumulation over ``args.accum_iter`` steps, a per-iteration learning
    rate schedule, and optional mixup/cutmix.  Aborts the process if the
    (all-reduced) loss becomes non-finite.

    Args:
        model: the network being fine-tuned; put into train mode here.
        criterion: classification loss applied to the model outputs.
        data_loader: yields (samples, targets) batches.
        optimizer: optimizer whose param-group lrs are rewritten each step.
        device: device the batches are moved to (assumed CUDA — the loop
            calls ``torch.cuda.synchronize()``).
        epoch: current epoch index, used by the lr schedule and logging.
        loss_scaler: callable AMP scaler (NativeScalerWithGradNormCount);
            performs backward, optional clipping, and the optimizer step.
        max_norm: forwarded to the scaler as ``clip_grad``.
        mixup_fn: optional mixup/cutmix transform applied to each batch.
        log_writer: optional TensorBoard SummaryWriter.
        args: namespace providing at least ``accum_iter`` and the lr
            schedule hyper-parameters read by ``lr_sched``.

    Returns:
        dict mapping metric names to their global averages for the epoch.
    """
    model.train(True)
    metric_logger = misc.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20
    accum_iter = args.accum_iter
    optimizer.zero_grad()
    if log_writer is not None:
        print('log_dir: {}'.format(log_writer.log_dir))
    for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # we use a per iteration (instead of per epoch) lr scheduler;
        # only adjust at accumulation boundaries so accumulated steps
        # share one lr
        if data_iter_step % accum_iter == 0:
            lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if mixup_fn is not None:
            samples, targets = mixup_fn(samples, targets)
        with torch.cuda.amp.autocast():
            outputs = model(samples)
            loss = criterion(outputs, targets)
        # record the un-scaled loss before dividing for accumulation
        loss_value = loss.item()
        loss /= accum_iter
        # the scaler runs backward and only steps the optimizer on the
        # final micro-batch of each accumulation window
        loss_scaler(loss, optimizer, clip_grad=max_norm,
                    parameters=model.parameters(), create_graph=False,
                    update_grad=(data_iter_step + 1) % accum_iter == 0)
        if (data_iter_step + 1) % accum_iter == 0:
            optimizer.zero_grad()
        torch.cuda.synchronize()
        metric_logger.update(loss=loss_value)
        # track the lr range across param groups (layer-wise decay means
        # groups have different lrs); log the max
        min_lr = 10.
        max_lr = 0.
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group["lr"])
            max_lr = max(max_lr, group["lr"])
        metric_logger.update(lr=max_lr)
        loss_value_reduce = misc.all_reduce_mean(loss_value)
        if not math.isfinite(loss_value_reduce):
            print("Loss is {}, stopping training".format(loss_value_reduce))
            sys.exit(1)
        if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
            """ We use epoch_1000x as the x-axis in tensorboard.
            This calibrates different curves when batch size changes.
            """
            epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
            log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x)
            log_writer.add_scalar('lr', max_lr, epoch_1000x)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
    """Evaluate ``model`` on ``data_loader`` with top-1/top-5 accuracy.

    Uses a plain cross-entropy loss for reporting (independent of the
    training criterion) and CUDA AMP for the forward pass.  Gradients are
    disabled by the decorator.

    Args:
        data_loader: yields batches where index 0 is images and index -1
            is the target labels.
        model: classifier to evaluate; switched to eval mode here.
        device: device the batches are moved to.

    Returns:
        dict mapping 'loss', 'acc1', 'acc5' (and any other meters) to
        their global averages across the dataset and all processes.
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = misc.MetricLogger(delimiter="  ")
    header = 'Test:'
    # switch to evaluation mode
    model.eval()
    for batch in metric_logger.log_every(data_loader, 10, header):
        images = batch[0]
        target = batch[-1]
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        # compute output
        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch_size = images.shape[0]
        metric_logger.update(loss=loss.item())
        # weight accuracy meters by batch size so the global average is
        # the true per-sample accuracy even with a ragged last batch
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import math
import sys
from typing import Iterable
import torch
import util.misc as misc
import util.lr_sched as lr_sched
def train_one_epoch(model: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler,
                    log_writer=None,
                    args=None):
    """Run one MAE pre-training epoch.

    The model is expected to return ``(loss, pred, mask)`` when called as
    ``model(samples, mask_ratio=...)`` (see MaskedAutoencoderViT.forward);
    targets from the data loader are ignored.  Uses CUDA AMP, gradient
    accumulation over ``args.accum_iter`` steps, and a per-iteration lr
    schedule.  Aborts the process if the all-reduced loss is non-finite.

    Args:
        model: MAE model; put into train mode here.
        data_loader: yields (samples, _) batches; labels are unused.
        optimizer: optimizer stepped by the loss scaler.
        device: device the samples are moved to (assumed CUDA).
        epoch: current epoch index for the lr schedule and logging.
        loss_scaler: callable AMP scaler handling backward + step.
        log_writer: optional TensorBoard SummaryWriter.
        args: namespace providing ``accum_iter``, ``mask_ratio`` and lr
            schedule hyper-parameters.

    Returns:
        dict mapping metric names to their global averages for the epoch.
    """
    model.train(True)
    metric_logger = misc.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20
    accum_iter = args.accum_iter
    optimizer.zero_grad()
    if log_writer is not None:
        print('log_dir: {}'.format(log_writer.log_dir))
    for data_iter_step, (samples, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # we use a per iteration (instead of per epoch) lr scheduler
        if data_iter_step % accum_iter == 0:
            lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
        samples = samples.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            # MAE forward returns (reconstruction loss, prediction, mask)
            loss, _, _ = model(samples, mask_ratio=args.mask_ratio)
        # record un-scaled loss before dividing for accumulation
        loss_value = loss.item()
        loss /= accum_iter
        loss_scaler(loss, optimizer, parameters=model.parameters(),
                    update_grad=(data_iter_step + 1) % accum_iter == 0)
        if (data_iter_step + 1) % accum_iter == 0:
            optimizer.zero_grad()
        torch.cuda.synchronize()
        metric_logger.update(loss=loss_value)
        lr = optimizer.param_groups[0]["lr"]
        metric_logger.update(lr=lr)
        loss_value_reduce = misc.all_reduce_mean(loss_value)
        if not math.isfinite(loss_value_reduce):
            print("Loss is {}, stopping training".format(loss_value_reduce))
            sys.exit(1)
        if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
            """ We use epoch_1000x as the x-axis in tensorboard.
            This calibrates different curves when batch size changes.
            """
            epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
            log_writer.add_scalar('train_loss', loss_value_reduce, epoch_1000x)
            log_writer.add_scalar('lr', lr, epoch_1000x)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
from functools import partial
import torch
import torch.nn as nn
from timm.models.vision_transformer import PatchEmbed, Block
from util.pos_embed import get_2d_sincos_pos_embed
class MaskedAutoencoderViT(nn.Module):
    """Masked Autoencoder with a VisionTransformer backbone.

    Encoder: patch-embeds the image, randomly masks a fraction of patch
    tokens, and runs the remaining (visible) tokens plus a cls token
    through ViT blocks.  Decoder: re-inserts learnable mask tokens,
    unshuffles to the original patch order, and predicts raw pixel values
    per patch.  The loss is MSE on the masked patches only.

    FIX: ``patchify``/``unpatchify`` previously hard-coded 3 channels and
    silently broke for ``in_chans != 3`` even though ``decoder_pred`` was
    already sized with ``in_chans``; they now use ``self.in_chans``.
    Default behavior (RGB) is unchanged.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3,
                 embed_dim=1024, depth=24, num_heads=16,
                 decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16,
                 mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False):
        super().__init__()
        # Kept so patchify/unpatchify work for any channel count.
        self.in_chans = in_chans
        # --------------------------------------------------------------------------
        # MAE encoder specifics
        self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
        num_patches = self.patch_embed.num_patches
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False)  # fixed sin-cos embedding
        # NOTE(review): qk_scale was removed from timm's Block in newer
        # releases; this assumes the older (~0.3.x) timm API — confirm the
        # pinned timm version before upgrading.
        self.blocks = nn.ModuleList([
            Block(embed_dim, num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)
        # --------------------------------------------------------------------------
        # --------------------------------------------------------------------------
        # MAE decoder specifics
        self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True)
        self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))
        self.decoder_pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, decoder_embed_dim), requires_grad=False)  # fixed sin-cos embedding
        self.decoder_blocks = nn.ModuleList([
            Block(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, qk_scale=None, norm_layer=norm_layer)
            for i in range(decoder_depth)])
        self.decoder_norm = norm_layer(decoder_embed_dim)
        self.decoder_pred = nn.Linear(decoder_embed_dim, patch_size**2 * in_chans, bias=True)  # decoder to patch
        # --------------------------------------------------------------------------
        self.norm_pix_loss = norm_pix_loss
        self.initialize_weights()

    def initialize_weights(self):
        """Initialize positional embeddings (fixed sin-cos), tokens, and
        all Linear/LayerNorm weights."""
        # initialize (and freeze) pos_embed by sin-cos embedding
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
        decoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1], int(self.patch_embed.num_patches**.5), cls_token=True)
        self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))
        # initialize patch_embed like nn.Linear (instead of nn.Conv2d)
        w = self.patch_embed.proj.weight.data
        torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
        # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
        torch.nn.init.normal_(self.cls_token, std=.02)
        torch.nn.init.normal_(self.mask_token, std=.02)
        # initialize nn.Linear and nn.LayerNorm
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # xavier-uniform for Linear weights (following official JAX ViT),
        # zeros for biases, identity init for LayerNorm
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def patchify(self, imgs):
        """Split images into flattened non-overlapping patches.

        imgs: (N, C, H, W) with H == W divisible by the patch size
        returns x: (N, L, patch_size**2 * C)
        """
        p = self.patch_embed.patch_size[0]
        c = self.in_chans  # was hard-coded to 3
        assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0
        h = w = imgs.shape[2] // p
        x = imgs.reshape(shape=(imgs.shape[0], c, h, p, w, p))
        x = torch.einsum('nchpwq->nhwpqc', x)
        x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * c))
        return x

    def unpatchify(self, x):
        """Inverse of patchify.

        x: (N, L, patch_size**2 * C) with L a perfect square
        returns imgs: (N, C, H, W)
        """
        p = self.patch_embed.patch_size[0]
        c = self.in_chans  # was hard-coded to 3
        h = w = int(x.shape[1]**.5)
        assert h * w == x.shape[1]
        x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
        x = torch.einsum('nhwpqc->nchpwq', x)
        imgs = x.reshape(shape=(x.shape[0], c, h * p, w * p))
        return imgs

    def random_masking(self, x, mask_ratio):
        """
        Perform per-sample random masking by per-sample shuffling.
        Per-sample shuffling is done by argsort random noise.
        x: [N, L, D], sequence
        Returns (x_masked, mask, ids_restore) where mask is [N, L] with
        0 = keep, 1 = removed, and ids_restore undoes the shuffle.
        """
        N, L, D = x.shape  # batch, length, dim
        len_keep = int(L * (1 - mask_ratio))
        noise = torch.rand(N, L, device=x.device)  # noise in [0, 1]
        # sort noise for each sample
        ids_shuffle = torch.argsort(noise, dim=1)  # ascend: small is keep, large is remove
        ids_restore = torch.argsort(ids_shuffle, dim=1)
        # keep the first subset
        ids_keep = ids_shuffle[:, :len_keep]
        x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))
        # generate the binary mask: 0 is keep, 1 is remove
        mask = torch.ones([N, L], device=x.device)
        mask[:, :len_keep] = 0
        # unshuffle to get the binary mask
        mask = torch.gather(mask, dim=1, index=ids_restore)
        return x_masked, mask, ids_restore

    def forward_encoder(self, x, mask_ratio):
        """Encode visible patches only; returns (latent, mask, ids_restore)."""
        # embed patches
        x = self.patch_embed(x)
        # add pos embed w/o cls token
        x = x + self.pos_embed[:, 1:, :]
        # masking: length -> length * mask_ratio
        x, mask, ids_restore = self.random_masking(x, mask_ratio)
        # append cls token (with its own positional embedding)
        cls_token = self.cls_token + self.pos_embed[:, :1, :]
        cls_tokens = cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        # apply Transformer blocks
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x, mask, ids_restore

    def forward_decoder(self, x, ids_restore):
        """Reconstruct per-patch pixels from the encoder latent.

        Re-inserts mask tokens, unshuffles to the original patch order,
        and predicts patch_size**2 * C values per patch (cls removed).
        """
        # embed tokens
        x = self.decoder_embed(x)
        # append mask tokens to sequence
        mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1)
        x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1)  # no cls token
        x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2]))  # unshuffle
        x = torch.cat([x[:, :1, :], x_], dim=1)  # append cls token
        # add pos embed
        x = x + self.decoder_pos_embed
        # apply Transformer blocks
        for blk in self.decoder_blocks:
            x = blk(x)
        x = self.decoder_norm(x)
        # predictor projection
        x = self.decoder_pred(x)
        # remove cls token
        x = x[:, 1:, :]
        return x

    def forward_loss(self, imgs, pred, mask):
        """MSE reconstruction loss averaged over masked patches only.

        imgs: [N, C, H, W]
        pred: [N, L, p*p*C]
        mask: [N, L], 0 is keep, 1 is remove
        """
        target = self.patchify(imgs)
        if self.norm_pix_loss:
            # normalize each target patch to zero mean / unit variance
            mean = target.mean(dim=-1, keepdim=True)
            var = target.var(dim=-1, keepdim=True)
            target = (target - mean) / (var + 1.e-6)**.5
        loss = (pred - target) ** 2
        loss = loss.mean(dim=-1)  # [N, L], mean loss per patch
        loss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches
        return loss

    def forward(self, imgs, mask_ratio=0.75):
        """Full MAE pass: returns (loss, per-patch prediction, mask)."""
        latent, mask, ids_restore = self.forward_encoder(imgs, mask_ratio)
        pred = self.forward_decoder(latent, ids_restore)  # [N, L, p*p*C]
        loss = self.forward_loss(imgs, pred, mask)
        return loss, pred, mask
def mae_vit_base_patch16_dec512d8b(**kwargs):
    """MAE with a ViT-Base/16 encoder and a 512-dim, 8-block decoder."""
    return MaskedAutoencoderViT(
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        decoder_embed_dim=512,
        decoder_depth=8,
        decoder_num_heads=16,
        mlp_ratio=4,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
def mae_vit_large_patch16_dec512d8b(**kwargs):
    """MAE with a ViT-Large/16 encoder and a 512-dim, 8-block decoder."""
    return MaskedAutoencoderViT(
        patch_size=16,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        decoder_embed_dim=512,
        decoder_depth=8,
        decoder_num_heads=16,
        mlp_ratio=4,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
def mae_vit_huge_patch14_dec512d8b(**kwargs):
    """MAE with a ViT-Huge/14 encoder and a 512-dim, 8-block decoder."""
    return MaskedAutoencoderViT(
        patch_size=14,
        embed_dim=1280,
        depth=32,
        num_heads=16,
        decoder_embed_dim=512,
        decoder_depth=8,
        decoder_num_heads=16,
        mlp_ratio=4,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
# Set recommended archs: short aliases mapping the canonical model names
# to the concrete encoder/decoder configurations defined above.
mae_vit_base_patch16 = mae_vit_base_patch16_dec512d8b # decoder: 512 dim, 8 blocks
mae_vit_large_patch16 = mae_vit_large_patch16_dec512d8b # decoder: 512 dim, 8 blocks
mae_vit_huge_patch14 = mae_vit_huge_patch14_dec512d8b # decoder: 512 dim, 8 blocks
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import argparse
from ast import arg
import datetime
import json
import numpy as np
import os
import time
from pathlib import Path
import torch
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
from timm.models.helpers import load_state_dict
import timm
#assert timm.__version__ == "0.3.2" # version check
from timm.models.layers import trunc_normal_
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from adan import Adan
import util.lr_decay as lrd
import util.misc as misc
from util.datasets import build_dataset
from util.pos_embed import interpolate_pos_embed
from util.misc import NativeScalerWithGradNormCount as NativeScaler
import models_vit
from engine_finetune import train_one_epoch, evaluate
def get_args_parser():
    """Build the argparse parser for MAE fine-tuning.

    Returns an ``argparse.ArgumentParser`` (``add_help=False`` so it can be
    used as a parent parser).  Fix: removed a stray trailing comma after the
    ``--aa`` argument that created a discarded one-element tuple.

    NOTE(review): ``--use-adan`` is ``action='store_true'`` with
    ``default=True``, so it can never be disabled from the CLI — confirm
    whether a ``--no-adan``/BooleanOptionalAction flag was intended.
    """
    parser = argparse.ArgumentParser('MAE fine-tuning for image classification', add_help=False)
    parser.add_argument('--batch_size', default=64, type=int,
                        help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus')
    parser.add_argument('--epochs', default=50, type=int)
    parser.add_argument('--accum_iter', default=1, type=int,
                        help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
    # Model parameters
    parser.add_argument('--model', default='vit_large_patch16', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--input_size', default=224, type=int,
                        help='images input size')
    parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')
    # Optimizer parameters
    parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--weight_decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    parser.add_argument('--lr', type=float, default=None, metavar='LR',
                        help='learning rate (absolute lr)')
    parser.add_argument('--blr', type=float, default=1e-3, metavar='LR',
                        help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
    parser.add_argument('--layer_decay', type=float, default=0.75,
                        help='layer-wise lr decay from ELECTRA/BEiT')
    parser.add_argument('--min-lr', type=float, default=1e-6, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0')
    parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR')
    parser.add_argument('--use-adan', action='store_true', default=True,
                        help='whether to use Adan')
    parser.add_argument('--max-grad-norm', type=float, default=0.0,
                        help='max grad norm (default: 0.0 for no clip)')
    parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: None, use opt default)')
    parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--bias-decay', action='store_true', default=False,
                        help='whether to decay bias term')
    # Augmentation parameters
    parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT',
                        help='Color jitter factor (enabled only when not using Auto/RandAug)')
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1,
                        help='Label smoothing (default: 0.1)')
    # * Random Erase params
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
                        help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel',
                        help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1,
                        help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False,
                        help='Do not random erase first (clean) augmentation split')
    # * Mixup params
    parser.add_argument('--mixup', type=float, default=0,
                        help='mixup alpha, mixup enabled if > 0.')
    parser.add_argument('--cutmix', type=float, default=0,
                        help='cutmix alpha, cutmix enabled if > 0.')
    parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup_prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup_mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
    # * Finetuning params
    parser.add_argument('--finetune', default='',
                        help='finetune from checkpoint')
    # --global_pool / --cls_token write to the same dest; default is pooled
    parser.add_argument('--global_pool', action='store_true')
    parser.set_defaults(global_pool=True)
    parser.add_argument('--cls_token', action='store_false', dest='global_pool',
                        help='Use class token instead of global pool for classification')
    # Dataset parameters
    parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')
    parser.add_argument('--nb_classes', default=1000, type=int,
                        help='number of the classification types')
    parser.add_argument('--output_dir', default=None,
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default='./output_dir/',
                        help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda:0',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='',
                        help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true',
                        help='Perform evaluation only')
    parser.add_argument('--dist_eval', action='store_true', default=False,
                        help='Enabling distributed evaluation (recommended during training for faster monitor')
    parser.add_argument('--num_workers', default=8, type=int)
    parser.add_argument('--pin_mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=True)
    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', default=0, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')
    return parser
def main(args):
    """Fine-tune a ViT classifier from an MAE checkpoint.

    Sets up (single-node) distributed state, datasets/loaders, optional
    mixup/cutmix, the model (with interpolated position embeddings from
    the pre-trained checkpoint), a layer-wise-lr-decay optimizer (Adan or
    AdamW), then runs the train/eval loop, checkpointing every 10 epochs.

    BUGFIX: the optimizer branch previously tested the undefined attribute
    ``args.use_FAM`` and instantiated the undefined name ``Name`` — the
    parser defines ``--use-adan`` (dest ``use_adan``) and the optimizer
    imported at the top of the file is ``Adan``; both are corrected here.
    """
    # NOTE(review): single-process defaults; misc.init_distributed_ddpjob
    # presumably sets args.distributed and may override these — confirm.
    args.device = 'cuda:0'
    args.world_size = 1
    args.rank = 0  # global rank
    args.gpu = 0
    #misc.init_distributed_mode(args)
    misc.init_distributed_ddpjob(args)
    print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    print("{}".format(args).replace(', ', ',\n'))
    device = torch.device(args.device)
    # fix the seed for reproducibility (offset by rank so workers differ)
    seed = args.seed + misc.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    dataset_train = build_dataset(is_train=True, args=args)
    dataset_val = build_dataset(is_train=False, args=args)
    if True:  # args.distributed:
        num_tasks = misc.get_world_size()
        global_rank = misc.get_rank()
        sampler_train = torch.utils.data.DistributedSampler(
            dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
        )
        print("Sampler_train = %s" % str(sampler_train))
        if args.dist_eval:
            if len(dataset_val) % num_tasks != 0:
                print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                      'This will slightly alter validation results as extra duplicate entries are added to achieve '
                      'equal num of samples per-process.')
            sampler_val = torch.utils.data.DistributedSampler(
                dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True)  # shuffle=True to reduce monitor bias
        else:
            sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    # TensorBoard logging only on the main process, in a timestamped subdir
    if misc.is_main_process() and args.log_dir is not None and not args.eval:
        TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.datetime.now())
        args.log_dir = args.log_dir + 'mae-' + TIMESTAMP
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = SummaryWriter(log_dir=args.log_dir)
    else:
        log_writer = None
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val, sampler=sampler_val,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=False
    )
    # mixup/cutmix is active if any of its hyper-parameters is set
    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        print("Mixup is activated!")
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.nb_classes)
    model = models_vit.__dict__[args.model](
        num_classes=args.nb_classes,
        drop_path_rate=args.drop_path,
        global_pool=args.global_pool,
    )
    if args.finetune and not args.eval:
        #checkpoint = torch.load(args.finetune, map_location='cpu')
        print("Load pre-trained checkpoint from: %s" % args.finetune)
        checkpoint_model = load_state_dict(args.finetune)
        state_dict = model.state_dict()
        # drop the checkpoint's classification head when its shape does not
        # match the current number of classes
        for k in ['head.weight', 'head.bias']:
            if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
                print(f"Removing key {k} from pretrained checkpoint")
                del checkpoint_model[k]
        # interpolate position embedding
        interpolate_pos_embed(model, checkpoint_model)
        # load pre-trained model
        msg = model.load_state_dict(checkpoint_model, strict=False)
        print(msg)
        # sanity check: only the (re-initialized) head / fc_norm may be missing
        if args.global_pool:
            assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
        else:
            assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
        # manually initialize fc layer
        trunc_normal_(model.head.weight, std=1e-5)
    model.to(device)
    model_without_ddp = model
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("Model = %s" % str(model_without_ddp))
    print('number of params (M): %.2f' % (n_parameters / 1.e6))
    # linear lr scaling rule: absolute_lr = base_lr * eff_batch_size / 256
    eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
    if args.lr is None:  # only base_lr is specified
        args.lr = args.blr * eff_batch_size / 256
    print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
    print("actual lr: %.2e" % args.lr)
    print("accumulate grad iterations: %d" % args.accum_iter)
    print("effective batch size: %d" % eff_batch_size)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    # build optimizer with layer-wise lr decay (lrd)
    if args.use_adan:  # was: args.use_FAM (undefined attribute)
        param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay,
            no_weight_decay_list=[] if args.bias_decay else model_without_ddp.no_weight_decay(),
            layer_decay=args.layer_decay
        )
        optimizer = Adan(param_groups, weight_decay=args.weight_decay,  # was: Name(...)
            lr=args.lr, betas=args.opt_betas, eps=args.opt_eps, max_grad_norm=args.max_grad_norm
        )
    else:
        param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay,
            no_weight_decay_list=model_without_ddp.no_weight_decay(),
            layer_decay=args.layer_decay
        )
        optimizer = torch.optim.AdamW(param_groups, lr=args.lr)
    #print(optimizer)
    loss_scaler = NativeScaler()
    if mixup_fn is not None:
        # smoothing is handled with mixup label transform
        criterion = SoftTargetCrossEntropy()
    elif args.smoothing > 0.:
        criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
    else:
        criterion = torch.nn.CrossEntropyLoss()
    print("criterion = %s" % str(criterion))
    misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
    if args.eval:
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        exit(0)
    print(f"Start training for {args.epochs} epochs")
    start_time = time.time()
    max_accuracy = 0.0
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            data_loader_train.sampler.set_epoch(epoch)
        train_stats = train_one_epoch(
            model, criterion, data_loader_train,
            optimizer, device, epoch, loss_scaler,
            args.clip_grad, mixup_fn,
            log_writer=log_writer,
            args=args
        )
        # checkpoint every 10 epochs
        if args.output_dir and (epoch + 1) % 10 == 0:
            misc.save_model(
                args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
                loss_scaler=loss_scaler, epoch=epoch)
        test_stats = evaluate(data_loader_val, model, device)
        print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
        max_accuracy = max(max_accuracy, test_stats["acc1"])
        print(f'Max accuracy: {max_accuracy:.2f}%')
        if log_writer is not None:
            log_writer.add_scalar('perf/test_acc1', test_stats['acc1'], epoch)
            log_writer.add_scalar('perf/test_acc5', test_stats['acc5'], epoch)
            log_writer.add_scalar('perf/test_loss', test_stats['loss'], epoch)
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     **{f'test_{k}': v for k, v in test_stats.items()},
                     'epoch': epoch,
                     'n_parameters': n_parameters}
        if args.log_dir and misc.is_main_process():
            if log_writer is not None:
                log_writer.flush()
            with open(os.path.join(args.log_dir, "log.txt"), mode="a", encoding="utf-8") as f:
                f.write(json.dumps(log_stats) + "\n")
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    # Parse CLI args, make sure the checkpoint directory exists, and run.
    parser = get_args_parser()
    parsed_args = parser.parse_args()
    if parsed_args.output_dir:
        Path(parsed_args.output_dir).mkdir(parents=True, exist_ok=True)
    main(parsed_args)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# ELECTRA https://github.com/google-research/electra
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import json
def param_groups_lrd(model, weight_decay=0.05, no_weight_decay_list=(), layer_decay=.75):
    """
    Parameter groups for layer-wise lr decay
    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58

    Args:
        model: ViT-style module exposing ``blocks`` and ``named_parameters()``.
        weight_decay: decay applied to >=2D parameters not in the skip list.
        no_weight_decay_list: parameter names exempt from weight decay.
            (Fix: default changed from mutable ``[]`` to immutable ``()``;
            only membership tests are performed, so behavior is identical.)
        layer_decay: per-layer multiplicative lr decay factor.

    Returns:
        list of param-group dicts, each with ``lr_scale``, ``weight_decay``
        and ``params`` keys, suitable for an optimizer constructor.
    """
    # parallel dict keyed by group name holding parameter *names*, kept
    # only for the (commented) debug dump below
    param_group_names = {}
    param_groups = {}
    # +1 so head / final norm params land in their own top "layer"
    num_layers = len(model.blocks) + 1
    layer_scales = list(layer_decay ** (num_layers - i) for i in range(num_layers + 1))
    for n, p in model.named_parameters():
        if not p.requires_grad:
            continue
        # no decay: all 1D parameters and model specific ones
        if p.ndim == 1 or n in no_weight_decay_list:
            g_decay = "no_decay"
            this_decay = 0.
        else:
            g_decay = "decay"
            this_decay = weight_decay
        layer_id = get_layer_id_for_vit(n, num_layers)
        group_name = "layer_%d_%s" % (layer_id, g_decay)
        if group_name not in param_group_names:
            this_scale = layer_scales[layer_id]
            param_group_names[group_name] = {
                "lr_scale": this_scale,
                "weight_decay": this_decay,
                "params": [],
            }
            param_groups[group_name] = {
                "lr_scale": this_scale,
                "weight_decay": this_decay,
                "params": [],
            }
        param_group_names[group_name]["params"].append(n)
        param_groups[group_name]["params"].append(p)
    # print("parameter groups: \n%s" % json.dumps(param_group_names, indent=2))
    return list(param_groups.values())
def get_layer_id_for_vit(name, num_layers):
    """
    Assign a parameter with its layer id
    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33

    Embedding-level parameters map to layer 0, transformer blocks to
    1..depth, and everything else (head, final norm) to ``num_layers``.
    """
    # embeddings and patch projection belong to the input "layer"
    if name in ('cls_token', 'pos_embed') or name.startswith('patch_embed'):
        return 0
    # transformer blocks: 'blocks.<idx>.' -> idx + 1
    if name.startswith('blocks'):
        return int(name.split('.')[1]) + 1
    # anything else (head, final norm) sits on top
    return num_layers
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# BEiT: https://github.com/microsoft/unilm/tree/master/beit
# --------------------------------------------------------
import builtins
import datetime
import os
import time
from collections import defaultdict, deque
from pathlib import Path
import torch
import torch.distributed as dist
from torch._six import inf
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.

    ``deque`` holds the most recent ``window_size`` raw values; ``total``
    and ``count`` accumulate over the whole series.  Attribute names are
    part of the public surface (other utilities format against them).
    """

    def __init__(self, window_size=20, fmt=None):
        # Default display: windowed median plus running global average.
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt

    def update(self, value, n=1):
        """Record ``value`` (with weight ``n``) in window and global stats."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        count, total = t.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        # median of the current window only
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        # mean of the current window only
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        # weighted mean over the entire series
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        # most recently recorded value
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
class MetricLogger(object):
    """Collect named SmoothedValue meters and format/print training progress."""
    def __init__(self, delimiter="\t"):
        # meters are created lazily: first update() of a name instantiates one
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        """Record one scalar per keyword argument (tensors reduced via .item())."""
        for k, v in kwargs.items():
            if v is None:
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # expose meters as attributes, e.g. logger.loss -> self.meters['loss']
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        # "name: <meter>" joined by the configured delimiter
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        """Reduce every meter's count/total across distributed workers."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        # register a preconfigured meter (e.g. a custom fmt) under a fixed name
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Yield items from ``iterable`` while printing progress every
        ``print_freq`` iterations: iter/data timing, meters, ETA, and the peak
        GPU memory when CUDA is available."""
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # pad the iteration counter to the digit width of len(iterable)
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # time spent waiting on the data source since the last iteration
            data_time.update(time.time() - end)
            yield obj
            # full iteration time (the consumer's work happens during yield)
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    original_print = builtins.print
    def patched_print(*args, **kwargs):
        # 'force=True' lets any rank print; very large jobs (>8 ranks) always print
        force = kwargs.pop('force', False)
        force = force or (get_world_size() > 8)
        if is_master or force:
            timestamp = datetime.datetime.now().time()
            original_print('[{}] '.format(timestamp), end='')  # print with time stamp
            original_print(*args, **kwargs)
    builtins.print = patched_print
def is_dist_avail_and_initialized():
    """True only when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of participating processes (1 when not running distributed)."""
    if is_dist_avail_and_initialized():
        return dist.get_world_size()
    return 1
def get_rank():
    """This process's rank (0 when not running distributed)."""
    if is_dist_avail_and_initialized():
        return dist.get_rank()
    return 0
def is_main_process():
    """Rank 0 is the designated main process."""
    return not get_rank()
def save_on_master(*args, **kwargs):
    """torch.save that is a no-op on every rank except the main one."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
def init_distributed_ddpjob(args=None):
    """
    initialize the ddp job

    Mutates ``args`` (world_size, rank, gpu, distributed, dist_backend).

    Fixes vs. the original:
    - captures and prints the actual init exception instead of the bare
      ``Exception`` class;
    - only queries ``dist.get_world_size()/get_rank()`` when initialization
      actually succeeded (the fallback values were previously clobbered by
      calls that raise again when init failed);
    - assigns ``args.rank`` before deriving ``args.gpu`` from it (previously
      ``args.gpu = args.rank`` read a possibly-unset attribute).
    """
    world_size, rank = 1, 0
    if not dist.is_available() or not dist.is_initialized():
        try:
            os.environ['MASTER_PORT'] = '40101'
            torch.distributed.init_process_group(
                backend='nccl')
        except Exception as exc:
            print('distributed training not available')
            print(exc)
    if dist.is_available() and dist.is_initialized():
        world_size = dist.get_world_size()
        rank = dist.get_rank()
    assert rank >= 0
    args.world_size, args.rank = world_size, rank
    args.gpu = args.rank  # one GPU per rank
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    #torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
def init_distributed_mode(args):
    """Initialize torch.distributed from whichever launcher set the environment.

    Precedence: ITP/OpenMPI env vars (``args.dist_on_itp``), then
    torchrun-style ``RANK``/``WORLD_SIZE``/``LOCAL_RANK``, then SLURM;
    otherwise distributed mode is disabled. Mutates ``args`` (rank,
    world_size, gpu, distributed, dist_backend) as a side effect.
    """
    if args.dist_on_itp:
        # OpenMPI launcher: mirror OMPI_* vars into the standard torch ones
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['RANK'] = str(args.rank)
        os.environ['WORLD_SIZE'] = str(args.world_size)
        # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
    elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # torchrun / torch.distributed.launch path
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        # SLURM path: derive the local GPU from the global rank
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        setup_for_distributed(is_master=True)  # hack
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}, gpu {}'.format(
        args.rank, args.dist_url, args.gpu), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
class NativeScalerWithGradNormCount:
    """Wrapper around torch.cuda.amp.GradScaler that also reports the
    gradient norm (measured after unscaling) on each optimizer update."""
    state_dict_key = "amp_scaler"
    def __init__(self):
        self._scaler = torch.cuda.amp.GradScaler()
    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if not update_grad:
            # gradient-accumulation step: no optimizer update, no norm
            return None
        # unscale the gradients of optimizer's assigned params in-place
        # before clipping / measuring the norm
        self._scaler.unscale_(optimizer)
        if clip_grad is not None:
            assert parameters is not None
            norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
        else:
            norm = get_grad_norm_(parameters)
        self._scaler.step(optimizer)
        self._scaler.update()
        return norm
    def state_dict(self):
        return self._scaler.state_dict()
    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
    """Total norm of order ``norm_type`` over the gradients of ``parameters``.

    Tensors without a gradient are skipped; returns a scalar zero tensor when
    nothing has a gradient. ``norm_type == inf`` gives the max-abs norm.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad.detach() for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if not grads:
        return torch.tensor(0.)
    device = grads[0].device
    if norm_type == inf:
        return max(g.abs().max().to(device) for g in grads)
    per_tensor = torch.stack([torch.norm(g, norm_type).to(device) for g in grads])
    return torch.norm(per_tensor, norm_type)
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler):
    """Write a checkpoint for ``epoch`` into ``args.output_dir``.

    With a loss scaler (native AMP path) the full training state is written
    via torch.save on the main rank only. Without one, ``model`` is expected
    to provide its own ``save_checkpoint`` (presumably a deepspeed engine —
    TODO confirm against caller).
    """
    output_dir = Path(args.output_dir)
    epoch_name = str(epoch)
    if loss_scaler is None:
        # engine-managed checkpointing path
        client_state = {'epoch': epoch}
        model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
        return
    to_save = {
        'model': model_without_ddp.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': epoch,
        'scaler': loss_scaler.state_dict(),
        'args': args,
    }
    for checkpoint_path in [output_dir / ('checkpoint-%s.pth' % epoch_name)]:
        save_on_master(to_save, checkpoint_path)
def load_model(args, model_without_ddp, optimizer, loss_scaler):
    """Resume model (and, unless evaluating, optimizer/scaler/epoch) from
    ``args.resume`` — a URL (https) or a local checkpoint path."""
    if not args.resume:
        return
    if args.resume.startswith('https'):
        checkpoint = torch.hub.load_state_dict_from_url(
            args.resume, map_location='cpu', check_hash=True)
    else:
        checkpoint = torch.load(args.resume, map_location='cpu')
    model_without_ddp.load_state_dict(checkpoint['model'])
    print("Resume checkpoint %s" % args.resume)
    evaluating = hasattr(args, 'eval') and args.eval
    if 'optimizer' in checkpoint and 'epoch' in checkpoint and not evaluating:
        optimizer.load_state_dict(checkpoint['optimizer'])
        args.start_epoch = checkpoint['epoch'] + 1
        if 'scaler' in checkpoint:
            loss_scaler.load_state_dict(checkpoint['scaler'])
        print("With optim & sched!")
def all_reduce_mean(x):
    """Average scalar ``x`` across all workers; identity when single-process."""
    world_size = get_world_size()
    if world_size <= 1:
        return x
    reduced = torch.tensor(x).cuda()
    dist.all_reduce(reduced)
    reduced /= world_size
    return reduced.item()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# LARS optimizer, implementation from MoCo v3:
# https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
import torch
class LARS(torch.optim.Optimizer):
    """
    LARS optimizer, no rate scaling or weight decay for parameters <= 1D.
    """
    def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001):
        super().__init__(params, dict(lr=lr, weight_decay=weight_decay,
                                      momentum=momentum,
                                      trust_coefficient=trust_coefficient))

    @torch.no_grad()
    def step(self):
        for group in self.param_groups:
            for p in group['params']:
                grad = p.grad
                if grad is None:
                    continue
                if p.ndim > 1:  # if not normalization gamma/beta or bias
                    grad = grad.add(p, alpha=group['weight_decay'])
                    p_norm = torch.norm(p)
                    g_norm = torch.norm(grad)
                    one = torch.ones_like(p_norm)
                    # LARS trust ratio; fall back to 1 when either norm is zero
                    trust = torch.where(
                        p_norm > 0.,
                        torch.where(g_norm > 0,
                                    (group['trust_coefficient'] * p_norm / g_norm), one),
                        one)
                    grad = grad.mul(trust)
                state = self.state[p]
                if 'mu' not in state:
                    state['mu'] = torch.zeros_like(p)
                mu = state['mu']
                # heavy-ball momentum followed by the SGD-style update
                mu.mul_(group['momentum']).add_(grad)
                p.add_(mu, alpha=-group['lr'])
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
import os
import PIL
from torchvision import datasets, transforms
from timm.data import create_transform
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
def build_dataset(is_train, args):
    """ImageFolder dataset over ``args.data_path``/train (or /val)."""
    split = 'train' if is_train else 'val'
    dataset = datasets.ImageFolder(
        os.path.join(args.data_path, split),
        transform=build_transform(is_train, args))
    print(dataset)
    return dataset
def build_transform(is_train, args):
    """timm augmentation pipeline for training; resize + center-crop for eval."""
    mean = IMAGENET_DEFAULT_MEAN
    std = IMAGENET_DEFAULT_STD
    if is_train:
        # this should always dispatch to transforms_imagenet_train
        return create_transform(
            input_size=args.input_size,
            is_training=True,
            color_jitter=args.color_jitter,
            auto_augment=args.aa,
            interpolation='bicubic',
            re_prob=args.reprob,
            re_mode=args.remode,
            re_count=args.recount,
            mean=mean,
            std=std,
        )
    # eval transform: resize keeps the usual 224/256 crop ratio
    crop_pct = 224 / 256 if args.input_size <= 224 else 1.0
    size = int(args.input_size / crop_pct)
    return transforms.Compose([
        transforms.Resize(size, interpolation=PIL.Image.BICUBIC),  # to maintain same ratio w.r.t. 224 images
        transforms.CenterCrop(args.input_size),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from torchvision import transforms
from torchvision.transforms import functional as F
class RandomResizedCrop(transforms.RandomResizedCrop):
    """
    RandomResizedCrop for matching TF/TPU implementation: no for-loop is used.
    This may lead to results different with torchvision's version.
    Following BYOL's TF code:
    https://github.com/deepmind/deepmind-research/blob/master/byol/utils/dataset.py#L206
    """
    @staticmethod
    def get_params(img, scale, ratio):
        """Sample crop coordinates (i, j, h, w) for ``img`` in one draw.

        ``scale`` bounds the crop's area fraction and ``ratio`` its aspect
        ratio; out-of-range crops are clamped to the image instead of being
        resampled in a loop.
        """
        # torchvision >= 0.10 renamed the private helper ``_get_image_size``
        # to the public ``get_image_size``; support both versions.
        size_fn = getattr(F, 'get_image_size', None) or F._get_image_size
        width, height = size_fn(img)
        area = height * width
        # single draw of the target area and a log-uniform aspect ratio
        target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
        log_ratio = torch.log(torch.tensor(ratio))
        aspect_ratio = torch.exp(
            torch.empty(1).uniform_(log_ratio[0], log_ratio[1])
        ).item()
        w = int(round(math.sqrt(target_area * aspect_ratio)))
        h = int(round(math.sqrt(target_area / aspect_ratio)))
        # clamp rather than reject crops that exceed the image
        w = min(w, width)
        h = min(h, height)
        i = torch.randint(0, height - h + 1, size=(1,)).item()
        j = torch.randint(0, width - w + 1, size=(1,)).item()
        return i, j, h, w
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# Position embedding utils
# --------------------------------------------------------
import numpy as np
import torch
# --------------------------------------------------------
# 2D sine-cosine position embedding
# References:
# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
# MoCo v3: https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    coords_h = np.arange(grid_size, dtype=np.float32)
    coords_w = np.arange(grid_size, dtype=np.float32)
    grid = np.stack(np.meshgrid(coords_w, coords_h), axis=0)  # here w goes first
    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        # prepend an all-zero row for the class token
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Concatenate 1D sin-cos embeddings of the H and W coordinate grids."""
    assert embed_dim % 2 == 0
    # use half of dimensions to encode grid_h
    half = embed_dim // 2
    emb_h = get_1d_sincos_pos_embed_from_grid(half, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(half, grid[1])  # (H*W, D/2)
    return np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    # np.float was removed in NumPy 1.24; np.float64 is the dtype it aliased.
    omega = np.arange(embed_dim // 2, dtype=np.float64)
    omega /= embed_dim / 2.
    omega = 1. / 10000**omega  # (D/2,)
    pos = pos.reshape(-1)  # (M,)
    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product
    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)
    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb
# --------------------------------------------------------
# Interpolate position embeddings for high-resolution
# References:
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
def interpolate_pos_embed(model, checkpoint_model):
    """Resize a checkpoint's 'pos_embed' in place (in ``checkpoint_model``) to
    match ``model``'s patch grid via bicubic interpolation; extra tokens
    (cls/dist) are carried over unchanged. No-op when the key is absent or
    the grid sizes already agree."""
    if 'pos_embed' not in checkpoint_model:
        return
    pos_embed_checkpoint = checkpoint_model['pos_embed']
    embedding_size = pos_embed_checkpoint.shape[-1]
    num_patches = model.patch_embed.num_patches
    num_extra_tokens = model.pos_embed.shape[-2] - num_patches
    # height (== width) for the checkpoint position embedding
    orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
    # height (== width) for the new position embedding
    new_size = int(num_patches ** 0.5)
    if orig_size == new_size:
        return
    print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
    # class_token and dist_token are kept unchanged
    extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
    # only the position tokens are interpolated
    pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
    pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
    pos_tokens = torch.nn.functional.interpolate(
        pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
    pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
    checkpoint_model['pos_embed'] = torch.cat((extra_tokens, pos_tokens), dim=1)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
def adjust_learning_rate(optimizer, epoch, args):
    """Decay the learning rate with half-cycle cosine after warmup"""
    if epoch < args.warmup_epochs:
        # linear warmup from 0 up to args.lr
        lr = args.lr * epoch / args.warmup_epochs
    else:
        # half-cycle cosine from args.lr down to args.min_lr
        progress = (epoch - args.warmup_epochs) / (args.epochs - args.warmup_epochs)
        lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * (1. + math.cos(math.pi * progress))
    for group in optimizer.param_groups:
        # per-group scaling (e.g. layer-wise lr decay) when present
        group["lr"] = lr * group["lr_scale"] if "lr_scale" in group else lr
    return lr
|
import flax.linen as nn
import jax
import jax.numpy as jnp
from jax.numpy import einsum
from einops import rearrange, repeat
from typing import Callable, Any
import numpy as np
import tensorflow as tf
def exists(val):
    """True when ``val`` is not None (falsy values like 0/'' still count)."""
    return val is not None
def pair(t):
    """Return ``t`` unchanged if already a tuple, else duplicate it into one."""
    if isinstance(t, tuple):
        return t
    return (t, t)
# adaptive token sampling functions and classes
def log(t, eps = 1e-6):
    """Numerically-safe natural log: offsets the input by ``eps``."""
    return jnp.log(t + eps)
def sample_gumbel(shape, dtype, eps = 1e-6, key = None):
    """Draw Gumbel(0, 1) noise of the given ``shape``/``dtype``.

    Fix vs. the original: it read a module-global PRNG ``key`` that only
    exists when this file runs as a script (NameError when imported).
    ``key`` is now an optional parameter; when omitted, the module-global
    ``key`` is used if present (preserving the script's behavior), otherwise
    PRNGKey(0) — the same value the script assigned. NOTE: a fixed default
    key produces identical noise on every call; pass a fresh key for real
    sampling.
    """
    if key is None:
        key = globals().get('key')
    if key is None:
        key = jax.random.PRNGKey(0)
    u = jax.random.uniform(key, shape, dtype = dtype)
    return -log(-log(u, eps), eps)
def torch_gather(x, indices, gather_axis):
    """Emulate ``torch.gather(x, gather_axis, indices)`` using TensorFlow ops.

    NOTE(review): this file is otherwise jax-based; the TensorFlow dependency
    (and eager tensors at call time) is assumed available — confirm.
    """
    # if pytorch gather indices are
    # [[[0, 10, 20], [0, 10, 20], [0, 10, 20]],
    # [[0, 10, 20], [0, 10, 20], [0, 10, 20]]]
    # tf nd_gather needs to be
    # [[0,0,0], [0,0,10], [0,0,20], [0,1,0], [0,1,10], [0,1,20], [0,2,0], [0,2,10], [0,2,20],
    # [1,0,0], [1,0,10], [1,0,20], [1,1,0], [1,1,10], [1,1,20], [1,2,0], [1,2,10], [1,2,20]]
    indices = tf.cast(indices, tf.int64)
    # create a tensor containing indices of each element
    all_indices = tf.where(tf.fill(indices.shape, True))
    gather_locations = tf.reshape(indices, [indices.shape.num_elements()])
    # splice in our pytorch style index at the correct axis
    gather_indices = []
    for axis in range(len(indices.shape)):
        if axis == gather_axis:
            gather_indices.append(gather_locations)
        else:
            gather_indices.append(all_indices[:, axis])
    gather_indices = tf.stack(gather_indices, axis=-1)
    gathered = tf.gather_nd(x, gather_indices)
    # restore the original indices shape (flat gather above)
    reshaped = tf.reshape(gathered, indices.shape)
    return reshaped
def batched_index_select(values, indices, dim = 1):
    """Gather ``values`` along ``dim`` with per-batch ``indices``; the actual
    gather is delegated to torch_gather (TensorFlow) and the result converted
    back to a jax array."""
    value_dims = values.shape[(dim + 1):]
    values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices))
    # broadcast indices over the trailing value dimensions
    indices = indices[(..., *((None,) * len(value_dims)))]
    indices = tf.tile(indices, multiples=[1] * len(indices_shape) + [*value_dims])
    # expand values with singleton axes so both operands share a common rank
    value_expand_len = len(indices_shape) - (dim + 1)
    values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)]
    value_expand_shape = [-1] * len(values.shape)
    expand_slice = slice(dim, (dim + value_expand_len))
    value_expand_shape[expand_slice] = indices.shape[expand_slice]
    # the gather axis shifts by the number of inserted singleton axes
    dim += value_expand_len
    values = torch_gather(values, indices, dim)
    return jnp.array(values)
def jax_unstack(x, axis = 0):
    """Move ``axis`` to the front, so iterating the result yields the slices
    along that axis (a lazy analogue of tf.unstack / torch.unbind)."""
    return jnp.moveaxis(x, axis, 0)
class AdaptiveTokenSampling(nn.Module):
    """Adaptive token sampling (ATS): keep at most ``output_num_tokens``
    tokens, sampled (via gumbel-max) in proportion to value-weighted CLS
    attention; returns the gathered attention, an updated padding mask, and
    the sampled token ids.

    NOTE(review): depends on TensorFlow (keras ``pad_sequences``) and on
    ``sample_gumbel``'s PRNG key — confirm both are available at call time.
    """
    output_num_tokens: int
    eps: float = 1e-6
    @nn.compact
    def __call__(self, attn, value=None, mask=None):
        # the next two assignments are immediately superseded by the combined
        # unpacking below; kept as-is
        eps = self.eps
        output_num_tokens = self.output_num_tokens
        heads, output_num_tokens, eps, dtype = attn.shape[1], self.output_num_tokens, self.eps, attn.dtype
        # first get the attention values for CLS token to all other tokens
        cls_attn = attn[..., 0, 1:]
        # calculate the norms of the values, for weighting the scores, as described in the paper
        value_norms = jnp.linalg.norm(value[..., 1:, :], axis=-1)
        # weigh the attention scores by the norm of the values, sum across all heads
        cls_attn = einsum('b h n, b h n -> b n', cls_attn, value_norms)
        # normalize to 1
        normed_cls_attn = cls_attn / (jnp.sum(cls_attn, axis = -1, keepdims = True) + eps)
        # instead of using inverse transform sampling, going to invert the softmax and use gumbel-max sampling instead
        pseudo_logits = log(normed_cls_attn)
        # mask out pseudo logits for gumbel-max sampling
        mask_without_cls = mask[:, 1:]
        # NOTE(review): jnp.finfo is given the array itself rather than its
        # dtype (attn.dtype) — confirm this works on the targeted jax version
        mask_value = -jnp.finfo(attn).max / 2
        pseudo_logits = jnp.where(~mask_without_cls, mask_value, pseudo_logits)
        # expand k times, k being the adaptive sampling number
        pseudo_logits = repeat(pseudo_logits, 'b n -> b k n', k = output_num_tokens)
        pseudo_logits = pseudo_logits + sample_gumbel(pseudo_logits.shape, dtype = dtype)
        # gumble-max and add one to reserve 0 for padding / mask
        sampled_token_ids = jnp.argmax(pseudo_logits, axis=-1) + 1
        # calculate unique using torch.unique and then pad the sequence from the right
        unique_sampled_token_ids_list = []
        unstack = jax_unstack(sampled_token_ids, axis = 0)
        for t in unstack:
            t = jnp.int32(t)
            t = jnp.unique(t)
            x = jnp.sort(t)
            unique_sampled_token_ids_list.append(x)
        unique_sampled_token_ids = tf.keras.preprocessing.sequence.pad_sequences(unique_sampled_token_ids_list)
        # calculate the new mask, based on the padding
        new_mask = unique_sampled_token_ids != 0
        # CLS token never gets masked out (gets a value of True)
        new_mask = jnp.pad(new_mask, pad_width=[[0, 0], [1, 0]], constant_values=True)
        # prepend a 0 token id to keep the CLS attention scores
        unique_sampled_token_ids = jnp.pad(unique_sampled_token_ids, pad_width=[[0, 0], [1, 0]])
        expanded_unique_sampled_token_ids = repeat(unique_sampled_token_ids, 'b n -> b h n', h=heads)
        # gather the new attention scores
        new_attn = batched_index_select(attn, expanded_unique_sampled_token_ids, dim=2)
        # return the sampled attention scores, new mask (denoting padding), as well as the sampled token indices (for the residual)
        return new_attn, new_mask, unique_sampled_token_ids
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before delegating to ``fn``."""
    fn: Callable
    @nn.compact
    def __call__(self, x, **kwargs):
        normed = nn.LayerNorm(epsilon = 1e-5, use_bias = False)(x)
        return self.fn(normed, **kwargs)
class FeedForward(nn.Module):
    """Two-layer MLP (dim -> hidden_dim -> dim) with GELU and dropout."""
    dim: int
    hidden_dim: int
    dropout: float = 0.
    @nn.compact
    def __call__(self, x):
        hidden = nn.gelu(nn.Dense(features = self.hidden_dim)(x))
        hidden = nn.Dropout(rate = self.dropout)(hidden, deterministic = False)
        out = nn.Dense(features = self.dim)(hidden)
        return nn.Dropout(rate = self.dropout)(out, deterministic = False)
class Attention(nn.Module):
    """Multi-head self-attention with optional adaptive token sampling.

    Returns ``(output, mask, sampled_token_ids)``; the latter two are the
    (possibly updated) padding mask and the ids kept by ATS (None when ATS
    did not run).
    """
    dim: int
    heads: int = 8
    dim_head: int = 64
    dropout: float = 0.0
    output_num_tokens: Any = None
    @nn.compact
    def __call__(self, x, mask = None):
        output_num_tokens = self.output_num_tokens
        ats = AdaptiveTokenSampling(output_num_tokens) if exists(output_num_tokens) else None
        inner_dim = self.dim_head * self.heads
        scale = self.dim_head ** -0.5
        num_tokens = x.shape[1]
        # single projection producing q, k and v
        to_qkv = nn.Dense(features = inner_dim * 3, use_bias = False)(x)
        qkv = jnp.split(to_qkv, 3, axis = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
        dots = einsum('b h i d, b h j d -> b h i j', q, k) * scale
        if exists(mask):
            mask_f = mask #tf.cast(mask, tf.float32)
            # pairwise validity: (i, j) attends only when both are unmasked
            dots_mask = rearrange(mask_f, 'b i -> b 1 i 1') * rearrange(mask_f, 'b j -> b 1 1 j')
            dots_mask = dots_mask #tf.cast(dots_mask, tf.bool)
            # NOTE(review): jnp.finfo receives the array rather than its dtype
            # (dots.dtype) — confirm it works on the targeted jax version
            mask_value = -jnp.finfo(dots).max
            dots = jnp.where(~dots_mask, mask_value, dots)
        attn = nn.softmax(dots, axis = -1)
        sampled_token_ids = None
        # if adaptive token sampling is enabled
        # and number of tokens is greater than the number of output tokens
        if exists(output_num_tokens) and (num_tokens - 1) > output_num_tokens:
            attn, mask, sampled_token_ids = ats(attn, v, mask = mask)
        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = nn.Dense(features = self.dim)(out)
        out = nn.Dropout(rate = self.dropout)(out, deterministic = False)
        return out, mask, sampled_token_ids
class Transformer(nn.Module):
    """Stack of pre-norm attention + feed-forward blocks with adaptive token
    sampling; returns the output tokens and their surviving token ids.

    Fix vs. the original: ``np.bool`` was removed in NumPy 1.24; the padding
    mask is now built with the builtin ``bool`` dtype (identical semantics).
    """
    dim: int
    depth: int
    max_tokens_per_depth: tuple
    heads: int
    dim_head: int
    mlp_dim: int
    dropout: float = 0.0
    @nn.compact
    def __call__(self, x):
        assert len(self.max_tokens_per_depth) == self.depth, 'max_tokens_per_depth must be a tuple of length that is equal to the depth of the transformer'
        assert sorted(self.max_tokens_per_depth, reverse=True) == list(self.max_tokens_per_depth), 'max_tokens_per_depth must be in decreasing order'
        assert min(self.max_tokens_per_depth) > 0, 'max_tokens_per_depth must have at least 1 token at any layer'
        layers = []
        for _, output_num_tokens in zip(range(self.depth), self.max_tokens_per_depth):
            layers.append([
                PreNorm(Attention(self.dim, output_num_tokens = output_num_tokens, heads = self.heads, dim_head = self.dim_head, dropout = self.dropout)),
                PreNorm(FeedForward(self.dim, self.mlp_dim, dropout = self.dropout))
            ])
        b, n = x.shape[:2]
        # use mask to keep track of the paddings when sampling tokens
        # as the duplicates (when sampling) are just removed, as mentioned in the paper
        mask = jnp.ones([b, n], dtype = bool)  # np.bool removed in NumPy >= 1.24
        token_ids = jnp.arange(n)
        token_ids = repeat(token_ids, 'n -> b n', b = b)
        for attn, ff in layers:
            attn_out, mask, sampled_token_ids = attn(x, mask=mask)
            # when token sampling, one needs to then gather the residual tokens with the sampled token ids
            if exists(sampled_token_ids):
                x = batched_index_select(x, sampled_token_ids, dim=1)
                token_ids = batched_index_select(token_ids, sampled_token_ids, dim=1)
            x = x + attn_out
            x = ff(x) + x
        return x, token_ids
class ViT(nn.Module):
    """Vision Transformer with adaptive token sampling (ATS-ViT).

    NOTE(review): ``dim_head = 64`` has no type annotation, so flax treats it
    as a plain class attribute rather than a configurable field — confirm it
    was not meant to be ``dim_head: int = 64``. The ``training`` kwarg is
    accepted but unused (dropout is always non-deterministic here).
    """
    image_size: int
    patch_size: int
    num_classes: int
    dim: int
    depth: int
    max_tokens_per_depth: tuple
    heads: int
    mlp_dim: int
    dim_head = 64
    dropout: float = 0.0
    emb_dropout: float = 0.0
    @nn.compact
    def __call__(self, img, return_sampled_token_ids=False, training=True, **kwargs):
        image_height, image_width = pair(self.image_size)
        patch_height, patch_width = pair(self.patch_size)
        # the image must tile exactly into patches
        assert image_height % patch_height == 0
        assert image_width % patch_width == 0
        num_patches = (image_height // patch_height) * (image_width // patch_width)
        pos_embedding = self.param('pos_embedding', nn.initializers.zeros, [1, num_patches + 1, self.dim])
        cls_token = self.param('cls', nn.initializers.zeros, [1, 1, self.dim])
        # patchify to (b, h*w, p1*p2*c), then linearly embed each patch
        x = rearrange(img, 'b (h p1) (w p2) c -> b (h w) (p1 p2 c)', p1=patch_height, p2=patch_width)
        x = nn.Dense(features = self.dim)(x)
        b, n, _ = x.shape
        # prepend one CLS token per batch element
        cls_tokens = repeat(cls_token, '() n d -> b n d', b=b)
        x = jnp.concatenate([cls_tokens, x], axis=1)
        x += pos_embedding[:, :(n + 1)]
        x = nn.Dropout(rate = self.emb_dropout)(x, deterministic = False)
        x, token_ids = Transformer(self.dim, self.depth, self.max_tokens_per_depth, self.heads, self.dim_head, self.mlp_dim, self.dropout)(x)
        mlp_head = nn.Sequential([
            nn.LayerNorm(epsilon = 1e-5, use_bias = False),
            nn.Dense(features = self.num_classes)
        ])
        # classify from the CLS token only
        logits = mlp_head(x[:, 0])
        if return_sampled_token_ids:
            # remove CLS token and decrement by 1 to make -1 the padding
            token_ids = token_ids[:, 1:] - 1
            return logits, token_ids
        return logits
if __name__ == '__main__':
    # smoke test: build an ATS-ViT, run one forward pass, count parameters
    v = ViT(
        image_size = 256,
        patch_size = 16,
        num_classes = 1000,
        dim = 1024,
        depth = 6,
        max_tokens_per_depth = (256, 128, 64, 32, 16, 8), # a tuple that denotes the maximum number of tokens that any given layer should have. if the layer has greater than this amount, it will undergo adaptive token sampling
        heads = 16,
        mlp_dim = 2048,
        dropout = 0.1,
        emb_dropout = 0.1
    )
    # NOTE(review): this 'key' is also read as a module-global by
    # sample_gumbel above — renaming it would break that function.
    key = jax.random.PRNGKey(0)
    img = jax.random.normal(key, (4, 256, 256, 3))
    init_rngs = {'params': jax.random.PRNGKey(1),
                'dropout': jax.random.PRNGKey(2),
                'emb_dropout': jax.random.PRNGKey(3)}
    params = v.init(init_rngs, img)
    output = v.apply(params, img, rngs=init_rngs)
    print(output.shape)
    # total parameter count across the pytree of parameter arrays
    n_params_flax = sum(
        jax.tree_leaves(jax.tree_map(lambda x: np.prod(x.shape), params))
    )
    print(f"Number of parameters in Flax model: {n_params_flax}")
|
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
from jax.numpy import einsum
from einops import rearrange, repeat
from random import randrange
def exists(val):
    """Convenience predicate: is ``val`` set (i.e. not None)?"""
    return val is not None
def dropout_layers(layers, dropout):
    """Randomly drop whole layers (stochastic depth) with probability
    ``dropout``, always keeping at least one.

    Fix vs. the original: jax arrays are immutable, so the item assignment
    ``to_drop[rand_index] = False`` raised TypeError whenever every layer was
    selected for dropping; ``.at[...].set(...)`` is the functional update.
    NOTE(review): the PRNG key is fixed at 0, so the same layers are dropped
    on every call — confirm whether that is intended.
    """
    if dropout == 0:
        return layers
    num_layers = len(layers)
    key = jax.random.PRNGKey(0)
    to_drop = jax.random.uniform(key, minval=0.0, maxval=1.0, shape=[num_layers]) < dropout
    # make sure at least one layer makes it
    if bool(to_drop.all()):
        rand_index = randrange(num_layers)
        to_drop = to_drop.at[rand_index].set(False)
    layers = [layer for (layer, drop) in zip(layers, to_drop) if not drop]
    return layers
class LayerScale(nn.Module):
    """Scale the wrapped module's output by a depth-dependent constant.

    NOTE(review): in the CaiT paper the per-channel scale is a *learned*
    parameter initialized at ``init_eps``; here it is a fixed constant built
    with ``jnp.full`` — confirm whether that is intentional.
    """
    dim: int
    fn: Callable
    depth: int
    @nn.compact
    def __call__(self, x, **kwargs):
        if self.depth <= 18:  # epsilon detailed in section 2 of paper
            init_eps = 0.1
        elif self.depth > 18 and self.depth <= 24:
            init_eps = 1e-5
        else:
            init_eps = 1e-6
        # broadcastable over (batch, tokens, dim)
        scale = jnp.full([1, 1, self.dim], init_eps)
        return self.fn(x, **kwargs) * scale
class PreNorm(nn.Module):
    """LayerNorm the input, then run ``fn`` on the normalized tensor."""
    fn: Callable
    @nn.compact
    def __call__(self, x, **kwargs):
        return self.fn(nn.LayerNorm(epsilon = 1e-5, use_bias = False)(x), **kwargs)
class FeedForward(nn.Module):
    """dim -> hidden_dim -> dim MLP with GELU activation and dropout."""
    dim: int
    hidden_dim: int
    dropout: float = 0.
    @nn.compact
    def __call__(self, x):
        h = nn.gelu(nn.Dense(features = self.hidden_dim)(x))
        h = nn.Dropout(rate = self.dropout)(h, deterministic = False)
        h = nn.Dense(features = self.dim)(h)
        return nn.Dropout(rate = self.dropout)(h, deterministic = False)
class Attention(nn.Module):
    """Multi-head attention with 'talking heads' (learned mixing of heads
    before and after softmax), optionally cross-attending to ``context``.

    When ``context`` is given it is concatenated after ``x`` to form the
    key/value sequence; queries always come from ``x`` alone.
    NOTE(review): both talking-heads mixing matrices are zero-initialized,
    which zeroes the mixed attention at init — confirm this initialization.
    """
    dim: int
    heads: int = 8
    dim_head: int = 64
    dropout: float = 0.0
    @nn.compact
    def __call__(self, x, context = None):
        inner_dim = self.dim_head * self.heads
        heads = self.heads
        scale = self.dim_head ** -0.5
        mix_heads_pre_attn = self.param('mix_heads_pre_attn', nn.initializers.zeros, [heads, heads])
        mix_heads_post_attn = self.param('mix_heads_post_attn', nn.initializers.zeros, [heads, heads])
        if not exists(context):
            context = x
        else:
            # cross-attention: keys/values cover x followed by the context
            context = jnp.concatenate([x, context], axis = 1)
        q = nn.Dense(features = inner_dim, use_bias = False)(x)
        kv = nn.Dense(features = inner_dim * 2, use_bias = False)(context)
        k, v = jnp.split(kv, 2, axis = -1)
        qkv = (q, k, v)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = heads), qkv)
        dots = einsum('b h i d, b h j d -> b h i j', q, k) * scale
        dots = einsum('b h i j, h g -> b g i j', dots, mix_heads_pre_attn) # talking heads, pre-softmax
        attn = nn.softmax(dots, axis = -1)
        attn = einsum('b h i j, h g -> b g i j', attn, mix_heads_post_attn) # talking heads, post-softmax
        x = einsum('b h i j, b h j d -> b h i d', attn, v)
        x = rearrange(x, 'b h n d -> b n (h d)')
        out = nn.Dense(features = self.dim)(x)
        to_out = nn.Dropout(rate = self.dropout)(out, deterministic = False)
        return to_out
class Transformer(nn.Module):
    """Stack of LayerScale(PreNorm(...)) attention / feed-forward blocks with
    residual connections and optional stochastic layer dropout.

    NOTE(review): dropout_layers draws from a fixed PRNGKey(0) and runs on
    every __call__ (during init and apply alike), so the same layers are
    always dropped — confirm this is intended.
    """
    dim: int
    depth: int
    heads: int
    dim_head: int
    mlp_dim: int
    dropout: float = 0.0
    layer_dropout: float = 0.0
    @nn.compact
    def __call__(self, x, context=None):
        layers = []
        layer_dropout = self.layer_dropout
        for ind in range(self.depth):
            # depth (1-based) selects the LayerScale epsilon
            layers.append([
                LayerScale(self.dim, PreNorm(Attention(self.dim, self.heads, self.dim_head, dropout = self.dropout)), depth = ind + 1),
                LayerScale(self.dim, PreNorm(FeedForward(self.dim, self.mlp_dim, dropout = self.dropout)), depth = ind + 1)
            ])
        layers = dropout_layers(layers, dropout = layer_dropout)
        for attn, ff in layers:
            x = attn(x, context=context) + x
            x = ff(x) + x
        return x
class CaiT(nn.Module):
    """Class-Attention in Image Transformers (CaiT).

    Patch tokens are processed by a patch-only transformer (``depth``); a CLS
    token then cross-attends to those outputs in a shallow second transformer
    (``cls_depth``) and is classified by an MLP head.
    """
    image_size: int
    patch_size: int
    num_classes: int
    dim: int
    depth: int
    cls_depth: int
    heads: int
    mlp_dim: int
    dim_head: int = 64
    dropout: float = 0.0
    emb_dropout: float = 0.0
    layer_dropout: float = 0.0
    @nn.compact
    def __call__(self, img):
        assert self.image_size % self.patch_size == 0, 'Image dimensions must be divisible by the patch size.'
        num_patches = (self.image_size // self.patch_size) ** 2
        pos_embedding = self.param('pos_embedding', nn.initializers.zeros, [1, num_patches, self.dim])
        cls_token = self.param('cls', nn.initializers.zeros, [1, 1, self.dim])
        # patchify to (b, h*w, p1*p2*c) and linearly embed each patch
        x = rearrange(img, 'b (h p1) (w p2) c -> b (h w) (p1 p2 c)', p1 = self.patch_size, p2 = self.patch_size)
        x = nn.Dense(features = self.dim)(x)
        b, n, d = x.shape
        x += pos_embedding[:, :n]
        x = nn.Dropout(rate = self.emb_dropout)(x, deterministic = False)
        # stage 1: self-attention over patch tokens only (no CLS involved)
        x = Transformer(self.dim, self.depth, self.heads, self.dim_head, self.mlp_dim, self.dropout, self.layer_dropout)(x)
        cls_tokens = repeat(cls_token, '() n d -> b n d', b = b)
        # stage 2: the CLS token cross-attends to the patch outputs
        x = Transformer(self.dim, self.cls_depth, self.heads, self.dim_head, self.mlp_dim, self.dropout, self.layer_dropout)(cls_tokens, context = x)
        mlp_head = nn.Sequential([
            nn.LayerNorm(epsilon = 1e-5, use_bias = False),
            nn.Dense(features = self.num_classes)
        ])
        # classify from the (single) CLS token
        x = mlp_head(x[:, 0])
        return x
if __name__ == "__main__":
    import numpy as np

    key = jax.random.PRNGKey(0)
    img = jax.random.normal(key, (1, 256, 256, 3))

    v = CaiT(
        image_size = 256,
        patch_size = 32,
        num_classes = 1000,
        dim = 1024,
        depth = 12,           # depth of transformer for patch to patch attention only
        cls_depth = 2,        # depth of cross attention of CLS tokens to patch
        heads = 16,
        mlp_dim = 2048,
        dropout = 0.1,
        emb_dropout = 0.1,
        layer_dropout = 0.05  # randomly dropout 5% of the layers
    )

    init_rngs = {'params': jax.random.PRNGKey(1),
                 'dropout': jax.random.PRNGKey(2),
                 'emb_dropout': jax.random.PRNGKey(3)}

    params = v.init(init_rngs, img)
    output = v.apply(params, img, rngs=init_rngs)
    print(output.shape)

    # FIX: jax.tree_leaves / jax.tree_map were deprecated and have been
    # removed in recent JAX releases; use the stable jax.tree_util API.
    n_params_flax = sum(
        jax.tree_util.tree_leaves(
            jax.tree_util.tree_map(lambda p: np.prod(p.shape), params)
        )
    )
    print(f"Number of parameters in Flax model: {n_params_flax}")
|
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum, nn
from math import log2, floor
def exists(val):
    """Return True iff ``val`` is anything other than None."""
    return not (val is None)
# residual wrapper
class Residual(nn.Module):
    """Skip connection: adds the wrapped module's output back onto its input."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        out = self.fn(x, **kwargs)
        return out + x
# pre-normalization wrapper
class PreNorm(nn.Module):
    """Applies LayerNorm to the input before invoking the wrapped module."""

    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, **kwargs)
# feedforward layer with GELU activation function
# feedforward layer with GELU activation function
class FeedForward(nn.Module):
    """Two-layer MLP: dim -> dim*mult -> dim, with GELU and optional dropout."""

    def __init__(self, dim, mult = 4, dropout = 0.):
        super().__init__()
        hidden = int(dim * mult)
        stages = [
            nn.Linear(dim, hidden),
            nn.GELU(),
            nn.Dropout(dropout),  # no-op when dropout == 0.
            nn.Linear(hidden, dim),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        return self.net(x)
# AliBi Positional Bias
# AliBi Positional Bias
class AlibiPositionalBias(nn.Module):
    """Attention with Linear Biases (ALiBi).

    Produces a static, head-specific linear penalty on attention logits
    proportional to query/key distance, replacing learned position
    embeddings. The computed bias is cached between calls.
    """

    def __init__(self, heads):
        super().__init__()
        self.heads = heads
        # One fixed slope per head. Non-persistent buffers move with
        # .to(device)/.cuda() but are excluded from the state dict.
        slopes = torch.Tensor(self._get_slopes(heads))
        slopes = rearrange(slopes, 'h -> h 1 1')
        self.register_buffer('slopes', slopes, persistent = False)
        # Lazily computed bias cache, filled on the first forward pass.
        self.register_buffer('bias', None, persistent = False)

    def get_bias(self, i, j, device):
        # Distance matrix: entry (q, k) is -|k - q|, broadcast to (1, i, j).
        i_arange = torch.arange(i, device = device)
        j_arange = torch.arange(j, device = device)
        bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
        return bias

    @staticmethod
    def _get_slopes(heads):
        # Geometric slope schedule from the ALiBi paper: exact for a power
        # of two head count; otherwise interleave slopes from the two
        # nearest powers of two to reach `heads` values.
        def get_slopes_power_of_2(n):
            start = (2**(-2**-(log2(n)-3)))
            ratio = start
            return [start*ratio**i for i in range(n)]

        if log2(heads).is_integer():
            return get_slopes_power_of_2(heads)

        closest_power_of_2 = 2 ** floor(log2(heads))
        return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]

    def forward(self, qk_sim):
        # qk_sim: attention logits whose trailing dims are (heads, i, j).
        h, i, j, device = *qk_sim.shape[-3:], qk_sim.device

        # Serve from the cache when it already covers this sequence length.
        if exists(self.bias) and self.bias.shape[-1] >= j:
            return self.bias[..., :i, :j]

        bias = self.get_bias(i, j, device)
        bias = bias * self.slopes  # broadcast (1, i, j) * (h, 1, 1) -> (h, i, j)

        # Pad with zero-bias heads if the input carries more heads than we
        # have slopes for (zero when h == self.heads).
        num_heads_unalibied = h - bias.shape[0]
        bias = F.pad(bias, (0, 0, 0, 0, 0, num_heads_unalibied))

        # Re-register so the enlarged bias is cached for subsequent calls.
        self.register_buffer('bias', bias, persistent=False)
        return bias
# attention
# attention
class Attention(nn.Module):
    """Causal multi-head self-attention with ALiBi positional biases.

    Submodule attribute names are kept identical so state-dict keys are
    unchanged.
    """

    def __init__(
        self,
        *,
        dim,
        heads = 8,
        dim_head = 64,
        dropout = 0.
    ):
        super().__init__()
        self.heads = heads
        self.scale = dim_head ** -0.5
        inner_dim = heads * dim_head

        self.dropout = nn.Dropout(dropout)
        self.to_q = nn.Linear(dim, inner_dim, bias = False)
        self.to_k = nn.Linear(dim, inner_dim, bias = False)
        self.to_v = nn.Linear(dim, inner_dim, bias = False)
        self.to_out = nn.Linear(inner_dim, dim)

        self.alibi_pos_biases = AlibiPositionalBias(heads = self.heads)

        # cached causal mask (non-persistent, regrown on demand)
        self.register_buffer("mask", None, persistent=False)

    def get_mask(self, n, device):
        # Reuse the cached mask whenever it already covers n positions.
        cached = self.mask
        if cached is not None and cached.shape[-1] >= n:
            return cached[:n, :n]
        fresh = torch.triu(torch.ones((n, n), device=device, dtype=torch.bool), 1)
        self.register_buffer("mask", fresh, persistent=False)
        return fresh

    def forward(self, x):
        seq_len, device = x.shape[1], x.device

        queries = self.to_q(x)
        keys = self.to_k(x)
        values = self.to_v(x)
        queries, keys, values = (
            rearrange(t, 'b n (h d) -> b h n d', h = self.heads)
            for t in (queries, keys, values)
        )

        sim = einsum('b h i d, b h j d -> b h i j', queries * self.scale, keys)

        # ALiBi positional bias
        sim = sim + self.alibi_pos_biases(sim)

        # causal mask: each position may only attend to itself and the past
        causal_mask = self.get_mask(seq_len, device)
        sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)

        # attention weights (optional dropout)
        attn = self.dropout(sim.softmax(dim = -1))

        out = einsum('b h i j, b h j d -> b h i d', attn, values)
        out = rearrange(out, 'b h n d -> b n (h d)')
        return self.to_out(out)
# Encoder
# Encoder
class Encoder(nn.Module):
    """Stack of `depth` residual (attention, feedforward) blocks."""

    def __init__(
        self,
        dim,
        depth,
        heads,
        dim_head,
        dropout = 0.
    ):
        super().__init__()
        self.layers = nn.ModuleList(
            nn.ModuleList([
                Residual(PreNorm(dim, Attention(dim = dim, heads = heads, dim_head = dim_head, dropout = dropout))),
                Residual(PreNorm(dim, FeedForward(dim = dim, dropout = dropout))),
            ])
            for _ in range(depth)
        )

    def forward(self, x):
        for attn_block, ff_block in self.layers:
            x = ff_block(attn_block(x))
        return x
# Cross Attention
# class CrossAttention():
# ALiBi Model
class ALiBi(nn.Module):
    """Causal language model: token embedding -> ALiBi encoder -> vocab logits.

    Args:
        num_tokens: vocabulary size.
        dim: model (embedding) dimension.
        depth: number of encoder layers.
        dim_head: dimension of each attention head.
        heads: number of attention heads.
    """

    def __init__(self, *, num_tokens, dim, depth, dim_head, heads):
        super().__init__()
        self.token_emb = nn.Embedding(num_tokens, dim)
        # BUG FIX: Encoder's signature is (dim, depth, heads, dim_head, ...).
        # The original call passed positional args as (dim, depth, dim_head,
        # heads), silently swapping the head count with the per-head
        # dimension. Keyword arguments route each value correctly.
        self.transformer = Encoder(dim=dim, depth=depth, heads=heads, dim_head=dim_head)
        self.to_logits = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, num_tokens)
        )

    def forward(self, x):
        # x: (batch, seq) token ids -> (batch, seq, num_tokens) logits
        x = self.token_emb(x)
        x = self.transformer(x)
        logits = self.to_logits(x)
        return logits
if __name__ == "__main__":
    model = ALiBi(
        num_tokens = 20000,
        dim = 512,
        depth = 12,
        heads = 8,
        dim_head = 64,
    )

    tokens = torch.randint(0, 20000, (1, 512))
    logits = model(tokens)  # (1, 512, 20000): per-position vocabulary logits
    print(logits.shape)

    # Count trainable parameters only.
    n_params_torch = sum(
        p.numel() for p in model.parameters() if p.requires_grad
    )
    print(f"Number of parameters in torch model: {n_params_torch}")
|
from torchvision.datasets import CIFAR10
class ArtBench10(CIFAR10):
    """ArtBench-10 dataset, reusing torchvision's CIFAR10 loading machinery.

    The archive layout mirrors CIFAR-10 (five pickled train batches plus one
    test batch), so only the archive/file names, MD5 checksums, and metadata
    key need to be overridden.
    """

    # Directory inside the extracted archive.
    base_folder = "artbench-10-batches-py"
    # NOTE(review): url is empty, so download=True will fail; the archive
    # must be obtained manually (or the official mirror URL filled in).
    url = ""
    filename = "artbench-10-python.tar.gz"
    tgz_md5 = "b116ffdc5e07e162f119149c2ad7403f"
    # [file name, expected MD5] pairs, checked by the CIFAR10 loader.
    train_list = [
        ["data_batch_1", "c2e02a78dcea81fe6fead5f1540e542f"],
        ["data_batch_2", "1102a4dcf41d4dd63e20c10691193448"],
        ["data_batch_3", "177fc43579af15ecc80eb506953ec26f"],
        ["data_batch_4", "566b2a02ccfbafa026fbb2bcec856ff6"],
        ["data_batch_5", "faa6a572469542010a1c8a2a9a7bf436"],
    ]
    test_list = [
        ["test_batch", "fa44530c8b8158467e00899609c19e52"],
    ]
    # Class labels live under the "styles" key in the meta file (CIFAR10
    # uses "label_names").
    meta = {
        "filename": "meta",
        "key": "styles",
        "md5": "5bdcafa7398aa6b75d569baaec5cd4aa",
    }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.