# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Linear API"""
import warnings
from typing import Union, Optional, Callable, Tuple, List, Dict, Any
import torch
from torch.nn.parameter import Parameter
import transformer_engine_extensions as tex
from .base import (
get_workspace,
_prepare_backward,
get_ub,
TransformerEngineBaseModule,
_2X_ACC_FPROP,
_2X_ACC_DGRAD,
_2X_ACC_WGRAD,
)
from ..fp8 import get_fp8_te_dtype
from ..utils import (
divide,
get_default_init_method,
cast_if_needed,
assert_dim_for_fp8_exec,
)
from ..distributed import (
set_tensor_model_parallel_attributes,
get_distributed_world_size,
allreduce,
initialize_affine_weight_gpu,
reduce_scatter_along_first_dim,
gather_along_first_dim,
gather_along_last_dim,
)
from ..cpp_extensions import (
fp8_gemm,
gemm,
fp8_cast_transpose_fused,
cast_to_fp8,
)
from ..constants import GemmParallelModes, dist_group_type
from ..jit import no_torch_dynamo
__all__ = ["Linear"]
class _Linear(torch.autograd.Function):
"""Linear semi-top level module
Calls custom cuda extensions.
"""
@staticmethod
def forward(
ctx,
weight: torch.Tensor,
weight_fp8: Union[torch.Tensor, None],
weight_t_fp8: Union[torch.Tensor, None],
inp: torch.Tensor,
bias: torch.Tensor,
use_bias: bool,
is_first_microbatch: Union[bool, None],
fp8: bool,
fp8_calibration: bool,
fp8_meta: Dict[str, Any],
fuse_wgrad_accumulation: bool,
tp_group: Union[dist_group_type, None],
tp_size: int,
sequence_parallel: bool,
tensor_parallel: bool,
activation_dtype: torch.dtype,
parallel_mode: Union[str, None],
is_grad_enabled: bool,
ub_split_rs: bool,
ub_split_ag: bool,
) -> torch.Tensor:
# Make sure input dimensions are compatible
in_features = weight.shape[-1]
assert inp.shape[-1] == in_features, "GEMM not possible"
inputmat = inp.view((-1, in_features))
if fp8:
assert_dim_for_fp8_exec(inputmat)
assert_dim_for_fp8_exec(weight)
update_fp8_weights = is_first_microbatch is None or is_first_microbatch
if ub_split_rs:
tp_world_size = get_distributed_world_size(tp_group)
if tp_world_size == 1:
ub_split_rs = False
# Cast for native AMP
inputmat = cast_if_needed(inputmat, activation_dtype)
inputmat_no_fp8 = inputmat
if fp8:
fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
if not fp8_meta["recipe"].override_linear_precision.wgrad:
if is_grad_enabled:
inputmat, inputmat_t = fp8_cast_transpose_fused(
inputmat,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
)
else:
inputmat = cast_to_fp8(
inputmat,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
)
else:
inputmat, inputmat_t = cast_to_fp8(
inputmat,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
), None
# Column Parallel Linear
if parallel_mode == "column" and sequence_parallel:
inputmat_total, _ = gather_along_first_dim(inputmat, tp_group)
else:
inputmat_total = inputmat
if fp8:
bias_dtype = (
torch.bfloat16
if activation_dtype == torch.float32
else activation_dtype
)
bias = cast_if_needed(bias, bias_dtype) if use_bias else bias
if update_fp8_weights:
if is_grad_enabled:
fp8_cast_transpose_fused(
weight,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
cast_out=weight_fp8,
transpose_out=weight_t_fp8,
)
else:
weight_t_fp8 = None
weight_fp8 = cast_to_fp8(
weight,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
)
if ub_split_rs:
ub_obj_projout = get_ub("proj_fprop")
out = ub_obj_projout.get_ubuf_output(1)
dim_size = list(inputmat_total.size())
dim_size[0] = dim_size[0] // tp_world_size
dim_size[1] = weight.size(0)
rs_out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)
else:
dim_size = list(inputmat_total.size())
dim_size[1] = weight.size(0)
out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)
_ = fp8_gemm(
weight_fp8,
fp8_meta["scaling_fwd"].scale_inv,
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
inputmat_total,
fp8_meta["scaling_fwd"].scale_inv,
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
activation_dtype,
get_workspace(),
bias=bias,
use_bias=use_bias,
use_split_accumulator=_2X_ACC_FPROP,
out=out,
ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS if ub_split_rs else None,
ub=ub_obj_projout if ub_split_rs else None,
extra_output_tensor=rs_out if ub_split_rs else None,
)
else:
# Cast for native AMP
weight = cast_if_needed(weight, activation_dtype)
bias = cast_if_needed(bias, activation_dtype) if use_bias else bias
if fp8_calibration:
# amax of input
fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_INPUT] = \
torch.amax(inputmat_total).float()
# amax of weight
fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_WEIGHT] = \
torch.amax(weight).float()
if ub_split_rs:
ub_obj_projout = get_ub("proj_fprop")
out = ub_obj_projout.get_ubuf_output(1)
dim_size = list(inputmat_total.size())
dim_size[0] = dim_size[0] // tp_world_size
dim_size[1] = weight.size(0)
rs_out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)
else:
dim_size = list(inputmat_total.size())
dim_size[1] = weight.size(0)
out = torch.empty(dim_size, dtype=activation_dtype, device=inputmat_total.device)
_, _, _ = gemm(
weight,
inputmat_total,
activation_dtype,
get_workspace(),
bias=bias,
use_bias=use_bias,
out=out,
ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_RS if ub_split_rs else None,
ub=ub_obj_projout if ub_split_rs else None,
extra_output_tensor=rs_out if ub_split_rs else None,
)
if is_grad_enabled:
fp8_wgrad = fp8 and not fp8_meta["recipe"].override_linear_precision.wgrad
ctx.save_for_backward(
inputmat_no_fp8 if weight.requires_grad and not fp8_wgrad else None,
inputmat_t if weight.requires_grad and fp8_wgrad else None,
weight,
weight_t_fp8 if fp8 else None,
fp8_meta["scaling_fwd"].scale_inv.clone() if fp8 else None,
)
ctx.activation_dtype = activation_dtype
ctx.fp8 = fp8
ctx.fp8_meta = fp8_meta
ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation
ctx.is_first_microbatch = is_first_microbatch
ctx.use_bias = use_bias
ctx.sequence_parallel = sequence_parallel
ctx.tensor_parallel = tensor_parallel
ctx.inp_shape = inp.shape
ctx.parallel_mode = parallel_mode
ctx.tp_group = tp_group
ctx.ub_split_ag = ub_split_ag
ctx.tp_size = tp_size
ctx.requires_dgrad = inp.requires_grad
# Row Parallel Linear
if ub_split_rs:
out = rs_out
elif parallel_mode == "row" and sequence_parallel:
out, _ = reduce_scatter_along_first_dim(out, tp_group)
elif parallel_mode == "row" and tensor_parallel:
out, _ = allreduce(out, tp_group)
# [*, in_features] -> [*, out_features] except first dimension changes for SP
return out.view(-1, *inp.shape[1:-1], out.shape[-1])
@staticmethod
def backward(
ctx, grad_output: torch.Tensor
) -> Tuple[Union[torch.Tensor, None], ...]:
with _prepare_backward(
ctx.fp8, ctx.fp8_meta, ctx.tp_group, ctx.tp_size, name="_Linear"
):
(
inputmat,
inputmat_t,
weight,
weight_t_fp8,
fwd_scale_inverses,
) = ctx.saved_tensors
if ctx.ub_split_ag:
tp_world_size = get_distributed_world_size(ctx.tp_group)
if tp_world_size == 1:
ctx.ub_split_ag = False
if ctx.ub_split_ag:
dim_size = list(grad_output.size())
dim_size[0] = dim_size[0] * tp_world_size
ctx.ub_obj_gradout = get_ub("proj_dgrad")
(
grad_output,
grad_output_c,
grad_output_t,
grad_bias,
) = TransformerEngineBaseModule.grad_output_preprocess(
ctx, grad_output, ctx.parallel_mode == "row"
)
# Column Parallel Linear
# Overlap input AG with dgrad
if weight.requires_grad and ctx.parallel_mode == "column" and ctx.sequence_parallel:
if ctx.fp8 and not ctx.fp8_meta["recipe"].override_linear_precision.wgrad:
inputmat_t_total, handle = gather_along_last_dim(
inputmat_t, ctx.tp_group, async_op=ctx.requires_dgrad
)
else:
inputmat_total, handle = gather_along_first_dim(
inputmat, ctx.tp_group, async_op=ctx.requires_dgrad
)
else:
inputmat_t_total = inputmat_t
inputmat_total = inputmat
handle = None
if ctx.is_first_microbatch is not None:
accumulate_wgrad_into_param_main_grad = (
ctx.fuse_wgrad_accumulation and not ctx.is_first_microbatch
)
else:
accumulate_wgrad_into_param_main_grad = ctx.fuse_wgrad_accumulation
if ctx.fp8:
fp8_dtype_forward = get_fp8_te_dtype(
ctx.fp8_meta["recipe"], fprop_tensor=True
)
fp8_dtype_backward = get_fp8_te_dtype(
ctx.fp8_meta["recipe"], fprop_tensor=False
)
if ctx.requires_dgrad:
if ctx.fp8:
dgrad = fp8_gemm(
weight_t_fp8,
fwd_scale_inverses,
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
grad_output_c,
ctx.fp8_meta["scaling_bwd"].scale_inv,
tex.FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
ctx.activation_dtype,
get_workspace(),
use_split_accumulator=_2X_ACC_DGRAD,
ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG if ctx.ub_split_ag else None,
ub=ctx.ub_obj_gradout if ctx.ub_split_ag else None,
)
else:
dgrad, _, _ = gemm(
weight,
grad_output,
ctx.activation_dtype,
get_workspace(),
layout="NN",
grad=True,
ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG if ctx.ub_split_ag else None,
ub=ctx.ub_obj_gradout if ctx.ub_split_ag else None,
)
# Overlap dgrad-RS/AR with wgrad
if ctx.parallel_mode == "column" and ctx.sequence_parallel:
if handle is not None:
handle.wait()
dgrad, handle = reduce_scatter_along_first_dim(
dgrad, ctx.tp_group, async_op=True
)
elif ctx.parallel_mode == "column" and ctx.tensor_parallel:
dgrad, handle = allreduce(dgrad, ctx.tp_group, async_op=True)
if weight.requires_grad:
if ctx.fp8:
# WGRAD
if not ctx.fp8_meta["recipe"].override_linear_precision.wgrad:
if ctx.ub_split_ag:
grad_output_t = tex.fp8_transpose(grad_output_c, fp8_dtype_backward)
wgrad = fp8_gemm(
inputmat_t_total,
fwd_scale_inverses,
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
grad_output_t,
ctx.fp8_meta["scaling_bwd"].scale_inv,
tex.FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
ctx.activation_dtype,
get_workspace(),
accumulate=accumulate_wgrad_into_param_main_grad,
out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
use_split_accumulator=_2X_ACC_WGRAD,
)
else:
wgrad, _, _ = gemm(
inputmat_total,
grad_output,
ctx.activation_dtype,
get_workspace(),
layout="NT",
grad=True,
accumulate=accumulate_wgrad_into_param_main_grad,
out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
)
else:
# WGRAD
wgrad, grad_bias, _ = gemm(
inputmat_total,
grad_output,
ctx.activation_dtype,
get_workspace(),
layout="NT",
grad=True,
use_bias=ctx.use_bias,
accumulate=accumulate_wgrad_into_param_main_grad,
out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
)
# Column Parallel Linear
if ctx.parallel_mode == "column" and ctx.tensor_parallel and handle is not None:
handle.wait()
if not ctx.use_bias:
grad_bias = None
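# One gradient entry per argument of forward() (20 in total): wgrad for the
# weight, dgrad for the input, grad_bias for the bias, and None for every
# other (non-tensor) argument.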
return (
wgrad if weight.requires_grad else None,
None,
None,
dgrad.view(ctx.inp_shape) if ctx.requires_dgrad else None,
grad_bias,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)
class Linear(TransformerEngineBaseModule):
"""
Applies a linear transformation to the incoming data :math:`y = xA^T + b`
On NVIDIA GPUs it is a drop-in replacement for `torch.nn.Linear`.
.. warning::
Argument :attr:`skip_weight_param_allocation` is deprecated and will
be fully removed in future releases.
Parameters
----------
in_features : int
size of each input sample.
out_features : int
size of each output sample.
bias : bool, default = `True`
if set to `False`, the layer will not learn an additive bias.
init_method : Callable, default = `None`
used for initializing weights in the following way: `init_method(weight)`.
When set to `None`, defaults to `torch.nn.init.normal_(mean=0.0, std=0.023)`.
parameters_split : Tuple[str, ...], default = None
if a tuple of strings is provided, the weight and bias parameters of the
module are exposed as `N` separate `torch.nn.parameter.Parameter`s each,
split along the first dimension, where `N` is the length of the argument
and the strings contained are the names of the split parameters.
device : Union[torch.device, str], default = "cuda"
The device on which the parameters of the model will be allocated. It is the user's
responsibility to ensure all parameters are moved to the GPU before running the
forward pass.
Parallelism parameters
----------------------
sequence_parallel : bool, default = `False`
if set to `True`, uses sequence parallelism.
tp_group : ProcessGroup, default = `None`
tensor parallel process group.
tp_size : int, default = 1
used as TP (tensor parallel) world size when TP groups are not formed during
initialization. In this case, users must call the
`set_tensor_parallel_group(tp_group)` method on the initialized module before the
forward pass to supply the tensor parallel group needed for tensor and sequence
parallel collectives.
parallel_mode : {None, 'column', 'row'}, default = `None`
used to decide whether this Linear layer is Column Parallel Linear or Row
Parallel Linear as described `here <https://arxiv.org/pdf/1909.08053.pdf>`_.
When set to `None`, no communication is performed.
Optimization parameters
-----------------------
fuse_wgrad_accumulation : bool, default = 'False'
if set to `True`, enables fusing of creation and accumulation of
the weight gradient. When enabled, it is assumed that the weights
have an additional `main_grad` attribute (used instead of the
regular `grad`) which is a pre-allocated buffer of the correct
size to accumulate gradients in.
return_bias : bool, default = `False`
when set to `True`, this module will not apply the additive bias itself, but
instead return the bias value during the forward pass together with the
output of the linear transformation :math:`y = xA^T`. This is useful when
the bias addition can be fused to subsequent operations.
params_dtype : torch.dtype, default = `torch.get_default_dtype()`
it controls the type used to allocate the initial parameters. Useful when
the model is trained with lower precision and the original FP32 parameters
would not fit in GPU memory.
"""
def __init__(
self,
in_features: int,
out_features: int,
sequence_parallel: bool = False,
fuse_wgrad_accumulation: bool = False,
tp_group: Optional[dist_group_type] = None,
tp_size: int = 1,
get_rng_state_tracker: Optional[Callable] = None,
init_method: Optional[Callable] = None,
bias: bool = True,
return_bias: bool = False,
params_dtype: Optional[torch.dtype] = None,
parallel_mode: Optional[str] = None,
skip_weight_param_allocation: bool = False,
parameters_split: Optional[Tuple[str, ...]] = None,
ub_split_rs: bool = False,
ub_split_ag: bool = False,
device: Union[torch.device, str] = "cuda",
) -> None:
super().__init__()
if skip_weight_param_allocation:
warnings.warn(
"Argument `skip_weight_param_allocation` is deprecated and"
"will be fully removed in future releases. It has ignored"
"starting from v0.11.",
category=DeprecationWarning,
)
params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
self.in_features = in_features
self.out_features = out_features
self.fuse_wgrad_accumulation = fuse_wgrad_accumulation
self.use_bias = bias
self.return_bias = return_bias
self.apply_bias = bias and not return_bias
self.parameters_split = parameters_split
self.ub_split_rs = ub_split_rs
self.ub_split_ag = ub_split_ag
if ub_split_rs or ub_split_ag:
assert (
tex.userbuf_comm_available()
), "Userbuffer communication backend not available."
if tp_group is None:
self.tp_size = tp_size
if tp_size == 1:
self.set_tensor_parallel_group(tp_group)
else:
self.tp_size = get_distributed_world_size(tp_group)
self.set_tensor_parallel_group(tp_group)
self.set_nccl_overlap_warning_if_tp()
self.parallel_mode = parallel_mode
assert (
self.parallel_mode in GemmParallelModes
), f"parallel_mode {parallel_mode} not supported"
if self.parallel_mode == "column":
self.out_features = divide(self.out_features, self.tp_size)
elif self.parallel_mode == "row":
self.in_features = divide(self.in_features, self.tp_size)
if init_method is None:
init_method = get_default_init_method()
self.sequence_parallel = (self.tp_size > 1) and sequence_parallel
self.weight_tensor = torch.empty(
self.out_features, self.in_features,
device=device, dtype=params_dtype)
initialize_affine_weight_gpu(
self.weight_tensor,
init_method,
get_rng_state_tracker,
partition_dim=1 if self.parallel_mode == "row" else 0,
stride=1,
)
if self.use_bias:
self.bias_tensor = torch.empty(self.out_features, device=device, dtype=params_dtype)
else:
self.bias_tensor = torch.Tensor().to(dtype=params_dtype, device=device)
with torch.no_grad():
self.bias_tensor.zero_()
if parameters_split is None:
parameters_split = ("",)
assert (
self.out_features % len(parameters_split) == 0
), f"Weight and bias params cannot be split into {len(parameters_split)} parts"
split_size = self.out_features // len(parameters_split)
self.weight_names = []
self.bias_names = []
for i, pname in enumerate(parameters_split):
wname = pname + "weight"
bname = pname + "bias"
self.register_parameter(
wname, Parameter(self.weight_tensor[i * split_size : (i+1) * split_size])
)
set_tensor_model_parallel_attributes(
tensor=getattr(self, wname),
is_parallel=True,
dim=1 if parallel_mode == "row" else 0,
stride=1,
)
if self.use_bias:
self.register_parameter(
bname, Parameter(self.bias_tensor[i * split_size : (i+1) * split_size])
)
else:
setattr(self, bname, torch.Tensor().to(dtype=params_dtype, device=device))
if parallel_mode == "column":
set_tensor_model_parallel_attributes(getattr(self, bname), True, 0, 1)
self.weight_names.append(wname)
self.bias_names.append(bname)
self.fp8_weight_shapes.append(torch.Size((self.out_features, self.in_features)))
# For RPL, bias has to be added after TP collectives
# So it cannot be fused with the GEMM
if self.parallel_mode == "row" and self.apply_bias:
self.gemm_bias_unfused_add = True
else:
self.gemm_bias_unfused_add = False
def get_fp8_weights_scratchpad(
self,
is_first_microbatch: Union[bool, None],
) -> List[torch.Tensor]:
"""
Fetch the fp8 weight tensor placeholders if they exist (when
`is_first_microbatch` is not `None`) or return empty fp8 weight
tensors (if `is_first_microbatch is None`)
"""
if not self.fp8:
return [None, None]
if is_first_microbatch is None:
# Return empty weight placeholders for each fwd/bwd pass
fp8_weight_tensors = self.get_fp8_weights_empty_tensors(
is_first_microbatch
)
else:
# These persistent weight placeholders should've been created in
# `set_fp8_weights` method
fp8_weight_tensors = [self.weight1_fp8, self.weight1_t_fp8]
return fp8_weight_tensors
@no_torch_dynamo
def forward(
self,
inp: torch.Tensor,
weight: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
is_first_microbatch: Optional[bool] = None,
) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
"""
Apply the linear transformation to the input.
.. warning::
Arguments :attr:`weight` and :attr:`bias` are deprecated and will
be fully removed in future releases.
Parameters
----------
inp : torch.Tensor
Input tensor.
is_first_microbatch : {True, False, None}, default = None
During training using either gradient accumulation or
pipeline parallelism a minibatch of data is further split
into microbatches. Between the microbatches of the same minibatch
the model weights are not updated. Setting this parameter indicates
whether the current microbatch is the first in a minibatch or not.
When set, this parameter enables additional optimizations:
* during FP8 training, it allows caching of the FP8 versions of
the weights
* it also allows skipping gradient accumulation during the
first microbatch (since it is the first gradient being
produced)
"""
if weight is not None or bias is not None:
raise RuntimeError(
"Arguments `weight` and `bias` are deprecated and "
"will be fully removed in future releases."
)
with self.prepare_forward(inp, is_first_microbatch) as inp:
bias_tensor = (
self.bias if self.parameters_split is None
else self.bias_tensor if not torch.is_grad_enabled()
else self.noop_cat("bias_tensor", self.bias_names)
)
weight_tensor = (
self.weight if self.parameters_split is None
else self.weight_tensor if not torch.is_grad_enabled()
else self.noop_cat("weight_tensor", self.weight_names)
)
# Fetch the fp8 weights placeholders (for linear/gemm)
weight1_fp8, weight1_t_fp8 = self.get_fp8_weights_scratchpad(
is_first_microbatch
)
if torch.is_grad_enabled():
linear_fn = _Linear.apply
args = []
else:
linear_fn = _Linear.forward
args = [None]
args += (
weight_tensor,
weight1_fp8,
weight1_t_fp8,
inp,
bias_tensor,
self.apply_bias and not self.gemm_bias_unfused_add,
is_first_microbatch,
self.fp8,
self.fp8_calibration,
self.fp8_meta,
self.fuse_wgrad_accumulation,
self.tp_group,
self.tp_size,
self.sequence_parallel,
self.tp_size > 1,
self.activation_dtype,
self.parallel_mode,
torch.is_grad_enabled(),
self.ub_split_rs,
self.ub_split_ag,
)
out = linear_fn(*args)
if self.gemm_bias_unfused_add:
out = out + cast_if_needed(bias_tensor, self.activation_dtype)
if self.return_bias:
return out, cast_if_needed(bias_tensor, self.activation_dtype)
return out
# File: transformer_engine/pytorch/module/linear.py (TransformerEngine-main)
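# ---------------------------------------------------------------------------
# Minimal usage sketch for the Linear module above (illustration only, not part
# of the source file). Assumes a CUDA device; the FP8 path additionally assumes
# FP8-capable hardware and the default DelayedScaling recipe settings.
import torch
import transformer_engine.pytorch as te
from transformer_engine.common import recipe

model = te.Linear(768, 3072, bias=True)          # drop-in for torch.nn.Linear
x = torch.randn(16, 128, 768, device="cuda")     # [batch, seq, hidden]

with te.fp8_autocast(enabled=True, fp8_recipe=recipe.DelayedScaling()):
    y = model(x)                                 # forward runs _Linear.forward
y.sum().backward()                               # backward runs _Linear.backward
# ---------------------------------------------------------------------------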
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Internal function used by multiple modules."""
from typing import Union, Dict, Any
import torch
from .. import cpp_extensions as tex
from ..fp8 import get_fp8_te_dtype
def _get_normalization_func(normalization: str,
fp8_output: bool,
is_grad_enabled: bool,
forward: bool):
fwd_normalization_funcs = {
('LayerNorm', True, True): tex.layernorm_fwd_fp8,
('LayerNorm', True, False): tex.layernorm_fwd_fp8_inf,
('LayerNorm', False, True): tex.layernorm_fwd_noalloc,
('LayerNorm', False, False): tex.layernorm_fwd_inf,
('RMSNorm', True, True): tex.rmsnorm_fwd_fp8,
('RMSNorm', True, False): tex.rmsnorm_fwd_fp8_inf,
('RMSNorm', False, True): tex.rmsnorm_fwd_noalloc,
('RMSNorm', False, False): tex.rmsnorm_fwd_inf,
}
bwd_normalization_funcs = {
'LayerNorm': tex.layernorm_bwd,
'RMSNorm': tex.rmsnorm_bwd,
}
if forward:
return fwd_normalization_funcs[(normalization, fp8_output, is_grad_enabled)]
assert not fp8_output, "FP8 output is not supported in backward normalization!"
assert is_grad_enabled, "Gradient has to be enabled to call backward normalization!"
return bwd_normalization_funcs[normalization]
def _apply_normalization(inputmat:torch.Tensor,
ln_out: torch.Tensor,
ln_weight: torch.Tensor,
ln_bias: Union[torch.Tensor, None],
eps: float,
fp8_out: bool,
fp8_meta: Dict[str, Any],
normalization: str,
fwd_ln_sm_margin: int,
zero_centered_gamma: bool,
is_grad_enabled: bool):
normalization_func = _get_normalization_func(normalization,
fp8_out,
is_grad_enabled,
True)
inputs = (inputmat, ln_weight) if ln_bias is None else (inputmat, ln_weight, ln_bias)
if fp8_out:
fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
if is_grad_enabled:
output_key = "ln_out" if normalization == "LayerNorm" else "rmsnorm_out"
output_kwarg = {output_key: ln_out}
output = normalization_func(
*inputs,
eps,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
fwd_ln_sm_margin,
zero_centered_gamma,
**output_kwarg,
)
else:
return normalization_func(
*inputs,
eps,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
zero_centered_gamma,
), None, None
else:
if is_grad_enabled:
output = normalization_func(
*inputs, ln_out, eps,
fwd_ln_sm_margin, zero_centered_gamma
)
else:
return normalization_func(
*inputs, eps, zero_centered_gamma
), None, None
if normalization == "RMSNorm":
output = (ln_out, None, output[1])
elif normalization == "LayerNorm":
output = (ln_out, output[1], output[2])
return output
# File: transformer_engine/pytorch/module/_common.py (TransformerEngine-main)
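# ---------------------------------------------------------------------------
# Sketch of the dispatch pattern used by _get_normalization_func above, with
# stand-in Python callables instead of the real tex.* kernels (the norm_fwd_*
# names below are hypothetical placeholders, shown only to illustrate how the
# (normalization, fp8_output, is_grad_enabled) key selects a forward kernel).
from typing import Callable, Dict, Tuple

def norm_fwd_fp8_train(x): return x   # stands in for tex.layernorm_fwd_fp8
def norm_fwd_fp8_infer(x): return x   # stands in for tex.layernorm_fwd_fp8_inf

_FWD_FUNCS: Dict[Tuple[str, bool, bool], Callable] = {
    ("LayerNorm", True, True): norm_fwd_fp8_train,
    ("LayerNorm", True, False): norm_fwd_fp8_infer,
}

def pick_forward(normalization: str, fp8_output: bool, is_grad_enabled: bool) -> Callable:
    # A missing key corresponds to an unsupported combination in the real table.
    return _FWD_FUNCS[(normalization, fp8_output, is_grad_enabled)]
# ---------------------------------------------------------------------------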
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Module level PyTorch APIs"""
from .layernorm_linear import LayerNormLinear
from .linear import Linear
from .layernorm_mlp import LayerNormMLP
from .layernorm import LayerNorm
from .rmsnorm import RMSNorm
# File: transformer_engine/pytorch/module/__init__.py (TransformerEngine-main)
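# ---------------------------------------------------------------------------
# Sketch of the public API re-exported by the __init__ module above; only the
# import names come from that file, the constructor arguments are illustrative.
from transformer_engine.pytorch.module import (
    Linear,
    LayerNormLinear,
    LayerNormMLP,
    LayerNorm,
    RMSNorm,
)

proj = Linear(1024, 1024)               # plain linear layer
qkv = LayerNormLinear(1024, 3 * 1024)   # fused LayerNorm + linear projection
# ---------------------------------------------------------------------------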
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""LayerNormLinear API"""
import os
import warnings
from typing import Union, Optional, Callable, Tuple, List, Dict, Any
import torch
from torch.nn.parameter import Parameter
from torch.nn import init
from .. import cpp_extensions as tex
from .base import (
get_workspace,
_prepare_backward,
get_ub,
TransformerEngineBaseModule,
_2X_ACC_FPROP,
_2X_ACC_DGRAD,
_2X_ACC_WGRAD,
)
from ..fp8 import get_fp8_te_dtype
from ..utils import (
divide,
get_default_init_method,
cast_if_needed,
assert_dim_for_fp8_exec,
)
from ..distributed import (
set_tensor_model_parallel_attributes,
get_distributed_world_size,
allreduce,
initialize_affine_weight_gpu,
reduce_scatter_along_first_dim,
gather_along_first_dim,
)
from ..constants import GemmParallelModes, dist_group_type, TE_DType
from ..jit import no_torch_dynamo
from ._common import _apply_normalization
__all__ = ["LayerNormLinear"]
class _LayerNormLinear(torch.autograd.Function):
"""LayerNormLinear semi-top level module
Calls custom cuda extensions.
"""
@staticmethod
def forward(
ctx,
inp: torch.Tensor,
ln_weight: torch.Tensor,
ln_bias: Union[torch.Tensor, None],
weight: torch.Tensor,
weight_fp8: Union[torch.Tensor, None],
weight_t_fp8: Union[torch.Tensor, None],
bias: torch.Tensor,
use_bias: bool,
eps: float,
is_first_microbatch: Union[bool, None],
fp8: bool,
fp8_calibration: bool,
fp8_meta: Dict[str, Any],
fuse_wgrad_accumulation: bool,
tp_group: Union[dist_group_type, None],
tp_size: int,
sequence_parallel: bool,
tensor_parallel: bool,
activation_dtype: torch.dtype,
parallel_mode: Union[str, None],
return_layernorm_output: bool,
is_grad_enabled: bool,
fwd_ln_sm_margin: int,
bwd_ln_sm_margin: int,
zero_centered_gamma: bool,
ub_bulk_wgrad: bool,
ub_bulk_dgrad: bool,
ub_split_ag: bool,
normalization: str,
) -> Union[Tuple[torch.Tensor, ...], torch.Tensor]:
# Make sure input dimensions are compatible
in_features = ln_weight.numel()
assert inp.shape[-1] == in_features, "GEMM not possible"
inputmat = inp.view((-1, in_features))
if fp8:
assert_dim_for_fp8_exec(inputmat)
assert_dim_for_fp8_exec(weight)
update_fp8_weights = is_first_microbatch is None or is_first_microbatch
# Cast for native AMP
inputmat = cast_if_needed(inputmat, activation_dtype)
ln_weight = cast_if_needed(ln_weight, activation_dtype)
if ln_bias is not None:
ln_bias = cast_if_needed(ln_bias, activation_dtype)
if ub_split_ag:
tp_world_size = get_distributed_world_size(tp_group)
if tp_world_size == 1 or (not is_grad_enabled) or return_layernorm_output:
ub_split_ag = False
if ub_split_ag:
dim_size = list(inputmat.size())
dim_size[0] = dim_size[0] * tp_world_size
ub_obj_lnout = get_ub("qkv_fprop")
ln_out = ub_obj_lnout.get_ubuf_output(0)
else:
ln_out_dtype = torch.uint8 if fp8 else inputmat.dtype
ln_out = torch.empty_like(inputmat, dtype=ln_out_dtype)
fp8_dtype_forward = get_fp8_te_dtype(fp8_meta["recipe"], fprop_tensor=True)
ln_out, mu, rsigma = _apply_normalization(inputmat,
ln_out,
ln_weight,
ln_bias,
eps,
fp8 and not return_layernorm_output,
fp8_meta,
normalization,
fwd_ln_sm_margin,
zero_centered_gamma,
is_grad_enabled)
# If residual connection is after LN, we need `ln_out_return`
# tensor in higher precision, this comes at the cost
# of an extra fp8 cast.
if return_layernorm_output:
ln_out_return = ln_out
if fp8:
ln_out = tex.cast_to_fp8(
ln_out,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
)
# Column Parallel Linear
if ub_split_ag:
ln_out_total = ub_obj_lnout.get_ubuf_output(1)
ln_out = torch.empty_like(ln_out)
elif parallel_mode == "column" and sequence_parallel:
ln_out_total, _ = gather_along_first_dim(ln_out, tp_group)
else:
ln_out_total = ln_out
if fp8:
bias_dtype = (
torch.bfloat16
if activation_dtype == torch.float32
else activation_dtype
)
bias = cast_if_needed(bias, bias_dtype) if use_bias else bias
if update_fp8_weights:
if is_grad_enabled:
tex.fp8_cast_transpose_fused(
weight,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
cast_out=weight_fp8,
transpose_out=weight_t_fp8,
)
else:
weight_t_fp8 = None
weight_fp8 = tex.cast_to_fp8(
weight,
fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward)
out = tex.fp8_gemm(
weight_fp8,
fp8_meta["scaling_fwd"].scale_inv,
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
ln_out_total,
fp8_meta["scaling_fwd"].scale_inv,
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
activation_dtype,
get_workspace(),
bias=bias,
use_bias=use_bias,
use_split_accumulator=_2X_ACC_FPROP,
ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG if ub_split_ag else None,
ub=ub_obj_lnout if ub_split_ag else None,
extra_output_tensor=ln_out if ub_split_ag else None,
)
else:
# Cast for native AMP
weight = cast_if_needed(weight, activation_dtype)
bias = cast_if_needed(bias, activation_dtype) if use_bias else bias
if fp8_calibration:
# amax of input
fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_INPUT] = \
torch.amax(ln_out_total).float()
# amax of weight
fp8_meta["scaling_fwd"].amax_history[0][tex.FP8FwdTensors.GEMM1_WEIGHT] = \
torch.amax(weight).float()
out, _, _ = tex.gemm(
weight,
ln_out_total,
activation_dtype,
get_workspace(),
bias=bias,
use_bias=use_bias,
ub_algo=tex.UbufOverlapAlgo.SPLIT_PIPELINED_AG if ub_split_ag else None,
ub=ub_obj_lnout if ub_split_ag else None,
extra_output_tensor=ln_out if ub_split_ag else None,
)
if is_grad_enabled:
ctx.save_for_backward(
inputmat,
ln_weight,
mu,
rsigma,
weight,
weight_t_fp8,
ln_out,
fp8_meta["scaling_fwd"].scale_inv.clone() if fp8 else None,
)
ctx.activation_dtype = activation_dtype
ctx.fp8 = fp8
ctx.fp8_meta = fp8_meta
ctx.fuse_wgrad_accumulation = fuse_wgrad_accumulation
ctx.is_first_microbatch = is_first_microbatch
ctx.use_bias = use_bias
ctx.sequence_parallel = sequence_parallel
ctx.tensor_parallel = tensor_parallel
ctx.inp_shape = inp.shape
ctx.parallel_mode = parallel_mode
ctx.tp_group = tp_group
ctx.tp_size = tp_size
ctx.return_layernorm_output = return_layernorm_output
ctx.bwd_ln_sm_margin = bwd_ln_sm_margin
ctx.zero_centered_gamma = zero_centered_gamma
ctx.ub_bulk_wgrad = ub_bulk_wgrad
ctx.ub_bulk_dgrad = ub_bulk_dgrad
ctx.requires_dgrad = inp.requires_grad
ctx.normalization = normalization
# Row Parallel Linear
if parallel_mode == "row" and sequence_parallel:
out, _ = reduce_scatter_along_first_dim(out, tp_group)
elif parallel_mode == "row" and tensor_parallel:
out, _ = allreduce(out, tp_group)
# [*, in_features] -> [*, out_features] except first dimension changes for SP
out = out.view(-1, *inp.shape[1:-1], out.shape[-1])
if return_layernorm_output:
return out, ln_out_return.view_as(inp)
return out
@staticmethod
def backward(
ctx, *grad_outputs: Tuple[torch.Tensor, ...]
) -> Tuple[Union[torch.Tensor, None], ...]:
with _prepare_backward(
ctx.fp8, ctx.fp8_meta, ctx.tp_group, ctx.tp_size, name="_LayerNormLinear"
):
(
inputmat,
ln_weight,
mu,
rsigma,
weight,
weight_t_fp8,
ln_out,
fwd_scale_inverses,
) = ctx.saved_tensors
if ctx.ub_bulk_dgrad:
tp_world_size = get_distributed_world_size(ctx.tp_group)
if tp_world_size == 1:
ctx.ub_bulk_dgrad = False
if ctx.ub_bulk_dgrad:
dim_size = list(ln_out.size())
dim_size[0] = dim_size[0] * tp_world_size
ub_obj_lnout = get_ub("qkv_dgrad")
ub_obj_lnout.copy_input_to_ubuf(ln_out, 1)
(
grad_output,
grad_output_c,
grad_output_t,
grad_bias,
) = TransformerEngineBaseModule.grad_output_preprocess(
ctx, grad_outputs[0], ctx.parallel_mode == "row"
)
if ctx.ub_bulk_wgrad:
tp_world_size = get_distributed_world_size(ctx.tp_group)
if tp_world_size == 1:
ctx.ub_bulk_wgrad = False
# Column Parallel Linear
# Overlap input AG with dgrad
if (weight.requires_grad
and (not ctx.ub_bulk_dgrad)
and ctx.parallel_mode == "column"
and ctx.sequence_parallel):
ln_out_total, handle = gather_along_first_dim(
ln_out, ctx.tp_group, async_op=True
)
else:
ln_out_total = ln_out
handle = None
if ctx.is_first_microbatch is not None:
accumulate_wgrad_into_param_main_grad = (
ctx.fuse_wgrad_accumulation and not ctx.is_first_microbatch
)
else:
accumulate_wgrad_into_param_main_grad = ctx.fuse_wgrad_accumulation
dgrad_size = list(grad_output.size())
dgrad_size[1] = weight.size(1)
if ctx.ub_bulk_wgrad: # allocate dgrad output
ub_obj_dgrad = get_ub("qkv_wgrad")
dgrad = ub_obj_dgrad.get_ubuf_output(1) # AllGather output
else:
dgrad = torch.empty(dgrad_size, dtype=ctx.activation_dtype, device=weight.device)
if ctx.fp8:
fp8_dtype_forward = get_fp8_te_dtype(
ctx.fp8_meta["recipe"], fprop_tensor=True
)
fp8_dtype_backward = get_fp8_te_dtype(
ctx.fp8_meta["recipe"], fprop_tensor=False
)
# DGRAD: Evaluated unconditionally to feed into Linear backward
_ = tex.fp8_gemm(
weight_t_fp8,
fwd_scale_inverses,
tex.FP8FwdTensors.GEMM1_WEIGHT,
fp8_dtype_forward,
grad_output_c,
ctx.fp8_meta["scaling_bwd"].scale_inv,
tex.FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
ctx.activation_dtype,
get_workspace(),
out=dgrad,
use_split_accumulator=_2X_ACC_DGRAD,
ub_algo=tex.UbufOverlapAlgo.BULK_OVERLAP_AG if ctx.ub_bulk_dgrad else None,
ub=ub_obj_lnout if ctx.ub_bulk_dgrad else None
)
else:
# DGRAD: Evaluated unconditionally to feed into Linear backward
_, _, _ = tex.gemm(
weight,
grad_output,
ctx.activation_dtype,
get_workspace(),
out=dgrad,
layout="NN",
grad=True,
ub_algo=tex.UbufOverlapAlgo.BULK_OVERLAP_AG if ctx.ub_bulk_dgrad else None,
ub=ub_obj_lnout if ctx.ub_bulk_dgrad else None
)
if ctx.ub_bulk_dgrad:
ln_out_total = ub_obj_lnout.get_ubuf_output(1)
# Overlap dgrad-RS/AR with wgrad
if ctx.parallel_mode == "column" and ctx.sequence_parallel:
if not ctx.ub_bulk_dgrad and handle is not None:
handle.wait()
if not ctx.ub_bulk_wgrad:
dgrad, handle = reduce_scatter_along_first_dim(
dgrad, ctx.tp_group, async_op=True
)
elif ctx.parallel_mode == "column" and ctx.tensor_parallel:
dgrad, handle = allreduce(dgrad, ctx.tp_group, async_op=True)
if weight.requires_grad:
if ctx.fp8:
# WGRAD
if not ctx.fp8_meta["recipe"].override_linear_precision.wgrad:
ln_out_total_t = tex.fp8_transpose(ln_out_total, fp8_dtype_forward)
wgrad = tex.fp8_gemm(
ln_out_total_t,
fwd_scale_inverses,
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
grad_output_t,
ctx.fp8_meta["scaling_bwd"].scale_inv,
tex.FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
ctx.activation_dtype,
get_workspace(),
accumulate=accumulate_wgrad_into_param_main_grad,
out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
use_split_accumulator=_2X_ACC_WGRAD,
ub_algo=tex.UbufOverlapAlgo.BULK_OVERLAP_RS
if ctx.ub_bulk_wgrad else None,
ub=ub_obj_dgrad if ctx.ub_bulk_wgrad else None
)
else:
ln_out_total_c = tex.cast_from_fp8(
ln_out_total,
ctx.fp8_meta["scaling_fwd"],
tex.FP8FwdTensors.GEMM1_INPUT,
fp8_dtype_forward,
TE_DType[ctx.activation_dtype],
)
wgrad, _, _ = tex.gemm(
ln_out_total_c,
grad_output,
ctx.activation_dtype,
get_workspace(),
layout="NT",
grad=True,
accumulate=accumulate_wgrad_into_param_main_grad,
out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
ub_algo=tex.UbufOverlapAlgo.BULK_OVERLAP_RS
if ctx.ub_bulk_wgrad else None,
ub=ub_obj_dgrad if ctx.ub_bulk_wgrad else None
)
else:
# WGRAD
wgrad, grad_bias, _ = tex.gemm(
ln_out_total,
grad_output,
ctx.activation_dtype,
get_workspace(),
layout="NT",
grad=True,
use_bias=ctx.use_bias,
accumulate=accumulate_wgrad_into_param_main_grad,
out=weight.main_grad if ctx.fuse_wgrad_accumulation else None,
ub_algo=tex.UbufOverlapAlgo.BULK_OVERLAP_RS if ctx.ub_bulk_wgrad else None,
ub=ub_obj_dgrad if ctx.ub_bulk_wgrad else None
)
if ctx.ub_bulk_wgrad:
dgrad = ub_obj_dgrad.get_ubuf_output(0) # Reduce-scatter output
# Column Parallel Linear
elif ctx.parallel_mode == "column" and ctx.tensor_parallel and handle is not None:
handle.wait()
# LayerNorm gradient
d_ln_out = dgrad.view(inputmat.shape)
# Residual gradient
if ctx.return_layernorm_output:
d_ln_out = d_ln_out + grad_outputs[1].view_as(d_ln_out)
if ctx.normalization == "LayerNorm":
dxmat, dgamma, dbeta = tex.layernorm_bwd(
d_ln_out, inputmat, mu, rsigma, ln_weight,
ctx.bwd_ln_sm_margin, ctx.zero_centered_gamma
)
elif ctx.normalization == "RMSNorm":
dxmat, dgamma = tex.rmsnorm_bwd(
d_ln_out, inputmat, rsigma, ln_weight,
ctx.bwd_ln_sm_margin, ctx.zero_centered_gamma
)
dbeta = None
if not ctx.use_bias:
grad_bias = None
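# One gradient entry per argument of forward() (29 in total): dgrad w.r.t. the
# input, dgamma/dbeta for the normalization parameters, wgrad and grad_bias for
# the GEMM, and None for every other (non-tensor) argument.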
return (
dxmat.view(ctx.inp_shape) if ctx.requires_dgrad else None,
dgamma,
dbeta,
wgrad if weight.requires_grad else None,
None,
None,
grad_bias,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)
class LayerNormLinear(TransformerEngineBaseModule):
r"""
Applies layer normalization followed by linear transformation to the incoming data.
.. warning::
Argument :attr:`skip_weight_param_allocation` is deprecated and will
be fully removed in future releases.
Parameters
----------
in_features : int
size of each input sample.
out_features : int
size of each output sample.
eps : float, default = 1e-5
a value added to the denominator of layer normalization for numerical stability.
bias : bool, default = `True`
if set to `False`, the layer will not learn an additive bias.
normalization : { 'LayerNorm', 'RMSNorm' }, default = 'LayerNorm'
type of normalization applied.
init_method : Callable, default = `None`
used for initializing weights in the following way: `init_method(weight)`.
When set to `None`, defaults to `torch.nn.init.normal_(mean=0.0, std=0.023)`.
return_layernorm_output : bool, default = `False`
if set to `True`, output of layernorm is returned from the forward
together with the output of the linear transformation.
Example use case: residual connection for transformer module is
taken post layernorm.
parameters_split : Tuple[str, ...], default = None
if a tuple of strings is provided, the weight and bias parameters of the
module are exposed as `N` separate `torch.nn.parameter.Parameter`s each,
split along the first dimension, where `N` is the length of the argument
and the strings contained are the names of the split parameters.
zero_centered_gamma : bool, default = 'False'
if set to 'True', gamma parameter in LayerNorm is initialized to 0 and
the LayerNorm formula changes to
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \varepsilon}} *
(1 + \gamma) + \beta
device : Union[torch.device, str], default = "cuda"
The device on which the parameters of the model will be allocated. It is the user's
responsibility to ensure all parameters are moved to the GPU before running the
forward pass.
Parallelism parameters
----------------------
sequence_parallel : bool, default = `False`
if set to `True`, uses sequence parallelism.
tp_group : ProcessGroup, default = `None`
tensor parallel process group.
tp_size : int, default = 1
used as TP (tensor parallel) world size when TP groups are not formed during
initialization. In this case, users must call the
`set_tensor_parallel_group(tp_group)` method on the initialized module before the
forward pass to supply the tensor parallel group needed for tensor and sequence
parallel collectives.
parallel_mode : {None, 'column', 'row'}, default = `None`
used to decide whether this Linear layer is Column Parallel Linear or Row
Parallel Linear as described `here <https://arxiv.org/pdf/1909.08053.pdf>`_.
When set to `None`, no communication is performed.
Optimization parameters
-----------------------
fuse_wgrad_accumulation : bool, default = 'False'
if set to `True`, enables fusing of creation and accumulation of
the weight gradient. When enabled, it is assumed that the weights
have an additional `main_grad` attribute (used instead of the
regular `grad`) which is a pre-allocated buffer of the correct
size to accumulate gradients in.
return_bias : bool, default = `False`
when set to `True`, this module will not apply the additive bias itself, but
instead return the bias value during the forward pass together with the
output of the linear transformation :math:`y = xA^T`. This is useful when
the bias addition can be fused to subsequent operations.
params_dtype : torch.dtype, default = `torch.get_default_dtype()`
it controls the type used to allocate the initial parameters. Useful when
the model is trained with lower precision and the original FP32 parameters
would not fit in GPU memory.
"""
def __init__(
self,
in_features: int,
out_features: int,
eps: float = 1e-5,
sequence_parallel: bool = False,
fuse_wgrad_accumulation: bool = False,
tp_group: Optional[dist_group_type] = None,
tp_size: int = 1,
get_rng_state_tracker: Optional[Callable] = None,
init_method: Optional[Callable] = None,
bias: bool = True,
normalization: str = 'LayerNorm',
return_bias: bool = False,
params_dtype: Optional[torch.dtype] = None,
parallel_mode: Optional[str] = None,
return_layernorm_output: bool = False,
skip_weight_param_allocation: bool = False,
parameters_split: Optional[Tuple[str, ...]] = None,
zero_centered_gamma: bool = False,
ub_bulk_wgrad: bool = False,
ub_bulk_dgrad: bool = False,
ub_split_ag: bool = False,
device: Union[torch.device, str] = "cuda",
) -> None:
super().__init__()
if skip_weight_param_allocation:
warnings.warn(
"Argument `skip_weight_param_allocation` is deprecated and"
"will be fully removed in future releases. It is ignored"
"starting from v0.11.",
category=DeprecationWarning,
)
params_dtype = torch.get_default_dtype() if params_dtype is None else params_dtype
self.in_features = in_features
self.out_features = out_features
self.fuse_wgrad_accumulation = fuse_wgrad_accumulation
self.normalization = normalization
assert normalization in ['LayerNorm', 'RMSNorm'], "Unsupported normalization type!"
self.use_bias = bias
self.return_bias = return_bias
self.apply_bias = self.use_bias and not return_bias
self.return_layernorm_output = return_layernorm_output
self.parameters_split = parameters_split
self.zero_centered_gamma = zero_centered_gamma
self.ub_bulk_wgrad = ub_bulk_wgrad
self.ub_bulk_dgrad = ub_bulk_dgrad
self.ub_split_ag = ub_split_ag
if ub_bulk_wgrad or ub_bulk_dgrad or ub_split_ag:
assert (
tex.userbuf_comm_available()
), "Userbuffer communication backend not available."
if tp_group is None:
self.tp_size = tp_size
if tp_size == 1:
self.set_tensor_parallel_group(tp_group)
else:
self.tp_size = get_distributed_world_size(tp_group)
self.set_tensor_parallel_group(tp_group)
self.set_nccl_overlap_warning_if_tp()
self.parallel_mode = parallel_mode
assert (
self.parallel_mode in GemmParallelModes
), f"parallel_mode {parallel_mode} not supported"
if self.parallel_mode == "column":
self.out_features = divide(self.out_features, self.tp_size)
elif self.parallel_mode == "row":
self.in_features = divide(self.in_features, self.tp_size)
if init_method is None:
init_method = get_default_init_method()
self.sequence_parallel = (self.tp_size > 1) and sequence_parallel
self.eps = eps
self.layer_norm_weight = Parameter(
torch.empty(in_features, device=device, dtype=params_dtype)
)
setattr(self.layer_norm_weight, "sequence_parallel", self.sequence_parallel)
if self.normalization != "RMSNorm":
self.layer_norm_bias = Parameter(
torch.empty(in_features, device=device, dtype=params_dtype)
)
setattr(self.layer_norm_bias, "sequence_parallel", self.sequence_parallel)
else:
self.layer_norm_bias = None
self.reset_layer_norm_parameters()
self.weight_tensor = torch.empty(
self.out_features, self.in_features,
device=device, dtype=params_dtype)
initialize_affine_weight_gpu(
self.weight_tensor,
init_method,
get_rng_state_tracker,
partition_dim=1 if self.parallel_mode == "row" else 0,
stride=1,
)
if self.use_bias:
self.bias_tensor = torch.empty(
self.out_features,
device=device,
dtype=params_dtype)
else:
self.bias_tensor = torch.Tensor().to(dtype=params_dtype, device=device)
with torch.no_grad():
self.bias_tensor.zero_()
if parameters_split is None:
parameters_split = ("",)
assert (
self.out_features % len(parameters_split) == 0
), f"Weight and bias params cannot be split into {len(parameters_split)} parts"
split_size = self.out_features // len(parameters_split)
self.weight_names = []
self.bias_names = []
for i, pname in enumerate(parameters_split):
wname = pname + "weight"
bname = pname + "bias"
self.register_parameter(
wname, Parameter(self.weight_tensor[i * split_size : (i+1) * split_size])
)
set_tensor_model_parallel_attributes(
tensor=getattr(self, wname),
is_parallel=True,
dim=1 if parallel_mode == "row" else 0,
stride=1,
)
if self.use_bias:
self.register_parameter(
bname, Parameter(self.bias_tensor[i * split_size : (i+1) * split_size])
)
else:
setattr(self, bname, torch.Tensor().to(dtype=params_dtype, device=device))
if parallel_mode == "column":
set_tensor_model_parallel_attributes(getattr(self, bname), True, 0, 1)
self.weight_names.append(wname)
self.bias_names.append(bname)
self.fp8_weight_shapes.append(torch.Size((self.out_features, self.in_features)))
# For RPL, bias has to be added after TP collectives
# So it cannot be fused with the GEMM
if self.parallel_mode == "row" and self.apply_bias:
self.gemm_bias_unfused_add = True
else:
self.gemm_bias_unfused_add = False
# These many SMs are subtracted from the total SM count when calling forward
# and backward LayerNorm C APIs. These envvars can be used to prevent the LN
# kernels from using all SMs in the device. This is useful for cases such as
# communication overlap with LN.
self.fwd_ln_sm_margin = int(os.getenv("NVTE_FWD_LAYERNORM_SM_MARGIN", "0"))
self.bwd_ln_sm_margin = int(os.getenv("NVTE_BWD_LAYERNORM_SM_MARGIN", "0"))
def reset_layer_norm_parameters(self) -> None:
"""Init LN params"""
if not self.zero_centered_gamma:
init.ones_(self.layer_norm_weight)
else:
init.zeros_(self.layer_norm_weight)
if self.layer_norm_bias is not None:
init.zeros_(self.layer_norm_bias)
def get_fp8_weights_scratchpad(
self,
is_first_microbatch: Union[bool, None],
) -> List[torch.Tensor]:
"""
Fetch the fp8 weight tensor placeholders if they exist (when
`is_first_microbatch` is not `None`) or return empty fp8 weight
tensors (if `is_first_microbatch is None`)
"""
if not self.fp8:
return [None, None]
if is_first_microbatch is None:
# Return empty weight placeholders for each fwd/bwd pass
fp8_weight_tensors = self.get_fp8_weights_empty_tensors(
is_first_microbatch
)
else:
# These persistent weight placeholders should've been created in
# `set_fp8_weights` method
fp8_weight_tensors = [self.weight1_fp8, self.weight1_t_fp8]
return fp8_weight_tensors
@no_torch_dynamo
def forward(
self,
inp: torch.Tensor,
weight: Optional[torch.Tensor] = None,
bias: Optional[torch.Tensor] = None,
is_first_microbatch: Optional[bool] = None,
) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
"""
Apply layer normalization to the input followed by a linear transformation.
.. warning::
Arguments :attr:`weight` and :attr:`bias` are deprecated and will
be fully removed in future releases.
Parameters
----------
inp : torch.Tensor
Input tensor.
is_first_microbatch : {True, False, None}, default = None
During training using either gradient accumulation or
pipeline parallelism a minibatch of data is further split
into microbatches. Between the microbatches of the same minibatch
the model weights are not updated. Setting this parameter indicates
whether the current microbatch is the first in a minibatch or not.
When set, this parameter enables additional optimizations:
* during FP8 training, it allows caching of the FP8 versions of
the weights
* it also allows skipping gradient accumulation during the
first microbatch (since it is the first gradient being
produced)
"""
if weight is not None or bias is not None:
raise RuntimeError(
"Arguments `weight` and `bias` are deprecated and "
"will be fully removed in future releases."
)
with self.prepare_forward(inp, is_first_microbatch) as inp:
bias_tensor = (
self.bias if self.parameters_split is None
else self.bias_tensor if not torch.is_grad_enabled()
else self.noop_cat("bias_tensor", self.bias_names)
)
weight_tensor = (
self.weight if self.parameters_split is None
else self.weight_tensor if not torch.is_grad_enabled()
else self.noop_cat("weight_tensor", self.weight_names)
)
# Fetch the fp8 weights placeholders (for linear/gemm)
weight1_fp8, weight1_t_fp8 = self.get_fp8_weights_scratchpad(
is_first_microbatch
)
if torch.is_grad_enabled():
fwd_fn = _LayerNormLinear.apply
args = []
else:
fwd_fn = _LayerNormLinear.forward
args = [None]
args += (
inp,
self.layer_norm_weight,
self.layer_norm_bias,
weight_tensor,
weight1_fp8,
weight1_t_fp8,
bias_tensor,
self.apply_bias and not self.gemm_bias_unfused_add,
self.eps,
is_first_microbatch,
self.fp8,
self.fp8_calibration,
self.fp8_meta,
self.fuse_wgrad_accumulation,
self.tp_group,
self.tp_size,
self.sequence_parallel,
self.tp_size > 1,
self.activation_dtype,
self.parallel_mode,
self.return_layernorm_output,
torch.is_grad_enabled(),
self.fwd_ln_sm_margin,
self.bwd_ln_sm_margin,
self.zero_centered_gamma,
self.ub_bulk_wgrad,
self.ub_bulk_dgrad,
self.ub_split_ag,
self.normalization,
)
out = fwd_fn(*args)
if self.return_layernorm_output:
out, ln_out = out
if self.gemm_bias_unfused_add:
out = out + cast_if_needed(bias_tensor, self.activation_dtype)
if self.return_bias:
if self.return_layernorm_output:
return out, cast_if_needed(bias_tensor, self.activation_dtype), ln_out
return out, cast_if_needed(bias_tensor, self.activation_dtype)
if self.return_layernorm_output:
return out, ln_out
return out
# File: transformer_engine/pytorch/module/layernorm_linear.py (TransformerEngine-main)
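# ---------------------------------------------------------------------------
# Minimal usage sketch for the LayerNormLinear module above (illustration only,
# not part of the source file). Assumes a CUDA device; the RMSNorm and
# return_layernorm_output choices are illustrative, not required.
import torch
import transformer_engine.pytorch as te

layer = te.LayerNormLinear(
    1024, 3072,
    normalization="RMSNorm",        # or the default "LayerNorm"
    return_layernorm_output=True,   # also return the normalized input
)
x = torch.randn(8, 512, 1024, device="cuda")
out, ln_out = layer(x)              # ln_out can feed a post-norm residual branch
out.sum().backward()
# ---------------------------------------------------------------------------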
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Base modules and utilities for TransformerEngine PyTorch API"""
import io
import os
import pickle
import warnings
from abc import ABC, abstractmethod
from typing import Generator, Union, Optional, Tuple, Dict, Any, List
from functools import partial
from contextlib import contextmanager
import torch
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import transformer_engine_extensions as tex
from ..export import is_in_onnx_export_mode
from ..fp8 import (
get_default_fp8_recipe,
get_fp8_te_dtype,
FP8GlobalStateManager,
amax_and_scale_update,
)
from ..distributed import (
gather_along_first_dim,
is_fp8_activation_recompute_enabled,
in_fp8_activation_recompute_phase,
)
from ..cpp_extensions import (
fp8_cast_transpose_fused,
fp8_cast_transpose_bgrad_fused,
cast_to_fp8,
)
from ..constants import dist_group_type
_2X_ACC_FPROP = False
_2X_ACC_DGRAD = True
_2X_ACC_WGRAD = True
_cublas_workspace = None
_ub_communicators = None
_NUM_MAX_UB_STREAMS = 3
_amax_reduce_handle_bwd = None
def get_cublas_workspace_size_bytes() -> int:
"""Return 32 MiB if using hopper, 4 MiB for all other architectures."""
if torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 9:
return 33_554_432
return 4_194_304
def get_workspace() -> torch.Tensor:
"""Returns workspace for cublas."""
global _cublas_workspace
if _cublas_workspace is None:
_cublas_workspace = torch.empty(
get_cublas_workspace_size_bytes(), dtype=torch.uint8, device="cuda"
)
return _cublas_workspace
@contextmanager
def _prepare_backward(
fp8: bool,
fp8_meta: Dict[str, Any],
tp_group: dist_group_type,
tp_size: int,
name: str = ""
) -> Generator[None, None, None]:
"""Checks and prep for BWD."""
if fp8:
global _amax_reduce_handle_bwd
if _amax_reduce_handle_bwd is not None:
_amax_reduce_handle_bwd.wait()
_amax_reduce_handle_bwd = None
# Update amax and scale; Skip all setup for global amax reduction
if not fp8_meta["recipe"].reduce_amax:
amax_and_scale_update(fp8_meta, False)
else:
# From previous iteration
FP8GlobalStateManager.copy_amax_from_global_buffer(fp8_meta, forward=False)
amax_and_scale_update(fp8_meta, False)
FP8GlobalStateManager.set_amax_buffer_key_deletion(fp8_meta, forward=False)
# Get new backward key.
fp8_meta["autocast_id_bwd"] = fp8_meta["autocast_id_fwd_stack"].pop(0)
FP8GlobalStateManager.add_amax_to_global_buffer(fp8_meta, forward=False)
with torch.cuda.nvtx.range(name + " backward"):
yield
if fp8 and fp8_meta["recipe"].reduce_amax:
if fp8_meta["first_module"]:
_amax_reduce_handle_bwd = FP8GlobalStateManager.global_amax_reduction(
fp8_meta,
tp_group,
tp_size,
forward=False
)
FP8GlobalStateManager.delete_key_from_amax_buffer(forward=False)
def initialize_ub(
shape: list,
tp_size: int,
use_fp8: bool = False,
ub_cfgs: Optional[dict] = None
) -> None:
"""Initialize communicators for TP comm overlap using userbuffers."""
global _ub_communicators
assert _ub_communicators is None, "UB communicators are already initialized."
_ub_communicators = {}
rank_id = torch.distributed.get_rank()
# Increase the workspace by the number of maximum concurrent streams
global _cublas_workspace
_cublas_workspace = get_workspace().repeat(_NUM_MAX_UB_STREAMS)
# Default buffer precision: AllGather buffers use fp8 when using fp8 recipe
fp8_buf = [
"qkv_fprop", "qkv_dgrad", "proj_dgrad", "fc1_fprop", "fc1_dgrad", "fc2_dgrad"
]
# Default overlap methods for layers
methods = {
"ring_exchange":["qkv_fprop", "fc1_fprop", "proj_dgrad", "fc2_dgrad"],
"pipeline":["proj_fprop", "fc2_fprop"],
"bulk":["qkv_dgrad", "qkv_wgrad", "fc1_dgrad", "fc1_wgrad"],
}
def get_method(name):
for method, names in methods.items():
if name in names:
return method
raise KeyError(f"Given layer name {name} does not exist.")
def add_ub(
name: str,
method: str,
num_sm: int = 16,
cga_size: int = 2,
set_sm_margin: int = 0,
num_splits: int = 4,
aggregate: int = 0,
) -> None:
dtype = torch.uint8 if (use_fp8 and name in fp8_buf) else torch.bfloat16
sample_buffer = torch.empty(shape, dtype=dtype, device='cuda')
if method == 'ring_exchange':
ub_obj = tex.UbufP2PCommOverlap(
sample_buffer, # Sample userbuffer
rank_id, # Rank id
tp_size, # TP size
aggregate, # Aggregate 2X GEMM chunks
_NUM_MAX_UB_STREAMS, # Max concurrent GEMM streams
)
else:
ub_obj = tex.UbufCommOverlap(
sample_buffer, # Sample userbuffer
rank_id, # Rank id
tp_size, # TP size
num_sm, # Number of communication SMs
cga_size, # CGA cluster size
num_splits, # Number of communication splits
set_sm_margin, # Set SM margin
_NUM_MAX_UB_STREAMS, # Max concurrent GEMM streams
)
_ub_communicators[name] = ub_obj
for name in (methods["ring_exchange"]+methods["pipeline"]+methods["bulk"]):
if ub_cfgs is not None and name in ub_cfgs:
ub_cfg = ub_cfgs[name]
method = ub_cfg["method"] if "method" in ub_cfg else get_method(name)
num_sm = ub_cfg["num_sm"] if "num_sm" in ub_cfg else 16
cga_size = ub_cfg["cga_size"] if "cga_size" in ub_cfg else 2
num_splits = ub_cfg["num_splits"] if "num_splits" in ub_cfg else 0
set_sm_margin = ub_cfg["set_sm_margin"] if "set_sm_margin" in ub_cfg else 0
aggregate = ub_cfg["aggregate"] if "aggregate" in ub_cfg else 0
add_ub(
name,
method,
num_sm,
cga_size,
set_sm_margin,
num_splits,
aggregate
)
else:
method = get_method(name)
if method == "pipeline":
add_ub(name, method)
else:
add_ub(name, method, num_splits=0)
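# Illustrative only: a sketch of how initialize_ub() above might be called with a
# custom `ub_cfgs` override. The helper name and the numeric values are
# hypothetical; the recognized keys ("method", "num_sm", "cga_size", "num_splits",
# "set_sm_margin", "aggregate") come from the loop above.
def _example_initialize_ub_usage(seq_len: int, batch_size: int, hidden: int, tp_size: int) -> None:
    ub_cfgs = {
        "proj_fprop": {"method": "pipeline", "num_sm": 8, "num_splits": 4},
        "qkv_dgrad": {"method": "bulk"},
    }
    initialize_ub(
        shape=[seq_len * batch_size, hidden],  # sample buffer shape shared by all UB buffers
        tp_size=tp_size,
        use_fp8=True,                          # FP8 buffers for the names listed in fp8_buf
        ub_cfgs=ub_cfgs,
    )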
def get_ub(name: str):
"""Get userbuffer communicator corresponding to give key."""
global _ub_communicators
assert _ub_communicators is not None, "UB manager is not initialized."
assert name in _ub_communicators, f"UB for {name} is not registered."
return _ub_communicators[name]
class _NoopCat(torch.autograd.Function):
"""This class is a no-op replacement for `torch.cat`."""
@staticmethod
def forward(ctx,
full_param_buffer: torch.Tensor,
*params_split: Tuple[torch.Tensor, ...],
) -> torch.Tensor:
assert not full_param_buffer.requires_grad, "Buffers should not require gradient"
assert (
full_param_buffer.shape[0] % len(params_split) == 0
), "Dimensions not compatible for concatenation"
param_temp = full_param_buffer.new()
param_temp.set_(full_param_buffer.storage(),
full_param_buffer.storage_offset(),
full_param_buffer.size(),
full_param_buffer.stride())
param_temp.requires_grad = True
ctx.save_for_backward(full_param_buffer, *params_split)
return param_temp
@staticmethod
def backward(ctx, grad_output: torch.Tensor) -> Tuple[Union[torch.Tensor, None], ...]:
full_param_buffer, *params_split = ctx.saved_tensors
split_size = full_param_buffer.shape[0] // len(params_split)
grads = []
for i, _ in enumerate(params_split):
grads.append(grad_output[i * split_size : (i+1) * split_size])
return None, *grads
class TransformerEngineBaseModule(torch.nn.Module, ABC):
"""Base TE module."""
def __init__(self) -> None:
super().__init__()
assert torch.cuda.is_available(), "TransformerEngine needs CUDA."
self.fp8_initialized = False
self.fp8 = False
self.fp8_calibration = False
self.fp8_meta = {}
self.fp8_meta["fp8_checkpoint"] = False
self.fp8_meta["fp8_group"] = None
self.fp8_meta["recipe"] = get_default_fp8_recipe()
self.fp8_meta_tensors_initialized = False
self.tp_group = None
self.tp_size = 1
self.sequence_parallel = False
self.fp8_weight_shapes = []
self.fp8_meta["autocast_id_fwd_stack"] = []
self.fp8_meta["async_amax_reduction"] = bool(
int(os.getenv("NVTE_ASYNC_AMAX_REDUCTION", "0"))
)
def set_meta_tensor(self, fwd: bool) -> None:
"""Init scales and amaxes for fwd | bwd."""
fp8_meta_tensor_key = "scaling_fwd" if fwd else "scaling_bwd"
if self.fp8_meta_tensors_initialized:
# Handle changed amax history size.
curr_len = self.fp8_meta[fp8_meta_tensor_key].amax_history.shape[0]
need_len = self.fp8_meta["recipe"].amax_history_len
if need_len < curr_len:
self.fp8_meta[fp8_meta_tensor_key].amax_history = (
self.fp8_meta[fp8_meta_tensor_key]
.amax_history[: self.fp8_meta["recipe"].amax_history_len].clone()
)
elif need_len > curr_len:
extra_rows = need_len - curr_len
self.fp8_meta[fp8_meta_tensor_key].amax_history = F.pad(
self.fp8_meta[fp8_meta_tensor_key].amax_history, pad=(0, 0, 0, extra_rows)
)
return
# Max. number of fp8 tensors per GEMM = 3 (input, weight, output) for fwd and
# 2 (grad_output and grad_input) for bwd
num_fp8_tensors = (
self.fp8_meta["num_gemms"] * 3 if fwd else self.fp8_meta["num_gemms"] * 2
)
self.fp8_meta[fp8_meta_tensor_key] = tex.FP8TensorMeta()
self.fp8_meta[fp8_meta_tensor_key].scale = torch.ones(
num_fp8_tensors, dtype=torch.float32, device="cuda"
)
self.fp8_meta[fp8_meta_tensor_key].scale_inv = torch.ones(
num_fp8_tensors, dtype=torch.float32, device="cuda"
)
self.fp8_meta[fp8_meta_tensor_key].amax_history = torch.zeros(
self.fp8_meta["recipe"].amax_history_len,
num_fp8_tensors,
dtype=torch.float32,
device="cuda",
)
# Needed for calculation of scale inverses to
# preserve scale_inv when caching FP8 weights
if fwd:
# [True, False, True]: -> [input, weight, output]
self.fp8_meta[fp8_meta_tensor_key + "_non_weight_mask"] = torch.BoolTensor(
[True, False, True] * self.fp8_meta["num_gemms"]
).cuda()
else:
# [True, True]: -> [grad_output, grad_input]
self.fp8_meta[fp8_meta_tensor_key + "_non_weight_mask"] = torch.BoolTensor(
[True, True] * self.fp8_meta["num_gemms"]
).cuda()
def init_fp8_meta_tensors(self) -> None:
"""Init scales and amaxes."""
self.set_meta_tensor(True)
self.set_meta_tensor(False)
self.fp8_meta_tensors_initialized = True
def get_extra_state(self) -> torch.Tensor:
"""Save before checkpointing."""
state = None
# Maintain backward compatibility.
fp8_checkpoint = "fp8_checkpoint" in self.fp8_meta and self.fp8_meta["fp8_checkpoint"]
fp8_checkpoint = fp8_checkpoint or self.fp8 or self.fp8_calibration
if fp8_checkpoint:
state = {}
state["scale_fwd"] = self.fp8_meta["scaling_fwd"].scale
state["scale_inv_fwd"] = self.fp8_meta["scaling_fwd"].scale_inv
state["amax_history_fwd"] = self.fp8_meta["scaling_fwd"].amax_history
state["scale_bwd"] = self.fp8_meta["scaling_bwd"].scale
state["scale_inv_bwd"] = self.fp8_meta["scaling_bwd"].scale_inv
state["amax_history_bwd"] = self.fp8_meta["scaling_bwd"].amax_history
state["global_fp8_buffer"] = FP8GlobalStateManager.get_global_fp8_buffer_checkpoint()
state["global_fp8_state"] = FP8GlobalStateManager.get_global_fp8_state_checkpoint()
            # Store other picklable values.
extra = {}
for k, v in self.fp8_meta.items():
if isinstance(v, (bool, int, float, str, list)):
extra[k] = v
state["extra_fp8_variables"] = extra
if is_in_onnx_export_mode():
state_serialized = torch.frombuffer(pickle.dumps(state), dtype=torch.uint8)
else:
state_serialized = io.BytesIO()
torch.save(state, state_serialized)
return state_serialized
def set_extra_state(self, state: torch.Tensor) -> None:
"""Load previous state."""
if state is None:
return
# Maintain backward compatibility with v0.2.0 and older.
if isinstance(state, list):
warnings.warn(
"This checkpoint format is deprecated and will be"
"removed in a future release of Transformer Engine"
)
# Retrieve checkpointed items.
scale_fwd = state[0]
amax_history_fwd = state[1]
scale_bwd = state[2]
amax_history_bwd = state[3]
self.fp8_meta["recipe"].amax_history_len = amax_history_fwd.shape[0]
self.fp8_meta["num_gemms"] = (
amax_history_fwd.shape[1] // 2
) # Two FWD tensors per GEMM
# Initialize before loading
self.init_fp8_meta_tensors()
self.fp8_meta["scaling_fwd"].scale.copy_(scale_fwd)
self.fp8_meta["scaling_fwd"].amax_history.copy_(amax_history_fwd)
self.fp8_meta["scaling_bwd"].scale.copy_(scale_bwd)
self.fp8_meta["scaling_bwd"].amax_history.copy_(amax_history_bwd)
# Restore global FP8 buffer state.
FP8GlobalStateManager.set_global_fp8_buffer_checkpoint(state[4])
self.fp8_meta["update_amax_and_scale_fwd"] = state[5]
self.fp8_meta["global_fp8_buffer_pos_fwd"] = state[6]
self.fp8_meta["global_fp8_buffer_pos_bwd"] = state[7]
self.fp8_meta["autocast_id_fwd"] = state[8]
self.fp8_meta["autocast_id_bwd"] = state[9]
return
if isinstance(state, torch.Tensor):
state = pickle.loads(state.detach().cpu().numpy().tobytes())
elif isinstance(state, io.BytesIO):
state.seek(0)
state = torch.load(state, map_location='cuda')
if state is None:
return
# Restore global FP8 amax buffer.
FP8GlobalStateManager.set_global_fp8_buffer_checkpoint(state["global_fp8_buffer"])
# Restore global FP8 state.
if "global_fp8_state" in state:
FP8GlobalStateManager.set_global_fp8_state_checkpoint(state["global_fp8_state"])
else:
warnings.warn(
"This checkpoint format is deprecated and will be"
"removed in a future release of Transformer Engine"
)
# Load extra items.
self.fp8_meta.update(state["extra_fp8_variables"])
self.fp8_meta["recipe"].amax_history_len = state["amax_history_fwd"].shape[0]
if "global_fp8_buffer_pos_fwd_recompute" in self.fp8_meta:
del self.fp8_meta["global_fp8_buffer_pos_fwd_recompute"]
# Initialize before loading.
self.init_fp8_meta_tensors()
self.fp8_meta["scaling_fwd"].scale.copy_(state["scale_fwd"])
self.fp8_meta["scaling_fwd"].amax_history.copy_(state["amax_history_fwd"])
self.fp8_meta["scaling_bwd"].scale.copy_(state["scale_bwd"])
self.fp8_meta["scaling_bwd"].amax_history.copy_(state["amax_history_bwd"])
# Backwards compatibility: compute scale inv if it wasn't saved in the extra state.
if "scale_inv_fwd" not in state or "scale_inv_bwd" not in state:
assert (
"scale_inv_fwd" not in state and "scale_inv_bwd" not in state
), "Invalid state, began saving scale_inv_fwd and scale_inv_bwd at the same time"
self.fp8_meta["scaling_fwd"].scale_inv.copy_(1.0/state["scale_fwd"])
self.fp8_meta["scaling_bwd"].scale_inv.copy_(1.0/state["scale_bwd"])
else:
self.fp8_meta["scaling_fwd"].scale_inv.copy_(state["scale_inv_fwd"])
self.fp8_meta["scaling_bwd"].scale_inv.copy_(state["scale_inv_bwd"])
def set_activation_dtype(self, inp: torch.Tensor) -> None:
"""Get activation data type for AMP."""
# Native AMP (`torch.autocast`) gets highest priority
if torch.is_autocast_enabled():
self.activation_dtype = torch.get_autocast_gpu_dtype()
return
# All checks after this have already been performed once, thus skip
# We assume that user doesn't change input types across iterations
if hasattr(self, "activation_dtype"):
return
dtype = inp.dtype
for name, param in self.named_parameters():
if param is not None:
assert dtype == param.dtype, (
"Data types for parameters must match when outside of autocasted region. "
f" Found input dtype: {dtype} and {name!r} dtype: {param.dtype}"
)
for name, buf in self.named_buffers():
if buf is not None:
assert dtype == buf.dtype, (
"Data types for buffers must match when outside of autocasted region. "
f" Found input dtype: {dtype} and {name!r} dtype: {buf.dtype}"
)
self.activation_dtype = dtype
def set_fp8_weights(self) -> None:
"""Initializes FP8 weights for the module as class attributes. These
are not parameters or buffers since we do not want functions such as
        `.to(dtype)` or `.to(device)` to affect them. These also do not need
to be checkpointed. During `init` phase of the module, the attribute
`fp8_weight_shapes` must be populated with the tensor shapes for FP8
weights. This function will iterate over those shapes and initialize
        the respective attributes named `weight1_fp8`, `weight2_fp8`, ...
"""
if not self.fp8:
return
for i, shape in enumerate(self.fp8_weight_shapes, start=1):
weight_cast_attr = f"weight{i}_fp8"
weight_transpose_attr = f"weight{i}_t_fp8"
if (
hasattr(self, weight_cast_attr)
and getattr(self, weight_cast_attr).shape == shape
):
return
setattr(
self,
weight_cast_attr,
torch.empty(
shape,
device=torch.cuda.current_device(),
dtype=torch.uint8,
),
)
setattr(
self,
weight_transpose_attr,
torch.empty(
shape[1],
shape[0],
device=torch.cuda.current_device(),
dtype=torch.uint8,
),
)
def set_tensor_parallel_group(self, tp_group: Union[dist_group_type, None]) -> None:
"""Set TP group."""
self.tp_group = tp_group
self.tp_group_initialized = True
# This routine is shared across FP8 and FP8_calibration paths so should not actually
# assume FP8 execution.
def fp8_init(self, num_gemms: int = 1) -> None:
"""Initialize fp8 related metadata and tensors during fprop."""
self.fp8 = FP8GlobalStateManager.is_fp8_enabled()
self.fp8_calibration = FP8GlobalStateManager.is_fp8_calibration()
self.fp8_meta["fp8_checkpoint"] = self.fp8 or self.fp8_calibration
if self.fp8 or self.fp8_calibration:
# FP8 init has already been run and recipe is the same, don't do anything.
if (self.fp8_initialized
and FP8GlobalStateManager.get_fp8_recipe() == self.fp8_meta["recipe"]):
return
# Set FP8, recipe, and other FP8 metadata
self.fp8_meta["recipe"] = FP8GlobalStateManager.get_fp8_recipe()
self.fp8_meta["num_gemms"] = num_gemms
self.fp8_meta["fp8_group"] = FP8GlobalStateManager.get_fp8_group()
# Set FP8_MAX per tensor according to recipe
self.fp8_meta["fp8_max_fwd"] = self.fp8_meta["recipe"].fp8_format.value.max_fwd
self.fp8_meta["fp8_max_bwd"] = self.fp8_meta["recipe"].fp8_format.value.max_bwd
# Allocate scales and amaxes
self.init_fp8_meta_tensors()
self.fp8_initialized = True
else:
# If fp8 isn't enabled, turn off and return.
self.fp8_initialized = False
return
@contextmanager
def prepare_forward(
self,
inp: torch.Tensor,
is_first_microbatch: Union[bool, None],
num_gemms: int = 1,
) -> Generator[torch.Tensor, None, None]:
"""Checks and prep for FWD.
The context manager is needed because there isn't a way for a module to know
if it's the last FP8 module in the forward autocast. It is useful
        to set up the forward aggregated amax reduction for every module
just in case. The autocast exit will pick up the most recent one.
"""
# Activation recomputation is used and this is the second forward phase.
if self.fp8 and in_fp8_activation_recompute_phase():
FP8GlobalStateManager.get_old_fp8_meta_tensors_for_recompute(self.fp8_meta)
else:
assert inp.is_cuda, "TransformerEngine needs CUDA."
if self.tp_size > 1:
assert self.tp_group_initialized, "TP group not initialized."
self.set_activation_dtype(inp)
self.fp8_init(num_gemms=num_gemms)
# Create persistent tensors for fp8 weights and their transposes
# only when fp8 weight caching is used.
if is_first_microbatch is not None:
self.set_fp8_weights()
update_weight_scale_inv = is_first_microbatch is None or is_first_microbatch
if self.fp8 and self.sequence_parallel:
assert self.fp8_meta["recipe"].reduce_amax, \
"Amax reduction across tensor parallel group is " \
"necessary when using sequence parallelism with FP8."
# Previous iteration was grad_enabled
if self.fp8_meta.get("update_amax_and_scale_fwd", False):
if self.fp8_meta["recipe"].reduce_amax:
FP8GlobalStateManager.copy_amax_from_global_buffer(self.fp8_meta, forward=True)
amax_and_scale_update(
self.fp8_meta, True, update_weight_scale_inv=update_weight_scale_inv
)
FP8GlobalStateManager.set_amax_buffer_key_deletion(self.fp8_meta, forward=True)
else:
amax_and_scale_update(
self.fp8_meta, True, update_weight_scale_inv=update_weight_scale_inv
)
if self.fp8 and self.training:
# Setup for amax reduction
if self.fp8_meta["recipe"].reduce_amax:
self.fp8_meta["first_module"] = FP8GlobalStateManager.is_first_fp8_module()
if self.fp8_meta["first_module"]:
# Wait for the prior AMAX reduction to finish
amax_reduce_handle_fwd = FP8GlobalStateManager.get_amax_reduce_handle_fwd()
if amax_reduce_handle_fwd is not None:
amax_reduce_handle_fwd.wait()
self.fp8_meta["autocast_id_fwd"] = (
FP8GlobalStateManager.new_fp8_context_id())
FP8GlobalStateManager.set_fp8_context_id(self.fp8_meta["autocast_id_fwd"])
else:
self.fp8_meta["autocast_id_fwd"] = (
FP8GlobalStateManager.get_fp8_context_id())
self.fp8_meta["autocast_id_fwd_stack"].append(
self.fp8_meta["autocast_id_fwd"]
)
FP8GlobalStateManager.add_amax_to_global_buffer(self.fp8_meta, forward=True)
self.fp8_meta["update_amax_and_scale_fwd"] = True
else:
self.fp8_meta["update_amax_and_scale_fwd"] = False
# Activation recomputation is used and this is the first forward phase.
if (
self.fp8
and self.training
and is_fp8_activation_recompute_enabled()
and not in_fp8_activation_recompute_phase()
):
FP8GlobalStateManager.copy_forward_fp8_meta_tensors_for_recompute(self.fp8_meta)
with torch.cuda.nvtx.range(self.__class__.__name__ + " forward"):
yield inp.contiguous()
if self.fp8 and in_fp8_activation_recompute_phase():
FP8GlobalStateManager.restore_fp8_meta_tensors(self.fp8_meta)
return
if self.fp8 and self.training and self.fp8_meta["recipe"].reduce_amax:
FP8GlobalStateManager.set_fp8_context_id(self.fp8_meta["autocast_id_fwd"])
reduce_func = partial(
FP8GlobalStateManager.global_amax_reduction,
self.fp8_meta,
self.tp_group,
self.tp_size,
forward=True
)
FP8GlobalStateManager.setup_amax_forward_global_reduce_func(reduce_func)
def set_nccl_overlap_warning_if_tp(self) -> None:
"""When using TP, the NCCL communication needs to be scheduled
before the GEMM for there to be a guaranteed overlap. From the
host side in TE, the comm calls are always launched first, but
to ensure that the GEMM isn't scheduled first, the environment
variable `CUDA_DEVICE_MAX_CONNECTIONS` needs to be set to 1 to
force a single channel.
"""
if self.tp_size == 1:
return
num_cuda_work_queues = int(os.getenv("CUDA_DEVICE_MAX_CONNECTIONS", "0"))
if num_cuda_work_queues != 1:
warnings.warn(
"To guarantee overlapping TP and SP collectives with the backward"
"GEMMs, set environment variable CUDA_DEVICE_MAX_CONNECTIONS = 1"
)
@staticmethod
def grad_output_preprocess(
ctx, grad_output: torch.Tensor, row_parallel_mode: bool
) -> Tuple[Union[torch.Tensor, None], ...]:
"""Utility function for backward.
        Returns tuple in order (all optional/None based on training precision/recipe):
R1: gathered `grad_output` in higher precision.
R2: gathered `grad_output` in FP8.
R3: R2 transposed.
R4: bias gradient on R1.
"""
grad_output = grad_output.contiguous()
grad_output_mat = grad_output.view((-1, grad_output.shape[-1]))
gather_grad_output = row_parallel_mode and ctx.sequence_parallel
# No-FP8 case: bgrad is fused with wgrad for this case.
if not ctx.fp8:
if gather_grad_output:
if not ctx.ub_split_ag:
grad_output_mat, _ = gather_along_first_dim(
grad_output_mat, ctx.tp_group
)
else:
ctx.ub_obj_gradout.copy_input_to_ubuf(grad_output, True)
grad_output_mat = ctx.ub_obj_gradout.get_ubuf_output(1)
return grad_output_mat, None, None, None
fp8_dtype_backward = get_fp8_te_dtype(
ctx.fp8_meta["recipe"], fprop_tensor=False
)
# FP8 case with non-FP8 wgrad
if (
gather_grad_output
and ctx.fp8_meta["recipe"].override_linear_precision.wgrad
):
assert (
not ctx.ub_split_ag
), "override_linear_precision.wgrad not supported with ub_split_ag"
grad_output_mat, _ = gather_along_first_dim(grad_output_mat, ctx.tp_group)
# FP8 case with gather: unfused bgrad, cast, transpose for efficient gather
elif gather_grad_output:
if ctx.use_bias:
grad_bias = grad_output_mat.sum(dim=0)
else:
grad_bias = None
if ctx.ub_split_ag:
grad_output_c = ctx.ub_obj_gradout.get_ubuf_output(0)
else:
grad_output_c = torch.empty_like(grad_output_mat, dtype=torch.uint8)
cast_to_fp8(
grad_output_mat,
ctx.fp8_meta["scaling_bwd"],
tex.FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
out=grad_output_c,
)
if not ctx.ub_split_ag:
grad_output_c, _ = gather_along_first_dim(grad_output_c, ctx.tp_group)
grad_output_t = tex.fp8_transpose(grad_output_c, fp8_dtype_backward)
else:
grad_output_c = ctx.ub_obj_gradout.get_ubuf_output(1)
grad_output_t = None
return grad_output_mat, grad_output_c, grad_output_t, grad_bias
# FP8 case without gather: cast, transpose, bgrad fused
if ctx.use_bias:
grad_bias, grad_output_c, grad_output_t = fp8_cast_transpose_bgrad_fused(
grad_output_mat,
ctx.fp8_meta["scaling_bwd"],
tex.FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
)
else:
if not ctx.fp8_meta["recipe"].override_linear_precision.wgrad:
grad_output_c, grad_output_t = fp8_cast_transpose_fused(
grad_output_mat,
ctx.fp8_meta["scaling_bwd"],
tex.FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
)
else:
grad_output_t = None
grad_output_c = cast_to_fp8(
grad_output_mat,
ctx.fp8_meta["scaling_bwd"],
tex.FP8BwdTensors.GRAD_OUTPUT1,
fp8_dtype_backward,
)
grad_bias = None
return grad_output_mat, grad_output_c, grad_output_t, grad_bias
def noop_cat(self, buffer_name: str, pnames: List[str]) -> torch.Tensor:
"""No-op replacement of `torch.cat`. The buffer and split parameters must occupy
the same memory region. If this is not the case, then the split parameters
are concatenated and the buffer is overwritten. The parameters' memory is then
re-assigned to point to the buffer to avoid subsequent concatenations.
"""
assert hasattr(self, buffer_name), f"No buffer named {buffer_name}"
full_param_buffer = getattr(self, buffer_name)
split_size = full_param_buffer.shape[0] // len(pnames)
params = [getattr(self, name) for name in pnames]
for i, p in enumerate(params):
if p.data.data_ptr() != full_param_buffer[i*split_size : (i+1)*split_size].data_ptr():
with torch.no_grad():
setattr(self, buffer_name, torch.cat(params))
for j, pname in enumerate(pnames):
full_param_buffer = getattr(self, buffer_name)
setattr(self, pname,
Parameter(full_param_buffer[j*split_size : (j+1)*split_size]))
break
return _NoopCat.apply(getattr(self, buffer_name), *[getattr(self, name) for name in pnames])
def get_fp8_weights_empty_tensors(
self,
is_first_microbatch: Union[bool, None],
) -> List[torch.Tensor]:
"""
Returns empty tensors to be later used to store fp8 version of weights
and their transposes (for the bwd pass) for this batch (or microbatch).
        When `is_first_microbatch` is `None`, this is especially useful since
        we then don't need to persist the fp8 weights, which are only needed
        once in the forward pass. Note that the fp8 weight transpose is still
        needed in the backward pass, but that is handled by storing the
        transpose tensor via `ctx.save_for_backward`.
"""
assert is_first_microbatch is None, "Should only be here when "\
"`is_first_microbatch` is None!"
fp8_weight_tensors = []
for shape in self.fp8_weight_shapes:
fp8_weight_tensors.append(
torch.empty(
shape,
device=torch.cuda.current_device(),
dtype=torch.uint8,
)
)
fp8_weight_tensors.append(
torch.empty(
shape[1],
shape[0],
device=torch.cuda.current_device(),
dtype=torch.uint8,
)
)
return fp8_weight_tensors
@abstractmethod
def forward(self):
"""Needs override."""
@abstractmethod
def get_fp8_weights_scratchpad(
self,
is_first_microbatch: Union[bool, None],
) -> List[torch.Tensor]:
"""Needs override."""
| TransformerEngine-main | transformer_engine/pytorch/module/base.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""
Sharding Meta for xmap with CustomCall
"""
from contextlib import contextmanager
from dataclasses import dataclass
from enum import Enum
from itertools import repeat
from typing import Union, Tuple, Dict, Callable, Sequence
from jax.interpreters import pxla
import jax
import jax.numpy as jnp
from jax.experimental.maps import xmap
from jax.sharding import PartitionSpec
jax.config.update('experimental_xmap_spmd_lowering', True)
jax.config.update('experimental_xmap_spmd_lowering_manual', True)
_PXLA_THREAD_RESOURCES = pxla.thread_resources
def _get_mesh_info(resource: str):
mesh = _PXLA_THREAD_RESOURCES.env.physical_mesh
assert resource in mesh.axis_names, \
f"{resource} is not in the axis_names of Mesh {mesh}."
return mesh.shape[resource], resource
def with_sharding_constraint(x: jnp.array, pspec: PartitionSpec):
"""
    A wrapper around jax.lax.with_sharding_constraint that
    supports the case where the Mesh is empty.
"""
mesh = _PXLA_THREAD_RESOURCES.env.physical_mesh
if mesh.empty:
return x
return jax.lax.with_sharding_constraint(x, pspec)
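# A minimal usage sketch: under an active Mesh the constraint is applied as usual, while
# with an empty Mesh (e.g. a single-process run) the input is returned unchanged. The
# array shape and axis names below are illustrative assumptions:
#
#     x = jnp.zeros((8, 128))
#     x = with_sharding_constraint(x, PartitionSpec('data', None))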
@dataclass
class ShardingResource:
"""
    A data container to indicate which axis in Mesh is used for data parallelism and
    which for tensor parallelism.
Parameters
----------
dp_resource : str, default = None
The axis name in Mesh used to shard batches along.
If it is None, then data parallelism is disabled.
    tp_resource : str, default = None
        The axis name in Mesh used to split the hidden dimensions along.
        If it is None, then tensor parallelism is disabled.
    fsdp_resource : str, default = None
        The axis name in Mesh used to shard parameters along for FSDP (ZeRO-3).
        If it is None, then FSDP is disabled.
"""
dp_resource: str = None
tp_resource: str = None
fsdp_resource: str = None
_GLOBAL_SHARD_RESOURCE = ShardingResource()
@contextmanager
def global_shard_guard(resource: ShardingResource):
"""
A context manager to switch the global ShardingResource
"""
global _GLOBAL_SHARD_RESOURCE
prev_gsr = _GLOBAL_SHARD_RESOURCE
try:
_GLOBAL_SHARD_RESOURCE = resource
yield
finally:
_GLOBAL_SHARD_RESOURCE = prev_gsr
def global_shard_resource() -> ShardingResource:
"""
A getter of the global ShardingResource
"""
return _GLOBAL_SHARD_RESOURCE
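# A minimal usage sketch: bind the mesh axis names for the duration of a step so that the
# sharding helpers below can infer the parallelism pattern. The axis names and the layer
# call are illustrative assumptions:
#
#     with global_shard_guard(ShardingResource(dp_resource='data', tp_resource='model')):
#         out = some_te_layer(params, x)  # hypothetical call; TE ops read the guard internally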
class MajorShardingType(Enum):
r"""
The major sharding type to indicate sharding pattern.
Values
----------
SINGLE:
Single process training.
DP:
Data parallel training.
TP:
Standard tensor parallel training.
DPTP:
Data and Standard tensor parallel training.
"""
SINGLE = 0
DP = 1
TP = 2
DPTP = 3
class ShardingType(Enum):
"""
The sharding type to indicate sharding pattern.
Values
----------
SINGLE:
No sharding.
DP:
Sharding along data parallelism.
TP_COL:
Sharding along column-split tensor parallelism.
TP_ROW:
Sharding along row-split tensor parallelism.
DP_TP_COL:
Sharding along data and column-split tensor parallelism.
DP_TP_ROW:
Sharding along data and row-split tensor parallelism.
"""
SINGLE = (MajorShardingType.SINGLE, "single")
DP = (MajorShardingType.DP, "dp")
TP_COL = (MajorShardingType.TP, "tp_col")
TP_ROW = (MajorShardingType.TP, "tp_row")
DP_TP_COL = (MajorShardingType.DPTP, "dp_tp_col")
DP_TP_ROW = (MajorShardingType.DPTP, "dp_tp_row")
def infer_major_sharding_type() -> MajorShardingType:
"""
Infer MajorShardingType from _GLOBAL_SHARD_RESOURCE
"""
gsr = global_shard_resource()
resources = [gsr.dp_resource, gsr.tp_resource, gsr.fsdp_resource]
for idx, rs in enumerate(resources):
try:
size, _ = _get_mesh_info(rs)
if size <= 1:
resources[idx] = None
except AssertionError as _:
resources[idx] = None
dp_resource = resources[0]
tp_resource = resources[1]
fsdp_resource = resources[2]
def dp_enabled():
return (fsdp_resource is not None) or (dp_resource is not None)
if dp_enabled() and tp_resource is not None:
return MajorShardingType.DPTP
if dp_enabled():
return MajorShardingType.DP
if tp_resource is not None:
return MajorShardingType.TP
return MajorShardingType.SINGLE
def infer_sharding_type(major_st: MajorShardingType = None) -> Tuple[ShardingType, ShardingType]:
"""
Infer ShardingType via given MajorShardingType
"""
if major_st is None:
major_st = infer_major_sharding_type()
if major_st is MajorShardingType.DP:
return ShardingType.DP, ShardingType.DP
if major_st is MajorShardingType.TP:
return ShardingType.TP_COL, ShardingType.TP_ROW
if major_st is MajorShardingType.DPTP:
return ShardingType.DP_TP_COL, ShardingType.DP_TP_ROW
return ShardingType.SINGLE, ShardingType.SINGLE
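# For example, under ShardingResource(dp_resource='data', tp_resource='model') with both
# mesh axes larger than 1, infer_major_sharding_type() returns MajorShardingType.DPTP and
# infer_sharding_type() returns (ShardingType.DP_TP_COL, ShardingType.DP_TP_ROW); any
# resource that is missing from the mesh or has size 1 is treated as disabled.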
def is_dp_enabled(mst: MajorShardingType) -> bool:
"""
is_dp_enabled
"""
return mst in (MajorShardingType.DP, MajorShardingType.DPTP)
def is_tp_enabled(mst: MajorShardingType) -> bool:
"""
is_tp_enabled
"""
return mst in (MajorShardingType.TP, MajorShardingType.DPTP)
def merge_axis_resources(ars: Tuple[Dict]) -> Dict:
"""
merge_axis_resources
"""
output = {}
for ar in ars:
for key in ar:
if key not in output:
output[key] = ar[key]
else:
assert output[key] == ar[key]
return output
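# For example, merge_axis_resources(({'data': 'dp'}, {'model': 'tp'}, {'data': 'dp'}))
# returns {'data': 'dp', 'model': 'tp'}; two dicts that map the same axis name to
# different mesh axes would trip the assertion above.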
@dataclass
class ShardingMeta:
"""ShardingMeta"""
in_axes: Union[Dict, Tuple[str, ...], Tuple[Union[Dict, Tuple], ...]]
out_axes: Union[Dict, Tuple[str, ...], Tuple[Union[Dict, Tuple], ...]]
axis_resources: Dict
input_shapes: Tuple[Tuple[int, ...]]
output_shapes: Tuple[Tuple[int, ...]]
class ShardingMetaGenerator:
"""
ShardingMetaGenerator
"""
def __init__(self):
def get_single_sharding_meta(*argv, **kwargs) -> ShardingMeta: # pylint: disable=unused-argument
return None
self.sharding_type_meta_map = {
ShardingType.SINGLE: get_single_sharding_meta,
ShardingType.DP: self.get_dp_sharding_meta,
ShardingType.TP_COL: self.get_tp_col_sharding_meta,
ShardingType.TP_ROW: self.get_tp_row_sharding_meta,
ShardingType.DP_TP_COL: self.get_dp_tp_col_sharding_meta,
ShardingType.DP_TP_ROW: self.get_dp_tp_row_sharding_meta
}
def get_sharding_meta(self, stype: ShardingType, *argv, **kwargs) -> ShardingMeta:
"""get_sharding_meta"""
return self.sharding_type_meta_map[stype](*argv, **kwargs)
def get_dp_sharding_meta(self, *argv, **kwargs) -> ShardingMeta:
"""get_dp_sharding_meta"""
raise NotImplementedError
def get_tp_col_sharding_meta(self, *argv, **kwargs) -> ShardingMeta:
"""get_tp_col_sharding_meta"""
raise NotImplementedError
def get_tp_row_sharding_meta(self, *argv, **kwargs) -> ShardingMeta:
"""get_tp_row_sharding_meta"""
raise NotImplementedError
def get_dp_tp_col_sharding_meta(self, *argv, **kwargs) -> ShardingMeta:
"""get_dp_tp_col_sharding_meta"""
raise NotImplementedError
def get_dp_tp_row_sharding_meta(self, *argv, **kwargs) -> ShardingMeta:
"""get_dp_tp_row_sharding_meta"""
raise NotImplementedError
class FP8MetaShardingMetaGenerator(ShardingMetaGenerator):
"""
FP8MetaShardingMetaGenerator
"""
def get_dp_sharding_meta(self,
num_of_meta: int,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
return FP8MetaShardingMetaGenerator._generate_sharding_meta(MajorShardingType.DP,
num_of_meta, dp_axis_name,
tp_axis_name)
def get_tp_col_sharding_meta(self,
num_of_meta: int,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
return FP8MetaShardingMetaGenerator._generate_sharding_meta(MajorShardingType.TP,
num_of_meta, dp_axis_name,
tp_axis_name)
def get_tp_row_sharding_meta(self,
num_of_meta: int,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
return FP8MetaShardingMetaGenerator._generate_sharding_meta(MajorShardingType.TP,
num_of_meta, dp_axis_name,
tp_axis_name)
def get_dp_tp_col_sharding_meta(self,
num_of_meta: int,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
return FP8MetaShardingMetaGenerator._generate_sharding_meta(MajorShardingType.DPTP,
num_of_meta, dp_axis_name,
tp_axis_name)
def get_dp_tp_row_sharding_meta(self,
num_of_meta: int,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
return FP8MetaShardingMetaGenerator._generate_sharding_meta(MajorShardingType.DPTP,
num_of_meta, dp_axis_name,
tp_axis_name)
@staticmethod
def _stack_axes_meta(num_of_meta: int, mapping: Dict) -> Tuple:
return tuple(mapping for _ in range(num_of_meta))
@staticmethod
def _generate_sharding_meta(type_: MajorShardingType,
num_of_meta: int,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
axis_resource = {}
if is_dp_enabled(type_):
axis_resource[dp_axis_name] = global_shard_resource().dp_resource
if is_tp_enabled(type_):
axis_resource[tp_axis_name] = global_shard_resource().tp_resource
return ShardingMeta(FP8MetaShardingMetaGenerator._stack_axes_meta(num_of_meta, {}),
FP8MetaShardingMetaGenerator._stack_axes_meta(num_of_meta, {}),
axis_resource, (), ())
class FusedAttnShardingMetaGenerator(ShardingMetaGenerator):
"""
FusedAttnShardingMetaGenerator
"""
def get_dp_sharding_meta(
self,
input_shapes: Tuple[Tuple[int, ...]],
output_shapes: Tuple[Tuple[int, ...]],
dp_dims: Tuple[Tuple[int, ...]],
tp_dims: Tuple[Tuple[int, ...]], # pylint: disable=unused-argument
dp_axis_name: str = 'data',
tp_axis_name: str = 'model' # pylint: disable=unused-argument
) -> ShardingMeta:
"""get_dp_sharding_meta"""
dummy_tp_dims = [repeat(None), repeat(None)]
return FusedAttnShardingMetaGenerator._get_dptp_sharding_meta(input_shapes, output_shapes,
dp_dims, dummy_tp_dims,
dp_axis_name, None)
def get_tp_col_sharding_meta(self, *argv, **kwargs) -> ShardingMeta:
"""get_tp_col_sharding_meta"""
return FusedAttnShardingMetaGenerator._get_tp_sharding_meta(*argv, **kwargs)
def get_tp_row_sharding_meta(self, *argv, **kwargs) -> ShardingMeta:
"""get_tp_row_sharding_meta"""
return FusedAttnShardingMetaGenerator._get_tp_sharding_meta(*argv, **kwargs)
def get_dp_tp_col_sharding_meta(self, *argv, **kwargs) -> ShardingMeta:
"""get_dp_tp_col_sharding_meta"""
return FusedAttnShardingMetaGenerator._get_dptp_sharding_meta(*argv, **kwargs)
def get_dp_tp_row_sharding_meta(self, *argv, **kwargs) -> ShardingMeta:
"""get_dp_tp_row_sharding_meta"""
return FusedAttnShardingMetaGenerator._get_dptp_sharding_meta(*argv, **kwargs)
@staticmethod
def _get_tp_sharding_meta(
input_shapes: Tuple[Tuple[int, ...]],
output_shapes: Tuple[Tuple[int, ...]],
dp_dims: Tuple[Tuple[int, ...]], # pylint: disable=unused-argument
tp_dims: Tuple[Tuple[int, ...]],
dp_axis_name: str = 'data', # pylint: disable=unused-argument
tp_axis_name: str = 'model') -> ShardingMeta:
"""get_tp_sharding_meta"""
dummy_dp_dims = [repeat(None), repeat(None)]
return FusedAttnShardingMetaGenerator._get_dptp_sharding_meta(input_shapes, output_shapes,
dummy_dp_dims, tp_dims, None,
tp_axis_name)
@staticmethod
def _get_dptp_sharding_meta(input_shapes: Tuple[Tuple[int, ...]],
output_shapes: Tuple[Tuple[int, ...]],
dp_dims: Tuple[Tuple[int, ...]],
tp_dims: Tuple[Tuple[int, ...]],
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""get_dp_tp_sharding_meta"""
dp_size, dp_mesh_axis = _get_mesh_info(global_shard_resource().dp_resource)
tp_size, tp_mesh_axis = _get_mesh_info(global_shard_resource().tp_resource)
input_dp_dims, output_dp_dims = dp_dims
input_tp_dims, output_tp_dims = tp_dims
input_new_shapes = []
in_axes = []
for input_shape, dp_dim, tp_dim in zip(input_shapes, input_dp_dims, input_tp_dims):
in_axis = {}
if dp_dim is not None and input_shape is not None:
in_axis[dp_dim] = dp_axis_name
assert input_shape[dp_dim] % dp_size == 0, \
f"The dimension of batch in input_shape should be a multiple of " \
f"data parallelism size, but got {input_shape[dp_dim]=} and {dp_size=}."
input_shape = (*input_shape[:dp_dim], dp_size, input_shape[dp_dim] // dp_size,
*input_shape[dp_dim + 1:])
            # the input shape has been expanded at dp_dim, so tp_dim must shift by 1 if tp_dim >= dp_dim
if tp_dim is not None and tp_dim >= dp_dim:
tp_dim = tp_dim + 1
if tp_dim is not None and input_shape is not None:
in_axis[tp_dim] = tp_axis_name
assert input_shape[tp_dim] % tp_size == 0, \
f"The dimension of tensor parallel in input_shape should be a multiple of " \
f"tensor parallelism size, but got {input_shape[tp_dim]=} and {tp_size=}."
input_shape = (*input_shape[:tp_dim], tp_size, input_shape[tp_dim] // tp_size,
*input_shape[tp_dim + 1:])
in_axes.append(in_axis)
input_new_shapes.append(input_shape)
output_new_shapes = output_shapes
out_axes = []
for dp_dim, tp_dim in zip(output_dp_dims, output_tp_dims):
out_axis = {}
if dp_dim is not None:
out_axis[dp_dim] = dp_axis_name
if tp_dim is not None and tp_dim >= dp_dim:
tp_dim = tp_dim + 1
if tp_dim is not None:
out_axis[tp_dim] = tp_axis_name
out_axes.append(out_axis)
assert len(out_axes) == 1, "Only allow single output at this moment."
assert len(output_new_shapes) == 1, "Only allow single output at this moment."
out_axes = out_axes[0]
output_new_shapes = output_new_shapes[0]
axis_resources = {}
if dp_axis_name is not None:
axis_resources[dp_axis_name] = dp_mesh_axis
if tp_axis_name is not None:
axis_resources[tp_axis_name] = tp_mesh_axis
return ShardingMeta(tuple(in_axes), out_axes, axis_resources, input_new_shapes,
output_new_shapes)
class DotShardingMetaGenerator(ShardingMetaGenerator):
"""
DotShardingMetaGenerator
"""
def get_dp_sharding_meta(
self,
a_shape: Tuple,
b_shape: Tuple,
batch_dim_of_a: int,
model_dim_of_a: int, # pylint: disable=unused-argument
model_dim_of_b: int, # pylint: disable=unused-argument
contracting_dims: Tuple[Sequence[int], Sequence[int]],
dp_axis_name: str = 'data',
tp_axis_name: str = 'model' # pylint: disable=unused-argument
) -> ShardingMeta:
DotShardingMetaGenerator._is_supported(a_shape, b_shape, batch_dim_of_a, None,
contracting_dims)
out_shape = DotShardingMetaGenerator._infer_output_shape(a_shape, b_shape, contracting_dims)
out_batch_dim = batch_dim_of_a
dp_size, dp_mesh_axis = _get_mesh_info(global_shard_resource().dp_resource)
assert a_shape[batch_dim_of_a] % dp_size == 0, \
f"The dimension of batch in a_shape should be a multiple of data parallelism size," \
f" but got {a_shape[batch_dim_of_a]=} and {dp_size=}."
a_new_shape = (*a_shape[:batch_dim_of_a], dp_size, -1, *a_shape[batch_dim_of_a + 1:])
return ShardingMeta(({
batch_dim_of_a: dp_axis_name
}, {}), ({
out_batch_dim: dp_axis_name
}), {dp_axis_name: dp_mesh_axis}, [a_new_shape, b_shape], [out_shape])
def get_tp_col_sharding_meta(
self,
a_shape: Tuple,
b_shape: Tuple,
batch_dim_of_a: int,
model_dim_of_a: int, # pylint: disable=unused-argument
model_dim_of_b: int,
contracting_dims: Tuple[Sequence[int], Sequence[int]],
dp_axis_name: str = 'data', # pylint: disable=unused-argument
tp_axis_name: str = 'model') -> ShardingMeta:
DotShardingMetaGenerator._is_supported(a_shape, b_shape, batch_dim_of_a, None,
contracting_dims)
out_shape = DotShardingMetaGenerator._infer_output_shape(a_shape, b_shape, contracting_dims)
out_model_idx = len(out_shape) - (len(b_shape) - model_dim_of_b)
tp_size, tp_mesh_axis = _get_mesh_info(global_shard_resource().tp_resource)
assert b_shape[model_dim_of_b] % tp_size == 0, \
f"The dimension of model parallelism in b_shape should be a multiple of " \
f"tensor parallelism size,but got {b_shape[model_dim_of_b]=} and {tp_size=}."
b_new_shape = (*b_shape[:model_dim_of_b], tp_size, b_shape[model_dim_of_b] // tp_size,
*b_shape[model_dim_of_b + 1:])
return ShardingMeta(({}, {
model_dim_of_b: tp_axis_name
}), ({
out_model_idx: tp_axis_name
}), {tp_axis_name: tp_mesh_axis}, [a_shape, b_new_shape], [out_shape])
def get_tp_row_sharding_meta(
self,
a_shape: Tuple,
b_shape: Tuple,
batch_dim_of_a: int,
model_dim_of_a: int,
model_dim_of_b: int,
contracting_dims: Tuple[Sequence[int], Sequence[int]],
dp_axis_name: str = 'data', # pylint: disable=unused-argument
tp_axis_name: str = 'model') -> ShardingMeta:
DotShardingMetaGenerator._is_supported(a_shape, b_shape, batch_dim_of_a, model_dim_of_a,
contracting_dims)
out_shape = DotShardingMetaGenerator._infer_output_shape(a_shape, b_shape, contracting_dims)
tp_size, tp_mesh_axis = _get_mesh_info(global_shard_resource().tp_resource)
assert a_shape[model_dim_of_a] % tp_size == 0, \
f"The dimension of model parallelism in a_shape should be a multiple of " \
f"tensor parallelism size,but got {a_shape[model_dim_of_a]=} and {tp_size=}."
assert b_shape[model_dim_of_b] % tp_size == 0, \
f"The dimension of model parallelism in b_shape should be a multiple of " \
f"tensor parallelism size,but got {b_shape[model_dim_of_b]=} and {tp_size=}."
a_new_shape = (*a_shape[:model_dim_of_a], tp_size, a_shape[model_dim_of_a] // tp_size,
*a_shape[model_dim_of_a + 1:])
b_new_shape = (*b_shape[:model_dim_of_b], tp_size, b_shape[model_dim_of_b] // tp_size,
*b_shape[model_dim_of_b + 1:])
return ShardingMeta(({
model_dim_of_a: tp_axis_name
}, {
model_dim_of_b: tp_axis_name
}), ({}), {tp_axis_name: tp_mesh_axis}, [a_new_shape, b_new_shape], [out_shape])
def get_dp_tp_col_sharding_meta(
self,
a_shape: Tuple,
b_shape: Tuple,
batch_dim_of_a: int,
model_dim_of_a: int, # pylint: disable=unused-argument
model_dim_of_b: int,
contracting_dims: Tuple[Sequence[int], Sequence[int]],
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
DotShardingMetaGenerator._is_supported(a_shape, b_shape, batch_dim_of_a, None,
contracting_dims)
out_shape = DotShardingMetaGenerator._infer_output_shape(a_shape, b_shape, contracting_dims)
out_model_idx = len(out_shape) + 1 - (len(b_shape) - model_dim_of_b)
dp_size, dp_mesh_axis = _get_mesh_info(global_shard_resource().dp_resource)
tp_size, tp_mesh_axis = _get_mesh_info(global_shard_resource().tp_resource)
assert a_shape[batch_dim_of_a] % dp_size == 0, \
f"The dimension of batch in a_shape should be a multiple of data parallelism size," \
f" but got {a_shape[batch_dim_of_a]=} and {dp_size=}."
assert b_shape[model_dim_of_b] % tp_size == 0, \
f"The dimension of model parallelism in b_shape should be a multiple of " \
f"tensor parallelism size,but got {b_shape[model_dim_of_b]=} and {tp_size=}."
a_new_shape = (*a_shape[:batch_dim_of_a], dp_size, a_shape[batch_dim_of_a] // dp_size,
*a_shape[batch_dim_of_a + 1:])
b_new_shape = (*b_shape[:model_dim_of_b], tp_size, b_shape[model_dim_of_b] // tp_size,
*b_shape[model_dim_of_b + 1:])
return ShardingMeta(({
batch_dim_of_a: dp_axis_name
}, {
model_dim_of_b: tp_axis_name
}), ({
batch_dim_of_a: dp_axis_name,
out_model_idx: tp_axis_name
}), {
dp_axis_name: dp_mesh_axis,
tp_axis_name: tp_mesh_axis
}, [a_new_shape, b_new_shape], [out_shape])
def get_dp_tp_row_sharding_meta(self,
a_shape: Tuple,
b_shape: Tuple,
batch_dim_of_a: int,
model_dim_of_a: int,
model_dim_of_b: int,
contracting_dims: Tuple[Sequence[int], Sequence[int]],
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
DotShardingMetaGenerator._is_supported(a_shape, b_shape, batch_dim_of_a, model_dim_of_a,
contracting_dims)
out_shape = DotShardingMetaGenerator._infer_output_shape(a_shape, b_shape, contracting_dims)
dp_size, dp_mesh_axis = _get_mesh_info(global_shard_resource().dp_resource)
tp_size, tp_mesh_axis = _get_mesh_info(global_shard_resource().tp_resource)
assert a_shape[batch_dim_of_a] % dp_size == 0, \
f"The dimension of batch in a_shape should be a multiple of data parallelism size," \
f" but got {a_shape[batch_dim_of_a]=} and {dp_size=}."
assert a_shape[model_dim_of_a] % tp_size == 0, \
f"The dimension of model parallelism in a_shape should be a multiple of " \
f"tensor parallelism size,but got {a_shape[model_dim_of_a]=} and {tp_size=}."
assert b_shape[model_dim_of_b] % tp_size == 0, \
f"The dimension of model parallelism in b_shape should be a multiple of " \
f"tensor parallelism size,but {b_shape[model_dim_of_b]=} and {tp_size=}."
a_new_shape = (*a_shape[:batch_dim_of_a], dp_size, a_shape[batch_dim_of_a] // dp_size,
*a_shape[batch_dim_of_a + 1:model_dim_of_a], tp_size,
a_shape[model_dim_of_a] // tp_size, *a_shape[model_dim_of_a + 1:])
b_new_shape = (*b_shape[:model_dim_of_b], tp_size, b_shape[model_dim_of_b] // tp_size,
*b_shape[model_dim_of_b + 1:])
return ShardingMeta(
(
{
batch_dim_of_a:
dp_axis_name,
# "model_dim_of_a+1" is the index to tp_size in a_new_shape
model_dim_of_a + 1:
tp_axis_name
},
{
model_dim_of_b: tp_axis_name
}),
({
batch_dim_of_a: dp_axis_name
}),
{
dp_axis_name: dp_mesh_axis,
tp_axis_name: tp_mesh_axis
},
[a_new_shape, b_new_shape],
[out_shape])
@staticmethod
def _is_supported(
a_shape: Tuple, # pylint: disable=unused-argument
b_shape: Tuple, # pylint: disable=unused-argument
batch_dim_of_a: int,
model_dim_of_a: int,
contracting_dims: Tuple[Sequence[int], Sequence[int]],
):
assert batch_dim_of_a not in contracting_dims[0], \
"batch_dim_of_a should be one of contracting_dims[0]"
assert batch_dim_of_a >= 0, \
"Only support non-negative value of batch_dim_of_a."
if model_dim_of_a is not None:
assert model_dim_of_a >= 0, \
"Only support non-negative value of model_dim_of_a"
assert model_dim_of_a > batch_dim_of_a, \
"Only support the case that model_dim_of_a > batch_dim_of_a."
@staticmethod
def _infer_output_shape(
a_shape: Tuple,
b_shape: Tuple,
contracting_dims: Tuple[Sequence[int], Sequence[int]],
):
lhs_contracting_dims, rhs_contracting_dims = contracting_dims
return (*a_shape[:min(lhs_contracting_dims)], *b_shape[max(rhs_contracting_dims) + 1:])
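# A worked example of the shape bookkeeping above (column-split case): with
# a_shape=(B, S, H), b_shape=(H, H_out), contracting_dims=((-1,), (0,)) and
# model_dim_of_b=1, _infer_output_shape gives (B, S, H_out); b is reshaped to
# (H, tp_size, H_out // tp_size) and out_model_idx = 3 - (2 - 1) = 2, so the last
# axis of the output is mapped onto the tensor-parallel mesh axis.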
class ElementwiseShardingMetaGenerator(ShardingMetaGenerator):
"""
ElementwiseShardingMetaGenerator
"""
def get_dp_sharding_meta(
self,
input_shape: Tuple,
other_shape: Tuple,
batch_dim: int,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model' # pylint: disable=unused-argument
) -> ShardingMeta:
"""get_dp_sharding_meta"""
ElementwiseShardingMetaGenerator._is_supported(input_shape, other_shape, batch_dim)
dp_size, dp_mesh_axis = _get_mesh_info(global_shard_resource().dp_resource)
assert input_shape[batch_dim] % dp_size == 0, \
f"The dimension of batch in input_shape should be a multiple of data parallelism " \
f"size, but got {input_shape[batch_dim]=} and {dp_size=}."
input_new_shape = (*input_shape[:batch_dim], dp_size, -1, *input_shape[batch_dim + 1:])
in_axes = [{batch_dim: dp_axis_name}]
input_new_shapes = [input_new_shape]
if other_shape is not None:
input_new_shapes.append(other_shape)
in_axes.append({})
return ShardingMeta(tuple(in_axes), ({
batch_dim: dp_axis_name
}), {dp_axis_name: dp_mesh_axis}, input_new_shapes, [input_shape])
def get_tp_col_sharding_meta(
self,
input_shape: Tuple,
other_shape: Tuple,
batch_dim: int, # pylint: disable=unused-argument
dp_axis_name: str = 'data', # pylint: disable=unused-argument
tp_axis_name: str = 'model' # pylint: disable=unused-argument
) -> ShardingMeta:
"""get_tp_col_sharding_meta"""
ElementwiseShardingMetaGenerator._is_supported(input_shape, other_shape, 0)
in_axes = [{}]
input_new_shapes = [input_shape]
if other_shape is not None:
in_axes.append({})
input_new_shapes.append(other_shape)
return ShardingMeta(tuple(in_axes), ({}), {}, input_new_shapes, [input_shape])
def get_tp_row_sharding_meta(
self,
input_shape: Tuple,
other_shape: Tuple,
batch_dim: int, # pylint: disable=unused-argument
dp_axis_name: str = 'data', # pylint: disable=unused-argument
tp_axis_name: str = 'model') -> ShardingMeta:
"""get_tp_row_sharding_meta"""
ElementwiseShardingMetaGenerator._is_supported(input_shape, other_shape, 0)
tp_size, tp_mesh_axis = _get_mesh_info(global_shard_resource().tp_resource)
assert input_shape[-1] % tp_size == 0, \
f"The last dimension in input_shape should be a multiple of tensor parallelism size," \
f" but got {input_shape[-1]=} and {tp_size=}."
input_new_shape = (*input_shape[:-1], tp_size, -1)
in_axes = [{
# "len(a_new_shape)-2" is the index to tp_size in a_new_shape
len(input_new_shape) - 2:
tp_axis_name
}]
input_new_shapes = [input_new_shape]
if other_shape is not None:
assert other_shape[0] % tp_size == 0, \
f"The first dimension in other_shape should be a multiple of tensor parallelism size," \
f" but got {other_shape[0]=} and {tp_size=}."
other_new_shape = (tp_size, -1)
in_axes.append({0: tp_axis_name})
input_new_shapes.append(other_new_shape)
return ShardingMeta(tuple(in_axes), ({
len(input_new_shape) - 2: tp_axis_name
}), {tp_axis_name: tp_mesh_axis}, input_new_shapes, [input_shape])
def get_dp_tp_col_sharding_meta(self,
input_shape: Tuple,
other_shape: Tuple,
batch_dim: int,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""get_dp_tp_col_sharding_meta"""
return self.get_dp_sharding_meta(input_shape, other_shape, batch_dim, dp_axis_name,
tp_axis_name)
def get_dp_tp_row_sharding_meta(self,
input_shape: Tuple,
other_shape: Tuple,
batch_dim: int,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""get_dp_tp_row_sharding_meta"""
ElementwiseShardingMetaGenerator._is_supported(input_shape, other_shape, batch_dim)
dp_size, dp_mesh_axis = _get_mesh_info(global_shard_resource().dp_resource)
tp_size, tp_mesh_axis = _get_mesh_info(global_shard_resource().tp_resource)
assert input_shape[batch_dim] % dp_size == 0, \
f"The dimension of batch in input_shape should be a multiple of data parallelism" \
f"size, but got {input_shape[batch_dim]=} and {dp_size=}."
assert input_shape[-1] % tp_size == 0, \
f"The last dimension in input_shape should be a multiple of tensor parallelism size," \
f" but got {input_shape[-1]=} and {tp_size=}."
input_new_shape = (*input_shape[:batch_dim], dp_size, -1, *input_shape[batch_dim + 1:-1],
tp_size, input_shape[-1] // tp_size)
in_axes = [{
batch_dim:
dp_axis_name,
# "len(a_new_shape)-2" is the index to tp_size in a_new_shape
len(input_new_shape) - 2:
tp_axis_name
}]
input_new_shapes = [input_new_shape]
other_new_shape = other_shape
if other_shape is not None:
assert other_shape[0] % tp_size == 0, \
f"The first dimension in other_shape should be a multiple of tensor parallelism size," \
f" but got {other_shape[0]=} and {tp_size=}."
other_new_shape = (tp_size, -1)
in_axes.append({0: tp_axis_name})
input_new_shapes.append(other_new_shape)
return ShardingMeta(tuple(in_axes), ({
batch_dim: dp_axis_name,
len(input_new_shape) - 2: tp_axis_name
}), {
dp_axis_name: dp_mesh_axis,
tp_axis_name: tp_mesh_axis
}, input_new_shapes, [input_shape])
@staticmethod
def _is_supported(input_shape: Tuple, other_shape: Tuple, batch_dim: int):
if other_shape is not None:
assert len(other_shape) == 1, "Only support 1 dimension of other_shapes currently."
assert input_shape[-1] == other_shape[0], \
f"input_shape[-1] should equal to oshape[0], " \
f"but got {input_shape[-1]} and {other_shape[0]}."
assert batch_dim < len(input_shape)-1, \
"batch_dim cannot be the latest dim"
class SoftmaxShardingMetaGenerator(ShardingMetaGenerator):
"""
SoftmaxShardingMetaGenerator
"""
def get_dp_sharding_meta(
self,
input_shape: Tuple,
dp_dim: int = 0,
tp_dim: int = 1,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model' # pylint: disable=unused-argument
) -> ShardingMeta:
"""get_dp_sharding_meta"""
SoftmaxShardingMetaGenerator._is_supported(input_shape, dp_dim, tp_dim)
dp_size, dp_mesh_axis = _get_mesh_info(global_shard_resource().dp_resource)
assert input_shape[dp_dim] % dp_size == 0, \
f"The dimension of batch in input_shape should be a multiple of data parallelism " \
f"size, but got {input_shape[dp_dim]=} and {dp_size=}."
input_new_shape = (*input_shape[:dp_dim], dp_size, -1, *input_shape[dp_dim + 1:])
in_axes = [{dp_dim: dp_axis_name}]
input_new_shapes = [input_new_shape]
out_axes = in_axes[0]
return ShardingMeta(tuple(in_axes), out_axes, {dp_axis_name: dp_mesh_axis},
input_new_shapes, [input_shape])
def get_tp_col_sharding_meta(self,
input_shape: Tuple,
dp_dim: int = 0,
tp_dim: int = 1,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""get_tp_col_sharding_meta"""
return SoftmaxShardingMetaGenerator._get_tp_sharding_meta(input_shape, dp_dim, tp_dim,
dp_axis_name, tp_axis_name)
def get_tp_row_sharding_meta(self,
input_shape: Tuple,
dp_dim: int = 0,
tp_dim: int = 1,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""get_tp_row_sharding_meta"""
return SoftmaxShardingMetaGenerator._get_tp_sharding_meta(input_shape, dp_dim, tp_dim,
dp_axis_name, tp_axis_name)
def get_dp_tp_col_sharding_meta(self,
input_shape: Tuple,
dp_dim: int = 0,
tp_dim: int = 1,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""get_dp_tp_col_sharding_meta"""
return SoftmaxShardingMetaGenerator._get_dptp_sharding_meta(input_shape, dp_dim, tp_dim,
dp_axis_name, tp_axis_name)
def get_dp_tp_row_sharding_meta(self,
input_shape: Tuple,
dp_dim: int = 0,
tp_dim: int = 1,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""get_dp_tp_row_sharding_meta"""
return SoftmaxShardingMetaGenerator._get_dptp_sharding_meta(input_shape, dp_dim, tp_dim,
dp_axis_name, tp_axis_name)
@staticmethod
def _is_supported(input_shape: Tuple, dp_dim: int, tp_dim: int):
assert len(input_shape) == 4
assert dp_dim == 0
assert tp_dim == 1
@staticmethod
def _get_tp_sharding_meta(
input_shape: Tuple,
dp_dim: int = 0,
tp_dim: int = 1,
dp_axis_name: str = 'data', # pylint: disable=unused-argument
tp_axis_name: str = 'model' # pylint: disable=unused-argument
) -> ShardingMeta:
"""get_tp_sharding_meta"""
SoftmaxShardingMetaGenerator._is_supported(input_shape, dp_dim, tp_dim)
tp_size, tp_mesh_axis = _get_mesh_info(global_shard_resource().tp_resource)
assert input_shape[tp_dim] % tp_size == 0, \
f"The dimension of tensor parallel in input_shape should be a multiple of data " \
f"parallelism size, but got {input_shape[tp_dim]=} and {tp_size=}."
input_new_shape = (*input_shape[:tp_dim], tp_size, -1, *input_shape[tp_dim + 1:])
in_axes = [{tp_dim: tp_axis_name}]
input_new_shapes = [input_new_shape]
out_axes = in_axes[0]
return ShardingMeta(tuple(in_axes), out_axes, {tp_axis_name: tp_mesh_axis},
input_new_shapes, [input_shape])
@staticmethod
def _get_dptp_sharding_meta(input_shape: Tuple,
dp_dim: int = 0,
tp_dim: int = 1,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""get_dp_tp_sharding_meta"""
SoftmaxShardingMetaGenerator._is_supported(input_shape, dp_dim, tp_dim)
dp_size, dp_mesh_axis = _get_mesh_info(global_shard_resource().dp_resource)
tp_size, tp_mesh_axis = _get_mesh_info(global_shard_resource().tp_resource)
assert input_shape[dp_dim] % dp_size == 0, \
f"The dimension of batch in input_shape should be a multiple of data parallelism " \
f"size, but got {input_shape[dp_dim]=} and {dp_size=}."
assert input_shape[tp_dim] % tp_size == 0, \
f"The dimension of tensor parallel in input_shape should be a multiple of data " \
f"parallelism size, but got {input_shape[tp_dim]=} and {tp_size=}."
input_new_shape = (*input_shape[:dp_dim], dp_size, input_shape[dp_dim] // dp_size,
*input_shape[dp_dim + 1:tp_dim], tp_size, input_shape[tp_dim] // tp_size,
*input_shape[tp_dim + 1:])
in_axes = [{dp_dim: dp_axis_name, tp_dim + 1: tp_axis_name}]
input_new_shapes = [input_new_shape]
out_axes = in_axes[0]
return ShardingMeta(tuple(in_axes), out_axes, {
dp_axis_name: dp_mesh_axis,
tp_axis_name: tp_mesh_axis
}, input_new_shapes, [input_shape])
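# A worked example of the DP+TP case above: a softmax input of shape (B, N, S, S) with
# dp_dim=0 and tp_dim=1 is reshaped to (dp_size, B // dp_size, tp_size, N // tp_size, S, S),
# and in_axes/out_axes become {0: 'data', 2: 'model'} (tp_dim shifts by one because the
# batch dimension was expanded first).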
def get_fp8_meta_sharding_meta(stype: ShardingType,
num_of_meta: int,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""
get_fp8_meta_sharding_meta
"""
return FP8MetaShardingMetaGenerator().get_sharding_meta(stype, num_of_meta, dp_axis_name,
tp_axis_name)
def get_dot_sharding_meta(stype: ShardingType,
a_shape: Tuple,
b_shape: Tuple,
batch_dim_of_a: int,
model_dim_of_a: int,
model_dim_of_b: int,
contracting_dims: Tuple[Sequence[int], Sequence[int]] = ((-1,), (0,)),
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""
get_dot_sharding_meta
"""
if stype in (ShardingType.TP_ROW, ShardingType.DP_TP_ROW):
assert model_dim_of_b <= max(contracting_dims[1]), \
f"The dimension of model parallelism in b_shape should be smaller than the max of" \
f" contracting_dims[1], but got {model_dim_of_b=} and {contracting_dims[1]=}."
if stype in (ShardingType.TP_COL, ShardingType.DP_TP_COL):
assert model_dim_of_b > max(contracting_dims[1]), \
f"The dimension of model parallelism in b_shape should be larger than the max of" \
f" contracting_dims[1], but got {model_dim_of_b=} and {contracting_dims[1]=}."
return DotShardingMetaGenerator().get_sharding_meta(stype, a_shape, b_shape, batch_dim_of_a,
model_dim_of_a, model_dim_of_b,
contracting_dims, dp_axis_name,
tp_axis_name)
def get_elementwise_sharding_meta(stype: ShardingType,
input_shape: Tuple,
other_shape: Tuple,
batch_dim: int,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""
get_elementwise_sharding_meta
"""
return ElementwiseShardingMetaGenerator().get_sharding_meta(stype, input_shape, other_shape,
batch_dim, dp_axis_name,
tp_axis_name)
def get_softmax_sharding_meta(stype: ShardingType,
input_shape: Tuple,
dp_dim: int = 0,
tp_dim: int = 1,
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""
get_softmax_sharding_meta
"""
return SoftmaxShardingMetaGenerator().get_sharding_meta(stype, input_shape, dp_dim, tp_dim,
dp_axis_name, tp_axis_name)
def get_fused_attn_sharding_meta(stype: ShardingType,
input_shapes: Tuple[Tuple[int, ...]],
output_shapes: Tuple[Tuple[int, ...]],
dp_dims: Tuple[Tuple[int, ...]],
tp_dims: Tuple[Tuple[int, ...]],
dp_axis_name: str = 'data',
tp_axis_name: str = 'model') -> ShardingMeta:
"""
    get_fused_attn_sharding_meta
"""
return FusedAttnShardingMetaGenerator().get_sharding_meta(stype, input_shapes, output_shapes,
dp_dims, tp_dims, dp_axis_name,
tp_axis_name)
def extend_fsdp_sharding_meta(sharding_meta: ShardingMeta,
weight_fsdp_dim_map: Dict[int, int]) -> Tuple[ShardingMeta, str]:
"""
    Extend the given ShardingMeta to be compatible with the FSDP (ZeRO-3) sharding pattern.
.. note::
The extending helper assumes the first shape in sharding_meta.input_shapes
        corresponds to the input tensor. Please make sure that index 0 is in
`weight_fsdp_dim_map`.
Parameters
----------
sharding_meta : ShardingMeta
the sharding meta object to extend with FSDP.
weight_fsdp_dim_map: Dict[int, int]
        A dict whose keys are indices into sharding_meta.input_shapes and whose values are
        the dimensions along which to extend FSDP sharding.
Returns
-------
updated_sharding_meta : ShardingMeta
        a sharding_meta with the FSDP extension.
fsdp_axis_name: str
The name of FSDP named axis for further xmap projection.
"""
assert 0 in weight_fsdp_dim_map, \
"0-idx is required to be in 'weight_fsdp_dim_map' for the input."
mst = infer_major_sharding_type()
if mst is MajorShardingType.SINGLE:
return sharding_meta, ""
gsr = global_shard_resource()
dp_mesh_axis = gsr.dp_resource
fsdp_mesh_axis = gsr.fsdp_resource
if fsdp_mesh_axis == dp_mesh_axis:
return sharding_meta, ""
if fsdp_mesh_axis is None:
return sharding_meta, ""
fsdp_dim_size, _ = _get_mesh_info(fsdp_mesh_axis)
fsdp_axis_name = "fsdp"
def get_idx_to_extend(sharded_indices, target_idx):
idx_to_extend = target_idx
for i in sharded_indices:
if i <= target_idx:
idx_to_extend += 1
return idx_to_extend
def extend_exist_sharding(idx, shape):
remain_size = shape[idx]
assert remain_size == -1 or remain_size % fsdp_dim_size == 0
remain_size = remain_size // fsdp_dim_size
new_shape = tuple([*shape[:idx], fsdp_dim_size, remain_size, *shape[idx + 1:]])
return new_shape
new_input_shapes = []
new_in_axes = []
for i, shape in enumerate(sharding_meta.input_shapes):
idx_to_extend = -1
if i == 0: # Assume first shape corresponds to input
input_dp_dim = weight_fsdp_dim_map[i]
# idx_to_extend = input_dp_dim + 1 if is_dp_enabled(mst) else input_dp_dim
idx_to_extend = get_idx_to_extend(list(sharding_meta.in_axes[i].keys()), input_dp_dim)
new_shape = extend_exist_sharding(idx_to_extend, shape)
# assume one output only and have the same batch sharding like input
assert isinstance(sharding_meta.out_axes, dict)
new_out_axes = {}
for key in sharding_meta.out_axes:
if key < idx_to_extend:
new_out_axes[key] = sharding_meta.out_axes[key]
else:
new_out_axes[key + 1] = sharding_meta.out_axes[key]
new_out_axes[idx_to_extend] = fsdp_axis_name
sharding_meta.out_axes = new_out_axes
else:
new_shape = shape
if i in weight_fsdp_dim_map:
idx_to_extend = get_idx_to_extend(list(sharding_meta.in_axes[i].keys()),
weight_fsdp_dim_map[i])
if weight_fsdp_dim_map[i] in sharding_meta.in_axes[i]:
new_shape = extend_exist_sharding(idx_to_extend, shape)
else:
assert shape[idx_to_extend] % fsdp_dim_size == 0
remain_dim_size = shape[idx_to_extend] // fsdp_dim_size
new_shape = tuple([
*shape[:idx_to_extend], fsdp_dim_size, remain_dim_size,
*shape[idx_to_extend + 1:]
])
if idx_to_extend >= 0:
new_ia = {}
for key in sharding_meta.in_axes[i]:
if key < idx_to_extend:
new_ia[key] = sharding_meta.in_axes[i][key]
else:
new_ia[key + 1] = sharding_meta.in_axes[i][key]
new_ia[idx_to_extend] = fsdp_axis_name
else:
new_ia = sharding_meta.in_axes[i]
new_input_shapes.append(new_shape)
new_in_axes.append(new_ia)
sharding_meta.input_shapes = tuple(new_input_shapes)
sharding_meta.in_axes = tuple(new_in_axes)
sharding_meta.axis_resources[fsdp_axis_name] = fsdp_mesh_axis
return sharding_meta, fsdp_axis_name
def xmap_runner(func: Callable, in_axes: Tuple[Dict, ...],
out_axes: Union[Dict, Tuple[str, ...], Tuple[Union[Dict, Tuple], ...]],
axis_resources: Dict, inputs: Tuple):
"""
xmap_runner
"""
assert isinstance(inputs, tuple)
assert isinstance(in_axes, tuple)
mesh = _PXLA_THREAD_RESOURCES.env.physical_mesh
fake_in_axes = {}
fake_axis_resource = {}
# Fake related setup is a workaround to "NotImplementedError:
# Collectives in manually partitioned computations are only supported
# when all mesh axes are partitioned manually (no partial automatic
# sharding). Make sure that you mention all mesh axes in axis_resources!"
fake_idx_counter = 0
for mesh_axis_names in mesh.axis_names:
if mesh_axis_names not in axis_resources.values():
fake_idx_counter += 1
fake_axis_name = f"{mesh_axis_names}_fake_{fake_idx_counter}"
fake_in_axes[fake_idx_counter] = fake_axis_name
fake_axis_resource[fake_axis_name] = mesh_axis_names
fake_input = jnp.zeros(tuple(64 for _ in range(len(fake_in_axes) + 1)))
xmapped = xmap(lambda func_input, _: func(*func_input),
in_axes=(in_axes, fake_in_axes),
out_axes=out_axes,
axis_resources={
**axis_resources,
**fake_axis_resource
})
output = xmapped(inputs, fake_input)
return output
| TransformerEngine-main | transformer_engine/jax/sharding.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""JAX layernorm modules"""
from typing import Tuple, Sequence
from functools import partial, reduce
import operator
import jax
import jax.numpy as jnp
from transformer_engine_jax import DType as TEDType
from .cpp_extensions import cast_transpose, gemm, jax_dtype_to_te_dtype
from .cpp_extensions import transpose
from .cpp_extensions import rmsnorm_fwd, rmsnorm_fwd_fp8, rmsnorm_bwd
from .cpp_extensions import layernorm_fwd, layernorm_fwd_fp8, layernorm_bwd
from .fp8 import FP8Helper, FP8GemmPackage
from .sharding import ShardingType, get_elementwise_sharding_meta
from .sharding import get_dot_sharding_meta, get_fp8_meta_sharding_meta
from .sharding import is_dp_enabled, is_tp_enabled, merge_axis_resources
from .sharding import xmap_runner, extend_fsdp_sharding_meta
jax.config.update('experimental_xmap_spmd_lowering', True)
jax.config.update('experimental_xmap_spmd_lowering_manual', True)
def canonicalize_layernorm_type(x):
'''
Canonicalize the layernorm type
'''
canonicalized = x.lower().strip().replace('-', '').replace('_', '')
assert canonicalized in ['layernorm', 'rmsnorm']
return canonicalized
def layernorm(inputs: jnp.ndarray,
gamma: jnp.ndarray,
beta: jnp.ndarray,
layernorm_type: str,
zero_centered_gamma: bool = False,
epsilon: float = 1e-6,
sharding_type: ShardingType = ShardingType.SINGLE,
dp_dim_index: int = 0):
"""
Layernorm wrapper
"""
assert sharding_type not in (ShardingType.TP_ROW, ShardingType.DP_TP_ROW), \
"layernorm does not support row-split tensor parallelism currently."
layernorm_type = canonicalize_layernorm_type(layernorm_type)
if layernorm_type == 'rmsnorm':
assert beta is None, "beta should be None if layernorm_type is 'rmsnorm'"
assert not zero_centered_gamma, "zero_centered_gamma is not supported " \
"if layernorm_type is 'rmsnorm'"
if sharding_type is ShardingType.SINGLE:
output = _layernorm(inputs,
gamma,
beta,
layernorm_type=layernorm_type,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon,
sharding_type=sharding_type,
dp_axis_name="",
fsdp_axis_name="")
else:
dp_axis_name = "batch"
tp_axis_name = "model"
sharding_meta = get_elementwise_sharding_meta(sharding_type, inputs.shape, gamma.shape,
dp_dim_index, dp_axis_name, tp_axis_name)
sharding_meta, fsdp_axis_name = extend_fsdp_sharding_meta(sharding_meta, {0: dp_dim_index})
inputs_ = jnp.reshape(inputs, sharding_meta.input_shapes[0]) # 0 for input
gamma_ = jnp.reshape(gamma, sharding_meta.input_shapes[1]) # 1 for gamma
beta_ = beta
beta_in_axis = {}
if beta_ is not None:
beta_ = jnp.reshape(beta_, sharding_meta.input_shapes[1]) # 1 for beta
beta_in_axis = sharding_meta.in_axes[1]
in_axes = (*sharding_meta.in_axes, beta_in_axis)
partial_ln = partial(_layernorm,
layernorm_type=layernorm_type,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon,
sharding_type=sharding_type,
dp_axis_name=dp_axis_name,
fsdp_axis_name=fsdp_axis_name)
output = xmap_runner(partial_ln, in_axes, sharding_meta.out_axes,
sharding_meta.axis_resources, (inputs_, gamma_, beta_))
output = jnp.reshape(output, sharding_meta.output_shapes[0])
return output
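# Illustrative sketch (not part of the original module): a minimal single-device call to the
# wrapper above using RMSNorm semantics (beta must be None in that case). Shapes are hypothetical.
def _example_layernorm_usage():
    """Hypothetical usage sketch; for illustration only."""
    x = jnp.ones((8, 128, 512), dtype=jnp.float32)
    gamma = jnp.ones((512,), dtype=jnp.float32)
    return layernorm(x, gamma, None, layernorm_type='rmsnorm',
                     sharding_type=ShardingType.SINGLE)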
@partial(jax.custom_vjp, nondiff_argnums=(3, 4, 5, 6, 7, 8))
def _layernorm(x, gamma, beta, layernorm_type, zero_centered_gamma, epsilon, sharding_type,
dp_axis_name, fsdp_axis_name):
output, _ = _layernorm_fwd(x, gamma, beta, layernorm_type, zero_centered_gamma, epsilon,
sharding_type, dp_axis_name, fsdp_axis_name)
return output
def _layernorm_fwd(
x,
gamma,
beta,
layernorm_type,
zero_centered_gamma,
epsilon,
sharding_type, # pylint: disable=unused-argument
dp_axis_name, # pylint: disable=unused-argument
fsdp_axis_name # pylint: disable=unused-argument
):
if layernorm_type == 'layernorm':
output, mu, rsigma = layernorm_fwd(x, gamma, beta, zero_centered_gamma, epsilon)
else:
assert not zero_centered_gamma, "zero_centered_gamma is not supported " \
"if layernorm_type is 'rmsnorm'"
output, rsigma = rmsnorm_fwd(x, gamma, epsilon)
mu = None
return output, (mu, rsigma, x, gamma)
def _layernorm_bwd(layernorm_type, zero_centered_gamma, epsilon, sharding_type, dp_axis_name,
fsdp_axis_name, ctx, g):
mu, rsigma, x, gamma = ctx
if layernorm_type == 'layernorm':
grad_input, grad_gamma, grad_beta = layernorm_bwd(g,
mu,
rsigma,
x,
gamma,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon)
else:
assert not zero_centered_gamma, "zero_centered_gamma is not supported " \
"if layernorm_type is 'rmsnorm'"
grad_input, grad_gamma = rmsnorm_bwd(g, rsigma, x, gamma, epsilon=epsilon)
grad_beta = None
if is_dp_enabled(sharding_type.value[0]):
grad_gamma = jax.lax.psum(grad_gamma, dp_axis_name)
if grad_beta is not None:
grad_beta = jax.lax.psum(grad_beta, dp_axis_name)
if len(fsdp_axis_name) > 0:
grad_gamma = jax.lax.psum(grad_gamma, fsdp_axis_name)
if grad_beta is not None:
grad_beta = jax.lax.psum(grad_beta, fsdp_axis_name)
return grad_input, grad_gamma, grad_beta
_layernorm.defvjp(_layernorm_fwd, _layernorm_bwd)
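# Note on the custom_vjp wiring above: the arguments listed in nondiff_argnums (indices 3-8 of
# _layernorm) are handed to the backward rule ahead of the saved residuals and the incoming
# cotangent, which is why _layernorm_bwd takes (layernorm_type, ..., fsdp_axis_name, ctx, g).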
def layernorm_fp8_dot(fp8_gemm_pkg: FP8GemmPackage,
gamma: jnp.ndarray,
beta: jnp.ndarray,
layernorm_type: str,
fwd_dtype: TEDType,
bwd_dtype: TEDType,
contracting_dims: Tuple[Sequence[int], Sequence[int]] = ((-1,), (0,)),
zero_centered_gamma: bool = False,
epsilon: float = 1e-6,
sharding_type: ShardingType = ShardingType.SINGLE,
dp_dim_index: int = 0) -> jnp.ndarray:
"""
LN + fp8 dot fusion wrapper
"""
assert sharding_type not in (ShardingType.TP_ROW, ShardingType.DP_TP_ROW), \
"layernorm_fp8_dot does not support row-split tensor parallelism currently."
layernorm_type = canonicalize_layernorm_type(layernorm_type)
if layernorm_type == 'rmsnorm':
assert beta is None, "beta should be None if layernorm_type is 'rmsnorm'"
assert not zero_centered_gamma, "zero_centered_gamma is not supported " \
"if layernorm_type is 'rmsnorm'"
assert fp8_gemm_pkg.num_of_gemm == 1
inputs = fp8_gemm_pkg.inputs
kernel = fp8_gemm_pkg.kernels[0]
fp8_max = fp8_gemm_pkg.fp8_max
amax = fp8_gemm_pkg.amax
scale = fp8_gemm_pkg.scale
scale_inv = fp8_gemm_pkg.scale_inv
if sharding_type is ShardingType.SINGLE:
output = _layernorm_fp8_dot(inputs,
kernel,
gamma,
beta,
fp8_max,
amax,
scale,
scale_inv,
layernorm_type,
fwd_dtype,
bwd_dtype,
contracting_dims,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon,
sharding_type=sharding_type,
dp_axis_name="",
tp_axis_name="",
fsdp_axis_name="")
else:
dp_axis_name = "batch"
tp_axis_name = "model"
ln_sharding_meta = get_elementwise_sharding_meta(sharding_type, inputs.shape, gamma.shape,
dp_dim_index, dp_axis_name, tp_axis_name)
ln_sharding_meta, _ = extend_fsdp_sharding_meta(ln_sharding_meta, {0: dp_dim_index})
inputs_ = jnp.reshape(inputs, ln_sharding_meta.input_shapes[0]) # 0 for input
gamma_ = jnp.reshape(gamma, ln_sharding_meta.input_shapes[1]) # 1 for gamma
beta_ = beta
beta_in_axis = {}
if beta_ is not None:
beta_ = jnp.reshape(beta_, ln_sharding_meta.input_shapes[1]) # 1 for beta
beta_in_axis = ln_sharding_meta.in_axes[1]
kernel_tp_index = None
# TODO (Ming Huang): Should we add a new argument to support general sharding to kernel? # pylint: disable=fixme
if sharding_type in (ShardingType.TP_COL, ShardingType.DP_TP_COL):
kernel_tp_index = len(kernel.shape) - 1
elif sharding_type in (ShardingType.TP_ROW, ShardingType.DP_TP_ROW):
kernel_tp_index = 0
input_tp_index = len(inputs.shape) - 1
dot_sharding_meta = get_dot_sharding_meta(sharding_type, inputs.shape, kernel.shape,
dp_dim_index, input_tp_index, kernel_tp_index,
contracting_dims, dp_axis_name, tp_axis_name)
dot_sharding_meta, fsdp_axis_name = extend_fsdp_sharding_meta(dot_sharding_meta,
{0: dp_dim_index})
kernel_ = jnp.reshape(kernel, dot_sharding_meta.input_shapes[1]) # 1 for kernel
num_of_fp8_meta_kind = 4 # fp8_max, amax, scale, scale_inv
fp8_sharding_meta = get_fp8_meta_sharding_meta(sharding_type, num_of_fp8_meta_kind,
dp_axis_name, tp_axis_name)
axis_resource = merge_axis_resources([
ln_sharding_meta.axis_resources, dot_sharding_meta.axis_resources,
fp8_sharding_meta.axis_resources
])
partial_ln_fp8_dot = partial(_layernorm_fp8_dot,
layernorm_type=layernorm_type,
fwd_dtype=fwd_dtype,
bwd_dtype=bwd_dtype,
contracting_dims=contracting_dims,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon,
sharding_type=sharding_type,
dp_axis_name=dp_axis_name,
tp_axis_name=tp_axis_name,
fsdp_axis_name=fsdp_axis_name)
# input, kernel, gamma, beta, fp8_metas
in_axes = (ln_sharding_meta.in_axes[0], dot_sharding_meta.in_axes[1],
ln_sharding_meta.in_axes[1], beta_in_axis, *fp8_sharding_meta.in_axes)
output = xmap_runner(partial_ln_fp8_dot, in_axes, dot_sharding_meta.out_axes, axis_resource,
(inputs_, kernel_, gamma_, beta_, fp8_max, amax, scale, scale_inv))
output = jnp.reshape(output, dot_sharding_meta.output_shapes[0])
return output
@partial(jax.custom_vjp, nondiff_argnums=(8, 9, 10, 11, 12, 13, 14, 15, 16, 17))
def _layernorm_fp8_dot(inputs: jnp.ndarray, kernel: jnp.ndarray, gamma: jnp.ndarray,
beta: jnp.ndarray, fp8_maxs: jnp.ndarray, amax: jnp.ndarray,
scale: jnp.ndarray, scale_inv: jnp.ndarray, layernorm_type: str,
fwd_dtype: TEDType, bwd_dtype: TEDType,
contracting_dims: Tuple[Sequence[int], Sequence[int]],
zero_centered_gamma: bool, epsilon: float, sharding_type: ShardingType,
dp_axis_name: str, tp_axis_name: str, fsdp_axis_name: str) -> jnp.ndarray:
output, _ = _layernorm_fp8_dot_fwd(inputs, kernel, gamma, beta, fp8_maxs, amax, scale,
scale_inv, layernorm_type, fwd_dtype, bwd_dtype,
contracting_dims, zero_centered_gamma, epsilon,
sharding_type, dp_axis_name, tp_axis_name, fsdp_axis_name)
return output
def _layernorm_fp8_dot_fwd(
inputs,
kernel,
gamma,
beta,
fp8_maxs,
amax,
scale,
scale_inv,
layernorm_type,
fwd_dtype,
bwd_dtype, # pylint: disable=unused-argument
contracting_dims,
zero_centered_gamma,
epsilon,
sharding_type,
dp_axis_name, # pylint: disable=unused-argument
tp_axis_name,
fsdp_axis_name): # pylint: disable=unused-argument
lhs_contracting_dims, rhs_contracting_dims = contracting_dims
input_shape_pre = inputs.shape[:min(lhs_contracting_dims)]
input_shape_suf = inputs.shape[min(lhs_contracting_dims):]
kernel_shape_pre = kernel.shape[:max(rhs_contracting_dims) + 1]
kernel_shape_suf = kernel.shape[max(rhs_contracting_dims) + 1:]
input_contracting_size = reduce(operator.mul, input_shape_suf)
kernel_contracting_size = reduce(operator.mul, kernel_shape_pre)
assert input_contracting_size == kernel_contracting_size
amax = FP8Helper.update_amax_history(amax)
gemm_input_idx, gemm_kernel_idx, _ = FP8Helper.get_fp8_meta_indices(0)
input_amax = amax[gemm_input_idx, 0:1]
input_scale = scale[gemm_input_idx]
input_scale_inv = scale_inv[gemm_input_idx]
if layernorm_type == 'layernorm':
ln_out, mu, rsigma, input_amax = layernorm_fwd_fp8(inputs,
gamma,
beta,
input_amax,
input_scale,
input_scale_inv,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon)
else:
assert not zero_centered_gamma, "zero_centered_gamma is not supported " \
"if layernorm_type is 'rmsnorm'"
ln_out, rsigma, input_amax = rmsnorm_fwd_fp8(inputs,
gamma,
input_amax,
input_scale,
input_scale_inv,
epsilon=epsilon)
mu = None
assert inputs.shape == ln_out.shape
ln_out_ = jnp.reshape(ln_out, (-1, input_contracting_size))
kernel_ = jnp.reshape(kernel, (kernel_contracting_size, -1))
kernel_amax = amax[gemm_kernel_idx, 0:1]
kernel_scale = scale[gemm_kernel_idx]
kernel_scale_inv = scale_inv[gemm_kernel_idx]
kernel_cast, kernel_cast_trans, kernel_amax = cast_transpose(kernel_, kernel_amax, kernel_scale,
kernel_scale_inv, fwd_dtype)
output = gemm(kernel_cast_trans, kernel_scale_inv, fwd_dtype, True, ln_out_, input_scale_inv,
fwd_dtype, False, jax_dtype_to_te_dtype(inputs.dtype), FP8Helper.FP8_2X_ACC_FPROP)
if sharding_type in (ShardingType.TP_ROW, ShardingType.DP_TP_ROW):
output = jax.lax.psum(output, tp_axis_name)
# (input_shape_pre, input_shape_suf)
# x (kernel_shape_pre, kernel_shape_suf)
# = (input_shape_pre, kernel_shape_suf)
output_shape = input_shape_pre + kernel_shape_suf
output = jnp.reshape(output, output_shape)
ctx = (ln_out_, kernel_cast, fp8_maxs, amax, scale, scale_inv, input_amax, kernel_amax,
inputs.shape, kernel.shape, mu, rsigma, inputs, gamma)
return output, ctx
def _layernorm_fp8_dot_bwd(
layernorm_type,
fwd_dtype,
bwd_dtype,
contracting_dims, # pylint: disable=unused-argument
zero_centered_gamma,
epsilon,
sharding_type,
dp_axis_name,
tp_axis_name,
fsdp_axis_name,
ctx,
g):
ln_out_, kernel_cast, \
fp8_maxs, amax, scale, scale_inv, \
input_amax, kernel_amax, \
inputs_shape, kernel_shape, \
mu, rsigma, inputs, gamma = ctx
gemm_input_idx, gemm_kernel_idx, gemm_grad_idx = \
FP8Helper.get_fp8_meta_indices(0)
grad_amax = amax[gemm_grad_idx, 0:1]
grad_scale = scale[gemm_grad_idx]
grad_scale_inv = scale_inv[gemm_grad_idx]
ln_out_trans = transpose(ln_out_, fwd_dtype)
g = jnp.reshape(g, (ln_out_trans.shape[1], -1))
# cast and transpose the grad_output
grad_cast, grad_cast_trans, grad_amax = cast_transpose(g, grad_amax, grad_scale, grad_scale_inv,
bwd_dtype)
input_scale_inv = scale_inv[gemm_input_idx]
wgrad = gemm(grad_cast_trans, grad_scale_inv, bwd_dtype, True, ln_out_trans, input_scale_inv,
fwd_dtype, False, jax_dtype_to_te_dtype(g.dtype), FP8Helper.FP8_2X_ACC_WGRAD)
kernel_scale_inv = scale_inv[gemm_kernel_idx]
dgrad = gemm(kernel_cast, kernel_scale_inv, fwd_dtype, True, grad_cast, grad_scale_inv,
bwd_dtype, False, jax_dtype_to_te_dtype(g.dtype), FP8Helper.FP8_2X_ACC_DGRAD)
dgrad = jnp.reshape(dgrad, inputs_shape)
if sharding_type in (ShardingType.TP_COL, ShardingType.DP_TP_COL):
dgrad = jax.lax.psum(dgrad, tp_axis_name)
if layernorm_type == 'layernorm':
grad_input, grad_gamma, grad_beta = layernorm_bwd(dgrad,
mu,
rsigma,
inputs,
gamma,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon)
else:
assert not zero_centered_gamma, "zero_centered_gamma is not supported " \
"if layernorm_type is 'rmsnorm'"
grad_input, grad_gamma = rmsnorm_bwd(dgrad, rsigma, inputs, gamma, epsilon=epsilon)
grad_beta = None
amax = amax.at[gemm_input_idx, 0].set(input_amax[0])
amax = amax.at[gemm_kernel_idx, 0].set(kernel_amax[0])
amax = amax.at[gemm_grad_idx, 0].set(grad_amax[0])
if is_dp_enabled(sharding_type.value[0]):
wgrad = jax.lax.psum(wgrad, dp_axis_name)
grad_gamma = jax.lax.psum(grad_gamma, dp_axis_name)
if grad_beta is not None:
grad_beta = jax.lax.psum(grad_beta, dp_axis_name)
amax = jax.lax.pmax(amax, dp_axis_name)
if len(fsdp_axis_name) > 0:
wgrad = jax.lax.psum(wgrad, fsdp_axis_name)
grad_gamma = jax.lax.psum(grad_gamma, fsdp_axis_name)
if grad_beta is not None:
grad_beta = jax.lax.psum(grad_beta, fsdp_axis_name)
amax = jax.lax.pmax(amax, fsdp_axis_name)
if is_tp_enabled(sharding_type.value[0]):
amax = jax.lax.pmax(amax, tp_axis_name)
wgrad = jnp.reshape(wgrad, kernel_shape)
return grad_input, wgrad, \
grad_gamma, grad_beta, \
fp8_maxs, amax, scale, scale_inv
_layernorm_fp8_dot.defvjp(_layernorm_fp8_dot_fwd, _layernorm_fp8_dot_bwd)
| TransformerEngine-main | transformer_engine/jax/layernorm.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""
Helper module for fp8 meta management
"""
from contextlib import contextmanager
from enum import Enum
from typing import Dict, List, Optional, Tuple, Union
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from transformer_engine_jax import DType
from transformer_engine_jax import get_cublasLt_version
from transformer_engine_jax import get_cuda_version, get_device_compute_capability
from transformer_engine.common.recipe import DelayedScaling, Format
from transformer_engine.jax.sharding import global_shard_guard
from transformer_engine.jax.sharding import ShardingResource
_is_fp8_available = None
_reason_for_no_fp8 = ""
Collection = Union[Dict, FrozenDict]
def _check_fp8_support(gpu_id) -> Tuple[bool, str]:
"""Return if fp8 support is available"""
gpu_arch = get_device_compute_capability(gpu_id)
if gpu_arch >= 90: # hopper and above
return True, ""
if gpu_arch < 89: # pre-ada
return False, "Device compute capability 8.9 or higher required for FP8 execution."
if get_cublasLt_version() < 120103:
return False, "CublasLt version 12.1.3.x or higher required for FP8 execution on Ada."
if get_cuda_version() < 12010:
return False, "Cuda version 12.1 or higher required for FP8 execution on Ada."
return True, ""
def is_fp8_available(gpu_id=None) -> Tuple[bool, str]:
"""Return if fp8 support is available"""
if gpu_id is not None:
return _check_fp8_support(gpu_id)
global _is_fp8_available, _reason_for_no_fp8
if _is_fp8_available is None:
_is_fp8_available = True
# JAX doesn't provide the local GPU id.
for local_gpu_id in range(len(jax.local_devices())):
ret, msg = _check_fp8_support(local_gpu_id)
if ret is False:
_is_fp8_available = ret
_reason_for_no_fp8 = msg
break
return _is_fp8_available, _reason_for_no_fp8
def _format2dtypes(format_: Format):
if format_ == Format.E4M3:
return DType.kFloat8E4M3, DType.kFloat8E4M3
if format_ == Format.E5M2:
return DType.kFloat8E5M2, DType.kFloat8E5M2
if format_ == Format.HYBRID:
return DType.kFloat8E4M3, DType.kFloat8E5M2
return DType.kBFloat16, DType.kBFloat16
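# For example, _format2dtypes(Format.HYBRID) returns (DType.kFloat8E4M3, DType.kFloat8E5M2):
# E4M3 for forward tensors and E5M2 for gradients, while any unrecognized format falls back
# to (kBFloat16, kBFloat16).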
class FP8GemmPackage:
"""
A container that contains all required data for
FP8 GEMM
"""
def __init__(
self,
num_of_gemm: int,
inputs: jnp.ndarray,
kernels: List[jnp.ndarray],
fp8_max: jnp.ndarray,
amax: jnp.ndarray,
scale: jnp.ndarray,
scale_inv: jnp.ndarray,
) -> None:
self._num_of_gemm = num_of_gemm
self._inputs = inputs
assert len(kernels) == self._num_of_gemm
self._kernels = kernels
total_num_of_meta = self._num_of_gemm * FP8Helper.NUM_META_PER_GEMM
assert fp8_max.shape[0] == total_num_of_meta
self._fp8_max = fp8_max
assert amax.shape[0] == total_num_of_meta
self._amax = amax
assert scale.shape[0] == total_num_of_meta
self._scale = scale
assert scale_inv.shape[0] == total_num_of_meta
self._scale_inv = scale_inv
@property
def num_of_gemm(self) -> int:
"""
num_of_gemm of this package
"""
return self._num_of_gemm
@property
def inputs(self) -> jnp.ndarray:
"""
inputs of this package
"""
return self._inputs
@property
def kernels(self) -> List[jnp.ndarray]:
"""
kernels of this package
"""
return self._kernels
@property
def fp8_max(self) -> jnp.ndarray:
"""
fp8_max of this package
"""
return self._fp8_max
@property
def amax(self) -> jnp.ndarray:
"""
amax of this package
"""
return self._amax
@property
def scale(self) -> jnp.ndarray:
"""
scale of this package
"""
return self._scale
@property
def scale_inv(self) -> jnp.ndarray:
"""
scale_inv of this package
"""
return self._scale_inv
class AmaxComputeAlgo(Enum):
"""AmaxComputeAlgo."""
MAX = "max"
MOST_RECENT = "most_recent"
class FP8Helper:
"""
FP8 helper to manage the FP8 meta
"""
INITIALIZED = False
MARGIN: float = 0.0
FP8_FORMAT: Format = Format.HYBRID
FWD_DTYPE: DType = DType.kFloat8E4M3
BWD_DTYPE: DType = DType.kFloat8E5M2
UPDATE_FP8META_INTERVAL: int = 1
AMAX_HISTORY_LEN: int = 1024
AMAX_COMPUTE_ALGO: AmaxComputeAlgo = AmaxComputeAlgo.MAX
NUM_META_PER_GEMM: int = 3
INPUT_META_IDX_PER_GEMM: int = 0
KERNEL_META_IDX_PER_GEMM: int = 1
GRAD_META_IDX_PER_GEMM: int = 2
FP8_COLLECTION_NAME: str = "fp8_meta_collection"
FP8_AMAX_NAME: str = "fp8_meta_amax"
FP8_SCALE_NAME: str = "fp8_meta_scale"
FP8_SCALE_INV_NAME: str = "fp8_meta_scale_inv"
FP8_MAX_NAME: str = "fp8_max"
FP8_2X_ACC_FPROP: bool = False
FP8_2X_ACC_DGRAD: bool = True
FP8_2X_ACC_WGRAD: bool = True
@staticmethod
def is_fp8_enabled():
"""
        Indicate whether fp8 training is enabled.
"""
return FP8Helper.INITIALIZED
@staticmethod
def initialize(margin: float = 0.0,
fp8_format: Format = Format.HYBRID,
update_fp8meta_interval: int = 1,
amax_history_len: int = 1,
amax_compute_algo: AmaxComputeAlgo = AmaxComputeAlgo.MAX) -> None:
"""
Initialize the FP8 meta
"""
FP8Helper.INITIALIZED = True
FP8Helper.MARGIN = margin
FP8Helper.FP8_FORMAT = fp8_format
FP8Helper.FWD_DTYPE, FP8Helper.BWD_DTYPE = \
_format2dtypes(FP8Helper.FP8_FORMAT)
FP8Helper.UPDATE_FP8META_INTERVAL = update_fp8meta_interval
FP8Helper.AMAX_HISTORY_LEN = amax_history_len
FP8Helper.AMAX_COMPUTE_ALGO = amax_compute_algo
FP8Helper.FP8_2X_ACC_FPROP = False
FP8Helper.FP8_2X_ACC_DGRAD = True
FP8Helper.FP8_2X_ACC_WGRAD = True
@staticmethod
def finalize() -> None:
"""
FP8 helper finalize
"""
FP8Helper.INITIALIZED = False
FP8Helper.MARGIN = 0.0
FP8Helper.FP8_FORMAT = Format.HYBRID
FP8Helper.FWD_DTYPE = DType.kFloat8E4M3
FP8Helper.BWD_DTYPE = DType.kFloat8E5M2
FP8Helper.UPDATE_FP8META_INTERVAL = 1
FP8Helper.AMAX_HISTORY_LEN = 1024
FP8Helper.AMAX_COMPUTE_ALGO = AmaxComputeAlgo.MAX
@staticmethod
def update_amax_history(amax_buffers: jnp.ndarray) -> jnp.ndarray:
"""
Update the amax history
"""
updated_amax_buffers = jnp.roll(amax_buffers, -1, 1)
updated_amax_buffers = updated_amax_buffers.at[:, 0].set(0)
return updated_amax_buffers
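    # For example, a row [a0, a1, a2] becomes [0., a2, a0]: the window is rolled left by one
    # and slot 0 is cleared so the next measured amax can be written into it.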
@staticmethod
def update_collections(new: Collection, original: Collection) -> Collection:
"""
Update the collections
"""
assert isinstance(original, (dict, FrozenDict))
assert isinstance(new, (dict, FrozenDict))
frozen_original = FrozenDict(original) if not isinstance(original, FrozenDict) else original
for key in new:
if key in frozen_original:
frozen_original, _ = frozen_original.pop(key)
new_coll = FrozenDict({**new, **frozen_original})
if not isinstance(original, FrozenDict):
new_coll = new_coll.unfreeze()
return new_coll
@staticmethod
def update_fp8_metas(state: Collection) -> Collection:
"""
Update the FP8 metas
"""
assert isinstance(state, (dict, FrozenDict))
if FP8Helper.FP8_COLLECTION_NAME in state:
frozen_state = FrozenDict(state) if not isinstance(state, FrozenDict) else state
others, fp8_metas = frozen_state.pop(FP8Helper.FP8_COLLECTION_NAME)
fp8_metas = FP8Helper._update_fp8_metas_impl(fp8_metas)
new_state = FrozenDict({**others, FP8Helper.FP8_COLLECTION_NAME: fp8_metas})
if not isinstance(state, FrozenDict):
new_state = new_state.unfreeze()
return new_state
return state
@staticmethod
def generate_fp8_max_array(num_of_meta):
"""
Generate the FP8 max array
"""
num_of_gemm = num_of_meta // FP8Helper.NUM_META_PER_GEMM
fp8_max_fwd = FP8Helper.FP8_FORMAT.value.max_fwd
fp8_max_bwd = FP8Helper.FP8_FORMAT.value.max_bwd
fp8_max_per_gemm = []
for i in range(FP8Helper.NUM_META_PER_GEMM):
val = fp8_max_bwd if i == FP8Helper.GRAD_META_IDX_PER_GEMM \
else fp8_max_fwd
fp8_max_per_gemm.append([val])
fp8_max_per_gemm = jnp.asarray(fp8_max_per_gemm, dtype=jnp.float32)
return jnp.vstack([fp8_max_per_gemm] * num_of_gemm)
@staticmethod
def get_fp8_meta_indices(gemm_idx: int) -> Tuple[int, int, int]:
"""
        Obtain the indices of the FP8 metas for the given GEMM index.
"""
input_idx = FP8Helper.NUM_META_PER_GEMM * gemm_idx + FP8Helper.INPUT_META_IDX_PER_GEMM
kernel_idx = FP8Helper.NUM_META_PER_GEMM * gemm_idx + FP8Helper.KERNEL_META_IDX_PER_GEMM
grad_idx = FP8Helper.NUM_META_PER_GEMM * gemm_idx + FP8Helper.GRAD_META_IDX_PER_GEMM
return input_idx, kernel_idx, grad_idx
@staticmethod
@jax.jit
def _update_fp8_metas_impl(fp8_metas: Collection) -> Collection:
fp8_meta_arrays, treedef = jax.tree_util.tree_flatten(fp8_metas)
num_of_meta_with_max = FP8Helper.NUM_META_PER_GEMM + 1
num_of_gemm = len(fp8_meta_arrays) // num_of_meta_with_max
for i in range(num_of_gemm):
            # the flattened array is ordered alphabetically by collection name
fp8_max_idx = i * num_of_meta_with_max
fp8_amax_idx = fp8_max_idx + 1
fp8_scale_idx = fp8_amax_idx + 1
fp8_scale_inv_idx = fp8_scale_idx + 1
fp8_max = fp8_meta_arrays[fp8_max_idx]
if FP8Helper.AMAX_COMPUTE_ALGO is AmaxComputeAlgo.MAX:
amax = jnp.max(fp8_meta_arrays[fp8_amax_idx], axis=-1, keepdims=True)
else:
amax = fp8_meta_arrays[fp8_amax_idx][..., 0:1]
scale = fp8_meta_arrays[fp8_scale_idx]
exp = jnp.floor(jnp.log2(fp8_max / amax)) - FP8Helper.MARGIN
sf = jnp.round(jnp.power(2, jnp.abs(exp)))
sf = jnp.where(amax > 0.0, sf, scale)
sf = jnp.where(jnp.isfinite(amax), sf, scale)
scale = jnp.where(exp < 0, 1 / sf, sf)
fp8_meta_arrays[fp8_scale_idx] = scale
fp8_meta_arrays[fp8_scale_inv_idx] = 1 / scale
return jax.tree_util.tree_unflatten(treedef, fp8_meta_arrays)
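# Illustrative sketch (not part of the original module): the flat FP8 meta layout implied by
# the helpers above, assuming NUM_META_PER_GEMM == 3 (input, kernel, grad per GEMM).
def _example_fp8_meta_layout():
    """Hypothetical helper; for illustration only."""
    assert FP8Helper.get_fp8_meta_indices(0) == (0, 1, 2)
    assert FP8Helper.get_fp8_meta_indices(1) == (3, 4, 5)
    # Two GEMMs therefore need 6 meta rows; generate_fp8_max_array returns one max per row.
    assert FP8Helper.generate_fp8_max_array(6).shape == (6, 1)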
@contextmanager
def fp8_autocast(enabled: bool = False,
fp8_recipe: Optional[DelayedScaling] = None,
sharding_resource: Optional[ShardingResource] = None) -> None:
r"""
Context manager for FP8 usage.
.. code-block:: python
mesh_shape = (4, 2)
dp_mesh_axis_name = 'data_parallel'
tp_mesh_axis_name = 'tensor_parallel'
devices = np.asarray(jax.devices()).reshape(*mesh_shape)
with maps.Mesh(devices, (dp_mesh_axis_name, tp_mesh_axis_name)):
sharding_resource=ShardingResource(dp_mesh_axis_name, tp_mesh_axis_name)
with fp8_autocast(enabled=True, sharding_resource=sharding_resource):
rules = extend_logical_axis_rules(tuple())
transformer = TransformerLayer()
with partitioning.axis_rules(rules):
pjit(transformer.init, ...)(...)
.. note::
We only support :attr:`margin`, :attr:`fp8_format`,
:attr:`interval`, :attr:`amax_history_len` and
        :attr:`amax_compute_algo` (with value 'max' or 'most_recent')
in recipe.DelayedScaling currently. Other parameters in
recipe.DelayedScaling will trigger an assertion.
Parameters
----------
enabled: bool, default = False
Whether or not to enable fp8
fp8_recipe: recipe.DelayedScaling, default = None
Recipe used for FP8 training.
sharding_resource: ShardingResource, default = None
Specify the mesh axes for data and tensor parallelism to shard along.
If set to None, then no data or tensor parallelism will be used.
"""
if fp8_recipe is None:
fp8_recipe = DelayedScaling()
assert fp8_recipe.amax_compute_algo in [
"max", "most_recent"
], ("DelayedScaling amax_compute_algo only supports max and most_recent with TE/JAX.")
assert fp8_recipe.scaling_factor_compute_algo is None, (
"DelayedScaling scaling_factor_compute_algo isn't supported by TE/JAX.")
assert fp8_recipe.override_linear_precision == (False, False, False), (
"DelayedScaling override_linear_precision isn't supported by TE/JAX.")
assert fp8_recipe.reduce_amax, ("DelayedScaling reduce_amax should be enabled for TE/JAX.")
if sharding_resource is None:
sharding_resource = ShardingResource()
try:
with global_shard_guard(sharding_resource):
if enabled:
fp8_available, reason_for_no_fp8 = is_fp8_available()
assert fp8_available, reason_for_no_fp8
amax_compute_algo = AmaxComputeAlgo.MOST_RECENT
if fp8_recipe.amax_compute_algo == 'max':
amax_compute_algo = AmaxComputeAlgo.MAX
FP8Helper.initialize(margin=fp8_recipe.margin,
fp8_format=fp8_recipe.fp8_format,
update_fp8meta_interval=fp8_recipe.interval,
amax_history_len=fp8_recipe.amax_history_len,
amax_compute_algo=amax_compute_algo)
yield
finally:
FP8Helper.finalize()
# Function Wrappers
def update_collections(new: Collection, original: Collection) -> FrozenDict:
r"""
A helper to update Flax's Collection.
Collection = [dict, flax.core.frozen_dict.FrozenDict]
Parameters
----------
new: Collection
A collection that includes new data.
original: Collection
The base collection.
Returns
-------
outputs : Collection
The updated collection.
"""
return FP8Helper.update_collections(new, original)
def update_fp8_metas(state: Collection) -> Collection:
r"""
    Calculate new fp8 scales and their inverses via the following formula
.. code-block:: python
exp = floor(log2(fp8_max / amax)) - margin
sf = round(power(2, abs(exp)))
        sf = sf if amax > 0.0 else original_scale
        sf = sf if isfinite(amax) else original_scale
        updated_scale = 1/sf if exp < 0 else sf
updated_scale_inv = 1/updated_scale
Collection = [dict, flax.core.frozen_dict.FrozenDict]
Parameters
----------
state: Collection
A collection that includes FP8 metas.
Returns
-------
outputs : Collection
The collection with updated FP8 metas.
"""
return FP8Helper.update_fp8_metas(state)
def get_delayed_scaling():
r"""
Obtain an instance of DelayedScaling which is set via fp8_autocast.
.. note::
We only store :attr:`margin`, :attr:`fp8_format`, :attr:`interval`,
:attr:`amax_history_len` and :attr:`amax_compute_algo` via fp8_autocast.
Other parameters in recipe.DelayedScaling would be returned as the default
values.
Returns
-------
delay_scaling : DelayedScaling
an instance of DelayedScaling which is set via fp8_autocast.
"""
amax_compute_algo = "max" if FP8Helper.AMAX_COMPUTE_ALGO is AmaxComputeAlgo.MAX \
else "most_recent"
return DelayedScaling(margin=int(FP8Helper.MARGIN),
interval=FP8Helper.UPDATE_FP8META_INTERVAL,
fp8_format=FP8Helper.FP8_FORMAT,
amax_history_len=FP8Helper.AMAX_HISTORY_LEN,
amax_compute_algo=amax_compute_algo)
| TransformerEngine-main | transformer_engine/jax/fp8.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Transformer Engine bindings for JAX"""
from . import flax
from .fp8 import fp8_autocast, update_collections, update_fp8_metas, get_delayed_scaling
from .sharding import MajorShardingType, ShardingResource, ShardingType
from ..common.utils import deprecate_wrapper
extend_logical_axis_rules = deprecate_wrapper(
flax.extend_logical_axis_rules,
"extend_logical_axis_rules is moving to transformer_engine.jax.flax module")
DenseGeneral = deprecate_wrapper(flax.DenseGeneral,
"DenseGeneral is moving to transformer_engine.jax.flax module")
LayerNorm = deprecate_wrapper(flax.LayerNorm,
"LayerNorm is moving to transformer_engine.jax.flax module")
LayerNormDenseGeneral = deprecate_wrapper(
flax.LayerNormDenseGeneral,
"LayerNormDenseGeneral is moving to transformer_engine.jax.flax module")
LayerNormMLP = deprecate_wrapper(flax.LayerNormMLP,
"LayerNormMLP is moving to transformer_engine.jax.flax module")
TransformerEngineBase = deprecate_wrapper(
flax.TransformerEngineBase,
"TransformerEngineBase is moving to transformer_engine.jax.flax module")
MultiHeadAttention = deprecate_wrapper(
flax.MultiHeadAttention, "MultiHeadAttention is moving to transformer_engine.jax.flax module")
RelativePositionBiases = deprecate_wrapper(
flax.RelativePositionBiases,
"RelativePositionBiases is moving to transformer_engine.jax.flax module")
TransformerLayer = deprecate_wrapper(
flax.TransformerLayer, "TransformerLayer is moving to transformer_engine.jax.flax module")
TransformerLayerType = deprecate_wrapper(
flax.TransformerLayerType,
"TransformerLayerType is moving to transformer_engine.jax.flax module")
__all__ = [
'fp8_autocast', 'update_collections', 'update_fp8_metas', 'get_delayed_scaling',
'MajorShardingType', 'ShardingResource', 'ShardingType', 'flax', 'praxis', 'DenseGeneral',
'LayerNorm', 'LayerNormDenseGeneral', 'LayerNormMLP', 'TransformerEngineBase',
'MultiHeadAttention', 'RelativePositionBiases', 'TransformerLayer', 'TransformerLayerType'
]
| TransformerEngine-main | transformer_engine/jax/__init__.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""JAX multi-head attention modules"""
from enum import Enum
from functools import partial
import jax
import jax.numpy as jnp
from transformer_engine_jax import NVTE_Bias_Type
from transformer_engine_jax import NVTE_Mask_Type
from .cpp_extensions import FusedAttnHelper
from .cpp_extensions import cross_fused_attn_fwd, cross_fused_attn_bwd
from .cpp_extensions import self_fused_attn_fwd, self_fused_attn_bwd
from .sharding import get_fused_attn_sharding_meta
from .sharding import ShardingType
from .sharding import xmap_runner, extend_fsdp_sharding_meta
jax.config.update('experimental_xmap_spmd_lowering', True)
jax.config.update('experimental_xmap_spmd_lowering_manual', True)
class AttnBiasType(Enum):
"""Attention Bias Type."""
NO_BIAS = NVTE_Bias_Type.NVTE_NO_BIAS
PRE_SCALE_BIAS = NVTE_Bias_Type.NVTE_PRE_SCALE_BIAS
POST_SCALE_BIAS = NVTE_Bias_Type.NVTE_POST_SCALE_BIAS
class AttnMaskType(Enum):
"""Attention Mask Type."""
NO_MASK = NVTE_Mask_Type.NVTE_NO_MASK
PADDING_MASK = NVTE_Mask_Type.NVTE_PADDING_MASK
CAUSAL_MASK = NVTE_Mask_Type.NVTE_CAUSAL_MASK
def is_fused_attn_kernel_available(q_type, kv_type, attn_bias_type, attn_mask_type,
dropout_probability, max_seqlen_q, max_seqlen_kv, head_dim):
"""
    Check whether a fused attention kernel is available for the given configuration
"""
return FusedAttnHelper(q_type, kv_type, attn_bias_type.value, attn_mask_type.value,
dropout_probability, max_seqlen_q, max_seqlen_kv,
head_dim).is_fused_attn_kernel_available()
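# Illustrative sketch (not part of the original module): querying kernel availability before
# choosing the fused path. The dtypes and sizes below are hypothetical.
def _example_fused_attn_availability_check():
    """Hypothetical usage sketch; for illustration only."""
    return is_fused_attn_kernel_available(jnp.float16, jnp.float16,
                                          AttnBiasType.NO_BIAS, AttnMaskType.PADDING_MASK,
                                          dropout_probability=0.0, max_seqlen_q=512,
                                          max_seqlen_kv=512, head_dim=64)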
def self_fused_attn(qkv: jnp.ndarray,
bias: jnp.ndarray,
mask: jnp.ndarray,
seed: jnp.ndarray,
attn_bias_type: AttnBiasType,
attn_mask_type: AttnMaskType,
scaling_factor: float,
dropout_probability: float,
is_training: bool,
sharding_type: ShardingType = ShardingType.SINGLE):
"""
Self fused attention wrapper
"""
assert sharding_type not in (ShardingType.TP_ROW, ShardingType.DP_TP_ROW), \
"self_fused_attn does not support row-split tensor parallelism currently."
if sharding_type is ShardingType.SINGLE:
output = _self_fused_attn(qkv,
bias,
mask,
seed,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
else:
dp_axis_name = "batch"
tp_axis_name = "model"
inputs = [qkv, bias, mask, seed]
batch, seqlen, _, num_head, head_dim = qkv.shape
output_shape = [batch, seqlen, num_head, head_dim]
sharding_meta = get_fused_attn_sharding_meta(
sharding_type, [x.shape if x is not None else None for x in inputs], [output_shape],
dp_dims=([0, None, 0, 0], [0]),
tp_dims=([3, 1, None, 0], [2]),
dp_axis_name=dp_axis_name,
tp_axis_name=tp_axis_name)
sharding_meta, _ = extend_fsdp_sharding_meta(sharding_meta, {0: 0, 2: 0})
inputs_ = tuple(
jnp.reshape(x, new_shape) if x is not None else None
for x, new_shape in zip(inputs, sharding_meta.input_shapes))
partial_self_fused_attn = partial(_self_fused_attn,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
output_ = xmap_runner(partial_self_fused_attn, sharding_meta.in_axes,
sharding_meta.out_axes, sharding_meta.axis_resources, inputs_)
output = jnp.reshape(output_, sharding_meta.output_shapes)
return output
@partial(jax.custom_vjp, nondiff_argnums=(4, 5, 6, 7, 8))
def _self_fused_attn(qkv: jnp.ndarray, bias: jnp.ndarray, mask: jnp.ndarray, seed: jnp.ndarray,
attn_bias_type: AttnBiasType, attn_mask_type: AttnMaskType,
scaling_factor: float, dropout_probability: float, is_training: bool):
output, _ = _self_fused_attn_fwd(qkv,
bias,
mask,
seed,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
return output
def _self_fused_attn_fwd(qkv, bias, mask, seed, attn_bias_type, attn_mask_type, scaling_factor,
dropout_probability, is_training):
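    # Valid positions are encoded as 0 in the padding mask, so counting zeros along the
    # sequence axes recovers per-batch lengths; the zero-prefixed cumulative sum yields the
    # cu_seqlen offsets expected by the fused attention kernel.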
seqlen = jnp.sum(mask[:, :, :, 0] == 0, axis=(-1, -2), dtype=jnp.int32)
cu_seqlen = jnp.cumsum(seqlen)
cu_seqlen = jnp.hstack((0, cu_seqlen))
output, softmax_aux, rng_state = self_fused_attn_fwd(qkv,
bias,
cu_seqlen,
seed,
attn_bias_type=attn_bias_type.value,
attn_mask_type=attn_mask_type.value,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
return output, (qkv, softmax_aux, rng_state, output, cu_seqlen)
def _self_fused_attn_bwd(attn_bias_type, attn_mask_type, scaling_factor, dropout_probability,
is_training, ctx, grad):
qkv, softmax_aux, rng_state, output, cu_seqlen = ctx
doutput = grad
grad_qkv, grad_bias = self_fused_attn_bwd(qkv,
softmax_aux,
rng_state,
output,
doutput,
cu_seqlen,
attn_bias_type=attn_bias_type.value,
attn_mask_type=attn_mask_type.value,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
    if attn_bias_type == AttnBiasType.NO_BIAS:
grad_bias = None
return grad_qkv, grad_bias, None, None
_self_fused_attn.defvjp(_self_fused_attn_fwd, _self_fused_attn_bwd)
def cross_fused_attn(q: jnp.ndarray,
kv: jnp.ndarray,
mask: jnp.ndarray,
seed: jnp.ndarray,
attn_bias_type: AttnBiasType,
attn_mask_type: AttnMaskType,
scaling_factor: float,
dropout_probability: float,
is_training: bool,
sharding_type: ShardingType = ShardingType.SINGLE):
"""
Cross multi-head attention wrapper
"""
assert sharding_type not in (ShardingType.TP_ROW, ShardingType.DP_TP_ROW), \
"cross_fused_attn does not support row-split tensor parallelism currently."
if sharding_type is ShardingType.SINGLE:
output = _cross_fused_attn(q,
kv,
mask,
seed,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
else:
dp_axis_name = "batch"
tp_axis_name = "model"
inputs = [q, kv, mask, seed]
output_shape = q.shape
sharding_meta = get_fused_attn_sharding_meta(
sharding_type, [x.shape if x is not None else None for x in inputs], [output_shape],
dp_dims=([0, 0, 0, None], [0]),
tp_dims=([2, 3, None, None], [2]),
dp_axis_name=dp_axis_name,
tp_axis_name=tp_axis_name)
        sharding_meta, _ = extend_fsdp_sharding_meta(sharding_meta, {0: 0, 2: 0})
inputs_ = tuple(
jnp.reshape(x, new_shape) if x is not None else None
for x, new_shape in zip(inputs, sharding_meta.input_shapes))
partial_cross_fused_attn = partial(_cross_fused_attn,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
output_ = xmap_runner(partial_cross_fused_attn, sharding_meta.in_axes,
sharding_meta.out_axes, sharding_meta.axis_resources, inputs_)
output = jnp.reshape(output_, sharding_meta.output_shapes)
return output
@partial(jax.custom_vjp, nondiff_argnums=(4, 5, 6, 7, 8))
def _cross_fused_attn(q: jnp.ndarray, kv: jnp.ndarray, mask: jnp.ndarray, seed: jnp.ndarray,
attn_bias_type: AttnBiasType, attn_mask_type: AttnMaskType,
scaling_factor: float, dropout_probability: float, is_training: bool):
output, _ = _cross_fused_attn_fwd(q,
kv,
mask,
seed,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
return output
def _cross_fused_attn_fwd(q, kv, mask, seed, attn_bias_type, attn_mask_type, scaling_factor,
dropout_probability, is_training):
q_seqlen = jnp.sum(mask[:, :, :, 0] == 0, axis=(-1, -2), dtype=jnp.int32)
q_cu_seqlen = jnp.cumsum(q_seqlen)
q_cu_seqlen = jnp.hstack((0, q_cu_seqlen))
kv_seqlen = jnp.sum(mask[:, :, 0, :] == 0, axis=(-1, -2), dtype=jnp.int32)
kv_cu_seqlen = jnp.cumsum(kv_seqlen)
kv_cu_seqlen = jnp.hstack((0, kv_cu_seqlen))
output, softmax_aux = cross_fused_attn_fwd(q,
kv,
q_cu_seqlen,
kv_cu_seqlen,
seed,
attn_bias_type=attn_bias_type.value,
attn_mask_type=attn_mask_type.value,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
return output, (softmax_aux, q, kv, q_cu_seqlen, kv_cu_seqlen)
def _cross_fused_attn_bwd(attn_bias_type, attn_mask_type, scaling_factor, dropout_probability,
is_training, ctx, grad):
softmax_aux, q, kv, q_cu_seqlen, kv_cu_seqlen = ctx
doutput = grad
grad_q, grad_kv = cross_fused_attn_bwd(q,
kv,
softmax_aux,
doutput,
q_cu_seqlen,
kv_cu_seqlen,
attn_bias_type=attn_bias_type.value,
attn_mask_type=attn_mask_type.value,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
return grad_q, grad_kv, None, None
_cross_fused_attn.defvjp(_cross_fused_attn_fwd, _cross_fused_attn_bwd)
| TransformerEngine-main | transformer_engine/jax/fused_attn.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""JAX MLP modules"""
from typing import Tuple, Sequence, Union, Callable
from functools import partial, reduce
import operator
import jax
import jax.numpy as jnp
from jax.interpreters import pxla
from transformer_engine_jax import DType as TEDType
from .cpp_extensions import jax_dtype_to_te_dtype
from .cpp_extensions import transpose, cast_transpose
from .cpp_extensions import gated_gelu, gated_gelu_fp8
from .cpp_extensions import dgated_gelu, dgated_gelu_cast_transpose
from .cpp_extensions import rmsnorm_fwd_fp8, rmsnorm_bwd
from .cpp_extensions import layernorm_fwd_fp8, layernorm_bwd
from .cpp_extensions import gemm
from .sharding import MajorShardingType, ShardingType
from .sharding import get_elementwise_sharding_meta
from .sharding import get_dot_sharding_meta, get_fp8_meta_sharding_meta
from .sharding import merge_axis_resources, infer_sharding_type
from .sharding import xmap_runner, extend_fsdp_sharding_meta
from .layernorm import canonicalize_layernorm_type
from .fp8 import FP8Helper, FP8GemmPackage
jax.config.update('experimental_xmap_spmd_lowering', True)
jax.config.update('experimental_xmap_spmd_lowering_manual', True)
thread_resources = pxla.thread_resources
def geglu(
inputs: jnp.ndarray,
contracting_dims: Sequence[int] = (-1,),
sharding_type: ShardingType = ShardingType.SINGLE,
dp_dim_index: int = 0, # pylint: disable=unused-argument
):
"""
Gated gelu
"""
input_shape_suf_size = reduce(operator.mul, inputs.shape[min(contracting_dims):])
assert input_shape_suf_size % 2 == 0
output_shape = (*inputs.shape[:min(contracting_dims)], input_shape_suf_size // 2)
if sharding_type is ShardingType.SINGLE:
output = _geglu(inputs, contracting_dims)
else:
dp_axis_name = "batch"
tp_axis_name = "model"
sharding_meta = get_elementwise_sharding_meta(sharding_type, inputs.shape, None,
dp_dim_index, dp_axis_name, tp_axis_name)
sharding_meta, _ = extend_fsdp_sharding_meta(sharding_meta, {0: dp_dim_index})
inputs_ = jnp.reshape(inputs, sharding_meta.input_shapes[0]) # 0 for input
partial_geglu = partial(_geglu, contracting_dims=contracting_dims)
output = xmap_runner(partial_geglu, sharding_meta.in_axes, sharding_meta.out_axes,
sharding_meta.axis_resources, (inputs_,))
output = jnp.reshape(output, output_shape)
return output
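# Illustrative sketch (not part of the original module): the gated-GELU wrapper expects an
# even-sized contracted trailing dimension (the two projections stacked along it) and returns
# half that width. Shapes below are hypothetical.
def _example_geglu_shapes():
    """Hypothetical usage sketch; for illustration only."""
    x = jnp.ones((4, 16, 512), dtype=jnp.float32)
    y = geglu(x)    # default contracting_dims=(-1,)
    assert y.shape == (4, 16, 256)
    return y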
@partial(jax.custom_vjp, nondiff_argnums=(1,))
def _geglu(inputs: jnp.ndarray, contracting_dims: Sequence[int] = (-1,)):
geglu_output, _ = _geglu_fwd(inputs, contracting_dims)
return geglu_output
def _geglu_fwd(inputs, contracting_dims):
inputs_real_shape = (*inputs.shape[:min(contracting_dims)],
reduce(operator.mul, inputs.shape[min(contracting_dims):]))
inputs_ = jnp.reshape(inputs, inputs_real_shape)
geglu_output = gated_gelu(inputs_)
geglu_output = jnp.expand_dims(geglu_output, min(contracting_dims))
return geglu_output, (inputs_, inputs.shape)
def _geglu_bwd(contracting_dims, ctx, g):
inputs_, inputs_shape = ctx
g = jnp.squeeze(g, min(contracting_dims))
assert inputs_.dtype == g.dtype
dgelu = dgated_gelu(g, inputs_)
dgelu = jnp.reshape(dgelu, inputs_shape)
return (dgelu,)
_geglu.defvjp(_geglu_fwd, _geglu_bwd)
def fp8_ln_mlp(
fp8_gemm_pkg: FP8GemmPackage,
ln_scale: jnp.ndarray,
ln_bias: jnp.ndarray,
layernorm_type: str,
fwd_dtype: TEDType,
bwd_dtype: TEDType,
zero_centered_gamma: bool = False,
epsilon: float = 1e-6,
contracting_dims: Tuple[Sequence[int], Sequence[int]] = ((-1,), (0,)),
major_sharding_type: MajorShardingType = MajorShardingType.SINGLE,
dp_dim_index: int = 0, # pylint: disable=unused-argument
activations: Sequence[Union[str, Callable]] = ('gelu', 'linear')
) -> jnp.ndarray:
"""
FP8 layernorm MLP wrapper
(LN + Dense + act + Dense)
"""
assert fp8_gemm_pkg.num_of_gemm == 2
inputs = fp8_gemm_pkg.inputs
kernel_1 = fp8_gemm_pkg.kernels[0]
kernel_2 = fp8_gemm_pkg.kernels[1]
fp8_max = fp8_gemm_pkg.fp8_max
amax = fp8_gemm_pkg.amax
scale = fp8_gemm_pkg.scale
scale_inv = fp8_gemm_pkg.scale_inv
layernorm_type = canonicalize_layernorm_type(layernorm_type)
if layernorm_type == 'rmsnorm':
assert ln_bias is None, "ln_bias should be None if layernorm_type is 'rmsnorm'"
assert not zero_centered_gamma, "zero_centered_gamma is not supported " \
"if layernorm_type is 'rmsnorm'"
assert activations == ('gelu', 'linear')
if major_sharding_type is MajorShardingType.SINGLE:
res = _fp8_mlp(inputs, ln_scale, ln_bias, kernel_1, kernel_2, fp8_max, amax, scale,
scale_inv, layernorm_type, activations, zero_centered_gamma, epsilon,
fwd_dtype, bwd_dtype, contracting_dims, major_sharding_type, "", "", "")
else:
dp_axis_name = "batch"
tp_axis_name = "model"
first_part_st, second_part_st = infer_sharding_type(major_sharding_type)
ln_sharding_meta = get_elementwise_sharding_meta(first_part_st, inputs.shape,
ln_scale.shape, dp_dim_index, dp_axis_name,
tp_axis_name)
ln_sharding_meta, _ = extend_fsdp_sharding_meta(ln_sharding_meta, {0: dp_dim_index})
input_tp_index = len(inputs.shape) - 1
first_dot_sharding_meta = get_dot_sharding_meta(first_part_st, inputs.shape, kernel_1.shape,
dp_dim_index, input_tp_index, 2,
contracting_dims, dp_axis_name,
tp_axis_name)
first_dot_sharding_meta, fsdp_axis_name = extend_fsdp_sharding_meta(
first_dot_sharding_meta, {0: dp_dim_index})
second_input_shape = (*first_dot_sharding_meta.output_shapes[0][:-2],
first_dot_sharding_meta.output_shapes[0][-1])
second_dot_sharding_meta = get_dot_sharding_meta(second_part_st, second_input_shape,
kernel_2.shape, dp_dim_index,
len(second_input_shape) - 1, 0,
contracting_dims, dp_axis_name,
tp_axis_name)
second_dot_sharding_meta, _ = extend_fsdp_sharding_meta(second_dot_sharding_meta,
{0: dp_dim_index})
num_of_fp8_meta_kind = 4 # fp8_max, amax, scale, scale_inv
fp8_sharding_meta = get_fp8_meta_sharding_meta(first_part_st, num_of_fp8_meta_kind,
dp_axis_name, tp_axis_name)
inputs_ = jnp.reshape(inputs, ln_sharding_meta.input_shapes[0]) # 0 for input
ln_scale_ = jnp.reshape(ln_scale, ln_sharding_meta.input_shapes[1]) # 1 for gamma
ln_bias_ = ln_bias
ln_bias_in_axis = {}
if ln_bias_ is not None:
ln_bias_ = jnp.reshape(ln_bias_, ln_sharding_meta.input_shapes[1]) # 1 for beta
ln_bias_in_axis = ln_sharding_meta.in_axes[1]
kernel_1_ = jnp.reshape(kernel_1, first_dot_sharding_meta.input_shapes[1]) # 1 for kernel
kernel_2_ = jnp.reshape(kernel_2,
second_dot_sharding_meta.input_shapes[1]) # 1 for kernel
axis_resource = merge_axis_resources([
ln_sharding_meta.axis_resources, first_dot_sharding_meta.axis_resources,
second_dot_sharding_meta.axis_resources, fp8_sharding_meta.axis_resources
])
partial_fp8_mlp = partial(_fp8_mlp,
layernorm_type=layernorm_type,
activations=activations,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon,
fwd_dtype=fwd_dtype,
bwd_dtype=bwd_dtype,
contracting_dims=contracting_dims,
major_sharding_type=major_sharding_type,
dp_axis_name=dp_axis_name,
tp_axis_name=tp_axis_name,
fsdp_axis_name=fsdp_axis_name)
in_axes = (ln_sharding_meta.in_axes[0], ln_sharding_meta.in_axes[1], ln_bias_in_axis,
first_dot_sharding_meta.in_axes[1], second_dot_sharding_meta.in_axes[1],
*fp8_sharding_meta.in_axes)
res = xmap_runner(
partial_fp8_mlp, in_axes, second_dot_sharding_meta.out_axes, axis_resource,
(inputs_, ln_scale_, ln_bias_, kernel_1_, kernel_2_, fp8_max, amax, scale, scale_inv))
res = jnp.reshape(res, second_dot_sharding_meta.output_shapes[0])
return res
@partial(jax.custom_vjp, nondiff_argnums=(9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19))
def _fp8_mlp(inputs: jnp.ndarray, ln_scale: jnp.ndarray, ln_bias: jnp.ndarray,
kernel_1: jnp.ndarray, kernel_2: jnp.ndarray, fp8_maxs: jnp.ndarray, amax: jnp.ndarray,
scale: jnp.ndarray, scale_inv: jnp.ndarray, layernorm_type: str,
activations: Sequence[Union[str, Callable]], zero_centered_gamma: bool, epsilon: float,
fwd_dtype: TEDType, bwd_dtype: TEDType, contracting_dims: Tuple[Sequence[int],
Sequence[int]],
major_sharding_type: MajorShardingType, dp_axis_name: str, tp_axis_name: str,
fsdp_axis_name: str):
res, _ = _fp8_mlp_fwd(inputs,
ln_scale,
ln_bias,
kernel_1,
kernel_2,
fp8_maxs,
amax,
scale,
scale_inv,
layernorm_type,
activations,
zero_centered_gamma,
epsilon,
fwd_dtype,
bwd_dtype,
contracting_dims=contracting_dims,
major_sharding_type=major_sharding_type,
dp_axis_name=dp_axis_name,
tp_axis_name=tp_axis_name,
fsdp_axis_name=fsdp_axis_name)
return res
def _fp8_mlp_fwd(
inputs,
gamma,
beta,
kernel_1,
kernel_2,
fp8_maxs,
amax,
scale,
scale_inv,
layernorm_type,
activations,
zero_centered_gamma,
epsilon,
fwd_dtype,
bwd_dtype, # pylint: disable=unused-argument
contracting_dims,
major_sharding_type,
dp_axis_name, # pylint: disable=unused-argument
tp_axis_name,
fsdp_axis_name): # pylint: disable=unused-argument
if activations != ('gelu', 'linear'):
raise NotImplementedError("activations only support ('gelu', 'linear') for now.")
lhs_contracting_dims, rhs_contracting_dims = contracting_dims
input_shape_pre = inputs.shape[:min(lhs_contracting_dims)]
input_shape_suf = inputs.shape[min(lhs_contracting_dims):]
kernel_1_shape_pre = kernel_1.shape[:max(rhs_contracting_dims) + 1]
kernel_1_shape_suf = kernel_1.shape[max(rhs_contracting_dims) + 1:]
kernel_2_shape_pre = kernel_2.shape[:max(rhs_contracting_dims) + 1]
kernel_2_shape_suf = kernel_2.shape[max(rhs_contracting_dims) + 1:]
input_contracting_size = reduce(operator.mul, input_shape_suf)
kernel_1_pre_size = reduce(operator.mul, kernel_1_shape_pre)
kernel_1_suf_size = reduce(operator.mul, kernel_1_shape_suf)
kernel_2_pre_size = reduce(operator.mul, kernel_2_shape_pre)
assert input_contracting_size == kernel_1_pre_size
assert kernel_1_suf_size == kernel_2_pre_size * len(activations)
inputs_ = jnp.reshape(inputs, (-1, input_contracting_size))
kernel_1_ = jnp.reshape(kernel_1, (kernel_1_pre_size, -1))
kernel_2_ = jnp.reshape(kernel_2, (kernel_2_pre_size, -1))
amax = FP8Helper.update_amax_history(amax)
gemm1_input_idx, gemm1_kernel_idx, _ = FP8Helper.get_fp8_meta_indices(0)
input_amax = amax[gemm1_input_idx, 0:1]
input_scale = scale[gemm1_input_idx]
input_scale_inv = scale_inv[gemm1_input_idx]
if layernorm_type == 'layernorm':
ln_out, mu, rsigma, ln_out_amax = layernorm_fwd_fp8(inputs_,
gamma,
beta,
input_amax,
input_scale,
input_scale_inv,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon)
else:
assert not zero_centered_gamma, "zero_centered_gamma is not supported " \
"if layernorm_type is 'rmsnorm'"
ln_out, rsigma, ln_out_amax = rmsnorm_fwd_fp8(inputs_,
gamma,
input_amax,
input_scale,
input_scale_inv,
epsilon=epsilon)
mu = None
kernel_1_amax = amax[gemm1_kernel_idx, 0:1]
kernel_1_scale = scale[gemm1_kernel_idx]
kernel_1_scale_inv = scale_inv[gemm1_kernel_idx]
kernel_1_cast, kernel_1_cast_trans, kernel_1_amax = cast_transpose(
kernel_1_, kernel_1_amax, kernel_1_scale, kernel_1_scale_inv, fwd_dtype)
dense_1_output = gemm(kernel_1_cast_trans, kernel_1_scale_inv, fwd_dtype, True, ln_out,
scale_inv[gemm1_input_idx], fwd_dtype, False,
jax_dtype_to_te_dtype(inputs.dtype), FP8Helper.FP8_2X_ACC_FPROP)
gemm2_input_idx, gemm2_kernel_idx, _ = FP8Helper.get_fp8_meta_indices(1)
kernel_2_amax = amax[gemm2_kernel_idx, 0:1]
kernel_2_scale = scale[gemm2_kernel_idx]
kernel_2_scale_inv = scale_inv[gemm2_kernel_idx]
kernel_2_cast, kernel_2_cast_trans, kernel_2_amax = cast_transpose(
kernel_2_, kernel_2_amax, kernel_2_scale, kernel_2_scale_inv, fwd_dtype)
dense_1_out_amax = amax[gemm2_input_idx, 0:1]
dense_1_out_scale = scale[gemm2_input_idx]
dense_1_out_scale_inv = scale_inv[gemm2_input_idx]
gated_gelu_output_cast, gated_gelu_amax = gated_gelu_fp8(dense_1_output, dense_1_out_amax,
dense_1_out_scale,
dense_1_out_scale_inv, fwd_dtype)
res = gemm(kernel_2_cast_trans, kernel_2_scale_inv, fwd_dtype, True,
gated_gelu_output_cast, dense_1_out_scale_inv, fwd_dtype, False,
jax_dtype_to_te_dtype(inputs.dtype), FP8Helper.FP8_2X_ACC_FPROP)
if major_sharding_type in (MajorShardingType.TP, MajorShardingType.DPTP):
res = jax.lax.psum(res, tp_axis_name)
# (input_shape_pre, input_shape_suf)
# x (kernel_1_shape_pre, kernel_1_shape_suf)
# x (kernel_2_shape_pre, kernel_2_shape_suf)
# = (input_shape_pre, kernel_2_shape_suf)
output_shape = input_shape_pre + kernel_2_shape_suf
res = jnp.reshape(res, output_shape)
ctx = (inputs_, ln_out, mu, rsigma, gamma, dense_1_output, gated_gelu_output_cast,
kernel_1_cast, kernel_2_cast, fp8_maxs, amax, scale, scale_inv, ln_out_amax,
gated_gelu_amax, kernel_1_amax, kernel_2_amax, inputs.shape, kernel_1.shape,
kernel_2.shape)
return res, ctx
def _fp8_mlp_bwd(
layernorm_type,
activations, # pylint: disable=unused-argument
zero_centered_gamma,
epsilon,
fwd_dtype,
bwd_dtype,
contracting_dims, # pylint: disable=unused-argument
major_sharding_type,
dp_axis_name,
tp_axis_name,
fsdp_axis_name,
ctx,
g):
inputs_, ln_out, mu, rsigma, gamma, \
dense_1_output, gated_gelu_output_cast, \
kernel_1_cast, kernel_2_cast, \
fp8_maxs, amax, scale, scale_inv, \
ln_out_amax, gated_gelu_amax, kernel_1_amax, kernel_2_amax, \
input_shape, kernel_1_shape, kernel_2_shape = ctx
g = jnp.reshape(g, (ln_out.shape[0], -1))
gemm2_input_idx, gemm2_kernel_idx, gemm2_grad_idx = FP8Helper.get_fp8_meta_indices(1)
grad_amax = amax[gemm2_grad_idx, 0:1]
grad_scale = scale[gemm2_grad_idx]
grad_scale_inv = scale_inv[gemm2_grad_idx]
grad_cast, grad_cast_trans, grad_amax = cast_transpose(g, grad_amax, grad_scale, grad_scale_inv,
bwd_dtype)
gated_gelu_output_cast_trans = transpose(gated_gelu_output_cast, fwd_dtype)
gemm2_input_scale_inv = scale_inv[gemm2_input_idx]
wgrad_2 = gemm(grad_cast_trans, grad_scale_inv, bwd_dtype, True,
gated_gelu_output_cast_trans, gemm2_input_scale_inv, fwd_dtype, False,
jax_dtype_to_te_dtype(g.dtype), FP8Helper.FP8_2X_ACC_WGRAD)
kernel_2_scale_inv = scale_inv[gemm2_kernel_idx]
dgrad_2 = gemm(kernel_2_cast, kernel_2_scale_inv, fwd_dtype, True, grad_cast, grad_scale_inv,
bwd_dtype, False, jax_dtype_to_te_dtype(g.dtype), FP8Helper.FP8_2X_ACC_DGRAD)
gemm1_input_idx, gemm1_kernel_idx, gemm1_grad_idx = FP8Helper.get_fp8_meta_indices(0)
dgrad_2_amax = amax[gemm1_grad_idx, 0:1]
dgrad_2_scale = scale[gemm1_grad_idx]
dgrad_2_scale_inv = scale_inv[gemm1_grad_idx]
dgelu, dgelu_trans, dgelu_amax = dgated_gelu_cast_transpose(dgrad_2, dense_1_output,
dgrad_2_amax, dgrad_2_scale,
dgrad_2_scale_inv, bwd_dtype)
ln_out_trans = transpose(ln_out, fwd_dtype)
gemm1_input_scale_inv = scale_inv[gemm1_input_idx]
wgrad_1 = gemm(dgelu_trans, dgrad_2_scale_inv, bwd_dtype, True,
ln_out_trans, gemm1_input_scale_inv, fwd_dtype, False,
jax_dtype_to_te_dtype(g.dtype), FP8Helper.FP8_2X_ACC_WGRAD)
kernel_1_scale_inv = scale_inv[gemm1_kernel_idx]
dgrad_1 = gemm(kernel_1_cast, kernel_1_scale_inv, fwd_dtype, True, dgelu, dgrad_2_scale_inv,
bwd_dtype, False, jax_dtype_to_te_dtype(g.dtype), FP8Helper.FP8_2X_ACC_DGRAD)
if major_sharding_type in (MajorShardingType.TP, MajorShardingType.DPTP):
dgrad_1 = jax.lax.psum(dgrad_1, tp_axis_name)
if layernorm_type == 'layernorm':
grad_input, grad_gamma, grad_beta = layernorm_bwd(dgrad_1,
mu,
rsigma,
inputs_,
gamma,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon)
else:
assert not zero_centered_gamma, "zero_centered_gamma is not supported " \
"if layernorm_type is 'rmsnorm'"
grad_input, grad_gamma = rmsnorm_bwd(dgrad_1, rsigma, inputs_, gamma, epsilon=epsilon)
grad_beta = None
amax = amax.at[gemm1_input_idx, 0].set(ln_out_amax[0])
amax = amax.at[gemm1_kernel_idx, 0].set(kernel_1_amax[0])
amax = amax.at[gemm1_grad_idx, 0].set(dgelu_amax[0])
amax = amax.at[gemm2_input_idx, 0].set(gated_gelu_amax[0])
amax = amax.at[gemm2_kernel_idx, 0].set(kernel_2_amax[0])
amax = amax.at[gemm2_grad_idx, 0].set(grad_amax[0])
if major_sharding_type in (MajorShardingType.DP, MajorShardingType.DPTP):
wgrad_1 = jax.lax.psum(wgrad_1, dp_axis_name)
wgrad_2 = jax.lax.psum(wgrad_2, dp_axis_name)
grad_gamma = jax.lax.psum(grad_gamma, dp_axis_name)
if grad_beta is not None:
grad_beta = jax.lax.psum(grad_beta, dp_axis_name)
amax = jax.lax.pmax(amax, dp_axis_name)
if len(fsdp_axis_name) > 0:
wgrad_1 = jax.lax.psum(wgrad_1, fsdp_axis_name)
wgrad_2 = jax.lax.psum(wgrad_2, fsdp_axis_name)
grad_gamma = jax.lax.psum(grad_gamma, fsdp_axis_name)
if grad_beta is not None:
grad_beta = jax.lax.psum(grad_beta, fsdp_axis_name)
amax = jax.lax.pmax(amax, fsdp_axis_name)
if major_sharding_type in (MajorShardingType.TP, MajorShardingType.DPTP):
amax = jax.lax.pmax(amax, tp_axis_name)
grad_input = jnp.reshape(grad_input, input_shape)
wgrad_1 = jnp.reshape(wgrad_1, kernel_1_shape)
wgrad_2 = jnp.reshape(wgrad_2, kernel_2_shape)
return grad_input, grad_gamma, grad_beta, \
wgrad_1, wgrad_2, \
fp8_maxs, amax, scale, scale_inv
_fp8_mlp.defvjp(_fp8_mlp_fwd, _fp8_mlp_bwd)
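# Note on the FP8 meta layout used above: FP8GemmPackage carries num_of_gemm == 2, so the flat
# fp8_max/amax/scale/scale_inv arrays hold three rows per GEMM (input, kernel, grad) -- indices
# (0, 1, 2) for the first dense layer and (3, 4, 5) for the second, as returned by
# FP8Helper.get_fp8_meta_indices.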
| TransformerEngine-main | transformer_engine/jax/mlp.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""JAX te custom call"""
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import Tuple
from functools import partial, reduce
import operator
import warnings
import numpy as np
from jaxlib.hlo_helpers import custom_call
import jax.numpy as jnp
from jax.lib import xla_client
from jax import core, dtypes
from jax.core import ShapedArray
from jax.interpreters import xla, mlir
from jax.interpreters.mlir import ir, dtype_to_ir_type
import transformer_engine_jax
from transformer_engine_jax import DType as TEDType
from transformer_engine_jax import NVTE_Bias_Type
from transformer_engine_jax import NVTE_Mask_Type
from transformer_engine_jax import NVTE_QKV_Layout
from transformer_engine_jax import NVTE_Fused_Attn_Backend
for _name, _value in transformer_engine_jax.registrations().items():
xla_client.register_custom_call_target(_name, _value, platform="CUDA")
def te_dtype_to_jax_dtype(te_dtype):
"""
convert TE dtype to jax dtype
"""
assert isinstance(te_dtype, TEDType)
if te_dtype == TEDType.kFloat32:
return jnp.float32
if te_dtype == TEDType.kFloat16:
return jnp.float16
if te_dtype == TEDType.kBFloat16:
return jnp.bfloat16
if te_dtype == TEDType.kInt32:
return jnp.int32
if te_dtype == TEDType.kInt64:
return jnp.int64
return jnp.int8
def te_dtype_to_ir_dtype(te_dtype):
"""
convert TE dtype to MLIR dtype
"""
return dtype_to_ir_type(np.dtype(te_dtype_to_jax_dtype(te_dtype)))
def jax_dtype_to_te_dtype(jax_dtype):
"""
convert jax dtype to TE dtype
"""
if jax_dtype == jnp.float32:
return TEDType.kFloat32
if jax_dtype == jnp.float16:
return TEDType.kFloat16
if jax_dtype == jnp.bfloat16:
return TEDType.kBFloat16
raise ValueError(f"Not support the {jax_dtype=}")
@dataclass(frozen=True)
class FusedAttnHelper:
"""
Helper for the fused attention backend
"""
q_type: jnp.dtype
kv_type: jnp.dtype
attn_bias_type: NVTE_Bias_Type
attn_mask_type: NVTE_Mask_Type
dropout_probability: float
max_seqlen_q: int
max_seqlen_kv: int
head_dim: int
def is_fused_attn_kernel_available(self):
"""Check if there is available fused attention kernel"""
return self.get_fused_attn_backend() != NVTE_Fused_Attn_Backend.NVTE_No_Backend
def get_fused_attn_backend(self):
"""Get the fused attention kernel backend"""
return transformer_engine_jax.get_fused_attn_backend(
jax_dtype_to_te_dtype(self.q_type), jax_dtype_to_te_dtype(self.kv_type),
NVTE_QKV_Layout.NVTE_QKV_INTERLEAVED, self.attn_bias_type, self.attn_mask_type,
self.dropout_probability, self.max_seqlen_q, self.max_seqlen_kv, self.head_dim)
def merge_named_shape(base, new):
"""
merge named shapes (i.e., dicts); values for the same key must match
"""
output_named_shape = {**base}
for key in new:
if key in output_named_shape:
assert output_named_shape[key] == new[key], \
f"The value of named shape with a same name should be equal between" \
f" base and new in merge_named_shape, but got base[{key}]=" \
f"{output_named_shape[key]} and {new[key]=}"
else:
output_named_shape[key] = new[key]
return output_named_shape
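# Illustrative example (editorial addition): how merge_named_shape combines two named
# shapes. The sample dicts below are invented for illustration; any conflicting value
# for the same key would trip the assertion inside merge_named_shape.
def _example_merge_named_shape():
    """Merge two non-conflicting named shapes (illustrative only)."""
    merged = merge_named_shape({"batch": 4}, {"model": 2})
    assert merged == {"batch": 4, "model": 2}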
class BasePrimitive(metaclass=ABCMeta):
"""
jax primitive
"""
@staticmethod
@abstractmethod
def abstract():
"""
to describe the computing graph (abstract evaluation of output shapes and dtypes)
"""
return NotImplemented
@staticmethod
@abstractmethod
def lowering():
"""
to describe the MLIR lowering (XLA custom call)
"""
return NotImplemented
def register_primitive(cls):
"""
register jax primitive
"""
p = core.Primitive(cls.name)
p.multiple_results = cls.multiple_results
p.def_impl(partial(xla.apply_primitive, p))
p.def_abstract_eval(cls.abstract)
mlir.register_lowering(p, cls.lowering, platform='cuda')
return p
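# Editorial note (illustrative, not part of the original source): every concrete
# primitive below follows the same pattern -- subclass BasePrimitive, set `name` and
# `multiple_results`, implement `abstract` (output ShapedArrays) and `lowering`
# (XLA custom call via MLIR), then obtain the bound primitive and wrap it:
#
#     _my_p = register_primitive(MyPrimitive)
#
#     def my_op(x):
#         return _my_p.bind(x)
#
# `MyPrimitive`, `_my_p` and `my_op` are placeholder names for illustration only.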
@dataclass
class CustomCallArgsWrapper:
"""
wrapper of XLA custom call args
"""
def __init__(self,
output_types,
operands,
operand_shapes,
operand_specific_layouts=None,
output_specific_layouts=None):
self.output_types = output_types
self.operands = operands
self.operand_layouts = CustomCallArgsWrapper.generate_layouts(operand_shapes,
operand_specific_layouts)
output_shapes = [x.shape for x in output_types]
self.output_layouts = CustomCallArgsWrapper.generate_layouts(output_shapes,
output_specific_layouts)
@staticmethod
def generate_layouts(shapes, specific_layouts):
"""
setup layouts for XLA custom call
"""
def default_layout(shape):
return range(len(shape) - 1, -1, -1)
if specific_layouts is None:
specific_layouts = {}
layouts = []
for idx, shape in enumerate(shapes):
if idx in specific_layouts:
layouts.append(specific_layouts[idx])
else:
layouts.append(default_layout(shape))
return layouts
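# Illustrative example (editorial addition): the default layout produced by
# generate_layouts is row-major (minor-to-major), e.g. a 2-D shape gets (1, 0) and a
# 4-D shape gets (3, 2, 1, 0); per-index overrides come from `specific_layouts`.
# The helper name `_example_default_layouts` is invented for illustration.
def _example_default_layouts():
    """Show CustomCallArgsWrapper.generate_layouts on two shapes (illustrative only)."""
    layouts = CustomCallArgsWrapper.generate_layouts([(8, 16), (2, 3, 4, 5)], None)
    assert [list(layout) for layout in layouts] == [[1, 0], [3, 2, 1, 0]]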
def custom_caller(name, args, opaque, has_side_effect, **kwargs):
"""
XLA custom call wrapper
"""
if hasattr(mlir, "custom_call"):
out = mlir.custom_call(name,
result_types=args.output_types,
operands=args.operands,
operand_layouts=args.operand_layouts,
result_layouts=args.output_layouts,
backend_config=opaque,
has_side_effect=has_side_effect,
**kwargs).results
else:
# Need to disable one pylint error, as the name of the second
# positional parameter changed recently in JAX. Otherwise we won't
# be compatible with multiple JAX versions.
out = custom_call(name, # pylint: disable=too-many-function-args
args.output_types,
operands=args.operands,
operand_layouts=args.operand_layouts,
result_layouts=args.output_layouts,
backend_config=opaque,
has_side_effect=has_side_effect,
**kwargs)
return out
class TransposePrimitive(BasePrimitive):
"""
Transpose Primitive
"""
name = "te_transpose"
multiple_results = False
@staticmethod
def abstract(inputs, *, dtype):
"""
_transpose abstract
"""
in_dtype = dtypes.canonicalize_dtype(inputs.dtype)
out_dtype = te_dtype_to_jax_dtype(dtype)
assert len(inputs.shape) == 2
assert isinstance(dtype, TEDType)
assert in_dtype == out_dtype
return ShapedArray((inputs.shape[1], inputs.shape[0]),
in_dtype,
named_shape=inputs.named_shape)
@staticmethod
def lowering(ctx, inputs, *, dtype):
"""
_transpose cuda lowering
"""
in_aval = ctx.avals_in[0]
assert in_aval.dtype in [jnp.float32, jnp.float16, jnp.bfloat16, jnp.int8]
ir_in_type = ir.RankedTensorType(inputs.type)
ir_in_shape = ir_in_type.shape
ir_out_dtype = te_dtype_to_ir_dtype(dtype)
out_types = [ir.RankedTensorType.get([ir_in_shape[1], ir_in_shape[0]], ir_out_dtype)]
operands = [inputs]
operand_shapes = [ir_in_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
assert len(ir_in_shape) == 2
opaque = transformer_engine_jax.pack_common_descriptor(ir_in_shape, dtype, dtype)
out = custom_caller(TransposePrimitive.name, args, opaque, False)
return [out]
_transpose_p = register_primitive(TransposePrimitive)
def transpose(inputs: jnp.ndarray, dtype: TEDType) -> jnp.ndarray:
"""
transpose wrapper
Assume the input has a two-dimensional shape
"""
return _transpose_p.bind(inputs, dtype=dtype)
class CastTransposePrimitive(BasePrimitive):
"""
Cast Transpose Primitive
"""
name = "te_cast_transpose"
multiple_results = True
@staticmethod
def abstract(inputs, amax, scale, scale_inv, *, out_dtype):
"""
te_cast_transpose_p abstract
"""
dtype = dtypes.canonicalize_dtype(inputs.dtype)
assert len(inputs.shape) == 2
assert dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert amax.dtype == jnp.float32
assert scale.dtype == jnp.float32
assert scale_inv.dtype == jnp.float32
out_dtype = te_dtype_to_jax_dtype(out_dtype)
# input_cast, input_cast_trans, amax
return (ShapedArray((inputs.shape[0], inputs.shape[1]),
out_dtype,
named_shape=inputs.named_shape),
ShapedArray((inputs.shape[1], inputs.shape[0]),
out_dtype,
named_shape=inputs.named_shape),
ShapedArray((1,), amax.dtype, named_shape=amax.named_shape))
@staticmethod
def lowering(ctx, inputs, amax, scale, scale_inv, *, out_dtype):
"""
te_cast_transpose_p lowering rules
"""
in_aval, amax_aval, scale_aval, scale_inv_aval = ctx.avals_in
assert in_aval.dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert amax_aval.dtype == jnp.float32
assert scale_aval.dtype == jnp.float32
assert scale_inv_aval.dtype == jnp.float32
ir_in_type = ir.RankedTensorType(inputs.type)
ir_in_shape = ir_in_type.shape
ir_out_dtype = te_dtype_to_ir_dtype(out_dtype)
ir_amax_type = ir.RankedTensorType(amax.type)
ir_amax_dtype = ir_amax_type.element_type
ir_amax_shape = ir_amax_type.shape
ir_scale_shape = ir_amax_shape
ir_scale_inv_shape = ir_amax_shape
out_types = [
ir.RankedTensorType.get([ir_in_shape[0], ir_in_shape[1]], ir_out_dtype),
ir.RankedTensorType.get([ir_in_shape[1], ir_in_shape[0]], ir_out_dtype),
ir.RankedTensorType.get(ir_amax_shape, ir_amax_dtype),
]
operands = [inputs, amax, scale, scale_inv]
operand_shapes = [ir_in_shape, ir_amax_shape, ir_scale_shape, ir_scale_inv_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
assert len(ir_in_shape) == 2
opaque = transformer_engine_jax.pack_common_descriptor(ir_in_shape,
jax_dtype_to_te_dtype(in_aval.dtype),
out_dtype)
out = custom_caller(CastTransposePrimitive.name,
args,
opaque,
False,
operand_output_aliases={1: 2})
return out
_cast_transpose_p = register_primitive(CastTransposePrimitive)
def cast_transpose(inputs: jnp.ndarray, amax: jnp.ndarray, scale: jnp.ndarray,
scale_inv: jnp.ndarray,
out_dtype: TEDType) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""
cast transpose wrapper
Return FP8(inputs) and FP8(inputs.T) (both scaled by `scale`), plus the updated amax
"""
return _cast_transpose_p.bind(inputs, amax, scale, scale_inv, out_dtype=out_dtype)
class GatedGeluPrimitive(BasePrimitive):
"""
Gated Gelu Primitive
"""
name = "te_gated_gelu"
multiple_results = False
@staticmethod
def abstract(inputs):
"""
te_gated_gelu_p abstract
"""
dtype = dtypes.canonicalize_dtype(inputs.dtype)
assert dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
inputs_shape = inputs.shape
hidden_size = inputs_shape[-1]
# In Transformer, batch_shape = (batch, seqlen, )
batch_shapes = inputs_shape[:-1]
assert hidden_size % 2 == 0
out_shape = batch_shapes + (hidden_size // 2,)
return ShapedArray(out_shape, dtype, named_shape=inputs.named_shape)
@staticmethod
def lowering(ctx, inputs):
"""
te_gated_gelu_p lowering rules
"""
(in_aval,) = ctx.avals_in
assert in_aval.dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
ir_in_type = ir.RankedTensorType(inputs.type)
ir_in_shape = ir_in_type.shape
out_shape = ir_in_shape[:-1] + [ir_in_shape[-1] // 2]
out_types = [
ir.RankedTensorType.get(out_shape, ir_in_type.element_type),
]
operands = [inputs]
operand_shapes = [ir_in_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
hidden_size = ir_in_shape[-1]
# In Transformer, batch_size = batch x seqlen
batch_size = reduce(operator.mul, ir_in_shape[:-1])
in_dtype = jax_dtype_to_te_dtype(in_aval.dtype)
opaque = transformer_engine_jax.pack_common_descriptor((batch_size, hidden_size // 2),
in_dtype, in_dtype)
out = custom_caller(GatedGeluPrimitive.name, args, opaque, False)
return [out]
_gated_gelu_p = register_primitive(GatedGeluPrimitive)
def gated_gelu(inputs: jnp.ndarray) -> jnp.ndarray:
"""
gated gelu wrapper
Return geglu(inputs)
Assume inputs has a two-dimensional shape with memory layout (N, 2, H)
"""
return _gated_gelu_p.bind(inputs)
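# Illustrative example (editorial addition): per GatedGeluPrimitive.abstract above, the
# gated GELU halves the last dimension, so an (N, 2*H) input yields an (N, H) output.
# `_example_gated_gelu_out_shape` is an invented helper that only mirrors the shape rule;
# it does not invoke the CUDA custom call.
def _example_gated_gelu_out_shape(in_shape):
    """Mirror of GatedGeluPrimitive.abstract's output-shape rule (illustrative only)."""
    *batch_shape, hidden_size = in_shape
    assert hidden_size % 2 == 0
    return tuple(batch_shape) + (hidden_size // 2,)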
class GatedGeluFp8Primitive(BasePrimitive):
"""
Gated Gelu FP8 Primitive
"""
name = "te_gated_gelu_fp8"
multiple_results = True
@staticmethod
def abstract(inputs, amax, scale, scale_inv, *, out_dtype):
"""
te_gated_gelu_p abstract
"""
dtype = dtypes.canonicalize_dtype(inputs.dtype)
assert dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert amax.dtype == jnp.float32
assert scale.dtype == jnp.float32
assert scale_inv.dtype == jnp.float32
out_dtype = te_dtype_to_jax_dtype(out_dtype)
assert len(inputs.shape) == 2
hidden_size = inputs.shape[1]
batch_size = inputs.shape[0] # In Transformer, batch_size = batch x seqlen
# geglu output, amax
return (ShapedArray((batch_size, hidden_size // 2),
out_dtype,
named_shape=inputs.named_shape),
ShapedArray((1,), amax.dtype, named_shape=amax.named_shape))
@staticmethod
def lowering(ctx, inputs, amax, scale, scale_inv, *, out_dtype):
"""
te_gated_gelu_p lowering rules
"""
in_aval, amax_aval, scale_aval, scale_inv_aval = ctx.avals_in
assert in_aval.dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert amax_aval.dtype == jnp.float32
assert scale_aval.dtype == jnp.float32
assert scale_inv_aval.dtype == jnp.float32
ir_in_type = ir.RankedTensorType(inputs.type)
ir_in_shape = ir_in_type.shape
ir_out_dtype = te_dtype_to_ir_dtype(out_dtype)
ir_amax_type = ir.RankedTensorType(amax.type)
ir_amax_dtype = ir_amax_type.element_type
ir_amax_shape = ir_amax_type.shape
ir_scale_shape = ir_amax_shape
ir_scale_inv_shape = ir_amax_shape
hidden_size = ir_in_shape[1]
batch_size = ir_in_shape[0] # In Transformer, batch_size = batch x seqlen
out_types = [
ir.RankedTensorType.get([batch_size, hidden_size // 2], ir_out_dtype),
ir.RankedTensorType.get(ir_amax_shape, ir_amax_dtype),
]
operands = [inputs, amax, scale, scale_inv]
operand_shapes = [ir_in_shape, ir_amax_shape, ir_scale_shape, ir_scale_inv_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_common_descriptor(
(ir_in_shape[0], ir_in_shape[1] // 2), jax_dtype_to_te_dtype(in_aval.dtype), out_dtype)
out = custom_caller(GatedGeluFp8Primitive.name,
args,
opaque,
False,
operand_output_aliases={1: 1})
return out
_gated_gelu_fp8_p = register_primitive(GatedGeluFp8Primitive)
def gated_gelu_fp8(inputs: jnp.ndarray, amax: jnp.ndarray, scale: jnp.ndarray,
scale_inv: jnp.ndarray,
out_dtype: TEDType) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""
cast gated gelu wrapper
Return FP8(geglu(inputs))
Assume inputs has a two-dimensional shape with memory layout (N, 2, H)
"""
return _gated_gelu_fp8_p.bind(inputs, amax, scale, scale_inv, out_dtype=out_dtype)
class DgatedGeluPrimitive(BasePrimitive):
"""
Dgated Gelu Primitive
"""
name = "te_dgated_gelu"
multiple_results = False
@staticmethod
def abstract(inputs, gelu_inputs):
"""
te_dgated_gelu_p abstract
"""
dtype = dtypes.canonicalize_dtype(inputs.dtype)
assert dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert gelu_inputs.dtype == dtype
for axis in range(len(inputs.shape) - 1):
assert inputs.shape[axis] == gelu_inputs.shape[axis]
i_hidden_size = inputs.shape[-1]
g_hidden_size = gelu_inputs.shape[-1]
assert i_hidden_size * 2 == g_hidden_size
return ShapedArray(gelu_inputs.shape, dtype, named_shape=inputs.named_shape)
@staticmethod
def lowering(ctx, inputs, gelu_inputs):
"""
te_dgated_gelu_p lowering rules
"""
in_aval, gi_aval = ctx.avals_in
assert in_aval.dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert gi_aval.dtype == in_aval.dtype
ir_in_type = ir.RankedTensorType(inputs.type)
ir_in_shape = ir_in_type.shape
gi_type = ir.RankedTensorType(gelu_inputs.type)
gi_shape = gi_type.shape
for axis in range(len(ir_in_shape) - 1):
assert ir_in_shape[axis] == gi_shape[axis]
# In Transformer, batch_size = batch x seqlen
ir_batch_size = reduce(operator.mul, ir_in_shape[:-1])
i_hidden_size = ir_in_shape[-1]
g_hidden_size = gi_shape[-1]
assert i_hidden_size * 2 == g_hidden_size
out_dtype = ir_in_type.element_type
out_shape = gi_shape
out_types = [
ir.RankedTensorType.get(out_shape, out_dtype),
]
operands = [inputs, gelu_inputs]
operand_shapes = [ir_in_shape, gi_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
in_dtype = jax_dtype_to_te_dtype(in_aval.dtype)
opaque = transformer_engine_jax.pack_common_descriptor((ir_batch_size, i_hidden_size),
in_dtype, in_dtype)
out = custom_caller(DgatedGeluPrimitive.name, args, opaque, False)
return [out]
_dgated_gelu_p = register_primitive(DgatedGeluPrimitive)
def dgated_gelu(inputs: jnp.ndarray, gelu_inputs: jnp.ndarray) -> jnp.ndarray:
"""
dgated_gelu fusion wrapper
Return dgeglu(inputs)
"""
return _dgated_gelu_p.bind(inputs, gelu_inputs)
class DgatedGeluCastTransposePrimitive(BasePrimitive):
"""
Dgated Gelu Cast Transpose Primitive
"""
name = "te_dgated_gelu_cast_transpose"
multiple_results = True
@staticmethod
def abstract(inputs, gelu_inputs, amax, scale, scale_inv, *, out_dtype):
"""
te_dgated_gelu_cast_transpose_p abstract
"""
dtype = dtypes.canonicalize_dtype(inputs.dtype)
assert dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert gelu_inputs.dtype == dtype
assert len(inputs.shape) == 2
assert len(gelu_inputs.shape) == 2
ir_batch_size = inputs.shape[0]
gi_batch_size = gelu_inputs.shape[0]
assert ir_batch_size == gi_batch_size
ir_hidden_size = inputs.shape[1]
gi_hidden_size = gelu_inputs.shape[1]
assert ir_hidden_size * 2 == gi_hidden_size
assert amax.dtype == jnp.float32
assert scale.dtype == jnp.float32
assert scale_inv.dtype == jnp.float32
out_dtype = te_dtype_to_jax_dtype(out_dtype)
# dgelu_cast, dgelu_cast_trans, amax
return (ShapedArray((gi_batch_size, gi_hidden_size),
out_dtype,
named_shape=inputs.named_shape),
ShapedArray((gi_hidden_size, gi_batch_size),
out_dtype,
named_shape=inputs.named_shape),
ShapedArray((1,), amax.dtype, named_shape=amax.named_shape))
@staticmethod
def lowering(ctx, inputs, gelu_inputs, amax, scale, scale_inv, *, out_dtype):
"""
te_dgated_gelu_cast_transpose_p lowering rules
"""
in_aval, gi_aval, amax_aval, scale_aval, scale_inv_aval = ctx.avals_in
assert in_aval.dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert gi_aval.dtype == in_aval.dtype
assert amax_aval.dtype == jnp.float32
assert scale_aval.dtype == jnp.float32
assert scale_inv_aval.dtype == jnp.float32
ir_in_type = ir.RankedTensorType(inputs.type)
ir_in_shape = ir_in_type.shape
gi_type = ir.RankedTensorType(gelu_inputs.type)
gi_shape = gi_type.shape
ir_batch_size = ir_in_shape[0]
gi_batch_size = gi_shape[0]
assert ir_batch_size == gi_batch_size
ir_hidden_size = ir_in_shape[1]
gi_hidden_size = gi_shape[1]
assert ir_hidden_size * 2 == gi_hidden_size
ir_out_dtype = te_dtype_to_ir_dtype(out_dtype)
ir_amax_type = ir.RankedTensorType(amax.type)
ir_amax_dtype = ir_amax_type.element_type
ir_amax_shape = ir_amax_type.shape
ir_scale_shape = ir_amax_shape
ir_scale_inv_shape = ir_amax_shape
out_types = [
ir.RankedTensorType.get([gi_batch_size, gi_hidden_size], ir_out_dtype),
ir.RankedTensorType.get([gi_hidden_size, gi_batch_size], ir_out_dtype),
ir.RankedTensorType.get(ir_amax_shape, ir_amax_dtype),
]
operands = [inputs, gelu_inputs, amax, scale, scale_inv]
operand_shapes = [ir_in_shape, gi_shape, ir_amax_shape, ir_scale_shape, ir_scale_inv_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_common_descriptor((ir_batch_size, ir_hidden_size),
jax_dtype_to_te_dtype(in_aval.dtype),
out_dtype)
out = custom_caller(DgatedGeluCastTransposePrimitive.name,
args,
opaque,
False,
operand_output_aliases={2: 2})
return out
_dgated_gelu_cast_transpose_p = register_primitive(DgatedGeluCastTransposePrimitive)
def dgated_gelu_cast_transpose(inputs: jnp.ndarray, gelu_inputs: jnp.ndarray, amax: jnp.ndarray,
scale: jnp.ndarray, scale_inv: jnp.ndarray,
out_dtype: TEDType) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""
cast transpose d_gated_gelu fusion wrapper
Return FP8(dgeglu(inputs))
"""
return _dgated_gelu_cast_transpose_p.bind(inputs,
gelu_inputs,
amax,
scale,
scale_inv,
out_dtype=out_dtype)
class GemmPrimitive(BasePrimitive):
"""
Gemm Primitive
"""
name = "te_gemm"
multiple_results = False
@staticmethod
def abstract(A, B, A_scale_inv, B_scale_inv, *, A_dtype, B_dtype, D_dtype, transa, transb,
use_split_accumulator): # pylint: disable=unused-argument
"""
te_gemm_p abstract
"""
atype = dtypes.canonicalize_dtype(A.dtype)
btype = dtypes.canonicalize_dtype(B.dtype)
assert atype == te_dtype_to_jax_dtype(A_dtype)
assert btype == te_dtype_to_jax_dtype(B_dtype)
assert A_scale_inv.dtype == jnp.float32
assert B_scale_inv.dtype == jnp.float32
m = A.shape[0] if transa else A.shape[1]
k = A.shape[1] if transa else A.shape[0]
n = B.shape[1] if transb else B.shape[0]
assert (transb and k == B.shape[0]) or k == B.shape[1]
out_dtype = te_dtype_to_jax_dtype(D_dtype)
return ShapedArray((n, m),
out_dtype,
named_shape=merge_named_shape(A.named_shape, B.named_shape))
@staticmethod
def lowering(ctx, A, B, A_scale_inv, B_scale_inv, *, A_dtype, B_dtype, D_dtype, transa, transb,
use_split_accumulator):
"""
te_gemm_p lowering rules
"""
A_aval, B_aval, A_scale_inv_aval, B_scale_inv_aval = ctx.avals_in
assert A_aval.dtype == te_dtype_to_jax_dtype(A_dtype)
assert B_aval.dtype == te_dtype_to_jax_dtype(B_dtype)
assert A_scale_inv_aval.dtype == jnp.float32
assert B_scale_inv_aval.dtype == jnp.float32
A_type = ir.RankedTensorType(A.type)
B_type = ir.RankedTensorType(B.type)
A_shape = A_type.shape
B_shape = B_type.shape
A_scale_inv_shape = ir.RankedTensorType(A_scale_inv.type).shape
B_scale_inv_shape = ir.RankedTensorType(B_scale_inv.type).shape
m = A_shape[0] if transa else A_shape[1]
k = A_shape[1] if transa else A_shape[0]
n = B_shape[1] if transb else B_shape[0]
assert (transb and k == B_shape[0]) or k == B_shape[1]
ir_out_dtype = dtype_to_ir_type(np.dtype(te_dtype_to_jax_dtype(D_dtype)))
out_types = [
ir.RankedTensorType.get([n, m], ir_out_dtype),
]
operands = [A, B, A_scale_inv, B_scale_inv]
operand_shapes = [A_shape, B_shape, A_scale_inv_shape, B_scale_inv_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
# m, n, k here correspond to the transa=False and transb=False case,
# due to te_gemm's implementation.
# Therefore, m=A_shape[1], n=B_shape[0], k=A_shape[0]
opaque = transformer_engine_jax.pack_gemm_descriptor(A_shape[1], B_shape[0], A_shape[0],
A_dtype, B_dtype, D_dtype, transa,
transb, use_split_accumulator)
out = custom_caller(GemmPrimitive.name, args, opaque, False)
return [out]
_gemm_p = register_primitive(GemmPrimitive)
def gemm(A: jnp.ndarray,
A_scale_inv: jnp.ndarray,
A_type: TEDType,
transa: bool,
B: jnp.ndarray,
B_scale_inv: jnp.ndarray,
B_type: TEDType,
transb: bool,
D_type: TEDType,
use_split_accumulator: bool = False) -> jnp.ndarray:
"""
gemm wrapper
"""
return _gemm_p.bind(A,
B,
A_scale_inv,
B_scale_inv,
A_dtype=A_type,
B_dtype=B_type,
D_dtype=D_type,
transa=transa,
transb=transb,
use_split_accumulator=use_split_accumulator)
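# Illustrative example (editorial addition): the output of te_gemm has shape (n, m) as
# derived in GemmPrimitive.abstract. `_example_gemm_out_shape` is an invented helper that
# only mirrors that shape rule; the real wrapper above dispatches to the TE CUDA kernel.
# For instance: _example_gemm_out_shape((4, 8), (16, 8), transa=True, transb=False) == (16, 4).
def _example_gemm_out_shape(a_shape, b_shape, transa, transb):
    """Mirror of GemmPrimitive.abstract's output-shape rule (illustrative only)."""
    m = a_shape[0] if transa else a_shape[1]
    n = b_shape[1] if transb else b_shape[0]
    return (n, m)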
class LayerNormFwdPrimitive(BasePrimitive):
"""
Layer Normalization Forward Primitive
"""
name = "te_layernorm_forward"
multiple_results = True
@staticmethod
def abstract(x, gamma, beta, **kwargs): # pylint: disable=unused-argument
"""
LayerNorm fwd abstract
"""
x_dtype = dtypes.canonicalize_dtype(x.dtype)
assert x_dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
mu_dtype = jnp.float32
rsigma_dtype = jnp.float32
assert gamma.size == beta.size
hidden_size = gamma.size
assert x.size % hidden_size == 0
# In Transformer, batch_size = batch x seqlen
batch_size = x.size // hidden_size
return (
ShapedArray(x.shape, x_dtype, named_shape=x.named_shape), # output
ShapedArray((batch_size,), mu_dtype, named_shape=x.named_shape), # mu
ShapedArray((batch_size,), rsigma_dtype, named_shape=x.named_shape), # rsigma
)
@staticmethod
def lowering(ctx, x, gamma, beta, *, zero_centered_gamma, epsilon):
"""
LayerNorm fwd lowering rules
"""
x_aval, gamma_aval, beta_aval = ctx.avals_in
assert gamma_aval.dtype == beta_aval.dtype
x_type = ir.RankedTensorType(x.type)
x_shape = x_type.shape
w_type = ir.RankedTensorType(gamma.type)
w_shape = w_type.shape
b_type = ir.RankedTensorType(beta.type)
b_shape = b_type.shape
assert w_type == b_type
assert w_shape == b_shape
# Output shape is the same as the input shape, but the output type is the same as the weight type.
# See ln_api.cpp
out_shape = x_shape
output_type = w_type.element_type
ir_mu_dtype = ir.F32Type.get()
ir_rsigma_dtype = ir.F32Type.get()
hidden_size = reduce(operator.mul, w_shape)
# In Transformer, batch_size = batch x seqlen
batch_size = reduce(operator.mul, x_shape) // hidden_size
out_types = [
ir.RankedTensorType.get(out_shape, output_type),
ir.RankedTensorType.get((batch_size,), ir_mu_dtype),
ir.RankedTensorType.get((batch_size,), ir_rsigma_dtype),
]
operands = [x, gamma, beta]
operand_shapes = [x_shape, w_shape, b_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_norm_descriptor(
batch_size,
hidden_size,
jax_dtype_to_te_dtype(x_aval.dtype),
jax_dtype_to_te_dtype(gamma_aval.dtype),
zero_centered_gamma,
epsilon,
)
out = custom_caller(LayerNormFwdPrimitive.name, args, opaque, False)
return out
_layernorm_fwd_p = register_primitive(LayerNormFwdPrimitive)
def layernorm_fwd(x: jnp.ndarray, gamma: jnp.ndarray, beta: jnp.ndarray, zero_centered_gamma: bool,
epsilon: float):
"""
Wrapper for TE layernorm fwd
"""
return _layernorm_fwd_p.bind(x,
gamma,
beta,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon)
class LayerNormFwdFp8Primitive(BasePrimitive):
"""
Layer Normalization Forward FP8 Primitive
"""
name = "te_layernorm_forward_fp8"
multiple_results = True
@staticmethod
def abstract(
x,
gamma,
beta,
amax,
scale,
scale_inv,
**kwargs # pylint: disable=unused-argument
):
"""
LayerNorm fwd (fp8 out) abstract
"""
x_dtype = dtypes.canonicalize_dtype(x.dtype)
assert x_dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert amax.dtype == jnp.float32
assert scale.dtype == jnp.float32
assert scale_inv.dtype == jnp.float32
out_dtype = jnp.int8
mu_dtype = jnp.float32
rsigma_dtype = jnp.float32
assert gamma.size == beta.size
hidden_size = gamma.size
# In Transformer, batch_size = batch x seqlen
batch_size = x.size // hidden_size
return (
ShapedArray(x.shape, out_dtype, named_shape=x.named_shape), # output
ShapedArray((batch_size,), mu_dtype, named_shape=x.named_shape), # mu
ShapedArray((batch_size,), rsigma_dtype, named_shape=x.named_shape), # rsigma
ShapedArray((1,), amax.dtype, named_shape=amax.named_shape), # amax
)
@staticmethod
def lowering(ctx, x, gamma, beta, amax, scale, scale_inv, *, zero_centered_gamma, epsilon):
"""
LayerNorm fwd (fp8 out) lowering rules
"""
x_aval, gamma_aval, beta_aval, amax_aval, scale_aval, scale_inv_aval = ctx.avals_in
assert x_aval.dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert gamma_aval.dtype == beta_aval.dtype
assert amax_aval.dtype == jnp.float32
assert scale_aval.dtype == jnp.float32
assert scale_inv_aval.dtype == jnp.float32
x_type = ir.RankedTensorType(x.type)
x_shape = x_type.shape
w_type = ir.RankedTensorType(gamma.type)
w_shape = w_type.shape
b_type = ir.RankedTensorType(beta.type)
b_shape = b_type.shape
ir_out_dtype = dtype_to_ir_type(np.dtype(np.int8))
ir_mu_dtype = ir.F32Type.get()
ir_rsigma_dtype = ir.F32Type.get()
ir_amax_type = ir.RankedTensorType(amax.type)
ir_amax_dtype = ir_amax_type.element_type
ir_amax_shape = ir_amax_type.shape
ir_scale_shape = ir_amax_shape
ir_scale_inv_shape = ir_amax_shape
hidden_size = reduce(operator.mul, w_shape)
# In Transformer, batch_size = batch x seqlen
batch_size = reduce(operator.mul, x_shape) // hidden_size
out_types = [
ir.RankedTensorType.get(x_shape, ir_out_dtype),
ir.RankedTensorType.get((batch_size,), ir_mu_dtype),
ir.RankedTensorType.get((batch_size,), ir_rsigma_dtype),
ir.RankedTensorType.get(ir_amax_shape, ir_amax_dtype),
]
operands = [x, gamma, beta, amax, scale, scale_inv]
operand_shapes = [
x_shape, w_shape, b_shape, ir_amax_shape, ir_scale_shape, ir_scale_inv_shape
]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_norm_descriptor(
batch_size,
hidden_size,
jax_dtype_to_te_dtype(x_aval.dtype),
jax_dtype_to_te_dtype(gamma_aval.dtype),
zero_centered_gamma,
epsilon,
)
out = custom_caller(LayerNormFwdFp8Primitive.name,
args,
opaque,
False,
operand_output_aliases={3: 3})
return out
_layernorm_fwd_fp8_p = register_primitive(LayerNormFwdFp8Primitive)
def layernorm_fwd_fp8(x: jnp.ndarray, gamma: jnp.ndarray, beta: jnp.ndarray, amax: jnp.ndarray,
scale: jnp.ndarray, scale_inv: jnp.ndarray, zero_centered_gamma: bool,
epsilon: float):
"""
Wrapper for TE layernorm fwd (fp8 out)
"""
return _layernorm_fwd_fp8_p.bind(x,
gamma,
beta,
amax,
scale,
scale_inv,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon)
class LayerNormBwdPrimitive(BasePrimitive):
"""
Layer Normalization Backward Primitive
"""
name = "te_layernorm_backward"
multiple_results = True
@staticmethod
def abstract(grad_output, mu, rsigma, x, gamma, **kwargs): # pylint: disable=unused-argument
"""
Layernorm bwd abstract
"""
x_dtype = dtypes.canonicalize_dtype(x.dtype)
w_dtype = dtypes.canonicalize_dtype(gamma.dtype)
mu_dtype = dtypes.canonicalize_dtype(mu.dtype)
rsigma_dtype = dtypes.canonicalize_dtype(rsigma.dtype)
hidden_size = gamma.size
# In Transformer, batch_size = batch x seqlen
batch_size = x.size // hidden_size
assert dtypes.canonicalize_dtype(grad_output.dtype) == w_dtype
assert grad_output.shape == x.shape
assert mu.shape == rsigma.shape == (batch_size,)
assert mu_dtype == rsigma_dtype == jnp.float32
assert grad_output.named_shape == x.named_shape
return (
ShapedArray(x.shape, x_dtype, named_shape=grad_output.named_shape), # grad input
ShapedArray(gamma.shape, w_dtype, named_shape=gamma.named_shape), # grad gamma
ShapedArray(gamma.shape, w_dtype, named_shape=gamma.named_shape), # grad beta
)
@staticmethod
def lowering(ctx, grad_output, mu, rsigma, x, gamma, *, zero_centered_gamma, epsilon):
"""
Layernorm bwd lowering rules
"""
_, _, _, x_aval, gamma_aval = ctx.avals_in
x_type = ir.RankedTensorType(x.type)
x_shape = x_type.shape
w_type = ir.RankedTensorType(gamma.type)
w_shape = w_type.shape
b_type = ir.RankedTensorType(gamma.type)
b_shape = b_type.shape
assert w_type == b_type
assert w_shape == b_shape
go_shape = ir.RankedTensorType(grad_output.type).shape
mu_shape = ir.RankedTensorType(mu.type).shape
rsigma_shape = ir.RankedTensorType(rsigma.type).shape
hidden_size = reduce(operator.mul, w_shape)
# In Transformer, batch_size = batch x seqlen
batch_size = reduce(operator.mul, x_shape) // hidden_size
out_types = [
ir.RankedTensorType.get(x_shape, x_type.element_type),
ir.RankedTensorType.get(w_shape, w_type.element_type),
ir.RankedTensorType.get(b_shape, b_type.element_type),
]
operands = [grad_output, mu, rsigma, x, gamma]
operand_shapes = [go_shape, mu_shape, rsigma_shape, x_shape, w_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_norm_descriptor(
batch_size,
hidden_size,
jax_dtype_to_te_dtype(x_aval.dtype),
jax_dtype_to_te_dtype(gamma_aval.dtype),
zero_centered_gamma,
epsilon,
)
out = custom_caller(LayerNormBwdPrimitive.name, args, opaque, False)
return out
_layernorm_bwd_p = register_primitive(LayerNormBwdPrimitive)
def layernorm_bwd(g: jnp.ndarray, mu: jnp.ndarray, rsigma: jnp.ndarray, x: jnp.ndarray,
gamma: jnp.ndarray, zero_centered_gamma: bool, epsilon: float):
"""
Wrapper for TE layernorm bwd
"""
return _layernorm_bwd_p.bind(g,
mu,
rsigma,
x,
gamma,
zero_centered_gamma=zero_centered_gamma,
epsilon=epsilon)
class RmsNormFwdPrimitive(BasePrimitive):
"""
RMS Normalization Forward Primitive
"""
name = "te_rmsnorm_forward"
multiple_results = True
@staticmethod
def abstract(x, gamma, **kwargs): # pylint: disable=unused-argument
"""
RMSNorm fwd abstract
"""
x_dtype = dtypes.canonicalize_dtype(x.dtype)
rsigma_dtype = jnp.float32
hidden_size = gamma.size
# In Transformer, batch_size = batch x seqlen
batch_size = x.size // hidden_size
return (
ShapedArray(x.shape, x_dtype, named_shape=x.named_shape), # output
ShapedArray((batch_size,), rsigma_dtype, named_shape=x.named_shape), # rsigma
)
@staticmethod
def lowering(ctx, x, gamma, *, epsilon):
"""
RMSNorm fwd lowering rules
"""
x_aval, gamma_aval = ctx.avals_in
x_type = ir.RankedTensorType(x.type)
x_shape = x_type.shape
w_type = ir.RankedTensorType(gamma.type)
w_shape = w_type.shape
iv_element_type = ir.F32Type.get()
hidden_size = reduce(operator.mul, w_shape)
# In Transformer, batch_size = batch x seqlen
batch_size = reduce(operator.mul, x_shape) // hidden_size
out_types = [
ir.RankedTensorType.get(x_shape, w_type.element_type),
ir.RankedTensorType.get((batch_size,), iv_element_type),
]
operands = [x, gamma]
operand_shapes = [x_shape, w_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_norm_descriptor(
batch_size,
hidden_size,
jax_dtype_to_te_dtype(x_aval.dtype),
jax_dtype_to_te_dtype(gamma_aval.dtype),
False, # RMSNorm doesn't support zero_centered_gamma
epsilon,
)
out = custom_caller(RmsNormFwdPrimitive.name, args, opaque, False)
return out
_rmsnorm_fwd_p = register_primitive(RmsNormFwdPrimitive)
def rmsnorm_fwd(x: jnp.ndarray, gamma: jnp.ndarray, epsilon: float):
"""
Wrapper for TE rmsnorm fwd
"""
return _rmsnorm_fwd_p.bind(x, gamma, epsilon=epsilon)
class RmsNormFwdFp8Primitive(BasePrimitive):
"""
RMS Normalization Forward FP8 Primitive
"""
name = "te_rmsnorm_forward_fp8"
multiple_results = True
@staticmethod
def abstract(
x,
gamma,
amax,
scale,
scale_inv,
**kwargs # pylint: disable=unused-argument
):
"""
RMSNorm fwd (fp8 out) abstract
"""
x_dtype = dtypes.canonicalize_dtype(x.dtype)
assert x_dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert amax.dtype == jnp.float32
assert scale.dtype == jnp.float32
assert scale_inv.dtype == jnp.float32
out_dtype = jnp.int8
rsigma_dtype = jnp.float32
hidden_size = gamma.size
# In Transformer, batch_size = batch x seqlen
batch_size = x.size // hidden_size
return (
ShapedArray(x.shape, out_dtype, named_shape=x.named_shape), # output
ShapedArray((batch_size,), rsigma_dtype, named_shape=x.named_shape), # rsigma
ShapedArray((1,), amax.dtype, named_shape=amax.named_shape), # amax
)
@staticmethod
def lowering(ctx, x, gamma, amax, scale, scale_inv, *, epsilon):
"""
RMSNorm fwd (fp8 out) lowering rules
"""
x_aval, gamma_aval, amax_aval, scale_aval, scale_inv_aval = ctx.avals_in
assert x_aval.dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert amax_aval.dtype == jnp.float32
assert scale_aval.dtype == jnp.float32
assert scale_inv_aval.dtype == jnp.float32
x_type = ir.RankedTensorType(x.type)
x_shape = x_type.shape
w_type = ir.RankedTensorType(gamma.type)
w_shape = w_type.shape
ir_out_dtype = dtype_to_ir_type(np.dtype(np.int8))
ir_rsigma_dtype = ir.F32Type.get()
ir_amax_type = ir.RankedTensorType(amax.type)
ir_amax_dtype = ir_amax_type.element_type
ir_amax_shape = ir_amax_type.shape
ir_scale_shape = ir_amax_shape
ir_scale_inv_shape = ir_amax_shape
hidden_size = reduce(operator.mul, w_shape)
# In Transformer, batch_size = batch x seqlen
batch_size = reduce(operator.mul, x_shape) // hidden_size
out_types = [
ir.RankedTensorType.get(x_shape, ir_out_dtype),
ir.RankedTensorType.get((batch_size,), ir_rsigma_dtype),
ir.RankedTensorType.get(ir_amax_shape, ir_amax_dtype),
]
operands = [x, gamma, amax, scale, scale_inv]
operand_shapes = [x_shape, w_shape, ir_amax_shape, ir_scale_shape, ir_scale_inv_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_norm_descriptor(
batch_size,
hidden_size,
jax_dtype_to_te_dtype(x_aval.dtype),
jax_dtype_to_te_dtype(gamma_aval.dtype),
False, # RMSNorm doesn't support zero_centered_gamma
epsilon,
)
out = custom_caller(RmsNormFwdFp8Primitive.name,
args,
opaque,
False,
operand_output_aliases={2: 2})
return out
_rmsnorm_fwd_fp8_p = register_primitive(RmsNormFwdFp8Primitive)
def rmsnorm_fwd_fp8(x: jnp.ndarray, gamma: jnp.ndarray, amax: jnp.ndarray, scale: jnp.ndarray,
scale_inv: jnp.ndarray, epsilon: float):
"""
Wrapper for TE rmsnorm fwd (fp8 out)
"""
return _rmsnorm_fwd_fp8_p.bind(x, gamma, amax, scale, scale_inv, epsilon=epsilon)
class RmsNormBwdPrimitive(BasePrimitive):
"""
RMS Normalization Backward Primitive
"""
name = "te_rmsnorm_backward"
multiple_results = True
@staticmethod
def abstract(
grad_output,
rsigma,
x,
gamma,
**kwargs # pylint: disable=unused-argument
):
"""
RMSNorm bwd abstract
"""
w_dtype = dtypes.canonicalize_dtype(gamma.dtype)
x_dtype = dtypes.canonicalize_dtype(x.dtype)
rsigma_dtype = dtypes.canonicalize_dtype(rsigma.dtype)
hidden_size = gamma.size
# In Transformer, batch_size = batch x seqlen
batch_size = x.size // hidden_size
assert dtypes.canonicalize_dtype(grad_output.dtype) == w_dtype
assert grad_output.shape == x.shape
assert rsigma.shape == (batch_size,)
assert rsigma_dtype == jnp.float32
assert grad_output.named_shape == x.named_shape
return (
ShapedArray(x.shape, x_dtype, named_shape=grad_output.named_shape), # grad input
ShapedArray(gamma.shape, w_dtype, named_shape=gamma.named_shape), # grad gamma
)
@staticmethod
def lowering(ctx, grad_output, inv_var, x, gamma, *, epsilon):
"""
RMSNorm bwd lowering rules
"""
_, _, x_aval, gamma_aval = ctx.avals_in
x_type = ir.RankedTensorType(x.type)
x_shape = x_type.shape
w_type = ir.RankedTensorType(gamma.type)
w_shape = w_type.shape
go_shape = ir.RankedTensorType(grad_output.type).shape
inv_var_shape = ir.RankedTensorType(inv_var.type).shape
hidden_size = reduce(operator.mul, w_shape)
# In Transformer, batch_size = batch x seqlen
batch_size = reduce(operator.mul, x_shape) // hidden_size
out_types = [
ir.RankedTensorType.get(x_shape, x_type.element_type),
ir.RankedTensorType.get(w_shape, w_type.element_type),
]
operands = [grad_output, inv_var, x, gamma]
operand_shapes = [go_shape, inv_var_shape, x_shape, w_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_norm_descriptor(
batch_size,
hidden_size,
jax_dtype_to_te_dtype(x_aval.dtype),
jax_dtype_to_te_dtype(gamma_aval.dtype),
False, # RMSNorm doesn't support zero_centered_gamma
epsilon,
)
out = custom_caller(RmsNormBwdPrimitive.name, args, opaque, False)
return out
_rmsnorm_bwd_p = register_primitive(RmsNormBwdPrimitive)
def rmsnorm_bwd(grad: jnp.ndarray, inv_var: jnp.ndarray, x: jnp.ndarray, gamma: jnp.ndarray,
epsilon: float):
"""
Wrapper for TE rmsnorm bwd
"""
return _rmsnorm_bwd_p.bind(grad, inv_var, x, gamma, epsilon=epsilon)
class QuantizePrimitive(BasePrimitive):
"""
Quantize Primitive
"""
name = "te_quantize"
multiple_results = True
@staticmethod
def abstract(inputs, amax, scale, scale_inv, *, out_dtype):
"""
te_quantize abstract
"""
in_dtype = dtypes.canonicalize_dtype(inputs.dtype)
assert in_dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert isinstance(out_dtype, TEDType)
out_dtype = te_dtype_to_jax_dtype(out_dtype)
assert amax.dtype == jnp.float32
assert scale.dtype == jnp.float32
assert scale_inv.dtype == jnp.float32
return (ShapedArray(inputs.shape, out_dtype, named_shape=inputs.named_shape),
ShapedArray((1,), amax.dtype, named_shape=amax.named_shape))
@staticmethod
def lowering(ctx, inputs, amax, scale, scale_inv, *, out_dtype):
"""
te_quantize lowering rules
"""
in_aval, amax_aval, scale_aval, scale_inv_aval = ctx.avals_in
assert in_aval.dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert amax_aval.dtype == jnp.float32
assert scale_aval.dtype == jnp.float32
assert scale_inv_aval.dtype == jnp.float32
ir_in_type = ir.RankedTensorType(inputs.type)
ir_in_shape = ir_in_type.shape
ir_out_dtype = te_dtype_to_ir_dtype(out_dtype)
ir_out_shape = ir_in_shape
ir_amax_type = ir.RankedTensorType(amax.type)
ir_amax_shape = ir_amax_type.shape
ir_amax_dtype = ir_amax_type.element_type
ir_scale_shape = ir_amax_shape
ir_scale_inv_shape = ir_amax_shape
out_types = [
ir.RankedTensorType.get(ir_out_shape, ir_out_dtype),
ir.RankedTensorType.get(ir_amax_shape, ir_amax_dtype),
]
operands = [inputs, amax, scale, scale_inv]
operand_shapes = [ir_in_shape, ir_amax_shape, ir_scale_shape, ir_scale_inv_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_common_descriptor(in_aval.shape,
jax_dtype_to_te_dtype(in_aval.dtype),
out_dtype)
out = custom_caller(QuantizePrimitive.name,
args,
opaque,
False,
operand_output_aliases={1: 1})
return out
_quantize_p = register_primitive(QuantizePrimitive)
def quantize(inputs: jnp.ndarray, amax: jnp.ndarray, scale: jnp.ndarray, scale_inv: jnp.ndarray,
out_dtype: TEDType) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""
quantize wrapper
Return FP8 tensor
"""
return _quantize_p.bind(inputs, amax, scale, scale_inv, out_dtype=out_dtype)
class DequantizePrimitive(BasePrimitive):
"""
Dequantize Primitive
"""
name = "te_dequantize"
multiple_results = False
@staticmethod
def abstract(inputs, amax, scale, scale_inv, *, fp8_dtype, out_dtype):
"""
te_dequantize abstract
"""
in_dtype = dtypes.canonicalize_dtype(inputs.dtype)
assert in_dtype == jnp.int8
assert isinstance(fp8_dtype, TEDType)
assert isinstance(out_dtype, TEDType)
out_dtype = te_dtype_to_jax_dtype(out_dtype)
assert out_dtype in [jnp.float32, jnp.float16, jnp.bfloat16]
assert amax.dtype == jnp.float32
assert scale.dtype == jnp.float32
assert scale_inv.dtype == jnp.float32
return ShapedArray(inputs.shape, out_dtype, named_shape=inputs.named_shape)
@staticmethod
def lowering(ctx, inputs, amax, scale, scale_inv, *, fp8_dtype, out_dtype):
"""
te_dequantize lowering rules
"""
in_aval, amax_aval, scale_aval, scale_inv_aval = ctx.avals_in
assert in_aval.dtype == jnp.int8
assert amax_aval.dtype == jnp.float32
assert scale_aval.dtype == jnp.float32
assert scale_inv_aval.dtype == jnp.float32
ir_in_type = ir.RankedTensorType(inputs.type)
ir_in_shape = ir_in_type.shape
ir_out_dtype = te_dtype_to_ir_dtype(out_dtype)
ir_out_shape = ir_in_shape
ir_amax_type = ir.RankedTensorType(amax.type)
ir_amax_shape = ir_amax_type.shape
ir_scale_shape = ir_amax_shape
ir_scale_inv_shape = ir_amax_shape
out_types = [ir.RankedTensorType.get(ir_out_shape, ir_out_dtype)]
operands = [inputs, amax, scale, scale_inv]
operand_shapes = [ir_in_shape, ir_amax_shape, ir_scale_shape, ir_scale_inv_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_common_descriptor(in_aval.shape, fp8_dtype, out_dtype)
out = custom_caller(DequantizePrimitive.name, args, opaque, False)
return [out]
_dequantize_p = register_primitive(DequantizePrimitive)
def dequantize(inputs: jnp.ndarray, amax: jnp.ndarray, scale: jnp.ndarray, scale_inv: jnp.ndarray,
fp8_dtype: TEDType, out_dtype: TEDType) -> jnp.ndarray:
"""
dequantize wrapper
Return FP16/BF16/FP32 tensor
"""
return _dequantize_p.bind(inputs,
amax,
scale,
scale_inv,
fp8_dtype=fp8_dtype,
out_dtype=out_dtype)
class SoftmaxPrimitive(BasePrimitive):
"""
Softmax Primitive
"""
max_k_seqlen_supported = 4096
@staticmethod
def get_batch_per_block(k_seqlen: int) -> int:
"""Get batch per CTA in Softmax kernels"""
threads_per_warp = 32
threads_per_block = 128 # Depends on the kernel implementation
pow2 = 1 << (k_seqlen - 1).bit_length()
warp_size = pow2 if pow2 < threads_per_warp else threads_per_warp
batches_per_warp = 2 if pow2 <= 128 else 1
warps_per_block = threads_per_block // warp_size
batches_per_block = warps_per_block * batches_per_warp
return batches_per_block
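# Worked example (editorial addition): with threads_per_warp = 32 and
# threads_per_block = 128, k_seqlen = 512 gives pow2 = 512, warp_size = 32,
# batches_per_warp = 1 and warps_per_block = 4, so get_batch_per_block(512) == 4;
# k_seqlen = 64 gives pow2 = 64 <= 128, hence batches_per_warp = 2 and a result of 8.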
@staticmethod
def is_kernel_available(batch: int, heads: int, q_seqlen: int, k_seqlen: int,
dtype: jnp.dtype) -> bool:
"""Check Softmax kernel availability based on size"""
raise NotImplementedError
@staticmethod
def softmax_backward_abstract(grad_outputs, softmax_outputs, scale_factor=None): # pylint: disable=unused-argument
"""
MLIR abstract
"""
grad_outputs_dtype = dtypes.canonicalize_dtype(grad_outputs.dtype)
softmax_outputs_dtype = dtypes.canonicalize_dtype(softmax_outputs.dtype)
assert grad_outputs_dtype == softmax_outputs_dtype
assert grad_outputs_dtype in [jnp.float16, jnp.bfloat16]
assert softmax_outputs_dtype in [jnp.float16, jnp.bfloat16]
assert grad_outputs.shape == softmax_outputs.shape
return ShapedArray(softmax_outputs.shape,
softmax_outputs_dtype,
named_shape=softmax_outputs.named_shape)
@staticmethod
def softmax_backward_lowering(name, ctx, grad_outputs, softmax_outputs, scale_factor):
"""
MLIR lowering
"""
grad_outputs_aval, _ = ctx.avals_in
grad_outputs_type = ir.RankedTensorType(grad_outputs.type)
grad_outputs_shape = grad_outputs_type.shape
batch = grad_outputs_shape[0]
pad_batch = batch # unused
heads = grad_outputs_shape[1]
q_seqlen = grad_outputs_shape[2]
k_seqlen = grad_outputs_shape[3]
softmax_outputs_type = ir.RankedTensorType(softmax_outputs.type)
softmax_outputs_shape = softmax_outputs_type.shape
out_types = [
ir.RankedTensorType.get(softmax_outputs_shape, softmax_outputs_type.element_type)
]
operands = [grad_outputs, softmax_outputs]
operand_shapes = [grad_outputs_shape, softmax_outputs_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_softmax_descriptor(
batch, pad_batch, heads, q_seqlen, k_seqlen,
jax_dtype_to_te_dtype(grad_outputs_aval.dtype), scale_factor)
out = custom_caller(name, args, opaque, False)
return [out]
class ScaledSoftmaxFwdPrimitive(SoftmaxPrimitive):
"""
Scaled Softmax Fwd Primitive
"""
name = "te_scaled_softmax_forward"
multiple_results = False
@staticmethod
def is_kernel_available(batch: int, heads: int, q_seqlen: int, k_seqlen: int,
dtype: jnp.dtype) -> bool:
"""Check Softmax kernel availability based on size"""
attn_batches = batch * heads
if (dtype in [jnp.float16, jnp.bfloat16]
and 16 < k_seqlen <= SoftmaxPrimitive.max_k_seqlen_supported
# k_seqlen must be 16 ~ 4096
and q_seqlen % 4 == 0 # q_seqlen must be a multiple of 4
and attn_batches % 4 == 0 # batch * heads must be a multiple of 4
):
if 0 <= k_seqlen <= SoftmaxPrimitive.max_k_seqlen_supported:
batch_per_block = SoftmaxPrimitive.get_batch_per_block(k_seqlen)
return q_seqlen % batch_per_block == 0
return False
@staticmethod
def abstract(inputs, *, scale_factor): # pylint: disable=unused-argument
"""
te_scaled_softmax_forward abstract
"""
shape_rank = 4 # batch, heads, q_seqlen and k_seqlen
i_dtype = dtypes.canonicalize_dtype(inputs.dtype)
assert i_dtype in [jnp.float16, jnp.bfloat16]
i_shape = inputs.shape
assert len(i_shape) == shape_rank
q_seqlen = i_shape[2]
k_seqlen = i_shape[3]
assert k_seqlen <= SoftmaxPrimitive.max_k_seqlen_supported
assert q_seqlen > 1
return ShapedArray(inputs.shape, i_dtype, named_shape=inputs.named_shape)
@staticmethod
def lowering(ctx, inputs, *, scale_factor):
"""
te_scaled_softmax_forward lowering rules
"""
shape_rank = 4 # batch, heads, q_seqlen and k_seqlen
i_aval, = ctx.avals_in
i_type = ir.RankedTensorType(inputs.type)
i_shape = i_type.shape
assert len(i_shape) == shape_rank
batch = i_shape[0]
pad_batch = batch
heads = i_shape[1]
q_seqlen = i_shape[2]
k_seqlen = i_shape[3]
out_types = [ir.RankedTensorType.get(i_shape, i_type.element_type)]
operands = [inputs]
operand_shapes = [i_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_softmax_descriptor(batch, pad_batch, heads, q_seqlen,
k_seqlen,
jax_dtype_to_te_dtype(i_aval.dtype),
scale_factor)
out = custom_caller(ScaledSoftmaxFwdPrimitive.name, args, opaque, False)
return [out]
_scaled_softmax_fwd_p = register_primitive(ScaledSoftmaxFwdPrimitive)
def scaled_softmax_fwd(inputs: jnp.ndarray, scale_factor: float) -> jnp.ndarray:
"""
scaled_softmax_forward wrapper
Return FP16/BF16 tensor
"""
return _scaled_softmax_fwd_p.bind(inputs, scale_factor=scale_factor)
class ScaledSoftmaxBwdPrimitive(SoftmaxPrimitive):
"""
Scaled Softmax Bwd Primitive
"""
name = "te_scaled_softmax_backward"
multiple_results = False
@staticmethod
def is_kernel_available(batch: int, heads: int, q_seqlen: int, k_seqlen: int,
dtype: jnp.dtype) -> bool:
"""Check Softmax kernel availability based on size"""
return ScaledSoftmaxFwdPrimitive.is_kernel_available(batch, heads, q_seqlen, k_seqlen,
dtype)
@staticmethod
def abstract(grad_outputs, softmax_outputs, *, scale_factor):
"""
te_scaled_softmax_backward abstract
"""
return SoftmaxPrimitive.softmax_backward_abstract(grad_outputs, softmax_outputs,
scale_factor)
@staticmethod
def lowering(ctx, grad_outputs, softmax_outputs, *, scale_factor):
"""
te_scaled_softmax_backward lowering rules
"""
out = SoftmaxPrimitive.softmax_backward_lowering(ScaledSoftmaxBwdPrimitive.name, ctx,
grad_outputs, softmax_outputs,
scale_factor)
return out # out is iterable already
_scaled_softmax_bwd_p = register_primitive(ScaledSoftmaxBwdPrimitive)
def scaled_softmax_bwd(grad_outputs: jnp.ndarray, softmax_outputs: jnp.ndarray,
scale_factor: float) -> jnp.ndarray:
"""
scaled_softmax_backward wrapper
Return FP16/BF16 tensor
"""
return _scaled_softmax_bwd_p.bind(grad_outputs, softmax_outputs, scale_factor=scale_factor)
class ScaledMaskedSoftmaxFwdPrimitive(SoftmaxPrimitive):
"""
Scaled Masked Softmax Fwd Primitive
"""
name = "te_scaled_masked_softmax_forward"
multiple_results = False
@staticmethod
def is_kernel_available(batch: int, heads: int, q_seqlen: int, k_seqlen: int,
dtype: jnp.dtype) -> bool:
"""Check Softmax kernel availability based on size"""
attn_batches = batch * heads
if (dtype in [jnp.float16, jnp.bfloat16]
and 16 < k_seqlen <= SoftmaxPrimitive.max_k_seqlen_supported
# k_seqlen must be 16 ~ 4096
and q_seqlen % 4 == 0 # q_seqlen must be a multiple of 4
and attn_batches % 4 == 0 # batch * heads must be a multiple of 4
):
if 0 <= k_seqlen <= SoftmaxPrimitive.max_k_seqlen_supported:
batch_per_block = SoftmaxPrimitive.get_batch_per_block(k_seqlen)
return q_seqlen % batch_per_block == 0
return False
@staticmethod
def abstract(inputs, mask, *, scale_factor): # pylint: disable=unused-argument
"""
te_scaled_masked_softmax_forward abstract
"""
shape_rank = 4 # batch, heads, q_seqlen and k_seqlen
i_dtype = dtypes.canonicalize_dtype(inputs.dtype)
assert i_dtype in [jnp.float16, jnp.bfloat16]
i_shape = inputs.shape
assert len(i_shape) == shape_rank
batch = i_shape[0]
q_seqlen = i_shape[2]
k_seqlen = i_shape[3]
assert k_seqlen <= SoftmaxPrimitive.max_k_seqlen_supported
assert q_seqlen > 1
mask_dtype = dtypes.canonicalize_dtype(mask.dtype)
assert mask_dtype in [
jnp.uint8,
]
mask_shape = mask.shape
assert len(mask_shape) == shape_rank
pad_batch = mask_shape[0]
assert pad_batch in (1, batch) # 1 means broadcast
assert mask_shape[1] == 1 # 1 means broadcast
assert mask_shape[2] == q_seqlen
assert mask_shape[3] == k_seqlen
return ShapedArray(inputs.shape, i_dtype, named_shape=inputs.named_shape)
@staticmethod
def lowering(ctx, inputs, mask, *, scale_factor):
"""
te_scaled_masked_softmax_forward lowering rules
"""
shape_rank = 4 # batch, heads, q_seqlen and k_seqlen
i_aval, _ = ctx.avals_in
i_type = ir.RankedTensorType(inputs.type)
i_shape = i_type.shape
assert len(i_shape) == shape_rank
batch = i_shape[0]
heads = i_shape[1]
q_seqlen = i_shape[2]
k_seqlen = i_shape[3]
mask_type = ir.RankedTensorType(mask.type)
mask_shape = mask_type.shape
assert len(mask_shape) == shape_rank
pad_batch = mask_shape[0]
out_types = [ir.RankedTensorType.get(i_shape, i_type.element_type)]
operands = [inputs, mask]
operand_shapes = [i_shape, mask_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_softmax_descriptor(batch, pad_batch, heads, q_seqlen,
k_seqlen,
jax_dtype_to_te_dtype(i_aval.dtype),
scale_factor)
out = custom_caller(ScaledMaskedSoftmaxFwdPrimitive.name, args, opaque, False)
return [out]
_scaled_masked_softmax_fwd_p = register_primitive(ScaledMaskedSoftmaxFwdPrimitive)
def scaled_masked_softmax_fwd(inputs: jnp.ndarray, mask: jnp.ndarray,
scale_factor: float) -> jnp.ndarray:
"""
scaled_masked_softmax_forward wrapper
Return FP16/BF16 tensor
"""
return _scaled_masked_softmax_fwd_p.bind(inputs, mask, scale_factor=scale_factor)
class ScaledMaskedSoftmaxBwdPrimitive(SoftmaxPrimitive):
"""
Scaled Masked Softmax Bwd Primitive
"""
name = "te_scaled_masked_softmax_backward"
multiple_results = False
@staticmethod
def is_kernel_available(batch: int, heads: int, q_seqlen: int, k_seqlen: int,
dtype: jnp.dtype) -> bool:
"""Check Softmax kernel availability based on size"""
return ScaledSoftmaxFwdPrimitive.is_kernel_available(batch, heads, q_seqlen, k_seqlen,
dtype)
@staticmethod
def abstract(grad_outputs, softmax_outputs, *, scale_factor):
"""
te_scaled_masked_softmax_backward abstract
"""
return SoftmaxPrimitive.softmax_backward_abstract(grad_outputs, softmax_outputs,
scale_factor)
@staticmethod
def lowering(ctx, grad_outputs, softmax_outputs, *, scale_factor):
"""
te_scaled_masked_softmax_backward lowering rules
"""
out = SoftmaxPrimitive.softmax_backward_lowering(ScaledMaskedSoftmaxBwdPrimitive.name, ctx,
grad_outputs, softmax_outputs,
scale_factor)
return out # out is iterable already
_scaled_masked_softmax_bwd_p = register_primitive(ScaledMaskedSoftmaxBwdPrimitive)
def scaled_masked_softmax_bwd(grad_outputs: jnp.ndarray, softmax_outputs: jnp.ndarray,
scale_factor: float) -> jnp.ndarray:
"""
scaled_masked_softmax_backward wrapper
Return FP16/BF16 tensor
"""
return _scaled_masked_softmax_bwd_p.bind(grad_outputs,
softmax_outputs,
scale_factor=scale_factor)
class ScaledUpperTriangMaskedSoftmaxFwdPrimitive(SoftmaxPrimitive):
"""
Scaled Upper Triang Masked Softmax Fwd Primitive
"""
name = "te_scaled_upper_triang_masked_softmax_forward"
multiple_results = False
@staticmethod
def is_kernel_available(batch: int, heads: int, q_seqlen: int, k_seqlen: int,
dtype: jnp.dtype) -> bool:
"""Check Softmax kernel availability based on size"""
attn_batches = batch * heads
if (dtype in [jnp.float16, jnp.bfloat16]
and 16 < k_seqlen <= SoftmaxPrimitive.max_k_seqlen_supported
# k_seqlen must be 16 ~ 4096
and q_seqlen % 4 == 0 # q_seqlen must be a multiple of 4
and attn_batches % 4 == 0 # batch * heads must be a multiple of 4
):
if 0 <= k_seqlen <= SoftmaxPrimitive.max_k_seqlen_supported:
batch_per_block = SoftmaxPrimitive.get_batch_per_block(k_seqlen)
return attn_batches % batch_per_block == 0
return False
@staticmethod
def abstract(inputs, *, scale_factor): # pylint: disable=unused-argument
"""
te_scaled_upper_triang_masked_softmax_forward abstract
"""
shape_rank = 4 # batch, heads, q_seqlen and k_seqlen
i_dtype = dtypes.canonicalize_dtype(inputs.dtype)
assert i_dtype in [jnp.float16, jnp.bfloat16]
i_shape = inputs.shape
assert len(i_shape) == shape_rank
q_seqlen = i_shape[2]
k_seqlen = i_shape[3]
assert q_seqlen == k_seqlen
assert k_seqlen <= SoftmaxPrimitive.max_k_seqlen_supported
assert q_seqlen > 1
return ShapedArray(inputs.shape, i_dtype, named_shape=inputs.named_shape)
@staticmethod
def lowering(ctx, inputs, *, scale_factor):
"""
te_scaled_upper_triang_masked_softmax_forward lowering rules
"""
shape_rank = 4 # batch, heads, q_seqlen and k_seqlen
i_aval, = ctx.avals_in
i_type = ir.RankedTensorType(inputs.type)
i_shape = i_type.shape
assert len(i_shape) == shape_rank
batch = i_shape[0]
pad_batch = batch
heads = i_shape[1]
q_seqlen = i_shape[2]
k_seqlen = i_shape[3]
out_types = [ir.RankedTensorType.get(i_shape, i_type.element_type)]
operands = [inputs]
operand_shapes = [i_shape]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_softmax_descriptor(batch, pad_batch, heads, q_seqlen,
k_seqlen,
jax_dtype_to_te_dtype(i_aval.dtype),
scale_factor)
out = custom_caller(ScaledUpperTriangMaskedSoftmaxFwdPrimitive.name, args, opaque, False)
return [out]
_scaled_upper_triang_masked_softmax_fwd_p = \
register_primitive(ScaledUpperTriangMaskedSoftmaxFwdPrimitive)
def scaled_upper_triang_masked_softmax_fwd(inputs: jnp.ndarray, scale_factor: float) -> jnp.ndarray:
"""
scaled_upper_triang_masked_softmax_forward wrapper
Return FP16/BF16 tensor
"""
return _scaled_upper_triang_masked_softmax_fwd_p.bind(inputs, scale_factor=scale_factor)
class ScaledUpperTriangMaskedSoftmaxBwdPrimitive(SoftmaxPrimitive):
"""
Scaled Upper Triang Masked Softmax Bwd Primitive
"""
name = "te_scaled_upper_triang_masked_softmax_backward"
multiple_results = False
@staticmethod
def is_kernel_available(batch: int, heads: int, q_seqlen: int, k_seqlen: int,
dtype: jnp.dtype) -> bool:
"""Check Softmax kernel availability based on size"""
return ScaledUpperTriangMaskedSoftmaxFwdPrimitive.is_kernel_available(
batch, heads, q_seqlen, k_seqlen, dtype)
@staticmethod
def abstract(grad_outputs, softmax_outputs, *, scale_factor):
"""
te_scaled_upper_triang_masked_softmax_backward abstract
"""
return SoftmaxPrimitive.softmax_backward_abstract(grad_outputs, softmax_outputs,
scale_factor)
@staticmethod
def lowering(ctx, grad_outputs, softmax_outputs, *, scale_factor):
"""
te_scaled_upper_triang_masked_softmax_backward lowering rules
"""
out = SoftmaxPrimitive.softmax_backward_lowering(
ScaledUpperTriangMaskedSoftmaxBwdPrimitive.name, ctx, grad_outputs, softmax_outputs,
scale_factor)
return out # out is iterable already
_scaled_upper_triang_masked_softmax_bwd_p = \
register_primitive(ScaledUpperTriangMaskedSoftmaxBwdPrimitive)
def scaled_upper_triang_masked_softmax_bwd(grad_outputs: jnp.ndarray, softmax_outputs: jnp.ndarray,
scale_factor: float) -> jnp.ndarray:
"""
scaled_upper_triang_masked_softmax_backward wrapper
Return FP16/BF16 tensor
"""
return _scaled_upper_triang_masked_softmax_bwd_p.bind(grad_outputs,
softmax_outputs,
scale_factor=scale_factor)
@dataclass(frozen=True)
class _FusedAttnRNGStateChecker:
"""
Checker for guarding the fused attention rng state.
The fused attention backend requires a 64-bit seed and a 64-bit offset.
However, JAX doesn't enable 64-bit types by default,
so we have to emulate the seed as an array of two 32-bit values.
The offset calculation is maintained in the backend.
"""
rng_state_dtype: jnp.dtype = jnp.uint32
# (seed,) with internal dtype int64
seed_size: int = 2
# (seed, offset) with internal dtype int64
rng_state_size: int = 2 * 2
def check_seed(self, seed, dropout_probability, is_training):
"""
Check the seed and convert the data type of seed if possible.
"""
# JAX can't bind None, so create a dummy tensor for None
if seed is None:
dropout_enabled = dropout_probability > 0 and is_training
assert not dropout_enabled, "seed is not allowed to be None when dropout is enabled."
seed = jnp.zeros(2, dtype=self.rng_state_dtype)
if seed.dtype != self.rng_state_dtype:
warnings.warn(
f"Requested {seed.dtype=} is not available, and will be "
f"casted to dtype {self.rng_state_dtype}. "
f"Please use threefry/rbg/unsafe_rbg PRNG implementations to remove this warning.")
seed = seed.astype(self.rng_state_dtype)
assert seed.dtype == self.rng_state_dtype
# Backend takes an int64_t seed, so only the first two u32 elements are taken
assert seed.size >= self.seed_size
return seed
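# Illustrative example (editorial addition): a minimal sketch of how check_seed is used.
# A (2,)-shaped uint32 seed passes through unchanged; passing seed=None is only legal
# when dropout is effectively disabled. `_example_check_seed` is an invented helper name.
def _example_check_seed():
    """Exercise _FusedAttnRNGStateChecker.check_seed on a uint32 seed (illustrative only)."""
    checker = _FusedAttnRNGStateChecker()
    seed = jnp.zeros(2, dtype=jnp.uint32)
    out = checker.check_seed(seed, dropout_probability=0.1, is_training=True)
    assert out.dtype == checker.rng_state_dtype and out.size == checker.seed_size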
class SelfFusedAttnFwdPrimitive(BasePrimitive):
"""
Self Fused Attention Forward Primitive
"""
name = "te_self_fused_attn_forward"
multiple_results = True
@staticmethod
def abstract(
qkv,
bias,
cu_seqlen, # pylint: disable=unused-argument
seed, # pylint: disable=unused-argument
*,
attn_bias_type, # pylint: disable=unused-argument
attn_mask_type, # pylint: disable=unused-argument
scaling_factor, # pylint: disable=unused-argument
dropout_probability, # pylint: disable=unused-argument
is_training # pylint: disable=unused-argument
):
"""
Self fused attention fwd abstract
"""
qkv_dtype = dtypes.canonicalize_dtype(qkv.dtype)
batch, max_seqlen, nqkv, num_head, head_dim = qkv.shape
assert nqkv == 3
assert qkv.dtype == bias.dtype
output_shape = (batch, max_seqlen, num_head, head_dim)
output_dtype = qkv_dtype
backend = FusedAttnHelper(qkv_dtype, qkv_dtype, attn_bias_type, attn_mask_type,
dropout_probability, max_seqlen, max_seqlen,
head_dim).get_fused_attn_backend()
if backend == NVTE_Fused_Attn_Backend.NVTE_F16_max512_seqlen:
softmax_aux_shape = (batch, num_head, max_seqlen, max_seqlen)
softmax_dtype = qkv_dtype
elif backend == NVTE_Fused_Attn_Backend.NVTE_F16_arbitrary_seqlen:
softmax_aux_shape = (batch, num_head, max_seqlen, 1)
softmax_dtype = dtypes.canonicalize_dtype(jnp.float32)
else:
raise ValueError(f'Not supported {backend=}')
checker = _FusedAttnRNGStateChecker()
seed_dtype = dtypes.canonicalize_dtype(seed.dtype)
assert seed_dtype == checker.rng_state_dtype
rng_state_shape = (checker.rng_state_size,)
rng_state_dtype = seed_dtype
return (
ShapedArray(output_shape, output_dtype, named_shape=qkv.named_shape), # output
ShapedArray(softmax_aux_shape, softmax_dtype,
named_shape=qkv.named_shape), # softmax_aux
ShapedArray(rng_state_shape, rng_state_dtype,
named_shape=seed.named_shape), # rng_state
)
@staticmethod
def lowering(ctx, qkv, bias, cu_seqlen, seed, *, attn_bias_type, attn_mask_type, scaling_factor,
dropout_probability, is_training):
"""
Self fused attention fwd lowering rules
"""
qkv_aval, _, _, _ = ctx.avals_in
batch, max_seqlen, _, num_head, head_dim = qkv_aval.shape
operands = [qkv, bias, cu_seqlen, seed]
operand_shapes = map(lambda x: x.type.shape, operands)
out_types = [
ir.RankedTensorType.get(output.shape, mlir.dtype_to_ir_type(output.dtype))
for output in ctx.avals_out
]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_fused_attn_descriptor(
batch, num_head, max_seqlen, max_seqlen, head_dim, scaling_factor, dropout_probability,
attn_bias_type, attn_mask_type, jax_dtype_to_te_dtype(qkv_aval.dtype), is_training)
out = custom_caller(SelfFusedAttnFwdPrimitive.name, args, opaque, has_side_effect=False)
return out
_self_fused_attn_fwd_p = register_primitive(SelfFusedAttnFwdPrimitive)
def self_fused_attn_fwd(qkv: jnp.ndarray, bias: jnp.ndarray, cu_seqlen: jnp.ndarray,
seed: jnp.ndarray, attn_bias_type: NVTE_Bias_Type,
attn_mask_type: NVTE_Mask_Type, scaling_factor: float,
dropout_probability: float, is_training: bool):
"""
Wrapper for TE self fused attention fwd
Return BMM1 -> (PreBias) -> ScaleMaskSoftmax -> (PostBias) -> (Dropout) -> BMM2
"""
checker = _FusedAttnRNGStateChecker()
seed = checker.check_seed(seed, dropout_probability, is_training)
if attn_bias_type == NVTE_Bias_Type.NVTE_NO_BIAS:
assert bias is None
bias = jnp.zeros(0, dtype=qkv.dtype)
return _self_fused_attn_fwd_p.bind(qkv,
bias,
cu_seqlen,
seed,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
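# Illustrative sketch with hypothetical shapes: per the abstract rule above,
# ``qkv`` is packed as (batch, max_seqlen, 3, num_head, head_dim) and
# ``cu_seqlen`` holds the cumulative sequence lengths of the batch.
#
#     # qkv: (32, 512, 3, 16, 64); bias must be None for NVTE_NO_BIAS
#     out, softmax_aux, rng_state = self_fused_attn_fwd(
#         qkv, None, cu_seqlen, seed,
#         attn_bias_type=NVTE_Bias_Type.NVTE_NO_BIAS,
#         attn_mask_type=attn_mask_type,    # an NVTE_Mask_Type member
#         scaling_factor=1.0 / 8.0,
#         dropout_probability=0.1,
#         is_training=True)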
class SelfFusedAttnBwdPrimitive(BasePrimitive):
"""
Self Fused Attention Backward Primitive
"""
name = "te_self_fused_attn_backward"
multiple_results = True
@staticmethod
def abstract(
qkv,
softmax_aux, # pylint: disable=unused-argument
rng_state, # pylint: disable=unused-argument
output, # pylint: disable=unused-argument
doutput,
cu_seqlen, # pylint: disable=unused-argument
*,
attn_bias_type, # pylint: disable=unused-argument
attn_mask_type, # pylint: disable=unused-argument
scaling_factor, # pylint: disable=unused-argument
dropout_probability, # pylint: disable=unused-argument
is_training # pylint: disable=unused-argument
):
"""
Self fused attention bwd abstract
"""
qkv_dtype = dtypes.canonicalize_dtype(qkv.dtype)
assert qkv.dtype == doutput.dtype
_, seqlen, _, num_head, _ = qkv.shape
if attn_bias_type == NVTE_Bias_Type.NVTE_NO_BIAS:
bias_shape = (0,)
else:
bias_shape = (1, num_head, seqlen, seqlen)
bias_dtype = qkv_dtype
return (
ShapedArray(qkv.shape, qkv_dtype, named_shape=qkv.named_shape), # dqkv
ShapedArray(bias_shape, bias_dtype, named_shape=qkv.named_shape))
@staticmethod
def lowering(ctx, qkv, softmax_aux, rng_state, output, doutput, cu_seqlen, *, attn_bias_type,
attn_mask_type, scaling_factor, dropout_probability, is_training):
"""
Self fused attention bwd lowering rules
"""
qkv_aval, _, _, _, _, _ = ctx.avals_in
batch, max_seqlen, _, num_head, head_dim = qkv_aval.shape
operands = [qkv, softmax_aux, rng_state, output, doutput, cu_seqlen]
operand_shapes = map(lambda x: x.type.shape, operands)
out_types = [
ir.RankedTensorType.get(output.shape, mlir.dtype_to_ir_type(output.dtype))
for output in ctx.avals_out
]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_fused_attn_descriptor(
batch, num_head, max_seqlen, max_seqlen, head_dim, scaling_factor, dropout_probability,
attn_bias_type, attn_mask_type, jax_dtype_to_te_dtype(qkv_aval.dtype), is_training)
out = custom_caller(SelfFusedAttnBwdPrimitive.name, args, opaque, has_side_effect=False)
return out
_self_fused_attn_bwd_p = register_primitive(SelfFusedAttnBwdPrimitive)
def self_fused_attn_bwd(qkv: jnp.ndarray, softmax_aux: jnp.ndarray, rng_state: jnp.ndarray,
output: jnp.ndarray, doutput: jnp.ndarray, cu_seqlen: jnp.ndarray,
attn_bias_type: NVTE_Bias_Type, attn_mask_type: NVTE_Mask_Type,
scaling_factor: float, dropout_probability: float, is_training: bool):
"""
Wrapper for TE self fused attention bwd
Return the gradients of self fused attention with packed qkv input
"""
return _self_fused_attn_bwd_p.bind(qkv,
softmax_aux,
rng_state,
output,
doutput,
cu_seqlen,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
class CrossFusedAttnFwdPrimitive(BasePrimitive):
"""
Cross Fused Attention Forward Primitive
"""
name = "te_cross_fused_attn_forward"
multiple_results = True
@staticmethod
def abstract(
q,
kv,
q_cu_seqlen,
kv_cu_seqlen,
seed, # pylint: disable=unused-argument
*,
attn_bias_type, # pylint: disable=unused-argument
attn_mask_type, # pylint: disable=unused-argument
scaling_factor, # pylint: disable=unused-argument
dropout_probability, # pylint: disable=unused-argument
is_training # pylint: disable=unused-argument
):
"""
Cross fused attention fwd abstract
"""
q_dtype = dtypes.canonicalize_dtype(q.dtype)
batch_q, q_max_seqlen, num_head_q, head_dim_q = q.shape
kv_dtype = dtypes.canonicalize_dtype(kv.dtype)
batch_kv, kv_max_seqlen, nkv, num_head_kv, head_dim_kv = kv.shape
assert q_dtype == kv_dtype
assert batch_q == batch_kv
assert num_head_q == num_head_kv
assert head_dim_q == head_dim_kv
assert nkv == 2
assert q_cu_seqlen.dtype == kv_cu_seqlen.dtype
output_shape = q.shape
output_dtype = q_dtype
softmax_aux_shape = (batch_q, num_head_q, q_max_seqlen, kv_max_seqlen)
softmax_aux_dtype = q_dtype
return (
ShapedArray(output_shape, output_dtype, named_shape=q.named_shape), # output
ShapedArray(softmax_aux_shape, softmax_aux_dtype,
named_shape=q.named_shape), # softmax_aux
)
@staticmethod
def lowering(ctx, q, kv, q_cu_seqlen, kv_cu_seqlen, seed, *, attn_bias_type, attn_mask_type,
scaling_factor, dropout_probability, is_training):
"""
Cross fused attention fwd lowering rules
"""
q_aval, kv_aval, _, _, _ = ctx.avals_in
assert q_aval.dtype == kv_aval.dtype
batch, q_max_seqlen, num_head, head_dim = q_aval.shape
kv_max_seqlen = kv_aval.shape[1]
operands = [q, kv, q_cu_seqlen, kv_cu_seqlen, seed]
operand_shapes = map(lambda x: x.type.shape, operands)
out_types = [
ir.RankedTensorType.get(output.shape, mlir.dtype_to_ir_type(output.dtype))
for output in ctx.avals_out
]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
opaque = transformer_engine_jax.pack_fused_attn_descriptor(
batch, num_head, q_max_seqlen, kv_max_seqlen, head_dim,
scaling_factor, dropout_probability, attn_bias_type, attn_mask_type,
jax_dtype_to_te_dtype(q_aval.dtype), is_training)
out = custom_caller(CrossFusedAttnFwdPrimitive.name, args, opaque, has_side_effect=False)
return out
_cross_fused_attn_fwd_p = register_primitive(CrossFusedAttnFwdPrimitive)
def cross_fused_attn_fwd(q: jnp.ndarray, kv: jnp.ndarray, q_cu_seqlen: jnp.ndarray,
kv_cu_seqlen: jnp.ndarray, seed: jnp.ndarray,
attn_bias_type: NVTE_Bias_Type, attn_mask_type: NVTE_Mask_Type,
scaling_factor: float, dropout_probability: float, is_training: bool):
"""
Wrapper for TE cross fused attention fwd
Return BMM1 -> (PreBias) -> ScaleMaskSoftmax -> (PostBias) -> (Dropout) -> BMM2
"""
checker = _FusedAttnRNGStateChecker()
seed = checker.check_seed(seed, dropout_probability, is_training)
return _cross_fused_attn_fwd_p.bind(q,
kv,
q_cu_seqlen,
kv_cu_seqlen,
seed,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
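# Illustrative shape note (hypothetical values): per the abstract rule above,
# ``q`` is (batch, q_seqlen, num_head, head_dim) while ``kv`` packs key and
# value together as (batch, kv_seqlen, 2, num_head, head_dim); batch, head and
# head_dim sizes must match between the two operands.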
class CrossFusedAttnBwdPrimitive(BasePrimitive):
"""
Cross Fused Attention Backward Primitive
"""
name = "te_cross_fused_attn_backward"
multiple_results = True
@staticmethod
def abstract(
q,
kv,
softmax_aux,
doutput,
q_cu_seqlen,
kv_cu_seqlen,
*,
attn_bias_type, # pylint: disable=unused-argument
attn_mask_type, # pylint: disable=unused-argument
scaling_factor, # pylint: disable=unused-argument
dropout_probability, # pylint: disable=unused-argument
is_training # pylint: disable=unused-argument
):
"""
Cross fused attention bwd abstract
"""
q_dtype = dtypes.canonicalize_dtype(q.dtype)
kv_dtype = dtypes.canonicalize_dtype(kv.dtype)
softmax_aux_dtype = dtypes.canonicalize_dtype(softmax_aux.dtype)
doutput_dtype = dtypes.canonicalize_dtype(doutput.dtype)
assert q_dtype == kv_dtype == softmax_aux_dtype == doutput_dtype
assert q_cu_seqlen.dtype == kv_cu_seqlen.dtype
return (
ShapedArray(q.shape, q_dtype, named_shape=q.named_shape), # dq
ShapedArray(kv.shape, kv_dtype, named_shape=kv.named_shape), # dkv
)
@staticmethod
def lowering(ctx, q, kv, softmax_aux, doutput, q_cu_seqlen, kv_cu_seqlen, *, attn_bias_type,
attn_mask_type, scaling_factor, dropout_probability, is_training):
"""
Cross fused attention bwd lowering rules
"""
q_aval, kv_aval, _, _, _, _ = ctx.avals_in
assert q_aval.dtype == kv_aval.dtype
batch, q_max_seqlen, num_head, head_dim = q_aval.shape
kv_max_seqlen = kv_aval.shape[1]
operands = [q, kv, softmax_aux, doutput, q_cu_seqlen, kv_cu_seqlen]
operand_shapes = map(lambda x: x.type.shape, operands)
out_types = [
ir.RankedTensorType.get(output.shape, mlir.dtype_to_ir_type(output.dtype))
for output in ctx.avals_out
]
args = CustomCallArgsWrapper(out_types, operands, operand_shapes)
        # The dropout mask is encoded in the forward auxiliary tensor,
        # so the seed is not needed in the backward pass.
opaque = transformer_engine_jax.pack_fused_attn_descriptor(
batch, num_head, q_max_seqlen, kv_max_seqlen, head_dim,
scaling_factor, dropout_probability, attn_bias_type, attn_mask_type,
jax_dtype_to_te_dtype(q_aval.dtype), is_training)
out = custom_caller(CrossFusedAttnBwdPrimitive.name, args, opaque, has_side_effect=False)
return out
_cross_fused_attn_bwd_p = register_primitive(CrossFusedAttnBwdPrimitive)
def cross_fused_attn_bwd(q: jnp.ndarray, kv: jnp.ndarray, softmax_aux: jnp.ndarray,
doutput: jnp.ndarray, q_cu_seqlen: jnp.ndarray, kv_cu_seqlen: jnp.ndarray,
attn_bias_type: NVTE_Bias_Type, attn_mask_type: NVTE_Mask_Type,
scaling_factor: float, dropout_probability: float, is_training: bool):
"""
Wrapper for TE cross fused attention bwd
Return the gradients of cross fused attention with packed kv input
"""
return _cross_fused_attn_bwd_p.bind(q,
kv,
softmax_aux,
doutput,
q_cu_seqlen,
kv_cu_seqlen,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=scaling_factor,
dropout_probability=dropout_probability,
is_training=is_training)
| TransformerEngine-main | transformer_engine/jax/cpp_extensions.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""JAX te modules"""
from typing import Tuple, Sequence
from functools import partial, reduce
import operator
import jax
import jax.numpy as jnp
from transformer_engine_jax import DType as TEDType
from .cpp_extensions import cast_transpose, gemm, jax_dtype_to_te_dtype
from .fp8 import FP8Helper, FP8GemmPackage
from .sharding import ShardingType, get_dot_sharding_meta, get_fp8_meta_sharding_meta
from .sharding import is_dp_enabled, is_tp_enabled, merge_axis_resources
from .sharding import xmap_runner, extend_fsdp_sharding_meta
jax.config.update('experimental_xmap_spmd_lowering', True)
jax.config.update('experimental_xmap_spmd_lowering_manual', True)
def fp8_dot(fp8_gemm_pkg: FP8GemmPackage,
fwd_dtype: TEDType,
bwd_dtype: TEDType,
contracting_dims: Tuple[Sequence[int], Sequence[int]] = ((-1,), (0,)),
sharding_type: ShardingType = ShardingType.SINGLE,
dp_dim_index: int = 0) -> jnp.ndarray:
"""
FP8 dot wrapper
"""
assert fp8_gemm_pkg.num_of_gemm == 1
inputs = fp8_gemm_pkg.inputs
kernel = fp8_gemm_pkg.kernels[0]
fp8_max = fp8_gemm_pkg.fp8_max
amax = fp8_gemm_pkg.amax
scale = fp8_gemm_pkg.scale
scale_inv = fp8_gemm_pkg.scale_inv
if sharding_type is ShardingType.SINGLE:
res = _fp8_dot(inputs,
kernel,
fp8_max,
amax,
scale,
scale_inv,
fwd_dtype=fwd_dtype,
bwd_dtype=bwd_dtype,
contracting_dims=contracting_dims,
sharding_type=sharding_type,
dp_axis_name="",
tp_axis_name="",
fsdp_axis_name="")
else:
dp_axis_name = "batch"
tp_axis_name = "model"
kernel_tp_index = None
# TODO (Ming Huang): Should we add a new argument to support general sharding to kernel? # pylint: disable=fixme
if sharding_type in (ShardingType.TP_COL, ShardingType.DP_TP_COL):
kernel_tp_index = len(kernel.shape) - 1
elif sharding_type in (ShardingType.TP_ROW, ShardingType.DP_TP_ROW):
kernel_tp_index = 0
input_tp_index = len(inputs.shape) - 1
sharding_meta = get_dot_sharding_meta(sharding_type, inputs.shape, kernel.shape,
dp_dim_index, input_tp_index, kernel_tp_index,
contracting_dims, dp_axis_name, tp_axis_name)
sharding_meta, fsdp_axis_name = extend_fsdp_sharding_meta(sharding_meta, {0: dp_dim_index})
inputs_ = jnp.reshape(inputs, sharding_meta.input_shapes[0]) # 0 for input
kernel_ = jnp.reshape(kernel, sharding_meta.input_shapes[1]) # 1 for kernel
num_of_fp8_meta_kind = 4 # fp8_max, amax, scale, scale_inv
fp8_sharding_meta = get_fp8_meta_sharding_meta(sharding_type, num_of_fp8_meta_kind,
dp_axis_name, tp_axis_name)
axis_resources = merge_axis_resources(
[sharding_meta.axis_resources, fp8_sharding_meta.axis_resources])
partial_fp8_dot = partial(_fp8_dot,
fwd_dtype=fwd_dtype,
bwd_dtype=bwd_dtype,
contracting_dims=contracting_dims,
sharding_type=sharding_type,
dp_axis_name=dp_axis_name,
tp_axis_name=tp_axis_name,
fsdp_axis_name=fsdp_axis_name)
res = xmap_runner(partial_fp8_dot, (*sharding_meta.in_axes, *fp8_sharding_meta.in_axes),
sharding_meta.out_axes, axis_resources,
(inputs_, kernel_, fp8_max, amax, scale, scale_inv))
res = jnp.reshape(res, sharding_meta.output_shapes[0])
return res
@partial(jax.custom_vjp, nondiff_argnums=(6, 7, 8, 9, 10, 11, 12))
def _fp8_dot(inputs: jnp.ndarray, kernel: jnp.ndarray, fp8_maxs: jnp.ndarray, amax: jnp.ndarray,
scale: jnp.ndarray, scale_inv: jnp.ndarray, fwd_dtype: TEDType, bwd_dtype: TEDType,
contracting_dims: Tuple[Sequence[int], Sequence[int]], sharding_type: ShardingType,
dp_axis_name: str, tp_axis_name: str, fsdp_axis_name: str):
res, _ = _fp8_dot_fwd(inputs,
kernel,
fp8_maxs,
amax,
scale,
scale_inv,
fwd_dtype,
bwd_dtype,
contracting_dims=contracting_dims,
sharding_type=sharding_type,
dp_axis_name=dp_axis_name,
tp_axis_name=tp_axis_name,
fsdp_axis_name=fsdp_axis_name)
return res
def _fp8_dot_fwd(
inputs,
kernel,
fp8_maxs,
amax,
scale,
scale_inv,
fwd_dtype,
bwd_dtype, # pylint: disable=unused-argument
contracting_dims,
sharding_type,
dp_axis_name, # pylint: disable=unused-argument
tp_axis_name,
fsdp_axis_name): # pylint: disable=unused-argument
lhs_contracting_dims, rhs_contracting_dims = contracting_dims
input_shape_pre = inputs.shape[:min(lhs_contracting_dims)]
input_shape_suf = inputs.shape[min(lhs_contracting_dims):]
kernel_shape_pre = kernel.shape[:max(rhs_contracting_dims) + 1]
kernel_shape_suf = kernel.shape[max(rhs_contracting_dims) + 1:]
input_contracting_size = reduce(operator.mul, input_shape_suf)
kernel_contracting_size = reduce(operator.mul, kernel_shape_pre)
assert input_contracting_size == kernel_contracting_size
inputs_ = jnp.reshape(inputs, (-1, input_contracting_size))
kernel_ = jnp.reshape(kernel, (kernel_contracting_size, -1))
amax = FP8Helper.update_amax_history(amax)
gemm_input_idx, gemm_kernel_idx, _ = FP8Helper.get_fp8_meta_indices(0)
input_amax = amax[gemm_input_idx, 0:1]
input_scale = scale[gemm_input_idx]
input_scale_inv = scale_inv[gemm_input_idx]
input_cast, input_cast_trans, input_amax = cast_transpose(inputs_, input_amax, input_scale,
input_scale_inv, fwd_dtype)
kernel_amax = amax[gemm_kernel_idx, 0:1]
kernel_scale = scale[gemm_kernel_idx]
kernel_scale_inv = scale_inv[gemm_kernel_idx]
kernel_cast, kernel_cast_trans, kernel_amax = cast_transpose(kernel_, kernel_amax, kernel_scale,
kernel_scale_inv, fwd_dtype)
res = gemm(kernel_cast_trans, kernel_scale_inv, fwd_dtype, True, input_cast, input_scale_inv,
fwd_dtype, False, jax_dtype_to_te_dtype(inputs.dtype), FP8Helper.FP8_2X_ACC_FPROP)
if sharding_type in (ShardingType.TP_ROW, ShardingType.DP_TP_ROW):
res = jax.lax.psum(res, tp_axis_name)
# (input_shape_pre, input_shape_suf)
# x (kernel_shape_pre, kernel_shape_suf)
# = (input_shape_pre, kernel_shape_suf)
output_shape = input_shape_pre + kernel_shape_suf
res = jnp.reshape(res, output_shape)
ctx = (input_cast_trans, kernel_cast, fp8_maxs, amax, scale, scale_inv, input_amax, kernel_amax,
inputs.shape, kernel.shape)
return res, ctx
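# Worked shape example (illustrative only) for the default
# contracting_dims=((-1,), (0,)):
#   inputs (B, S, H) and kernel (H, O)
#   -> inputs_ is flattened to (B*S, H) and kernel_ stays (H, O)
#   -> the FP8 GEMM yields (B*S, O), which is reshaped back to (B, S, O).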
def _fp8_dot_bwd(
fwd_dtype,
bwd_dtype,
contracting_dims, # pylint: disable=unused-argument
sharding_type,
dp_axis_name,
tp_axis_name,
fsdp_axis_name,
ctx,
g):
input_cast_trans, kernel_cast, \
fp8_maxs, amax, scale, scale_inv, \
input_amax, kernel_amax, \
inputs_shape, kernel_shape = ctx
gemm_input_idx, gemm_kernel_idx, gemm_grad_idx = FP8Helper.get_fp8_meta_indices(0)
grad_amax = amax[gemm_grad_idx, 0:1]
grad_scale = scale[gemm_grad_idx]
grad_scale_inv = scale_inv[gemm_grad_idx]
g = jnp.reshape(g, (input_cast_trans.shape[1], -1))
grad_cast, grad_cast_trans, grad_amax = cast_transpose(g, grad_amax, grad_scale, grad_scale_inv,
bwd_dtype)
input_scale_inv = scale_inv[gemm_input_idx]
wgrad = gemm(grad_cast_trans, grad_scale_inv, bwd_dtype,
True, input_cast_trans, input_scale_inv, fwd_dtype, False,
jax_dtype_to_te_dtype(g.dtype), FP8Helper.FP8_2X_ACC_WGRAD)
kernel_scale_inv = scale_inv[gemm_kernel_idx]
dgrad = gemm(kernel_cast, kernel_scale_inv, fwd_dtype, True, grad_cast, grad_scale_inv,
bwd_dtype, False, jax_dtype_to_te_dtype(g.dtype), FP8Helper.FP8_2X_ACC_DGRAD)
amax = amax.at[gemm_input_idx, 0].set(input_amax[0])
amax = amax.at[gemm_kernel_idx, 0].set(kernel_amax[0])
amax = amax.at[gemm_grad_idx, 0].set(grad_amax[0])
if is_dp_enabled(sharding_type.value[0]):
wgrad = jax.lax.psum(wgrad, dp_axis_name)
amax = jax.lax.pmax(amax, dp_axis_name)
if len(fsdp_axis_name) > 0:
wgrad = jax.lax.psum(wgrad, fsdp_axis_name)
amax = jax.lax.pmax(amax, fsdp_axis_name)
if is_tp_enabled(sharding_type.value[0]):
amax = jax.lax.pmax(amax, tp_axis_name)
if sharding_type in (ShardingType.TP_COL, ShardingType.DP_TP_COL):
dgrad = jax.lax.psum(dgrad, tp_axis_name)
dgrad = jnp.reshape(dgrad, inputs_shape)
wgrad = jnp.reshape(wgrad, kernel_shape)
return dgrad, wgrad, fp8_maxs, amax, scale, scale_inv
_fp8_dot.defvjp(_fp8_dot_fwd, _fp8_dot_bwd)
| TransformerEngine-main | transformer_engine/jax/dot.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""JAX softmax modules"""
from enum import Enum
from functools import partial
from typing import Optional
import jax
import jax.numpy as jnp
from .cpp_extensions import scaled_softmax_fwd
from .cpp_extensions import scaled_softmax_bwd
from .cpp_extensions import scaled_masked_softmax_fwd
from .cpp_extensions import scaled_masked_softmax_bwd
from .cpp_extensions import scaled_upper_triang_masked_softmax_fwd
from .cpp_extensions import scaled_upper_triang_masked_softmax_bwd
from .cpp_extensions import ScaledSoftmaxFwdPrimitive
from .cpp_extensions import ScaledMaskedSoftmaxFwdPrimitive
from .cpp_extensions import ScaledUpperTriangMaskedSoftmaxFwdPrimitive
from .sharding import get_softmax_sharding_meta, ShardingType, ShardingMeta
from .sharding import xmap_runner, extend_fsdp_sharding_meta
jax.config.update('experimental_xmap_spmd_lowering', True)
jax.config.update('experimental_xmap_spmd_lowering_manual', True)
class SoftmaxType(Enum):
"""SoftmaxType."""
SCALED = "scaled"
SCALED_MASKED = "scaled_masked"
SCALED_UPPER_TRIANG_MASKED = "scaled_upper_triang_masked"
def is_softmax_kernel_available(softmax_type: SoftmaxType, batch: int, heads: int, q_seqlen: int,
                                k_seqlen: int, dtype: jnp.dtype):
    """Check whether the fused softmax kernel is available for the given problem size."""
if softmax_type is SoftmaxType.SCALED:
return ScaledSoftmaxFwdPrimitive.is_kernel_available(batch, heads, q_seqlen, k_seqlen,
dtype)
if softmax_type is SoftmaxType.SCALED_MASKED:
return ScaledMaskedSoftmaxFwdPrimitive.is_kernel_available(batch, heads, q_seqlen, k_seqlen,
dtype)
if softmax_type is SoftmaxType.SCALED_UPPER_TRIANG_MASKED:
return ScaledUpperTriangMaskedSoftmaxFwdPrimitive.is_kernel_available(
batch, heads, q_seqlen, k_seqlen, dtype)
raise NotImplementedError
def softmax(inputs: jnp.ndarray,
mask: Optional[jnp.ndarray] = None,
scale_factor: Optional[float] = 1.0,
softmax_type: Optional[SoftmaxType] = SoftmaxType.SCALED,
sharding_type: ShardingType = ShardingType.SINGLE,
dp_dim_index: int = 0,
tp_dim_index: int = 1):
"""
Softmax wrapper
"""
    assert dp_dim_index == 0, \
        "Softmax currently only supports the batch dim in the first position."
    assert tp_dim_index == 1, \
        "Softmax currently only supports the head dim in the second position."
assert mask is None or mask.shape[tp_dim_index] == 1
if sharding_type is ShardingType.SINGLE:
outputs = _softmax(inputs, mask, scale_factor, softmax_type)
else:
dp_axis_name = "batch"
tp_axis_name = "model"
sharding_meta = get_softmax_sharding_meta(sharding_type,
inputs.shape,
dp_dim=dp_dim_index,
tp_dim=tp_dim_index,
dp_axis_name=dp_axis_name,
tp_axis_name=tp_axis_name)
sharding_meta, _ = extend_fsdp_sharding_meta(sharding_meta, {0: dp_dim_index})
inputs_ = jnp.reshape(inputs, sharding_meta.input_shapes[0]) # 0 for input
mask_ = mask
mask_in_axis = {}
if mask_ is not None:
if sharding_type in (ShardingType.DP, ShardingType.DP_TP_COL, ShardingType.DP_TP_ROW):
                # If the mask is head-broadcastable (heads == 1),
                # then it is equivalent to DP sharding.
mask_sharding_meta = get_softmax_sharding_meta(ShardingType.DP,
mask_.shape,
dp_dim=dp_dim_index,
tp_dim=tp_dim_index,
dp_axis_name=dp_axis_name,
tp_axis_name=tp_axis_name)
else:
mask_sharding_meta = ShardingMeta([{}], {}, {}, [mask_.shape], mask_.shape)
mask_sharding_meta, _ = extend_fsdp_sharding_meta(mask_sharding_meta, {0: dp_dim_index})
mask_ = jnp.reshape(mask_, mask_sharding_meta.input_shapes[0])
mask_in_axis = mask_sharding_meta.in_axes[0]
partial_softmax = partial(_softmax, scale_factor=scale_factor, softmax_type=softmax_type)
in_axes = (sharding_meta.in_axes[0], mask_in_axis)
outputs = xmap_runner(partial_softmax, in_axes, sharding_meta.out_axes,
sharding_meta.axis_resources, (inputs_, mask_))
outputs = jnp.reshape(outputs, sharding_meta.output_shapes[0])
return outputs
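# Illustrative usage sketch, not part of the module: logits are laid out as
# (batch, heads, q_seqlen, k_seqlen) and the mask, if given, must be head
# broadcastable (heads == 1).
#
#     probs = softmax(logits,                     # (B, H, Sq, Sk)
#                     mask=None,
#                     scale_factor=1.0 / 8.0,
#                     softmax_type=SoftmaxType.SCALED)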
@partial(jax.custom_vjp, nondiff_argnums=(2, 3))
def _softmax(inputs, mask, scale_factor, softmax_type):
output, _ = _softmax_fwd(inputs, mask, scale_factor, softmax_type)
return output
def _softmax_fwd(inputs, mask, scale_factor, softmax_type):
if softmax_type is SoftmaxType.SCALED_MASKED:
assert mask is not None
outputs = scaled_masked_softmax_fwd(inputs, mask, scale_factor)
elif softmax_type is SoftmaxType.SCALED_UPPER_TRIANG_MASKED:
outputs = scaled_upper_triang_masked_softmax_fwd(inputs, scale_factor)
else:
outputs = scaled_softmax_fwd(inputs, scale_factor)
return outputs, (outputs, mask)
def _softmax_bwd(scale_factor, softmax_type, ctx, grad_outputs):
softmax_outputs, mask = ctx
if softmax_type is SoftmaxType.SCALED_MASKED:
assert mask is not None
dgrad = scaled_masked_softmax_bwd(grad_outputs, softmax_outputs, scale_factor)
elif softmax_type is SoftmaxType.SCALED_UPPER_TRIANG_MASKED:
dgrad = scaled_upper_triang_masked_softmax_bwd(grad_outputs, softmax_outputs, scale_factor)
else:
dgrad = scaled_softmax_bwd(grad_outputs, softmax_outputs, scale_factor)
return (dgrad, None)
_softmax.defvjp(_softmax_fwd, _softmax_bwd)
| TransformerEngine-main | transformer_engine/jax/softmax.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Praxis related Modules"""
from .module import FusedSoftmax, LayerNorm
from .module import LayerNormLinear, LayerNormMLP, Linear, TransformerEngineBaseLayer
from .transformer import MultiHeadAttention, RelativePositionBiases, TransformerLayer
from ..flax.transformer import TransformerLayerType
| TransformerEngine-main | transformer_engine/jax/praxis/__init__.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""
Praxis Modules related Transformer
"""
from functools import partial
from typing import Optional, Sequence, Tuple
from praxis import pax_fiddle
from praxis.base_layer import WeightInit
from praxis.pytypes import JTensor
from .module import TransformerEngineBaseLayer
from ..flax.transformer import TransformerLayerType
from ..flax.transformer import MultiHeadAttention as flax_MultiHeadAttention
from ..flax.transformer import RelativePositionBiases as flax_RelativePositionBiases
from ..flax.transformer import TransformerLayer as flax_TransformerLayer
class RelativePositionBiases(TransformerEngineBaseLayer):
"""RelativePositionBiases"""
num_buckets: int = 32
max_distance: int = 128
num_attention_heads: int = 64
embedding_init: WeightInit = None
embedding_axes: Tuple[str, ...] = ()
@staticmethod
def generate_embedding_init(init, num_attention_heads, num_buckets):
"""generate_embedding_init"""
embedding_init = init
if embedding_init is None:
rb_stddev = (num_attention_heads * num_buckets)**-0.5
embedding_init = WeightInit.Gaussian(rb_stddev)
return embedding_init
def setup(self) -> None:
"""setup"""
super().setup()
embedding_init = RelativePositionBiases.generate_embedding_init(
self.embedding_init, self.num_attention_heads, self.num_buckets)
rpb_cls = partial(flax_RelativePositionBiases,
num_buckets=self.num_buckets,
max_distance=self.max_distance,
num_attention_heads=self.num_attention_heads,
embedding_init=TransformerEngineBaseLayer.generate_params_init(
"rel_embedding", embedding_init),
embedding_axes=self.embedding_axes,
dtype=self.dtype)
self.create_layer("relative_position_bias", rpb_cls)
def __call__(self, q_seqlen: JTensor, k_seqlen: JTensor, bidirectional: bool = True) -> JTensor:
"""__call__"""
return self.relative_position_bias(q_seqlen, k_seqlen, bidirectional)
class MultiHeadAttention(TransformerEngineBaseLayer):
"""MultiHeadAttention"""
head_dim: int = 64
num_heads: int = 16
dropout_rate: float = 0.
dropout_rng_name: str = 'dropout'
layernorm_type: str = "layernorm"
layernorm_epsilon: float = 1e-6
zero_centered_gamma: bool = False
use_bias: bool = False
bias_init: WeightInit = WeightInit.Constant(0.0)
apply_residual_connection_post_layernorm: bool = False
output_layernorm: bool = False
attn_mask_type: str = 'causal'
fuse_qkv: bool = True
transpose_batch_sequence: bool = True
scale_attn_logits: bool = False
scaled_query_init: bool = True
float32_logits: bool = False
def setup(self) -> None:
"""setup"""
super().setup()
mha_cls = partial(
flax_MultiHeadAttention,
dtype=self.dtype,
head_dim=self.head_dim,
num_heads=self.num_heads,
dropout_rate=self.dropout_rate,
dropout_rng_name=self.dropout_rng_name,
layernorm_type=self.layernorm_type,
layernorm_epsilon=self.layernorm_epsilon,
zero_centered_gamma=self.zero_centered_gamma,
kernel_init=TransformerEngineBaseLayer.generate_params_init("kernel", self.params_init),
use_bias=self.use_bias,
bias_init=TransformerEngineBaseLayer.generate_params_init("bias", self.bias_init),
apply_residual_connection_post_layernorm=self.apply_residual_connection_post_layernorm,
output_layernorm=self.output_layernorm,
attn_mask_type=self.attn_mask_type,
fuse_qkv=self.fuse_qkv,
transpose_batch_sequence=self.transpose_batch_sequence,
scale_attn_logits=self.scale_attn_logits,
scaled_query_init=self.scaled_query_init,
float32_logits=self.float32_logits)
self.create_layer("multi_head_attn", mha_cls)
def __call__(self,
inputs_q: JTensor,
inputs_kv: JTensor,
mask: Optional[JTensor] = None,
bias: Optional[JTensor] = None,
*,
decode: bool = False,
deterministic: bool = False) -> JTensor:
"""__call__"""
return self.multi_head_attn(inputs_q,
inputs_kv,
mask,
bias,
decode=decode,
deterministic=deterministic)
class TransformerLayer(TransformerEngineBaseLayer):
"""TransformerLayer"""
hidden_size: int = 512
mlp_hidden_size: int = 2048
num_attention_heads: int = 8
layernorm_type: str = 'layernorm'
layernorm_epsilon: float = 1e-6
zero_centered_gamma: bool = False
hidden_dropout: float = 0.1
hidden_dropout_dims: Sequence[int] = ()
attention_dropout: float = 0.1
dropout_rng_name: str = 'dropout'
mlp_activations: Sequence[str] = ('relu',)
use_bias: bool = False
bias_init: WeightInit = WeightInit.Constant(0.0)
apply_residual_connection_post_layernorm: bool = False
output_layernorm: bool = False
float32_attention_logits: bool = False
layer_type: TransformerLayerType = TransformerLayerType.ENCODER
self_attn_mask_type: str = 'causal'
enable_relative_embedding: bool = True
relative_embedding: pax_fiddle.Config[RelativePositionBiases] = pax_fiddle.template_field(None)
drop_path: float = 0.0
fuse_qkv_params: bool = True
transpose_batch_sequence: bool = False
scale_attn_logits: bool = False
scaled_query_init: bool = True
def setup(self) -> None:
"""setup"""
super().setup()
relative_embedding_flax_module = None
if self.enable_relative_embedding and self.relative_embedding is not None:
            assert self.relative_embedding.num_attention_heads == \
                self.num_attention_heads, \
                "TransformerLayer.relative_embedding.num_attention_heads should be " \
                "the same as TransformerLayer.num_attention_heads."
embedding_init = RelativePositionBiases.generate_embedding_init(
self.relative_embedding.embedding_init, self.relative_embedding.num_attention_heads,
self.relative_embedding.num_buckets)
relative_embedding_flax_module = flax_RelativePositionBiases(
num_buckets=self.relative_embedding.num_buckets,
max_distance=self.relative_embedding.max_distance,
num_attention_heads=self.relative_embedding.num_attention_heads,
embedding_init=TransformerEngineBaseLayer.generate_params_init(
"rel_embedding", embedding_init),
embedding_axes=self.relative_embedding.embedding_axes,
dtype=self.relative_embedding.dtype)
transformerlayer_cls = partial(
flax_TransformerLayer,
dtype=self.dtype,
hidden_size=self.hidden_size,
mlp_hidden_size=self.mlp_hidden_size,
num_attention_heads=self.num_attention_heads,
layernorm_type=self.layernorm_type,
layernorm_epsilon=self.layernorm_epsilon,
zero_centered_gamma=self.zero_centered_gamma,
hidden_dropout=self.hidden_dropout,
hidden_dropout_dims=self.hidden_dropout_dims,
attention_dropout=self.attention_dropout,
dropout_rng_name=self.dropout_rng_name,
mha_kernel_init=TransformerEngineBaseLayer.generate_params_init(
"mha_kernel", self.params_init),
mlp_kernel_init=TransformerEngineBaseLayer.generate_params_init(
"mlp_kernel", self.params_init),
mlp_activations=self.mlp_activations,
use_bias=self.use_bias,
bias_init=TransformerEngineBaseLayer.generate_params_init("bias", self.bias_init),
apply_residual_connection_post_layernorm=self.apply_residual_connection_post_layernorm,
output_layernorm=self.output_layernorm,
float32_attention_logits=self.float32_attention_logits,
layer_type=self.layer_type,
self_attn_mask_type=self.self_attn_mask_type,
enable_relative_embedding=self.enable_relative_embedding,
relative_embedding=relative_embedding_flax_module,
drop_path=self.drop_path,
fuse_qkv_params=self.fuse_qkv_params,
transpose_batch_sequence=self.transpose_batch_sequence,
scale_attn_logits=self.scale_attn_logits,
scaled_query_init=self.scaled_query_init)
self.create_layer("transformerlayer", transformerlayer_cls)
def __call__(self,
inputs: JTensor,
encoded: JTensor = None,
attention_mask: JTensor = None,
encoder_decoder_mask: JTensor = None,
deterministic: bool = False,
decode: bool = False,
max_decode_length: bool = None) -> JTensor:
"""__call__"""
return self.transformerlayer(inputs, encoded, attention_mask, encoder_decoder_mask,
deterministic, decode, max_decode_length)
| TransformerEngine-main | transformer_engine/jax/praxis/transformer.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""
Praxis Modules
"""
from functools import partial
from typing import Callable, Iterable, Sequence, Tuple, Union
from praxis import pax_fiddle
from praxis.base_layer import init_var
from praxis.base_layer import BaseLayer, WeightInit, WeightHParams, WeightHParamsCollection
from praxis.layers import flax_adapter
from praxis.pytypes import JTensor
from ..fp8 import FP8Helper
from ..flax.module import DenseGeneral, LayerNormDenseGeneral
from ..flax.module import LayerNorm as flax_LayerNorm
from ..flax.module import LayerNormMLP as flax_LayerNormMLP
from ..flax.module import Softmax
from ..softmax import SoftmaxType
from ..sharding import MajorShardingType, ShardingType
def _generate_ln_scale_init(scale_init):
if scale_init is not None:
return TransformerEngineBaseLayer.generate_params_init("scale", scale_init)
return scale_init
class TransformerEngineBaseLayer(BaseLayer):
"""TransformerEngineBaseLayer"""
logical_axes_rules: Tuple[Tuple, ...] = None
@staticmethod
def generate_params_init(name: str, initializer: WeightInit):
"""generate_params_init"""
def kernel_init(key, shape, dtype):
wp = WeightHParams(shape=shape, init=initializer, dtype=dtype)
return init_var(wp, key, name)
return kernel_init
def create_layer(self, name, flax_module_cls):
"""create_layer"""
fp8_collection_map = {
FP8Helper.FP8_COLLECTION_NAME: [
WeightHParamsCollection.SKIP_LP_REGULARIZATION,
WeightHParamsCollection.NON_TRAINABLE,
WeightHParamsCollection.DISALLOW_BFLOAT16_CONVERSION
]
}
flax_module_p = pax_fiddle.Config(flax_adapter.FlaxModuleAdapter,
module_factory_method=flax_module_cls,
logical_axes_rules=self.logical_axes_rules,
var_collection_map=fp8_collection_map,
ici_mesh_shape=self.ici_mesh_shape,
dcn_mesh_shape=self.dcn_mesh_shape,
mesh_axis_names=self.mesh_axis_names)
self.create_child(name, flax_module_p.clone())
class LayerNorm(TransformerEngineBaseLayer):
"""LayerNorm"""
epsilon: float = 1e-6
layernorm_type: str = 'layernorm'
zero_centered_gamma: bool = False
scale_init: WeightInit = None
scale_axes: Tuple[str, ...] = ()
bias_init: WeightInit = WeightInit.Constant(0.0)
bias_axes: Tuple[str, ...] = ()
transpose_batch_sequence: bool = False
sharding_type: ShardingType = ShardingType.SINGLE
def setup(self) -> None:
"""setup"""
super().setup()
ln_cls = partial(flax_LayerNorm,
epsilon=self.epsilon,
layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
scale_init=_generate_ln_scale_init(self.scale_init),
scale_axes=self.scale_axes,
bias_init=TransformerEngineBaseLayer.generate_params_init(
"ln_bias", self.bias_init),
bias_axes=self.bias_axes,
dtype=self.dtype,
transpose_batch_sequence=self.transpose_batch_sequence,
sharding_type=self.sharding_type)
self.create_layer("layer_norm", ln_cls)
def __call__(self, x: JTensor) -> JTensor:
"""__call__"""
return self.layer_norm(x)
class FusedSoftmax(TransformerEngineBaseLayer):
"""FusedSoftmax"""
scale_factor: float = 1.0
softmax_type: SoftmaxType = SoftmaxType.SCALED
sharding_type: ShardingType = ShardingType.SINGLE
def setup(self) -> None:
"""setup"""
super().setup()
fused_softmax_cls = partial(Softmax,
scale_factor=self.scale_factor,
softmax_type=self.softmax_type,
sharding_type=self.sharding_type)
self.create_layer("fused_softmax", fused_softmax_cls)
def __call__(self, x: JTensor, mask: JTensor = None, bias: JTensor = None) -> JTensor:
"""__call__"""
return self.fused_softmax(x, mask, bias)
class Linear(TransformerEngineBaseLayer):
"""Linear"""
out_features: int = 512
kernel_axes: Tuple[str, ...] = ()
use_bias: bool = True
bias_init: WeightInit = WeightInit.Constant(0.0)
bias_axes: Tuple[str, ...] = ()
axis: Union[Iterable[int], int] = -1
transpose_batch_sequence: bool = False
sharding_type: ShardingType = ShardingType.SINGLE
def setup(self) -> None:
"""setup"""
super().setup()
dense_general_cls = partial(
DenseGeneral,
features=self.out_features,
kernel_init=TransformerEngineBaseLayer.generate_params_init("kernel", self.params_init),
kernel_axes=self.kernel_axes,
use_bias=self.use_bias,
bias_init=TransformerEngineBaseLayer.generate_params_init("bias", self.bias_init),
bias_axes=self.bias_axes,
axis=self.axis,
dtype=self.dtype,
transpose_batch_sequence=self.transpose_batch_sequence,
sharding_type=self.sharding_type)
self.create_layer("linear", dense_general_cls)
def __call__(self, x: JTensor) -> JTensor:
"""__call__"""
return self.linear(x)
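# Illustrative sketch only: like the other layers in this file, Linear is meant
# to be configured through pax_fiddle. The field values below are hypothetical;
# only the field names mirror the class attributes above.
#
#     linear_p = pax_fiddle.Config(Linear, name='proj', out_features=1024,
#                                  use_bias=True)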
class LayerNormLinear(TransformerEngineBaseLayer):
"""LayerNormLinear"""
out_features: int = 512
enable_layernorm: bool = True
layernorm_type: str = 'layernorm'
epsilon: float = 1e-6
zero_centered_gamma: bool = False
scale_init: WeightInit = None
scale_axes: Tuple[str, ...] = ()
ln_bias_init: WeightInit = WeightInit.Constant(1.0)
ln_bias_axes: Tuple[str, ...] = ()
kernel_axes: Tuple[str, ...] = ()
use_bias: bool = False
bias_init: WeightInit = WeightInit.Constant(0.0)
bias_axes: Tuple[str, ...] = ()
return_layernorm_output: bool = True
axis: Union[Iterable[int], int] = -1
transpose_batch_sequence: bool = False
depth_scaling: float = None
sharding_type: ShardingType = ShardingType.SINGLE
def setup(self) -> None:
"""setup"""
super().setup()
ln_dense_general_cls = partial(
LayerNormDenseGeneral,
features=self.out_features,
enable_layernorm=self.enable_layernorm,
layernorm_type=self.layernorm_type,
epsilon=self.epsilon,
zero_centered_gamma=self.zero_centered_gamma,
scale_init=_generate_ln_scale_init(self.scale_init),
scale_axes=self.scale_axes,
ln_bias_init=TransformerEngineBaseLayer.generate_params_init(
"ln_bias", self.ln_bias_init),
ln_bias_axes=self.ln_bias_axes,
kernel_init=TransformerEngineBaseLayer.generate_params_init("kernel", self.params_init),
kernel_axes=self.kernel_axes,
use_bias=self.use_bias,
bias_init=TransformerEngineBaseLayer.generate_params_init("bias", self.bias_init),
bias_axes=self.bias_axes,
return_layernorm_output=self.return_layernorm_output,
axis=self.axis,
dtype=self.dtype,
transpose_batch_sequence=self.transpose_batch_sequence,
depth_scaling=self.depth_scaling,
sharding_type=self.sharding_type)
self.create_layer("ln_linear", ln_dense_general_cls)
def __call__(self, x: JTensor) -> JTensor:
"""__call__"""
return self.ln_linear(x)
class LayerNormMLP(TransformerEngineBaseLayer):
"""LayerNormMLP"""
intermediate_dim: int = 2048
enable_layernorm: bool = True
layernorm_type: str = 'layernorm'
epsilon: float = 1e-6
zero_centered_gamma: bool = False
scale_init: WeightInit = None
scale_axes: Tuple[str, ...] = ()
ln_bias_init: WeightInit = WeightInit.Constant(1.0)
ln_bias_axes: Tuple[str, ...] = ()
kernel_axes_1: Tuple[str, ...] = ()
kernel_axes_2: Tuple[str, ...] = ()
use_bias: bool = False
bias_init: WeightInit = WeightInit.Constant(0.0)
bias_axes_1: Tuple[str, ...] = ()
bias_axes_2: Tuple[str, ...] = ()
return_layernorm_output: bool = True
activations: Sequence[Union[str, Callable]] = ('relu',)
intermediate_dropout_rate: float = 0.1
intermediate_hidden_dropout_dims: Sequence[int] = ()
axis: Union[Iterable[int], int] = -1
transpose_batch_sequence: bool = False
major_sharding_type: MajorShardingType = MajorShardingType.SINGLE
def setup(self) -> None:
"""setup"""
super().setup()
ln_mlp_cls = partial(
flax_LayerNormMLP,
intermediate_dim=self.intermediate_dim,
enable_layernorm=self.enable_layernorm,
layernorm_type=self.layernorm_type,
epsilon=self.epsilon,
zero_centered_gamma=self.zero_centered_gamma,
scale_init=_generate_ln_scale_init(self.scale_init),
scale_axes=self.scale_axes,
ln_bias_init=TransformerEngineBaseLayer.generate_params_init(
"ln_bias", self.ln_bias_init),
ln_bias_axes=self.ln_bias_axes,
kernel_init=TransformerEngineBaseLayer.generate_params_init("kernel", self.params_init),
kernel_axes_1=self.kernel_axes_1,
kernel_axes_2=self.kernel_axes_2,
use_bias=self.use_bias,
bias_init=TransformerEngineBaseLayer.generate_params_init("bias", self.bias_init),
bias_axes_1=self.bias_axes_1,
bias_axes_2=self.bias_axes_2,
return_layernorm_output=self.return_layernorm_output,
activations=self.activations,
intermediate_dropout_rate=self.intermediate_dropout_rate,
intermediate_hidden_dropout_dims=self.intermediate_hidden_dropout_dims,
axis=self.axis,
dtype=self.dtype,
transpose_batch_sequence=self.transpose_batch_sequence,
major_sharding_type=self.major_sharding_type)
self.create_layer("ln_mlp", ln_mlp_cls)
def __call__(self, x: JTensor, deterministic: bool = False) -> JTensor:
"""__call__"""
return self.ln_mlp(x, deterministic)
| TransformerEngine-main | transformer_engine/jax/praxis/module.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""Transformer Engine bindings for JAX"""
from .module import DenseGeneral, LayerNorm
from .module import LayerNormDenseGeneral, LayerNormMLP, TransformerEngineBase
from .transformer import extend_logical_axis_rules
from .transformer import MultiHeadAttention, RelativePositionBiases
from .transformer import TransformerLayer, TransformerLayerType
| TransformerEngine-main | transformer_engine/jax/flax/__init__.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""
Wrapper module for Transformer related layers with FP8 support.
"""
import functools
from enum import Enum
from math import sqrt
import os
from typing import Any, Callable, Optional, Sequence, Tuple, Union
import warnings
import jax
import jax.numpy as jnp
import numpy as np
from flax import linen as nn
from flax.linen import partitioning as nn_partitioning
from jax import dtypes
from jax import nn as jax_nn
from jax import random as jax_random
from jax import lax, vmap
from .module import DenseGeneral, LayerNormDenseGeneral, LayerNormMLP
from .module import LayerNorm, Softmax
from ..fused_attn import AttnBiasType, AttnMaskType
from ..fused_attn import is_fused_attn_kernel_available
from ..fused_attn import self_fused_attn, cross_fused_attn
from ..softmax import SoftmaxType
from ..sharding import infer_major_sharding_type, infer_sharding_type
from ..sharding import global_shard_resource, with_sharding_constraint
from ..sharding import ShardingType
PRNGKey = Any
Shape = Tuple[int, ...]
DType = jnp.dtype
Array = jnp.ndarray
PrecisionLike = Union[None, str, lax.Precision, Tuple[str, str], Tuple[lax.Precision,
lax.Precision]]
Initializer = Callable[[PRNGKey, Shape, DType], Array]
LogicalRules = Sequence[Tuple[str, Union[str, None]]]
BATCH_AXES = 'nvte_batch'
SEQLEN_AXES = 'nvte_seqlen'
HEAD_AXES = 'nvte_head'
HIDDEN_AXES = 'nvte_hidden'
HIDDEN_TP_AXES = 'nvte_hidden_tp'
JOINED_AXES = 'nvte_joined'
W_NO_SHARD_AXES = 'nvte_w_no_shard'
W_FSDP_AXES = 'nvte_w_fsdp'
W_TP_AXES = 'nvte_w_tp'
W_JOINED_AXES = 'nvte_w_joined'
def _generate_drop_path_shape(shape: Sequence[int], batch_dim: int) -> Sequence[int]:
# Generate broadcast dims for drop_path.
drop_path_shape = list(range(0, len(shape)))
drop_path_shape.pop(batch_dim)
return drop_path_shape
def extend_logical_axis_rules(rules: LogicalRules) -> LogicalRules:
"""
Extend the given Flax logical axis rules with the predefined TransformerLayer's
logical axis rules.
.. note::
We currently only support logical axis rules for single GPU training, data parallel
training and 1D-sharding tensor parallel training.
Refer to `Figure 3 in` `Megatron-LM tensor parallel <https://arxiv.org/pdf/1909.08053.pdf>`_
for 1D-sharding tensor parallelism.
.. warning::
Please make sure ShardingResource is set via fp8_autocast before calling this function.
.. note::
This function is only needed when using TransformerLayer. For other modules, such as
DenseGeneral, please properly set axes of kernels and bias.
Parameters
----------
rules : Sequence[Tuple[str, Union[str, None]]]
the base Flax logical axis rules to extend.
Returns
-------
extended_rules : Sequence[Tuple[str, Union[str, None]]]
the extended Flax logical axis rules.
"""
rules_map = {}
for item in rules:
assert len(item) == 2, \
"The logical axis rule should be like (axis_name, mesh_axis_name)."
key = item[0]
val = item[1]
        assert isinstance(key, str), \
            f"The axis_name should be str, but got {type(key)}."
        assert isinstance(val, str) or (val is None), \
            f"The mesh_axis_name should be str or None, but got {type(val)}."
if key in rules_map:
rules_map[key].append(val)
else:
rules_map[key] = [val]
gsr = global_shard_resource()
batch_dim_rule = []
if gsr.dp_resource is not None:
batch_dim_rule.append(gsr.dp_resource)
if gsr.fsdp_resource is not None and gsr.dp_resource != gsr.fsdp_resource:
batch_dim_rule.append(gsr.fsdp_resource)
if len(batch_dim_rule) <= 0:
batch_dim_rule = None
elif len(batch_dim_rule) == 1:
batch_dim_rule = batch_dim_rule[0]
else:
batch_dim_rule = tuple(batch_dim_rule)
te_logical_axis_rules = (
(BATCH_AXES, batch_dim_rule),
(SEQLEN_AXES, None),
(HEAD_AXES, gsr.tp_resource),
(HIDDEN_AXES, None),
(HIDDEN_TP_AXES, gsr.tp_resource),
(JOINED_AXES, None),
(W_NO_SHARD_AXES, None),
(W_FSDP_AXES, gsr.fsdp_resource),
(W_TP_AXES, gsr.tp_resource),
(W_JOINED_AXES, None),
)
extended_rules = [*rules]
for item in te_logical_axis_rules:
key = item[0]
val = item[1]
if key in rules_map:
            assert len(rules_map[key]) == 1 and rules_map[key][0] == val, \
                f"The rule diverged between TE and the given rules. " \
                f"Axis {key} maps to {rules_map[key]} in the given" \
                f" rules, but to {val} in TE's rules."
else:
extended_rules.append(item)
return tuple(extended_rules)
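# Illustrative usage sketch (assumes the sharding resources were already set up,
# e.g. via fp8_autocast); the base rules below are hypothetical:
#
#     base_rules = (('batch', 'data'), ('embed', None))
#     full_rules = extend_logical_axis_rules(base_rules)
#     with nn_partitioning.axis_rules(full_rules):
#         ...    # initialize/apply modules under the extended logical axis rules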
def _with_sharding_constraint(x: Array, logical_axis_names: Shape):
assert len(x.shape) == len(logical_axis_names)
rules = extend_logical_axis_rules(tuple())
rules_dict = {}
for key, value in rules:
rules_dict[key] = value
mesh_axis_names = [rules_dict[name] for name in logical_axis_names]
pspec = jax.sharding.PartitionSpec(*mesh_axis_names)
return with_sharding_constraint(x, pspec)
def _merge_mask(func, *masks: Optional[Array]):
masks = [m for m in masks if m is not None]
if not masks:
return None
assert all(map(lambda x: x.ndim == masks[0].ndim,
masks)), (f'masks must have same rank: {tuple(map(lambda x: x.ndim, masks))}')
mask, *other_masks = masks
for other_mask in other_masks:
mask = func(mask, other_mask)
return mask
def combine_masks(*masks: Optional[Array], dtype: DType = jnp.float32):
"""Combine attention masks."""
    func = jnp.logical_and
    combined = _merge_mask(func, *masks)
    # _merge_mask returns None when no masks are given; keep that behavior here.
    return combined.astype(dtype) if combined is not None else None
def combine_biases(*masks: Optional[Array]):
"""Combine attention biases."""
func = lambda a, b: a + b
return _merge_mask(func, *masks)
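# Illustrative sketch with hypothetical operands: masks are AND-ed elementwise
# and biases are summed, e.g. for tensors of shape (batch, 1, q_seqlen, kv_seqlen):
#
#     attention_mask = combine_masks(padding_mask, causal_mask)    # float32 0/1
#     attention_bias = combine_biases(relpos_bias, user_bias)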
def core_attention(query: Array,
key: Array,
value: Array,
scale_factor: float,
transpose_batch_sequence: bool,
softmax_type: SoftmaxType = SoftmaxType.SCALED,
softmax_sharding_type: ShardingType = ShardingType.SINGLE,
mask: Optional[Array] = None,
bias: Optional[Array] = None,
dropout_rng: Optional[PRNGKey] = None,
dropout_rate: float = 0.,
deterministic: bool = False,
dtype: DType = jnp.float32,
float32_logits: bool = False):
"""Core attention"""
assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.'
batch_dim = 1 if transpose_batch_sequence else 0
assert query.shape[batch_dim] == key.shape[batch_dim] == value.shape[batch_dim], (
'q, k, v batch dims must match.')
assert query.shape[-2] == key.shape[-2] == value.shape[-2], ('q, k, v num_heads must match.')
sequence_dim = 0 if transpose_batch_sequence else 1
assert key.shape[sequence_dim] == value.shape[sequence_dim], 'k, v lengths must match.'
assert query.shape[-1] == key.shape[-1], 'q, k depths must match.'
if float32_logits:
query = query.astype(jnp.float32)
key = key.astype(jnp.float32)
if transpose_batch_sequence:
attn_weights = jnp.einsum('qbhd,kbhd->bhqk', query, key)
else:
attn_weights = jnp.einsum('bqhd,bkhd->bhqk', query, key)
attn_weights = _with_sharding_constraint(attn_weights,
(BATCH_AXES, HEAD_AXES, SEQLEN_AXES, SEQLEN_AXES))
# When a bias is present, the computation is performed as Softmax(attn_weights * scale + bias).
    # In this case, the scale cannot be fused into the Softmax module.
if bias is not None:
attn_weights = attn_weights * scale_factor
fused_scale_factor = 1.
else:
        # If there is no bias, the scale can be fused into the Softmax module
fused_scale_factor = scale_factor
attn_weights = Softmax(softmax_type=softmax_type,
scale_factor=fused_scale_factor,
sharding_type=softmax_sharding_type)(attn_weights, mask,
bias).astype(dtype)
if not deterministic and dropout_rate > 0.:
keep_prob = 1.0 - dropout_rate
dropout_shape = list(attn_weights.shape)
# TODO(rewang): add attention dropout broadcast dimension arguments for users
keep = jax_random.bernoulli(dropout_rng, keep_prob, dropout_shape)
multiplier = (keep.astype(attn_weights.dtype) / jnp.asarray(keep_prob, dtype=dtype))
attn_weights = attn_weights * multiplier
if transpose_batch_sequence:
return jnp.einsum('bhqk,kbhd->qbhd', attn_weights, value)
return jnp.einsum('bhqk,bkhd->bqhd', attn_weights, value)
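# Illustrative shape summary, not part of the module: with
# transpose_batch_sequence=False, query/key/value are (batch, seqlen, heads,
# head_dim) and the returned context keeps the query layout
# (batch, q_seqlen, heads, head_dim); with transpose_batch_sequence=True the
# batch and sequence axes are swapped, matching the einsum patterns above.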
dynamic_vector_slice_in_dim = vmap(lax.dynamic_slice_in_dim, in_axes=(None, 0, None, None))
class MultiHeadAttention(nn.Module):
r"""
Multi-head Attention (MHA), including Query,
Key, Value and Output projection.
.. note::
Argument :attr:`mask` will be ignored when
:attr:`attn_mask_type` is set to `"causal"`.
Parameters
----------
head_dim : int
The hidden dimension of each attention head.
num_heads : int
The number of attention heads
dropout_rate : float, default = 0.0
Dropout probability for the dropout op during multi-head attention.
dropout_rng_name: str, default = 'dropout'
        The key in the RNG collections given to flax.linen.Module.apply that is
        used to generate dropout masks in the core attention.
layernorm_type : {'layernorm', 'rmsnorm'}, default = 'layernorm'
Indicate the type of layer normalization.
layernorm_epsilon: float, default = 1e-6
A value added to the denominator of layer normalization for numerical stability.
zero_centered_gamma : bool, default = False
If set to `True`, the LayerNorm formula changes to
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} *
(1 + \gamma) + \beta
This parameter is only applicable for 'layernorm'.
kernel_init: Initializer, default =
flax.linen.initializers.variance_scaling(1.0, 'fan_in', 'normal')
Used for initializing the QKV and Output projection weights.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
use_bias: bool, default = False
Indicate whether or not to enable bias shifting for QKVO projections.
If set to False, the layer will not learn additive biases.
bias_init: Initializer, default = flax.linen.initializers.zeros
Used for initializing bias of QKVO projections, only used when :attr:`use_bias=True`.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
apply_residual_connection_post_layernorm : bool, default = False
        Indicate whether to apply the residual connection to the output of layer normalization.
    output_layernorm : bool, default = False
        Indicate whether to apply a layer normalization at the end of MHA.
attn_mask_type: {'causal', 'padding'}, default = 'causal'
Type of attention mask passed into softmax operation.
Introduced in v0.10.0.
Optimization parameters
-----------------------
dtype :jax.numpy.dtype, default = jax.numpy.float32
The data type used to allocate the initial parameters.
fuse_qkv: bool, default = True
If set to True, this module exposes a single fused
parameter for query-key-value for self-attention and key-value for
cross-attention.
transpose_batch_sequence : bool, default = True
        Indicate whether the batch and sequence-length axes of the input
        tensors are swapped. If set to True, the input tensors should be in
        (seqlen, batch, hidden); otherwise (batch, seqlen, hidden).
scale_attn_logits: bool, default = False
Indicate whether to scale attention logits.
        If set to True, the logits are computed as
        :math:`\frac{Q}{\sqrt{head\_dim}} * K`, else :math:`Q * K`.
    scaled_query_init: bool, default = `True`
        Whether to scale WQ on initialization by :math:`\sqrt{head\_dim}`
float32_logits : bool, default = False
Whether to compute attention logits in float32.
"""
head_dim: int
num_heads: int
dropout_rate: float = 0.
dropout_rng_name: str = 'dropout'
layernorm_type: str = "layernorm"
layernorm_epsilon: float = 1e-6
zero_centered_gamma: bool = False
kernel_init: Initializer = None
use_bias: bool = False
bias_init: Initializer = nn.initializers.zeros
apply_residual_connection_post_layernorm: bool = False
output_layernorm: bool = False
attn_mask_type: str = 'causal'
dtype: DType = jnp.float32
fuse_qkv: bool = True
transpose_batch_sequence: bool = True
scale_attn_logits: bool = False
scaled_query_init: bool = True
float32_logits: bool = False # computes logits in float32 for stability.
def __post_init__(self):
if self.kernel_init is None:
self.kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'normal')
super().__post_init__()
@nn.compact
def __call__(self,
inputs_q: Array,
inputs_kv: Array,
mask: Optional[Array] = None,
bias: Optional[Array] = None,
*,
decode: bool = False,
deterministic: bool = False) -> Array:
"""
MultiHeadAttention Layer:
[Query, Key, Value projection] -> Dot Product Attention -> Output projection.
Parameters
----------
inputs_q : jax.numpy.ndarray
Input tensor for query projection.
inputs_kv : jax.numpy.ndarray
Input tensor for key/value projection.
mask : jax.numpy.ndarray, default = None
Boolean tensor used to mask out self-attention softmax input.
bias : jax.numpy.ndarray, default = None
A tensor used to shift self-attention softmax input.
*
decode : bool,default = False
Indicate whether to prepare and use an autoregressive cache.
deterministic : bool,default = False
Disable dropout layers if set to True.
Returns
-------
outputs : jax.numpy.ndarray
Output tensors.
"""
def query_init(*args):
depth_scaling = jnp.sqrt(self.head_dim).astype(self.dtype)
return self.kernel_init(*args) / (depth_scaling if self.scaled_query_init else 1.0)
def qkv_init(key, shape, dtype):
assert len(shape) == 3
assert shape[-2] == 3
q_key, k_key, v_key = jax_random.split(key, num=3)
q_shape = (shape[0], shape[-1])
k_shape = (shape[0], shape[-1])
v_shape = (shape[0], shape[-1])
q_kernel = query_init(q_key, q_shape, dtype)
k_kernel = self.kernel_init(k_key, k_shape, dtype)
v_kernel = self.kernel_init(v_key, v_shape, dtype)
return jnp.stack([q_kernel, k_kernel, v_kernel], axis=-2, dtype=dtype)
def kv_init(key, shape, dtype):
assert len(shape) == 3
assert shape[-2] == 2
k_key, v_key = jax_random.split(key)
k_shape = (shape[0], shape[-1])
v_shape = (shape[0], shape[-1])
k_kernel = self.kernel_init(k_key, k_shape, dtype)
v_kernel = self.kernel_init(v_key, v_shape, dtype)
return jnp.stack([k_kernel, v_kernel], axis=-2, dtype=dtype)
# TODO(rewang): make it configurable for pre_scale_bias
attn_bias_type = AttnBiasType.NO_BIAS if bias is None else AttnBiasType.POST_SCALE_BIAS
def canonicalize_attn_mask_type(attn_mask_type):
"""
Convert the string to AttnMaskType
"""
if attn_mask_type == 'causal':
return AttnMaskType.CAUSAL_MASK
if attn_mask_type == 'padding':
return AttnMaskType.PADDING_MASK
raise ValueError(f"Unsupported {attn_mask_type=}, "
"supported attn_mask_type = {'causal', 'padding'}")
attn_mask_type = canonicalize_attn_mask_type(self.attn_mask_type)
canonicalize_dtype = dtypes.canonicalize_dtype(self.dtype)
q_seqlen = inputs_q.shape[0] if self.transpose_batch_sequence else inputs_q.shape[1]
kv_seqlen = inputs_kv.shape[0] if self.transpose_batch_sequence else inputs_kv.shape[1]
enable_fused_attn = int(os.getenv("NVTE_FUSED_ATTN", "0"))
def _check_seqlen(seqlen):
return seqlen % 64 == 0
def _check_head_dim(head_dim):
return head_dim in [64, 128]
has_fused_attn_kernel = is_fused_attn_kernel_available(self.dtype, self.dtype,
attn_bias_type, attn_mask_type,
self.dropout_rate, q_seqlen,
kv_seqlen, self.head_dim)
use_fused_attn = not decode and not self.transpose_batch_sequence and self.fuse_qkv and \
canonicalize_dtype in [jnp.bfloat16, jnp.float16] and \
_check_seqlen(q_seqlen) and _check_seqlen(kv_seqlen) and \
_check_head_dim(self.head_dim) and \
has_fused_attn_kernel and \
enable_fused_attn
if enable_fused_attn and not use_fused_attn:
reason = ""
if decode:
reason += f"decode=False is required but got {decode}, "
if self.transpose_batch_sequence:
reason += f"transpose_batch_sequence=False is required " \
f"but got {self.transpose_batch_sequence}, "
if not self.fuse_qkv:
reason += f"fuse_qkv=True is required but got {self.fuse_qkv}, "
if canonicalize_dtype not in [jnp.bfloat16, jnp.float16]:
reason += f"dtype in [BF16, FP16] is required " \
f"but got dtype={canonicalize_dtype}, "
if not _check_seqlen(q_seqlen):
reason += f"q_seqlen % 64 == 0 is required " \
f"but got {q_seqlen=}, "
if not _check_seqlen(kv_seqlen):
reason += f"kv_seqlen % 64 == 0 is required " \
f"but got {kv_seqlen=}, "
if not _check_head_dim(self.head_dim):
reason += f"head_dim should be 64 or 128 but got {self.head_dim}, "
if not has_fused_attn_kernel:
reason += "no fused attention kernel is available, "
            warnings.warn(
                f"Fused attention is not enabled because {reason}"
                "falling back to unfused attention.")
first_sharding_type, second_sharding_type = infer_sharding_type()
residual = inputs_q
if self.fuse_qkv:
if inputs_q is inputs_kv:
qkv_proj, ln_out = LayerNormDenseGeneral(
enable_layernorm=not self.output_layernorm,
layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
epsilon=self.layernorm_epsilon,
axis=-1,
features=(3, self.num_heads * self.head_dim),
sharding_type=first_sharding_type,
transpose_batch_sequence=self.transpose_batch_sequence,
return_layernorm_output=self.apply_residual_connection_post_layernorm,
scale_axes=(W_NO_SHARD_AXES,),
ln_bias_axes=(W_NO_SHARD_AXES,),
kernel_axes=(W_FSDP_AXES, W_JOINED_AXES, W_TP_AXES),
kernel_init=qkv_init,
use_bias=self.use_bias,
bias_init=self.bias_init,
bias_axes=(W_JOINED_AXES, W_TP_AXES),
name='qkv',
dtype=self.dtype)(inputs_q)
if not use_fused_attn:
query, key, value = jnp.split(qkv_proj, [1, 2], axis=-2)
else:
query, ln_out = LayerNormDenseGeneral(
enable_layernorm=not self.output_layernorm,
layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
epsilon=self.layernorm_epsilon,
axis=-1,
features=self.num_heads * self.head_dim,
sharding_type=first_sharding_type,
transpose_batch_sequence=self.transpose_batch_sequence,
return_layernorm_output=self.apply_residual_connection_post_layernorm,
scale_axes=(W_NO_SHARD_AXES,),
ln_bias_axes=(W_NO_SHARD_AXES,),
kernel_axes=(W_FSDP_AXES, W_TP_AXES),
use_bias=self.use_bias,
bias_init=self.bias_init,
bias_axes=(W_TP_AXES,),
dtype=self.dtype,
kernel_init=query_init,
name='query')(inputs_q)
kv_proj = DenseGeneral(axis=-1,
features=(2, self.num_heads * self.head_dim),
sharding_type=first_sharding_type,
transpose_batch_sequence=self.transpose_batch_sequence,
kernel_axes=(W_FSDP_AXES, W_JOINED_AXES, W_TP_AXES),
kernel_init=kv_init,
use_bias=self.use_bias,
bias_init=self.bias_init,
bias_axes=(W_JOINED_AXES, W_TP_AXES),
name='kv',
dtype=self.dtype)(inputs_kv)
if not use_fused_attn:
key, value = jnp.split(kv_proj, [1], axis=-2)
else:
kv_projection = functools.partial(
DenseGeneral,
axis=-1,
features=self.num_heads * self.head_dim,
sharding_type=first_sharding_type,
transpose_batch_sequence=self.transpose_batch_sequence,
kernel_axes=(W_FSDP_AXES, W_TP_AXES),
use_bias=self.use_bias,
bias_init=self.bias_init,
bias_axes=(W_TP_AXES,),
dtype=self.dtype)
query, ln_out = LayerNormDenseGeneral(
enable_layernorm=not self.output_layernorm,
layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
epsilon=self.layernorm_epsilon,
axis=-1,
features=self.num_heads * self.head_dim,
sharding_type=first_sharding_type,
transpose_batch_sequence=self.transpose_batch_sequence,
return_layernorm_output=True,
scale_axes=(W_NO_SHARD_AXES,),
ln_bias_axes=(W_NO_SHARD_AXES,),
kernel_axes=(W_FSDP_AXES, W_TP_AXES),
use_bias=self.use_bias,
bias_init=self.bias_init,
bias_axes=(W_TP_AXES,),
dtype=self.dtype,
kernel_init=query_init,
name='query')(inputs_q)
if inputs_q is inputs_kv:
assert ln_out is not None
inputs_kv = ln_out
key = kv_projection(kernel_init=self.kernel_init, name='key')(inputs_kv)
value = kv_projection(kernel_init=self.kernel_init, name='value')(inputs_kv)
if self.apply_residual_connection_post_layernorm:
assert ln_out is not None
residual = ln_out
if not use_fused_attn:
query = query.reshape((query.shape[0], query.shape[1], self.num_heads, self.head_dim))
key = key.reshape((key.shape[0], key.shape[1], self.num_heads, self.head_dim))
value = value.reshape((value.shape[0], value.shape[1], self.num_heads, self.head_dim))
qkv_sharding_constraint = \
(SEQLEN_AXES, BATCH_AXES, HEAD_AXES, HIDDEN_AXES) \
if self.transpose_batch_sequence \
else (BATCH_AXES, SEQLEN_AXES, HEAD_AXES, HIDDEN_AXES)
query = _with_sharding_constraint(query, qkv_sharding_constraint)
key = _with_sharding_constraint(key, qkv_sharding_constraint)
value = _with_sharding_constraint(value, qkv_sharding_constraint)
if decode:
is_initialized = self.has_variable('cache', 'cached_key')
cached_key = self.variable('cache', 'cached_key', jnp.zeros, key.shape, key.dtype)
cached_value = self.variable('cache', 'cached_value', jnp.zeros, value.shape,
value.dtype)
cache_index = self.variable('cache', 'cache_index',
lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
if self.transpose_batch_sequence:
length, batch, num_heads, head_dim = cached_key.value.shape
expected_shape = (1, batch, num_heads, head_dim)
one_hot_indices_shape = (length, 1, 1, 1)
else:
batch, length, num_heads, head_dim = cached_key.value.shape
expected_shape = (batch, 1, num_heads, head_dim)
one_hot_indices_shape = (1, length, 1, 1)
# Sanity shape check of cached key against input query.
if expected_shape != query.shape:
raise ValueError(
'Autoregressive cache shape error, '
f"expected query shape {expected_shape} instead got {query.shape}.")
cur_index = cache_index.value
one_hot_indices = jax_nn.one_hot(cur_index, length, dtype=key.dtype)
one_hot_indices = jnp.reshape(one_hot_indices, one_hot_indices_shape)
key = cached_key.value + key * one_hot_indices
value = cached_value.value + value * one_hot_indices
cached_key.value = key
cached_value.value = value
cache_index.value = cache_index.value + 1
mask = combine_masks(
mask, jnp.broadcast_to(jnp.arange(length) > cur_index, (batch, 1, 1, length)))
if bias is not None:
bias = dynamic_vector_slice_in_dim(jnp.squeeze(bias, axis=0),
jnp.reshape(cur_index, (-1)), 1, -2)
scale_factor = 1.0 / sqrt(self.head_dim) if self.scale_attn_logits else 1.0
dropout_rng = None
if not deterministic and self.dropout_rate > 0.:
dropout_rng = self.make_rng(self.dropout_rng_name)
if use_fused_attn:
assert mask is not None and mask.ndim == 4 # (b, 1, s_q, s_kv)
assert not self.transpose_batch_sequence
seed = None
if dropout_rng is not None:
seed = jax.random.split(dropout_rng, len(jax.devices()))
                # ensure the old key is never used
del dropout_rng
if inputs_q is inputs_kv:
qkv_proj = qkv_proj.reshape((*qkv_proj.shape[:-1], self.num_heads, self.head_dim))
qkv_sharding_constraint = (BATCH_AXES, SEQLEN_AXES, JOINED_AXES, HEAD_AXES,
HIDDEN_AXES)
qkv_proj = _with_sharding_constraint(qkv_proj, qkv_sharding_constraint)
x = self_fused_attn(qkv_proj,
bias,
mask,
seed,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=scale_factor,
dropout_probability=self.dropout_rate,
is_training=not deterministic,
sharding_type=first_sharding_type)
else:
assert bias is None
query = query.reshape((*query.shape[:-1], self.num_heads, self.head_dim))
kv_proj = kv_proj.reshape((*kv_proj.shape[:-1], self.num_heads, self.head_dim))
q_sharding_constraint = (BATCH_AXES, SEQLEN_AXES, HEAD_AXES, HIDDEN_AXES)
kv_sharding_constraint = (BATCH_AXES, SEQLEN_AXES, JOINED_AXES, HEAD_AXES,
HIDDEN_AXES)
query = _with_sharding_constraint(query, q_sharding_constraint)
kv_proj = _with_sharding_constraint(kv_proj, kv_sharding_constraint)
x = cross_fused_attn(query,
kv_proj,
mask,
seed,
attn_bias_type=attn_bias_type,
attn_mask_type=attn_mask_type,
scaling_factor=scale_factor,
dropout_probability=self.dropout_rate,
is_training=not deterministic,
sharding_type=first_sharding_type)
else:
def convert_to_softmax_type(attn_mask_type, mask):
"""
Convert the string to SoftmaxType
"""
if attn_mask_type == 'causal':
return SoftmaxType.SCALED_UPPER_TRIANG_MASKED
if attn_mask_type == 'padding':
if mask is not None:
return SoftmaxType.SCALED_MASKED
return SoftmaxType.SCALED
raise ValueError(f"Unsupported {attn_mask_type=}, "
"supported attn_mask_type = {'causal', 'padding'}")
softmax_type = convert_to_softmax_type(self.attn_mask_type, mask)
x = core_attention(query,
key,
value,
scale_factor=scale_factor,
transpose_batch_sequence=self.transpose_batch_sequence,
softmax_type=softmax_type,
softmax_sharding_type=first_sharding_type,
mask=mask,
bias=bias,
dropout_rng=dropout_rng,
dropout_rate=self.dropout_rate,
deterministic=deterministic,
dtype=self.dtype,
float32_logits=self.float32_logits)
x = x.reshape((x.shape[0], x.shape[1], x.shape[2] * x.shape[3]))
attn_context_sharding_constraint = \
(SEQLEN_AXES, BATCH_AXES, HIDDEN_TP_AXES) \
if self.transpose_batch_sequence \
else (BATCH_AXES, SEQLEN_AXES, HIDDEN_TP_AXES)
x = _with_sharding_constraint(x, attn_context_sharding_constraint)
out = DenseGeneral(features=inputs_q.shape[-1],
sharding_type=second_sharding_type,
transpose_batch_sequence=self.transpose_batch_sequence,
axis=-1,
kernel_init=self.kernel_init,
kernel_axes=(W_TP_AXES, W_FSDP_AXES),
use_bias=self.use_bias,
bias_init=self.bias_init,
bias_axes=(W_NO_SHARD_AXES,),
dtype=self.dtype,
name='out')(x)
return out, residual
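# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# A minimal self-attention example for the MultiHeadAttention module above. It assumes
# the default unfused path (NVTE_FUSED_ATTN unset) and a Transformer Engine build whose
# layernorm/softmax kernels are available on the current device. With the default
# transpose_batch_sequence=True, inputs are laid out as (seqlen, batch, hidden).
def _example_multi_head_attention_usage():
    import jax
    import jax.numpy as jnp

    layer = MultiHeadAttention(head_dim=64,
                               num_heads=8,
                               dropout_rate=0.0,
                               attn_mask_type='causal')
    x = jnp.zeros((128, 2, 512), dtype=jnp.float32)    # (seqlen, batch, hidden)
    variables = layer.init(jax.random.PRNGKey(0), x, x, deterministic=True)
    # Self-attention: the same tensor is used for the query and the key/value inputs.
    out, residual = layer.apply(variables, x, x, deterministic=True)
    return out, residual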
class RelativePositionBiases(nn.Module):
"""
    T5-style relative positional embeddings added to the attention logits.
Parameters
----------
num_buckets : int
The number of buckets to bucket distances between key and query positions into.
max_distance : int
The maximum distance before everything is lumped into the last
distance bucket.
num_attention_heads : int
Number of attention heads in the transformer layer.
embedding_init : Initializer, default = flax.linen.linear.default_embed_init
Used for initializing relative embedding tables.
embedding_axes : Tuple[str, ...], default = ('heads', 'relpos_buckets')
The name of axes used to shard embedding attention bias with a corresponding mesh.
Optimization parameters
-----------------------
dtype : jax.numpy.dtype, default = jax.numpy.float32
The data type used to allocate the initial parameters.
"""
num_buckets: int
max_distance: int
num_attention_heads: int
embedding_init: Callable[..., Array] = nn.linear.default_embed_init
embedding_axes: Tuple[str, ...] = ('heads', 'relpos_buckets')
dtype: DType = jnp.float32
@nn.compact
def __call__(self, q_seqlen, k_seqlen, bidirectional=True):
"""
Generate relative position embedding attention biases.
Parameters
----------
q_seqlen : int
The sequence length of query.
k_seqlen : int
The sequence length of key.
bidirectional : bool, default = True
Indicate whether to allow positive memory-query relative position
embeddings.
Returns
-------
output: jax.numpy.ndarray
An attention bias with shape `(1, num_attention_heads, q_seqlen, k_seqlen)`.
"""
context_position = np.arange(q_seqlen, dtype=jnp.int32)[:, None]
memory_position = np.arange(k_seqlen, dtype=jnp.int32)[None, :]
relative_position = memory_position - context_position
# Compute relative position bucket
rp_bucket = 0
negative_rp = -relative_position
rpb_num_buckets = self.num_buckets
if bidirectional:
rpb_num_buckets //= 2
rp_bucket += (negative_rp < 0).astype(np.int32) * rpb_num_buckets
negative_rp = np.abs(negative_rp)
else:
negative_rp = np.maximum(negative_rp, 0)
rpb_max_exact = rpb_num_buckets // 2
rpb_is_small = negative_rp < rpb_max_exact
rpb_val_if_large = rpb_max_exact + (
np.log(negative_rp.astype(np.float32) / rpb_max_exact + np.finfo(np.float32).eps) /
np.log(self.max_distance / rpb_max_exact) *
(rpb_num_buckets - rpb_max_exact)).astype(np.int32)
rpb_val_if_large = np.minimum(rpb_val_if_large, rpb_num_buckets - 1)
rp_bucket += np.where(rpb_is_small, negative_rp, rpb_val_if_large)
# Compute relative attention bias
relative_attention_bias = nn_partitioning.param_with_axes(
'rel_embedding',
self.embedding_init, (self.num_attention_heads, self.num_buckets),
jnp.float32,
axes=self.embedding_axes)
relative_attention_bias = jnp.asarray(relative_attention_bias, self.dtype)
bcast_iota = lax.broadcasted_iota(jnp.int32, (self.num_buckets, 1, 1), 0)
rp_bucket_one_hot = jnp.array(rp_bucket[jnp.newaxis, ...] == bcast_iota, dtype=self.dtype)
values = lax.dot_general(relative_attention_bias, rp_bucket_one_hot,
(((1,), (0,)), ((), ())))
return values[jnp.newaxis, ...]
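# --- Illustrative usage sketch (editor addition) ---
# A minimal example of the RelativePositionBiases module above, run as a standalone
# Flax module. The produced bias has shape (1, num_attention_heads, q_seqlen, k_seqlen)
# and can be passed as the `bias` argument of MultiHeadAttention.
def _example_relative_position_biases_usage():
    import jax

    rel_emb = RelativePositionBiases(num_buckets=32,
                                     max_distance=128,
                                     num_attention_heads=8)
    variables = rel_emb.init(jax.random.PRNGKey(0), 16, 16, bidirectional=True)
    # Returns a (1, 8, 16, 16) bias tensor.
    return rel_emb.apply(variables, 16, 16, bidirectional=True)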
class TransformerLayerType(Enum):
r"""
TransformerLayerType is an Enum class to specify a type of TransformerLayer
Values
----------
ENCODER:
Encoder type of TransformerLayer.
DECODER:
Decoder type of TransformerLayer.
"""
ENCODER = "encoder"
DECODER = "decoder"
class TransformerLayer(nn.Module):
r"""
TransformerLayer is made up of a relative embedding,
an attention block and a feedforward network (MLP).
This standard layer is based on the paper “Attention Is All You Need”.
.. note::
Argument :attr:`attention_mask` will be ignored when
:attr:`self_attn_mask_type` is set to `"causal"`.
Parameters
----------
hidden_size: int, default = 512
The hidden size of each input sample.
mlp_hidden_size: int, default = 2048
Intermediate size to which input samples are projected.
num_attention_heads: int, default = 8
Number of attention heads in the transformer layer.
layernorm_type : {'layernorm', 'rmsnorm'}, default = 'layernorm'
Indicate the type of layer normalization.
layernorm_epsilon: float, default = 1e-6
A value added to the denominator of layer normalization for numerical stability.
zero_centered_gamma : bool, default = False
If set to `True`, the LayerNorm formula changes to
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} *
(1 + \gamma) + \beta
This parameter is only applicable for 'layernorm'.
    hidden_dropout: float, default = 0.1
        Dropout probability for the dropout op after the FC2 layer.
    hidden_dropout_dims: Sequence[int], default = ()
        Dimensions that will share the same dropout mask for the hidden dropout op.
attention_dropout: float, default = 0.1
Dropout probability for the dropout op during multi-head attention.
    dropout_rng_name: str, default = 'dropout'
        The key in the RNG dictionary given to flax.linen.Module.apply that is used
        for generating dropout masks in the multi-head attention.
mha_kernel_init: Initializer, default =
flax.linen.initializers.variance_scaling(1.0, 'fan_in', 'normal')
        Used for initializing the weights of the QKV and output projections.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
mlp_kernel_init: Initializer, default =
flax.linen.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
Used for initializing weights of FC1 and FC2 layers.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
mlp_activations: Sequence[str], default = ('relu', )
The sequence of activation functions to apply after the first linear transformation.
Each activation has its own transformation layer.
use_bias: bool, default = False
Indicate whether to enable bias shifting for QKVO projections, FC1 and FC2.
If set to False, the layer will not learn additive biases.
bias_init: Initializer, default = flax.linen.initializers.zeros
Used for initializing bias of QKVO projections,
FC1 and FC2. It is only used when :attr:`use_bias=True`.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
apply_residual_connection_post_layernorm: bool, default = False
If set to True, residual connections are taken from the output
of layer norm (default is taken from input of layer norm)
output_layernorm: bool, default = False
        If set to True, layer normalization is applied on the output side,
        after the final dropout-add. The default behavior is to apply layer
        normalization on the input side, before the QKV transformation.
float32_attention_logits: bool, default = False
If set to True, attention logits are executed in jax.numpy.float32.
    layer_type: TransformerLayerType, default = TransformerLayerType.ENCODER
        If set to TransformerLayerType.DECODER, an additional cross-attention block
        is added after self-attention. This can be used for structures like the `T5`
        Transformer in conjunction with the TransformerLayerType.ENCODER option.
self_attn_mask_type: {'causal', 'padding'}, default = 'causal'
Type of attention mask passed into softmax operation.
Introduced in v0.10.0.
enable_relative_embedding: bool, default = True
Whether to enable relative embedding as shifting of attention logits.
relative_embedding: flax.linen.Module, default = None
The module for relative embedding execution, only used when
:attr:`enable_relative_embedding=True`. Default is None, which will create
an instance of RelativePositionBiases if :attr:`enable_relative_embedding=True`.
Default: RelativePositionBiases( num_buckets=32, max_distance=128,
num_attention_heads=self.num_attention_heads, dtype=self.dtype,
embedding_init=flax.linen.initializers.variance_scaling(1.0, 'fan_avg', 'uniform'),
name='relpos_bias')
Optimization parameters
-----------------------
dtype :jax.numpy.dtype, default = jax.numpy.float32
The data type used to allocate the initial parameters.
drop_path: float, default = 0.0
When > 0.0, applies stochastic depth per sample in the main
path of the residual block.
fuse_qkv_params: bool, default = True
If set to True, `TransformerLayer` module exposes a single fused
parameter for query-key-value for self-attention and key-value for
cross-attention.
transpose_batch_sequence : bool, default = False
Indicate whether the input tensors were switched axis of batch
        and sequence length dimension. If set to True, the input tensors
should be in (seqlen, batch, hidden), otherwise (batch, seqlen, hidden).
scale_attn_logits: bool, default = False
        Indicate whether to scale attention logits.
        If set to True, the attention logits are computed as
        :math:`\frac{Q}{\sqrt{head\_dim}} \cdot K`, otherwise as :math:`Q \cdot K`.
    scaled_query_init: bool, default = True
        Whether to scale WQ on initialization by :math:`\sqrt{head\_dim}`.
"""
hidden_size: int = 512
mlp_hidden_size: int = 2048
num_attention_heads: int = 8
layernorm_type: str = 'layernorm'
layernorm_epsilon: float = 1e-6
zero_centered_gamma: bool = False
hidden_dropout: float = 0.1
hidden_dropout_dims: Sequence[int] = ()
attention_dropout: float = 0.1
dropout_rng_name: str = 'dropout'
mha_kernel_init: Initializer = None
mlp_kernel_init: Initializer = None
mlp_activations: Sequence[str] = ('relu',)
use_bias: bool = False
bias_init: Initializer = nn.initializers.zeros
apply_residual_connection_post_layernorm: bool = False
output_layernorm: bool = False
float32_attention_logits: bool = False
layer_type: TransformerLayerType = TransformerLayerType.ENCODER
self_attn_mask_type: str = 'causal'
enable_relative_embedding: bool = True
relative_embedding: nn.Module = None
dtype: DType = jnp.float32
drop_path: float = 0.0
fuse_qkv_params: bool = True
transpose_batch_sequence: bool = False
scale_attn_logits: bool = False
scaled_query_init: bool = True
def __post_init__(self):
if self.mha_kernel_init is None:
self.mha_kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'normal')
if self.mlp_kernel_init is None:
self.mlp_kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in',
'truncated_normal')
super().__post_init__()
@nn.compact
def __call__(self,
inputs: Array,
encoded: Array = None,
attention_mask: Array = None,
encoder_decoder_mask: Array = None,
deterministic: bool = False,
decode: bool = False,
                 max_decode_length: int = None):
"""
Transformer Layer: attention block and a feedforward network (MLP)
Parameters
----------
inputs : jax.numpy.ndarray
Input tensor.
encoded : jax.numpy.ndarray, default = None
Output tensors of the encoder block to be fed into the decoder block if using
:attr:`layer_type=TransformerLayerType.DECODER`.
attention_mask : jax.numpy.ndarray, default = None
Boolean tensor used to mask out self-attention softmax input.
encoder_decoder_mask : jax.numpy.ndarray, default = None
Boolean tensor used to mask out cross-attention softmax input when
:attr:`layer_type=TransformerLayerType.DECODER`.
deterministic: bool, default = False
Disable dropout layers if set to True.
        decode: bool, default = False
Indicate whether to prepare and use an autoregressive cache
in Multi-head attention (MHA).
        max_decode_length : int, default = None
The maximum length to generate relative embedding biases when
:attr:`layer_type=TransformerLayerType.DECODER` and
:attr:`enable_relative_embedding=True`.
Returns
-------
outputs : jax.numpy.ndarray
Output tensors.
"""
assert self.layer_type in TransformerLayerType, \
"layer_type should be one of TransformerLayerType" \
f", but got {self.layer_type}."
assert self.hidden_size % self.num_attention_heads == 0, \
"hidden_size should be multiples of num_attention_heads" \
f", but got {self.hidden_size=} and {self.num_attention_heads=}."
assert self.layer_type == TransformerLayerType.DECODER or \
(self.layer_type == TransformerLayerType.ENCODER and decode is False), \
"decode should be False when layer_type == TransformerLayerType.ENCODER."
head_dim = self.hidden_size // self.num_attention_heads
sequence_dim = 0 if self.transpose_batch_sequence else 1
batch_dim = 1 - sequence_dim
attn_bias = None
if self.enable_relative_embedding:
if self.relative_embedding is None:
rel_emb = RelativePositionBiases(num_buckets=32,
max_distance=128,
num_attention_heads=self.num_attention_heads,
dtype=self.dtype,
embedding_init=nn.initializers.variance_scaling(
1.0, 'fan_avg', 'uniform'),
name='relpos_bias')
else:
rel_emb = self.relative_embedding
if self.layer_type == TransformerLayerType.ENCODER:
attn_bias = rel_emb(inputs.shape[sequence_dim], inputs.shape[sequence_dim], True)
else:
if decode and max_decode_length:
l = max_decode_length
else:
l = inputs.shape[sequence_dim]
attn_bias = rel_emb(l, l, False)
assert inputs.ndim == 3
        # Make the names exactly the same as in T5X, since names affect the
        # RNGKey during init and apply. Maybe this will not be needed in the future.
if self.layer_type == TransformerLayerType.ENCODER:
mha_name = 'attention'
else:
mha_name = 'self_attention'
# [batch, length, emb_dim] -> [batch, length, emb_dim]
x, residual = MultiHeadAttention(
num_heads=self.num_attention_heads,
dtype=self.dtype,
head_dim=head_dim,
transpose_batch_sequence=self.transpose_batch_sequence,
dropout_rate=self.attention_dropout,
dropout_rng_name=self.dropout_rng_name,
float32_logits=self.float32_attention_logits,
scale_attn_logits=self.scale_attn_logits,
scaled_query_init=self.scaled_query_init,
layernorm_type=self.layernorm_type,
layernorm_epsilon=self.layernorm_epsilon,
zero_centered_gamma=self.zero_centered_gamma,
apply_residual_connection_post_layernorm=self.apply_residual_connection_post_layernorm,
output_layernorm=self.output_layernorm,
attn_mask_type=self.self_attn_mask_type,
fuse_qkv=self.fuse_qkv_params,
kernel_init=self.mha_kernel_init,
use_bias=self.use_bias,
bias_init=self.bias_init,
name=mha_name)(inputs,
inputs,
attention_mask,
attn_bias,
deterministic=deterministic,
decode=decode)
def hidden_dropout(x, deterministic):
assert isinstance(self.hidden_dropout_dims, Sequence)
x_shape_len = len(x.shape)
for dims in self.hidden_dropout_dims:
assert -x_shape_len <= dims < x_shape_len
return nn.Dropout(rate=self.hidden_dropout,
broadcast_dims=self.hidden_dropout_dims)(x,
deterministic=deterministic)
x = hidden_dropout(x, deterministic)
if self.drop_path > 0.0:
drop_path_shape = _generate_drop_path_shape(x.shape, batch_dim)
x = nn.Dropout(rate=self.drop_path,
broadcast_dims=drop_path_shape)(x, deterministic=deterministic)
x = x + residual
mlp_input = x
if self.layer_type == TransformerLayerType.DECODER:
assert encoded is not None, \
"encoded is required when layer_type == TransformerLayerType.DECODER."
y, residual = MultiHeadAttention(
num_heads=self.num_attention_heads,
dtype=self.dtype,
head_dim=head_dim,
transpose_batch_sequence=self.transpose_batch_sequence,
dropout_rate=self.attention_dropout,
dropout_rng_name=self.dropout_rng_name,
layernorm_type=self.layernorm_type,
layernorm_epsilon=self.layernorm_epsilon,
zero_centered_gamma=self.zero_centered_gamma,
apply_residual_connection_post_layernorm=self.
apply_residual_connection_post_layernorm,
output_layernorm=False, # Must do LayerNorm before MHA.
attn_mask_type='padding',
float32_logits=self.float32_attention_logits,
scale_attn_logits=self.scale_attn_logits,
scaled_query_init=self.scaled_query_init,
fuse_qkv=self.fuse_qkv_params,
kernel_init=self.mha_kernel_init,
use_bias=self.use_bias,
bias_init=self.bias_init,
name='encoder_decoder_attention')(x,
encoded,
encoder_decoder_mask,
deterministic=deterministic)
y = hidden_dropout(y, deterministic)
mlp_input = y + residual
# MlpBlock
residual = mlp_input
z, ln_out = LayerNormMLP(
layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
epsilon=self.layernorm_epsilon,
major_sharding_type=infer_major_sharding_type(),
transpose_batch_sequence=self.transpose_batch_sequence,
return_layernorm_output=self.apply_residual_connection_post_layernorm,
intermediate_dim=self.mlp_hidden_size,
activations=self.mlp_activations,
intermediate_dropout_rate=self.hidden_dropout,
intermediate_hidden_dropout_dims=self.hidden_dropout_dims,
dtype=self.dtype,
scale_axes=(W_NO_SHARD_AXES,),
ln_bias_axes=(W_NO_SHARD_AXES,),
kernel_init=self.mlp_kernel_init,
kernel_axes_1=(W_FSDP_AXES, W_JOINED_AXES, W_TP_AXES),
kernel_axes_2=(W_TP_AXES, W_FSDP_AXES),
use_bias=self.use_bias,
bias_init=self.bias_init,
bias_axes_1=(W_JOINED_AXES, W_TP_AXES),
bias_axes_2=(W_NO_SHARD_AXES,),
name='mlp',
)(mlp_input, deterministic=deterministic)
if self.apply_residual_connection_post_layernorm:
assert ln_out is not None
residual = ln_out
z = hidden_dropout(z, deterministic)
if self.drop_path > 0.0:
drop_path_shape = _generate_drop_path_shape(z.shape, batch_dim)
z = nn.Dropout(rate=self.drop_path,
broadcast_dims=drop_path_shape)(z, deterministic=deterministic)
z = z + residual
if self.output_layernorm:
ln_sharding_type, _ = infer_sharding_type()
z = LayerNorm(layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
epsilon=self.layernorm_epsilon,
scale_axes=(W_NO_SHARD_AXES,),
bias_axes=(W_NO_SHARD_AXES,),
transpose_batch_sequence=self.transpose_batch_sequence,
dtype=self.dtype,
sharding_type=ln_sharding_type,
name="output_layer_norm")(z)
return z
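# --- Illustrative usage sketch (editor addition) ---
# A minimal encoder-layer example with dropout disabled via deterministic=True. Inputs
# are (batch, seqlen, hidden) because transpose_batch_sequence defaults to False here.
# Running it assumes a Transformer Engine build with the required fused kernels.
def _example_transformer_layer_usage():
    import jax
    import jax.numpy as jnp

    layer = TransformerLayer(hidden_size=512,
                             mlp_hidden_size=2048,
                             num_attention_heads=8,
                             layer_type=TransformerLayerType.ENCODER)
    x = jnp.zeros((2, 128, 512), dtype=jnp.float32)    # (batch, seqlen, hidden)
    variables = layer.init(jax.random.PRNGKey(0), x, deterministic=True)
    return layer.apply(variables, x, deterministic=True)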
| TransformerEngine-main | transformer_engine/jax/flax/transformer.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""
Wrapper module for Transformer related layers with FP8 support.
"""
import functools
import operator
from typing import Any, Callable, Iterable, List, Sequence, Tuple, Union
import jax.numpy as jnp
import numpy as np
from flax import linen as nn
from flax.linen import partitioning as nn_partitioning
from jax import lax
from jax import nn as jax_nn
from jax import random as jax_random
from ..dot import fp8_dot
from ..fp8 import FP8GemmPackage, FP8Helper
from ..layernorm import canonicalize_layernorm_type
from ..layernorm import layernorm, layernorm_fp8_dot
from ..mlp import fp8_ln_mlp, geglu
from ..sharding import infer_sharding_type
from ..softmax import is_softmax_kernel_available
from ..sharding import MajorShardingType, ShardingType
from ..softmax import softmax, SoftmaxType
PRNGKey = Any
Shape = Tuple[int, ...]
DType = jnp.dtype
Array = jnp.ndarray
PrecisionLike = Union[None, str, lax.Precision, Tuple[str, str], Tuple[lax.Precision,
lax.Precision]]
Initializer = Callable[[PRNGKey, Shape, DType], Array]
def _normalize_axes(axes: Iterable[int], ndim: int) -> Tuple[int]:
# A tuple by convention. len(axes_tuple) then also gives the rank efficiently.
return tuple(ax if ax >= 0 else ndim + ax for ax in axes)
def _canonicalize_tuple(x):
if isinstance(x, Iterable):
return tuple(x)
return (x,)
def _obtain_default_layernorm_scale_init_if_need(original_init, zero_centered_gamma):
    if original_init is None:
        if not zero_centered_gamma:
            return nn.initializers.ones
        return nn.initializers.zeros
    # Keep the user-provided initializer when one is given.
    return original_init
def _create_layernorm_parameters(layernorm_type, shape, scale_init, scale_axes, bias_init,
bias_axes, dtype):
scale = nn_partitioning.param_with_axes('scale',
scale_init,
shape,
jnp.float32,
axes=scale_axes)
scale = jnp.asarray(scale, dtype)
layernorm_type = canonicalize_layernorm_type(layernorm_type)
if layernorm_type == 'layernorm':
bias = nn_partitioning.param_with_axes('ln_bias',
bias_init,
shape,
jnp.float32,
axes=bias_axes)
bias = jnp.asarray(bias, dtype)
else:
assert layernorm_type == 'rmsnorm'
bias = None
return scale, bias
def _convert_to_activation_function(fn_or_string: Union[str, Callable]) -> Callable:
"""Convert a string to an activation function."""
if fn_or_string == 'linear':
return lambda x: x
if isinstance(fn_or_string, str):
return getattr(nn, fn_or_string)
if callable(fn_or_string):
return fn_or_string
raise ValueError(f"don't know how to convert {fn_or_string} to an activation function")
def _combine_biases(*masks: List[Array]):
"""Combine attention biases."""
masks = [m for m in masks if m is not None]
if not masks:
return None
assert all(map(lambda x: x.ndim == masks[0].ndim,
masks)), (f'masks must have same rank: {tuple(map(lambda x: x.ndim, masks))}')
mask, *other_masks = masks
for other_mask in other_masks:
mask = mask + other_mask
return mask
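# --- Illustrative usage sketch (editor addition) ---
# A tiny example of the two helpers above: converting an activation name to a callable
# and additively combining attention biases (None entries are ignored).
def _example_helper_usage():
    import jax.numpy as jnp

    act_fn = _convert_to_activation_function('gelu')    # resolves to flax.linen.gelu
    _ = act_fn(jnp.zeros((2, 4)))
    bias_a = jnp.zeros((1, 1, 8, 8))
    bias_b = jnp.ones((1, 1, 8, 8))
    return _combine_biases(bias_a, None, bias_b)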
class Softmax(nn.Module):
r"""
Applies softmax over a mini-batch of inputs.
The input's shape should be [batch, heads, q_seqlen, k_seqlen].
.. code-block:: python
shifted_input = input + bias
masked_scaled = (1 - mask)*(shifted_input * scale_factor)
        softmax_mask = mask * -1e10
output = softmax(masked_scaled + softmax_mask)
Parameters
----------
scale_factor : float, default = 1.0
Scalar for the input to softmax.
softmax_type : SoftmaxType, default = SoftmaxType.SCALED
Indicate the type of softmax.
Optimization parameters
-----------------------
sharding_type : ShardingType, default = ShardingType.SINGLE
Indicate the sharding pattern.
"""
scale_factor: float = 1.0
softmax_type: SoftmaxType = SoftmaxType.SCALED
sharding_type: ShardingType = ShardingType.SINGLE
@nn.compact
def __call__(self, inputs: Array, mask: Array = None, bias: Array = None) -> jnp.ndarray:
batch = inputs.shape[0]
heads = inputs.shape[1]
q_seqlen = inputs.shape[2]
k_seqlen = inputs.shape[3]
dtype = inputs.dtype
logits = inputs
if (self.softmax_type is not SoftmaxType.SCALED and is_softmax_kernel_available(
self.softmax_type, batch, heads, q_seqlen, k_seqlen, inputs.dtype)):
if bias is not None:
logits = logits + bias.astype(dtype)
mask_ = mask
if self.softmax_type is not SoftmaxType.SCALED_MASKED:
mask_ = None
outputs = softmax(logits, mask_, self.scale_factor, self.softmax_type,
self.sharding_type)
else:
attention_bias = None
if mask is not None:
attention_bias = lax.select(mask > 0,
jnp.full(mask.shape, -1e10).astype(dtype),
jnp.full(mask.shape, 0.).astype(dtype))
if bias is not None:
attention_bias = _combine_biases(attention_bias, bias)
if attention_bias is not None:
logits = logits + attention_bias.astype(dtype)
            # If the requested fused kernel (e.g. SCALED_UPPER_TRIANG_MASKED) is
            # unavailable, fall back to the pure scaled softmax custom call when possible.
if is_softmax_kernel_available(SoftmaxType.SCALED, batch, heads, q_seqlen, k_seqlen,
dtype):
outputs = softmax(logits, None, self.scale_factor, SoftmaxType.SCALED,
self.sharding_type)
else:
outputs = jax_nn.softmax(logits * self.scale_factor)
return outputs
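# --- Illustrative usage sketch (editor addition) ---
# A minimal example of the Softmax module on (batch, heads, q_seqlen, k_seqlen) logits.
# With SoftmaxType.SCALED, float32 inputs and no mask/bias, it falls back to
# jax.nn.softmax when no fused kernel is available, so it runs on any backend.
def _example_softmax_usage():
    import jax
    import jax.numpy as jnp

    softmax_module = Softmax(scale_factor=0.125, softmax_type=SoftmaxType.SCALED)
    logits = jnp.zeros((2, 8, 64, 64), dtype=jnp.float32)
    variables = softmax_module.init(jax.random.PRNGKey(0), logits)
    return softmax_module.apply(variables, logits)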
class LayerNorm(nn.Module):
r"""
Applies layer normalization over a mini-batch of inputs.
There are two types of normalization supported by this module,
regular and root mean square layer Normalization.
The regular layer normalization is as described in
the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`__
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
:math:`\gamma` and :math:`\beta` are learnable affine transform parameters of
size of each input sample.
The root mean square layer normalization (RMSNorm) is as described in
the paper `Root Mean Square Layer Normalization <https://arxiv.org/abs/1910.07467>`__
.. math::
y = \frac{x}{ \mathrm{RMS}[x] + \epsilon} * \gamma
.. math::
RMS = \sqrt{\mathrm{E}[x^2]}
:math:`\gamma` is learnable affine transform parameters of
size of each input sample.
Parameters
----------
epsilon : float, default = 1e-6
A value added to the denominator of layer normalization for numerical stability.
layernorm_type : {'layernorm', 'rmsnorm'}, default = 'layernorm'
Indicate the type of layer normalization.
zero_centered_gamma : bool, default = False
If set to `True`, the LayerNorm formula changes to
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} *
(1 + \gamma) + \beta
This parameter is only applicable for 'layernorm'.
The default of `scale_init` will also be changed. See `scale_init`.
scale_init : Initializer, default = None
Used for initializing scale factors :math:`\gamma`.
If `None` is provided, scale_init is set according to the value of zero_centered_gamma.
If zero_centered_gamma is set to `True`, then scale_init is `flax.linen.initializers.zeros`.
Otherwise, scale_init is `flax.linen.initializers.ones`.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
scale_axes : Tuple[str, ...], default = ('embed', )
The name of axes used to shard the scale factors :math:`\gamma` with a corresponding mesh.
bias_init : Initializer, default = flax.linen.initializers.zeros
Used for initializing shift factors :math:`\beta`,
only used when :attr:`layernorm_type='layernorm'`.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
bias_axes : Tuple[str, ...], default = ('embed', )
The name of axes used to shard the shift factors :math:`\beta` with a corresponding mesh.
only used when :attr:`layernorm_type='layernorm'`.
Optimization parameters
-----------------------
dtype : jax.numpy.dtype, default = jax.numpy.float32
the data type used to allocate the initial parameters.
transpose_batch_sequence : bool, default = False
Indicate whether the input tensors were switched axis of batch
and sequence length dimension. If set to True, the input tensors
should be in (seqlen, batch, hidden), otherwise (batch, seqlen, hidden).
sharding_type : ShardingType, default = ShardingType.SINGLE
Indicate the sharding pattern.
"""
epsilon: float = 1e-6
layernorm_type: str = 'layernorm'
zero_centered_gamma: bool = False
scale_init: Initializer = None
scale_axes: Tuple[str, ...] = ('embed',)
bias_init: Initializer = nn.initializers.zeros
bias_axes: Tuple[str, ...] = ('embed',)
dtype: DType = jnp.float32
transpose_batch_sequence: bool = False
sharding_type: ShardingType = ShardingType.SINGLE
def __post_init__(self):
self.scale_init = _obtain_default_layernorm_scale_init_if_need(
self.scale_init, self.zero_centered_gamma)
super().__post_init__()
@nn.compact
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
"""
Applies layer normalization to the input :attr:`inputs`.
Parameters
----------
inputs : jax.numpy.ndarray
Input tensors.
Returns
-------
outputs : jax.numpy.ndarray
Output tensors.
"""
features = x.shape[-1]
scale, ln_bias = _create_layernorm_parameters(self.layernorm_type, (features,),
self.scale_init, self.scale_axes,
self.bias_init, self.bias_axes, self.dtype)
return layernorm(x,
scale,
ln_bias,
layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
epsilon=self.epsilon,
sharding_type=self.sharding_type,
dp_dim_index=1 if self.transpose_batch_sequence else 0)
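# --- Illustrative usage sketch (editor addition) ---
# A minimal RMSNorm example over the last dimension of a (batch, seqlen, hidden) tensor.
# The underlying `layernorm` op is a Transformer Engine custom call, so running this
# assumes the corresponding kernel is available on the current device.
def _example_layernorm_usage():
    import jax
    import jax.numpy as jnp

    ln = LayerNorm(layernorm_type='rmsnorm', epsilon=1e-6)
    x = jnp.zeros((2, 128, 512), dtype=jnp.float32)
    variables = ln.init(jax.random.PRNGKey(0), x)
    return ln.apply(variables, x)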
class TransformerEngineBase(nn.Module):
"""
Base class of transformer engine
"""
@staticmethod
def get_fp8_metas(num_of_gemm: int) -> List[jnp.ndarray]:
"""
Get the FP8 metas
"""
num_of_meta = num_of_gemm * FP8Helper.NUM_META_PER_GEMM
axes = ('fp8_meta_axis', 'fp8_meta_history')
fp8_max = nn_partitioning.variable_with_axes(FP8Helper.FP8_COLLECTION_NAME,
FP8Helper.FP8_MAX_NAME,
FP8Helper.generate_fp8_max_array,
num_of_meta,
axes=axes)
fp8_metas_amax = nn_partitioning.variable_with_axes(
FP8Helper.FP8_COLLECTION_NAME,
FP8Helper.FP8_AMAX_NAME,
jnp.zeros, (num_of_meta, FP8Helper.AMAX_HISTORY_LEN),
jnp.float32,
axes=axes)
fp8_metas_scale = nn_partitioning.variable_with_axes(FP8Helper.FP8_COLLECTION_NAME,
FP8Helper.FP8_SCALE_NAME,
jnp.ones, (num_of_meta, 1),
jnp.float32,
axes=axes)
fp8_metas_scale_inv = nn_partitioning.variable_with_axes(FP8Helper.FP8_COLLECTION_NAME,
FP8Helper.FP8_SCALE_INV_NAME,
jnp.ones, (num_of_meta, 1),
jnp.float32,
axes=axes)
return fp8_max.value, fp8_metas_amax.value, fp8_metas_scale.value, fp8_metas_scale_inv.value
@staticmethod
def get_fp8_gemm_package(num_of_gemm: int, inputs: jnp.ndarray,
kernels: List[jnp.ndarray]) -> FP8GemmPackage:
"""
        Get the FP8 GEMM package.
"""
assert num_of_gemm == len(kernels)
fp8_max, fp8_metas_amax, fp8_metas_scale, fp8_metas_scale_inv = \
TransformerEngineBase.get_fp8_metas(num_of_gemm)
return FP8GemmPackage(num_of_gemm, inputs, kernels, fp8_max, fp8_metas_amax,
fp8_metas_scale, fp8_metas_scale_inv)
class DenseGeneral(TransformerEngineBase):
"""
Applies a linear transformation to the incoming data :math:`y = xA^T + b`
Parameters
----------
features : Union[Iterable[int], int]
The hidden size of each output sample.
kernel_init : Initializer, default =
flax.linen.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
Used for initializing weights.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
kernel_axes : Tuple[str, ...], default = ()
The name of axes used to shard the weights with a corresponding mesh.
    use_bias: bool, default = True
Indicate whether to enable bias shifting.
If set to False, the layer will not learn an additive bias.
bias_init: Initializer, default = flax.linen.initializers.zeros
Used for initializing bias, only used when :attr:`use_bias=True`.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
bias_axes: Tuple[str, ...], default = ()
The name of axes used to shard bias with a corresponding mesh,
only used when :attr:`use_bias=True`.
axis: Union[Iterable[int], int], default = -1
An integer tuple with axes to apply the transformation on.
Optimization parameters
-----------------------
dtype : jax.numpy.dtype, default = jax.numpy.float32
The data type used to allocate the initial parameters.
    transpose_batch_sequence : bool, default = False
Indicate whether the input tensors were switched axis of batch
and sequence length dimension. If set to True, the input tensors
should be in (seqlen, batch, hidden), otherwise (batch, seqlen, hidden).
sharding_type : ShardingType, default = ShardingType.SINGLE
Indicate the sharding pattern.
"""
features: Union[Iterable[int], int]
kernel_init: Initializer = None
kernel_axes: Tuple[str, ...] = ()
use_bias: bool = True
bias_init: Initializer = nn.initializers.zeros
bias_axes: Tuple[str, ...] = ()
axis: Union[Iterable[int], int] = -1
dtype: DType = jnp.float32
transpose_batch_sequence: bool = False
sharding_type: ShardingType = ShardingType.SINGLE
def __post_init__(self):
if self.kernel_init is None:
self.kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
super().__post_init__()
@nn.compact
def __call__(self, inputs: Array) -> Array:
"""
Apply the linear transformation to the input.
Parameters
----------
inputs : jax.numpy.ndarray
Input tensors.
Returns
-------
outputs : jax.numpy.ndarray
Output tensors.
"""
features = _canonicalize_tuple(self.features)
axis = _canonicalize_tuple(self.axis)
inputs = jnp.asarray(inputs, self.dtype)
axis = _normalize_axes(axis, inputs.ndim)
kernel_shape = tuple(inputs.shape[ax] for ax in axis) + features
kernel_param_shape = (np.prod([inputs.shape[ax] for ax in axis]),) + features
kernel = nn_partitioning.param_with_axes('kernel',
self.kernel_init,
kernel_param_shape,
jnp.float32,
axes=self.kernel_axes)
kernel = jnp.reshape(kernel, kernel_shape)
if self.use_bias:
bias = nn_partitioning.param_with_axes('bias',
self.bias_init,
features,
self.dtype,
axes=self.bias_axes)
else:
bias = None
contract_ind = tuple(range(0, len(axis)))
if FP8Helper.is_fp8_enabled():
fp8_gemm_package = \
TransformerEngineBase.get_fp8_gemm_package(1, inputs, [kernel])
y = fp8_dot(fp8_gemm_package,
FP8Helper.FWD_DTYPE,
FP8Helper.BWD_DTYPE, (axis, contract_ind),
sharding_type=self.sharding_type,
dp_dim_index=1 if self.transpose_batch_sequence else 0)
else:
kernel = jnp.asarray(kernel, self.dtype)
y = lax.dot_general(inputs, kernel, ((axis, contract_ind), ((), ())))
if bias is not None:
bias_shape = (1,) * (y.ndim - bias.ndim) + bias.shape
y += jnp.reshape(bias, bias_shape)
return y
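# --- Illustrative usage sketch (editor addition) ---
# A minimal projection example on (batch, seqlen, hidden) inputs. With FP8 autocast
# disabled (FP8Helper.is_fp8_enabled() is False), this takes the plain lax.dot_general
# path, so it runs on any backend. The logical axis names are arbitrary examples.
def _example_dense_general_usage():
    import jax
    import jax.numpy as jnp

    dense = DenseGeneral(features=1024,
                         kernel_axes=('embed', 'mlp'),
                         use_bias=True,
                         bias_axes=('mlp',))
    x = jnp.zeros((2, 128, 512), dtype=jnp.float32)
    variables = dense.init(jax.random.PRNGKey(0), x)
    # Output shape: (2, 128, 1024).
    return dense.apply(variables, x)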
class LayerNormDenseGeneral(TransformerEngineBase):
r"""
Applies layer normalization followed by linear transformation to the incoming data.
Parameters
----------
features : Union[Iterable[int], int]
The hidden size of each output sample.
enable_layernorm: bool, default = True
Indicate whether to enable layer normalization before linear transformation.
layernorm_type : {'layernorm', 'rmsnorm'}, default = 'layernorm'
Indicate the type of layer normalization.
epsilon : float, default = 1e-6
A value added to the denominator of layer normalization for numerical stability.
zero_centered_gamma : bool, default = False
If set to `True`, the LayerNorm formula changes to
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} *
(1 + \gamma) + \beta
This parameter is only applicable for 'layernorm'.
The default of `scale_init` will also be changed. See `scale_init`
scale_init : Initializer, default = None
Used for initializing scale factors :math:`\gamma`.
If `None` is provided, scale_init is set according to the value of zero_centered_gamma.
If zero_centered_gamma is set to `True`, then scale_init is `flax.linen.initializers.zeros`.
Otherwise, scale_init is `flax.linen.initializers.ones`.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
scale_axes : Tuple[str, ...], default = ('embed', )
The name of axes used to shard the scale factors :math:`\gamma` with a corresponding mesh,
only used when :attr:`enable_layernorm=True`.
ln_bias_init: Initializer, default = flax.linen.initializers.zeros
Used for initializing shift factors :math:`\beta`,
only used when :attr:`enable_layernorm=True` and :attr:`layernorm_type='layernorm'`.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
ln_bias_axes: Tuple[str, ...], default = ('embed', )
The name of axes used to shard the shift factors :math:`\beta` with a corresponding mesh.
It is only used when :attr:`enable_layernorm=True` and :attr:`layernorm_type='layernorm'`.
kernel_init : Initializer, default =
flax.linen.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
Used for initializing weights.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
kernel_axes : Tuple[str, ...], default = ()
The name of axes used to shard the weights with a corresponding mesh.
use_bias: bool, default = False
Indicate whether to enable bias shifting.
If set to False, the layer will not learn an additive bias.
bias_init: Initializer, default = flax.linen.initializers.zeros
Used for initializing bias, only used when :attr:`use_bias=True`.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
bias_axes: Tuple[str, ...], default = ()
The name of axes used to shard bias with a corresponding mesh,
only used when :attr:`use_bias=True`.
return_layernorm_output: bool, default = True
Indicate whether to return the output of layer normalization.
        If set to False, return None as the second tensor in outputs.
axis: Union[Iterable[int], int], default = -1
An integer tuple with axes to apply the transformation on.
Optimization parameters
-----------------------
dtype : jax.numpy.dtype, default = jax.numpy.float32
The data type used to allocate the initial parameters.
transpose_batch_sequence : bool, default = True
Indicate whether the input tensors were switched axis of batch
and sequence length dimension. If set to True, the input tensors
should be in (seqlen, batch, hidden), otherwise (batch, seqlen, hidden).
depth_scaling: float, default = None
The factor to scale the output from `DenseGeneral`. It should be a float
value or None. When None is set, then no scaling is applied.
sharding_type : ShardingType, default = ShardingType.SINGLE
Indicate the sharding pattern.
"""
features: Union[Iterable[int], int]
enable_layernorm: bool = True
layernorm_type: str = 'layernorm'
epsilon: float = 1e-6
zero_centered_gamma: bool = False
scale_init: Initializer = None
scale_axes: Tuple[str, ...] = ('embed',)
ln_bias_init: Initializer = nn.initializers.zeros
ln_bias_axes: Tuple[str, ...] = ('embed',)
kernel_init: Initializer = None
kernel_axes: Tuple[str, ...] = ()
use_bias: bool = False
bias_init: Initializer = nn.initializers.zeros
bias_axes: Tuple[str, ...] = ()
return_layernorm_output: bool = True
axis: Union[Iterable[int], int] = -1
dtype: DType = jnp.float32
transpose_batch_sequence: bool = True
depth_scaling: float = None
sharding_type: ShardingType = ShardingType.SINGLE
def __post_init__(self):
if self.kernel_init is None:
self.kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
self.scale_init = _obtain_default_layernorm_scale_init_if_need(
self.scale_init, self.zero_centered_gamma)
super().__post_init__()
@nn.compact
def __call__(self, inputs: Array) -> Array:
"""
Apply layer normalization to the input followed by a linear transformation.
Parameters
----------
inputs: jax.numpy.ndarray
Input tensor.
Returns
-------
outputs : jax.numpy.ndarray
Output tensors.
ln_outputs: jax.numpy.ndarray
The output tensors of layer normalization.
If :attr:`return_layernorm_output=False`, then this would be None.
"""
ln_output = None
fuse_layernorm = FP8Helper.is_fp8_enabled(
) and not self.return_layernorm_output and self.enable_layernorm
if self.enable_layernorm:
features = inputs.shape[-1]
scale, ln_bias = _create_layernorm_parameters(self.layernorm_type, (features,),
self.scale_init, self.scale_axes,
self.ln_bias_init, self.ln_bias_axes,
self.dtype)
if not fuse_layernorm:
y = layernorm(inputs,
scale,
ln_bias,
layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
epsilon=self.epsilon,
sharding_type=self.sharding_type,
dp_dim_index=1 if self.transpose_batch_sequence else 0)
else:
assert not self.return_layernorm_output
y = inputs
else:
y = inputs
if self.return_layernorm_output:
ln_output = y
# DenseGeneral
features = _canonicalize_tuple(self.features)
axis = _canonicalize_tuple(self.axis)
axis = _normalize_axes(axis, y.ndim)
kernel_shape = tuple(y.shape[ax] for ax in axis) + features
kernel_param_shape = (np.prod([inputs.shape[ax] for ax in axis]),) + features
kernel = nn_partitioning.param_with_axes('kernel',
self.kernel_init,
kernel_param_shape,
jnp.float32,
axes=self.kernel_axes)
kernel = jnp.reshape(kernel, kernel_shape)
contract_ind = tuple(range(0, len(axis)))
if FP8Helper.is_fp8_enabled():
fp8_gemm_package = \
TransformerEngineBase.get_fp8_gemm_package(1, y, [kernel])
if not fuse_layernorm:
z = fp8_dot(fp8_gemm_package,
FP8Helper.FWD_DTYPE,
FP8Helper.BWD_DTYPE, (axis, contract_ind),
sharding_type=self.sharding_type,
dp_dim_index=1 if self.transpose_batch_sequence else 0)
else:
z = layernorm_fp8_dot(fp8_gemm_package,
scale,
ln_bias,
self.layernorm_type,
FP8Helper.FWD_DTYPE,
FP8Helper.BWD_DTYPE, (axis, contract_ind),
zero_centered_gamma=self.zero_centered_gamma,
epsilon=self.epsilon,
sharding_type=self.sharding_type,
dp_dim_index=1 if self.transpose_batch_sequence else 0)
else:
kernel = jnp.asarray(kernel, self.dtype)
z = lax.dot_general(y, kernel, ((axis, contract_ind), ((), ())))
bias = None
if self.use_bias:
bias = nn_partitioning.param_with_axes('bias',
self.bias_init,
features,
self.dtype,
axes=self.bias_axes)
if bias is not None:
bias_shape = (1,) * (z.ndim - bias.ndim) + bias.shape
z += jnp.reshape(bias, bias_shape)
if self.depth_scaling is not None:
z = z / self.depth_scaling
return z, ln_output # dense_output, layer_norm_output
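# --- Illustrative usage sketch (editor addition) ---
# A minimal layernorm + projection example. It returns both the projection output and
# the layernorm output, since return_layernorm_output defaults to True. Inputs are laid
# out as (seqlen, batch, hidden) because transpose_batch_sequence defaults to True here;
# the layernorm itself is a Transformer Engine custom call, so a matching kernel build
# is assumed. The logical axis names are arbitrary examples.
def _example_layernorm_dense_general_usage():
    import jax
    import jax.numpy as jnp

    ln_dense = LayerNormDenseGeneral(features=1024,
                                     layernorm_type='layernorm',
                                     kernel_axes=('embed', 'mlp'))
    x = jnp.zeros((128, 2, 512), dtype=jnp.float32)    # (seqlen, batch, hidden)
    variables = ln_dense.init(jax.random.PRNGKey(0), x)
    out, ln_out = ln_dense.apply(variables, x)
    return out, ln_out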
class LayerNormMLP(TransformerEngineBase):
r"""
Applies layer normalization on the input followed by the MLP module,
consisting of 2 successive linear transformations, separated by given activations.
Parameters
----------
intermediate_dim: int, default = 2048
Intermediate size to which input samples are projected.
enable_layernorm: bool, default = True
Indicate whether to enable layer normalization before linear transformation.
layernorm_type : {'layernorm', 'rmsnorm'}, default = 'layernorm'
Indicate the type of layer normalization.
epsilon : float, default = 1e-6
A value added to the denominator of layer normalization for numerical stability.
zero_centered_gamma : bool, default = False
If set to `True`, the LayerNorm formula changes to
.. math::
y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} *
(1 + \gamma) + \beta
This parameter is only applicable for 'layernorm'.
The default of `scale_init` will also be changed. See `scale_init`.
scale_init : Initializer, default = None
Used for initializing scale factors :math:`\gamma`.
If `None` is provided, scale_init is set according to the value of zero_centered_gamma.
If zero_centered_gamma is set to `True`, then scale_init is `flax.linen.initializers.zeros`.
Otherwise, scale_init is `flax.linen.initializers.ones`.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
scale_axes : Tuple[str, ...], default = ('embed', )
The name of axes used to shard the scale factors :math:`\gamma` with a corresponding mesh,
only used when :attr:`enable_layernorm=True`.
ln_bias_init: Initializer, default = flax.linen.initializers.zeros
Used for initializing shift factors :math:`\beta`,
only used when :attr:`enable_layernorm=True` and :attr:`layernorm_type='layernorm'`.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
ln_bias_axes: Tuple[str, ...], default = ('embed', )
The name of axes used to shard the shift factors :math:`\beta` with a corresponding mesh.
Only used when :attr:`enable_layernorm=True` and :attr:`layernorm_type='layernorm'`.
kernel_init : Initializer, default =
flax.linen.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
Used for initializing the weights of both linear transformations.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    kernel_axes_1 : Tuple[str, ...], default = ('embed', 'act', 'mlp')
        The name of axes used to shard the weight of the first linear transformation
        with a corresponding mesh.
    kernel_axes_2 : Tuple[str, ...], default = ('mlp', 'embed')
        The name of axes used to shard the weight of the second linear transformation
        with a corresponding mesh.
use_bias: bool, default = False
Indicate whether to enable bias shifting.
If set to False, the layer will not learn an additive bias.
bias_init: Initializer, default = flax.linen.initializers.zeros
Used for initializing bias, only used when :attr:`use_bias=True`.
It should be a callable object with three arguments (jax.random.PRNGKey, shape, dtype).
    bias_axes_1: Tuple[str, ...], default = ('act', 'mlp')
        The name of axes used to shard the bias of the first linear transformation
        with a corresponding mesh. Only used when :attr:`use_bias=True`.
    bias_axes_2: Tuple[str, ...], default = ('embed',)
        The name of axes used to shard the bias of the second linear transformation
        with a corresponding mesh. Only used when :attr:`use_bias=True`.
    return_layernorm_output: bool, default = True
        Indicate whether to return the output of layer normalization.
        If set to False, return None as the second tensor in outputs.
activations: Sequence[Union[str, Callable]], default = ('relu',)
The sequence of activation functions to apply after the first linear transformation.
Each activation has its own transformation layer.
intermediate_dropout_rate: float, default = 0.1
Dropout probability for the dropout op after the :attr:`activations`.
intermediate_hidden_dropout_dims: Sequence[int], default = ()
Dimensions that will share the same dropout mask for hidden
axis: Union[Iterable[int], int], default = -1
An integer tuple with axes to apply the transformation on.
Optimization parameters
-----------------------
dtype : jax.numpy.dtype, default = jax.numpy.float32
The data type used to allocate the initial parameters.
transpose_batch_sequence : bool, default = True
Indicate whether the input tensors were switched axis of batch
and sequence length dimension. If set to True, the input tensors
should be in (seqlen, batch, hidden), otherwise (batch, seqlen, hidden).
major_sharding_type : MajorShardingType, default = MajorShardingType.SINGLE
Indicate the sharding pattern.
"""
intermediate_dim: int = 2048
enable_layernorm: bool = True
layernorm_type: str = 'layernorm'
epsilon: float = 1e-6
zero_centered_gamma: bool = False
scale_init: Initializer = None
scale_axes: Tuple[str, ...] = ('embed',)
ln_bias_init: Initializer = nn.initializers.zeros
ln_bias_axes: Tuple[str, ...] = ('embed',)
kernel_init: Initializer = None
kernel_axes_1: Tuple[str, ...] = ('embed', 'act', 'mlp')
kernel_axes_2: Tuple[str, ...] = ('mlp', 'embed')
use_bias: bool = False
bias_init: Initializer = nn.initializers.zeros
bias_axes_1: Tuple[str, ...] = (
'act',
'mlp',
)
bias_axes_2: Tuple[str, ...] = ('embed',)
return_layernorm_output: bool = True
activations: Sequence[Union[str, Callable]] = ('relu',)
intermediate_dropout_rate: float = 0.1
intermediate_hidden_dropout_dims: Sequence[int] = ()
axis: Union[Iterable[int], int] = -1
dtype: DType = jnp.float32
transpose_batch_sequence: bool = True
major_sharding_type: MajorShardingType = MajorShardingType.SINGLE
def __post_init__(self):
if self.kernel_init is None:
self.kernel_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'truncated_normal')
self.scale_init = _obtain_default_layernorm_scale_init_if_need(
self.scale_init, self.zero_centered_gamma)
super().__post_init__()
@nn.compact
def __call__(self, inputs: Array, deterministic: bool = False) -> Array:
"""
Apply layer normalization to the input followed by a feedforward network (MLP Block).
Parameters
----------
inputs: jax.numpy.ndarray
Input tensor.
deterministic: bool, default = False
Disable dropout ops if set to True.
Returns
-------
outputs : jax.numpy.ndarray
Output tensors.
ln_outputs: jax.numpy.ndarray
The output tensors of layer normalization.
If :attr:`return_layernorm_output=False`, then this would be None.
"""
ln_output = None
fuse_layernorm = FP8Helper.is_fp8_enabled(
) and not self.return_layernorm_output and self.enable_layernorm
use_fused_ln_mlp = fuse_layernorm \
and (not self.use_bias) and self.activations == ('gelu', 'linear') \
and (self.intermediate_dropout_rate < 1e-3)
first_sharding_type, second_sharding_type = infer_sharding_type(self.major_sharding_type)
# LayerNorm
if self.enable_layernorm:
features = inputs.shape[-1]
scale, ln_bias = _create_layernorm_parameters(self.layernorm_type, (features,),
self.scale_init, self.scale_axes,
self.ln_bias_init, self.ln_bias_axes,
self.dtype)
if not fuse_layernorm:
y = layernorm(inputs,
scale,
ln_bias,
layernorm_type=self.layernorm_type,
zero_centered_gamma=self.zero_centered_gamma,
epsilon=self.epsilon,
sharding_type=first_sharding_type,
dp_dim_index=1 if self.transpose_batch_sequence else 0)
else:
assert not self.return_layernorm_output
y = inputs
else:
y = inputs
if self.return_layernorm_output:
ln_output = y
def kernel_1_init(key, num_kernels, stack_axis, *init_args):
kernels = []
for _ in range(num_kernels):
key, init_key = jax_random.split(key)
kernels.append(self.kernel_init(init_key, *init_args))
return jnp.stack(kernels, axis=stack_axis, dtype=jnp.float32)
num_of_gemm = 2
if use_fused_ln_mlp:
num_activations = len(self.activations)
axis = _canonicalize_tuple(self.axis)
axis = _normalize_axes(axis, inputs.ndim)
intermediate_dim = _canonicalize_tuple((num_activations, self.intermediate_dim))
kernel_1_shape = tuple(inputs.shape[ax] for ax in axis) + intermediate_dim
kernel_1_each_shape = (np.prod([y.shape[ax] for ax in axis]), self.intermediate_dim)
kernel_1 = nn_partitioning.param_with_axes('wi_kernel',
kernel_1_init,
num_activations,
-2,
kernel_1_each_shape,
jnp.float32,
axes=self.kernel_axes_1)
kernel_1 = jnp.reshape(kernel_1, kernel_1_shape)
hidden_size = inputs.shape[-1]
hidden_size_tuple = _canonicalize_tuple(hidden_size)
kernel_2_shape = (self.intermediate_dim,) + hidden_size_tuple
kernel_2_param_shape = (self.intermediate_dim, np.prod(hidden_size_tuple))
kernel_2 = nn_partitioning.param_with_axes('wo_kernel',
self.kernel_init,
kernel_2_param_shape,
jnp.float32,
axes=self.kernel_axes_2)
kernel_2 = jnp.reshape(kernel_2, kernel_2_shape)
contract_ind = tuple(range(0, len(axis)))
fp8_gemm_package = \
TransformerEngineBase.get_fp8_gemm_package(num_of_gemm, y, [kernel_1, kernel_2])
out = fp8_ln_mlp(fp8_gemm_package,
scale,
ln_bias,
self.layernorm_type,
FP8Helper.FWD_DTYPE,
FP8Helper.BWD_DTYPE,
zero_centered_gamma=self.zero_centered_gamma,
epsilon=self.epsilon,
contracting_dims=(axis, contract_ind),
major_sharding_type=self.major_sharding_type,
dp_dim_index=1 if self.transpose_batch_sequence else 0,
activations=self.activations)
else: # not use_fused_ln_mlp
def fp8_meta_generator():
fp8_max, fp8_metas_amax, fp8_metas_scale, fp8_metas_scale_inv = (None, None, None,
None)
if FP8Helper.is_fp8_enabled():
fp8_max, fp8_metas_amax, fp8_metas_scale, fp8_metas_scale_inv = \
TransformerEngineBase.get_fp8_metas(num_of_gemm)
return fp8_max, fp8_metas_amax, fp8_metas_scale, fp8_metas_scale_inv
fp8_max, fp8_metas_amax, fp8_metas_scale, fp8_metas_scale_inv = \
fp8_meta_generator()
# DenseGeneral 1
activations = []
num_activations = len(self.activations)
axis = _canonicalize_tuple(self.axis)
axis = _normalize_axes(axis, y.ndim)
intermediate_dim = _canonicalize_tuple((num_activations, self.intermediate_dim))
kernel_shape = tuple(y.shape[ax] for ax in axis) + intermediate_dim
kernel_1_each_shape = (np.prod([y.shape[ax] for ax in axis]), self.intermediate_dim)
kernel = nn_partitioning.param_with_axes('wi_kernel',
kernel_1_init,
num_activations,
-2,
kernel_1_each_shape,
jnp.float32,
axes=self.kernel_axes_1)
kernel = jnp.reshape(kernel, kernel_shape)
contract_ind = tuple(range(0, len(axis)))
if FP8Helper.is_fp8_enabled():
fp8_gemm_package = FP8GemmPackage(
1, y, [kernel], fp8_max[:FP8Helper.NUM_META_PER_GEMM, :],
fp8_metas_amax[:FP8Helper.NUM_META_PER_GEMM, :],
fp8_metas_scale[:FP8Helper.NUM_META_PER_GEMM, :],
fp8_metas_scale_inv[:FP8Helper.NUM_META_PER_GEMM, :])
if not fuse_layernorm:
x = fp8_dot(fp8_gemm_package,
FP8Helper.FWD_DTYPE,
FP8Helper.BWD_DTYPE, (axis, contract_ind),
sharding_type=first_sharding_type,
dp_dim_index=1 if self.transpose_batch_sequence else 0)
else:
x = layernorm_fp8_dot(fp8_gemm_package,
scale,
ln_bias,
self.layernorm_type,
FP8Helper.FWD_DTYPE,
FP8Helper.BWD_DTYPE, (axis, contract_ind),
zero_centered_gamma=self.zero_centered_gamma,
epsilon=self.epsilon,
sharding_type=first_sharding_type,
dp_dim_index=1 if self.transpose_batch_sequence else 0)
else: # not enable fp8
kernel = jnp.asarray(kernel, self.dtype)
x = lax.dot_general(y, kernel, ((axis, contract_ind), ((), ())))
bias = None
if self.use_bias:
bias = nn_partitioning.param_with_axes('wi_bias',
self.bias_init,
intermediate_dim,
self.dtype,
axes=self.bias_axes_1)
bias_shape = (1,) * (x.ndim - bias.ndim) + bias.shape
x += jnp.reshape(bias, bias_shape)
if self.activations == ('gelu', 'linear'):
z = geglu(x,
contracting_dims=(-2, -1),
sharding_type=second_sharding_type,
dp_dim_index=1 if self.transpose_batch_sequence else 0)
else:
x = jnp.split(x, num_activations, axis=-2)
for idx, act_fn in enumerate(self.activations):
x_i = _convert_to_activation_function(act_fn)(x[idx])
activations.append(x_i)
z = functools.reduce(operator.mul, activations)
z = jnp.reshape(z, (*z.shape[:-2], -1))
z = nn.Dropout(rate=self.intermediate_dropout_rate,
broadcast_dims=self.intermediate_hidden_dropout_dims)(
z, deterministic=deterministic)
# DenseGeneral 2
hidden_size = inputs.shape[-1]
hidden_size_tuple = _canonicalize_tuple(hidden_size)
axis = _canonicalize_tuple(self.axis)
axis = _normalize_axes(axis, z.ndim)
kernel_shape = tuple(z.shape[ax] for ax in axis) + hidden_size_tuple
kernel_param_shape = (np.prod([z.shape[ax] for ax in axis]), np.prod(hidden_size_tuple))
kernel = nn_partitioning.param_with_axes('wo_kernel',
self.kernel_init,
kernel_param_shape,
jnp.float32,
axes=self.kernel_axes_2)
kernel = jnp.reshape(kernel, kernel_shape)
contract_ind = tuple(range(0, len(axis)))
if FP8Helper.is_fp8_enabled():
fp8_gemm_package = FP8GemmPackage(
1, z, [kernel], fp8_max[FP8Helper.NUM_META_PER_GEMM:, :],
fp8_metas_amax[FP8Helper.NUM_META_PER_GEMM:, :],
fp8_metas_scale[FP8Helper.NUM_META_PER_GEMM:, :],
fp8_metas_scale_inv[FP8Helper.NUM_META_PER_GEMM:, :])
out = fp8_dot(fp8_gemm_package,
FP8Helper.FWD_DTYPE,
FP8Helper.BWD_DTYPE, (axis, contract_ind),
sharding_type=second_sharding_type,
dp_dim_index=1 if self.transpose_batch_sequence else 0)
else:
kernel = jnp.asarray(kernel, self.dtype)
out = lax.dot_general(z, kernel, ((axis, contract_ind), ((), ())))
bias = None
if self.use_bias:
bias = nn_partitioning.param_with_axes('wo_bias',
self.bias_init, (hidden_size,),
self.dtype,
axes=self.bias_axes_2)
out += jnp.reshape(bias, (1,) * (out.ndim - 1) + (-1,))
        return out, ln_output  # Output, layer_norm_output
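# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# block defined above is the flax.linen LayerNormMLP module of this file and
# that it is driven through the standard init/apply workflow; the shapes and
# PRNG handling below are assumptions for illustration only.
#
#   import jax
#   import jax.numpy as jnp
#
#   mlp = LayerNormMLP(intermediate_dim=4096, activations=('gelu', 'linear'))
#   x = jnp.zeros((seq_len, batch, hidden), jnp.float32)
#   params = mlp.init(jax.random.PRNGKey(0), x, deterministic=True)
#   out, ln_out = mlp.apply(params, x, deterministic=True)
# ---------------------------------------------------------------------------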
| TransformerEngine-main | transformer_engine/jax/flax/module.py |
#!/usr/bin/env python2
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import os.path
import setuptools
LOCAL_DIR = os.path.dirname(os.path.abspath(__file__))
# Get current __version__
version_locals = {}
execfile(os.path.join(LOCAL_DIR, 'digits', 'version.py'), {}, version_locals)
# Get requirements
requirements = []
with open(os.path.join(LOCAL_DIR, 'requirements.txt'), 'r') as infile:
for line in infile:
line = line.strip()
if line and not line[0] == '#': # ignore comments
requirements.append(line)
# Get test requirements
test_requirements = []
with open(os.path.join(LOCAL_DIR, 'requirements_test.txt'), 'r') as infile:
for line in infile:
line = line.strip()
if line and not line[0] == '#': # ignore comments
test_requirements.append(line)
setuptools.setup(
name='digits',
version=version_locals['__version__'],
description="NVIDIA's Deep Learning GPU Training System",
url='https://developer.nvidia.com/digits',
author='DIGITS Development Team',
author_email='[email protected]',
license='BSD',
classifiers=[
'Framework :: Flask',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2 :: Only',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='nvidia digits',
packages=setuptools.find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requirements,
extras_require={'test': test_requirements},
scripts=['digits-devserver'],
)
| DIGITS-master | setup.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
import os
from setuptools import setup, find_packages
from digits.extensions.view import GROUP as DIGITS_PLUGIN_GROUP
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="digits_gan_view_plugin",
version="0.0.1",
author="Greg Heinrich",
description=("A view plugin for GANs"),
long_description=read('README'),
license="Apache",
packages=find_packages(),
entry_points={
DIGITS_PLUGIN_GROUP: [
'class=digitsViewPluginGan:Visualization',
]
},
include_package_data=True,
install_requires=['imageio>=2.1.2'],
)
| DIGITS-master | plugins/view/gan/setup.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .view import Visualization
__all__ = ['Visualization']
| DIGITS-master | plugins/view/gan/digitsViewPluginGan/__init__.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from digits import utils
from digits.utils import subclass
from flask_wtf import Form
import wtforms.validators
@subclass
class ConfigForm(Form):
"""
A form used to configure gradient visualization
"""
def validate_file_path(form, field):
if not field.data:
pass
else:
# make sure the filesystem path exists
if not os.path.exists(field.data) and not os.path.isdir(field.data):
raise wtforms.validators.ValidationError('File does not exist or is not reachable')
else:
return True
gan_view_task_id = utils.forms.SelectField(
'Task',
choices=[
('grid', 'Grid'),
('mnist_encoder', 'MNIST Encoder'),
('celeba_encoder', 'CelebA Encoder'),
('animation', 'Animation'),
('attributes', 'CelebA get attributes'),
],
default='grid',
tooltip="Select a task."
)
attributes_file = utils.forms.StringField(
u'Attributes vector file',
validators=[
validate_file_path,
],
tooltip="Specify the path to a file that contains attributes vectors."
)
pass
| DIGITS-master | plugins/view/gan/digitsViewPluginGan/forms.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import pickle
import imageio
import numpy as np
import PIL.Image
import PIL.ImageDraw
import digits
from digits.utils import subclass, override
from digits.extensions.view.interface import VisualizationInterface
from .forms import ConfigForm
CONFIG_TEMPLATE = "templates/config_template.html"
HEADER_TEMPLATE = "templates/header_template.html"
VIEW_TEMPLATE = "templates/view_template.html"
CELEBA_ATTRIBUTES = """
5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs
Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair Bushy_Eyebrows
Chubby Double_Chin Eyeglasses Goatee Gray_Hair Heavy_Makeup High_Cheekbones
Male Mouth_Slightly_Open Mustache Narrow_Eyes No_Beard Oval_Face Pale_Skin
Pointy_Nose Receding_Hairline Rosy_Cheeks Sideburns Smiling Straight_Hair
Wavy_Hair Wearing_Earrings Wearing_Hat Wearing_Lipstick Wearing_Necklace
Wearing_Necktie Young
""".split()
@subclass
class Visualization(VisualizationInterface):
"""
A visualization extension to display the output of a GAN
"""
def __init__(self, dataset, **kwargs):
"""
Init
"""
# memorize view template for later use
extension_dir = os.path.dirname(os.path.abspath(__file__))
self.view_template = open(
os.path.join(extension_dir, VIEW_TEMPLATE), "r").read()
self.normalize = True
self.grid_size = 10
# view options
self.task_id = kwargs['gan_view_task_id']
self.attributes_file = kwargs['attributes_file']
@staticmethod
def get_config_form():
return ConfigForm()
@staticmethod
def get_config_template(form):
"""
parameters:
- form: form returned by get_config_form(). This may be populated
with values if the job was cloned
returns:
- (template, context) tuple
- template is a Jinja template to use for rendering config options
- context is a dictionary of context variables to use for rendering
the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(
os.path.join(extension_dir, CONFIG_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@override
def get_header_template(self):
"""
Implements get_header_template() method from view extension interface
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(
os.path.join(extension_dir, HEADER_TEMPLATE), "r").read()
context = {'task_id': self.task_id,
'cols': range(self.grid_size),
'rows': range(self.grid_size),
'animated_image': None}
if hasattr(self, 'animated_images'):
# create animated gif
string_buf = StringIO()
fmt = "gif"
imageio.mimsave(string_buf, self.animated_images, format=fmt)
data = string_buf.getvalue().encode('base64').replace('\n', '')
animated_image_html = 'data:image/%s;base64,%s' % (fmt, data)
context['animated_image'] = animated_image_html
return template, context
@staticmethod
def get_id():
return "image-gan"
@staticmethod
def get_title():
return "GAN"
def get_image_html(self, image):
# assume 8-bit
if self.normalize:
image -= image.min()
if image.max() > 0:
image /= image.max()
image *= 255
else:
# clip
image = image.clip(0, 255)
# convert to uint8
image = image.astype('uint8')
# convert to PIL image
channels = image.shape[2]
if channels == 1:
# drop channel axis
image = PIL.Image.fromarray(image[:, :, 0])
elif channels == 3:
image = PIL.Image.fromarray(image)
else:
raise ValueError("Unhandled number of channels: %d" % channels)
# image.save(fname)
image_html = digits.utils.image.embed_image_html(image)
return image_html
@override
def get_view_template(self, data):
"""
parameters:
- data: data returned by process_data()
returns:
- (template, context) tuple
- template is a Jinja template to use for rendering config
options
- context is a dictionary of context variables to use for
rendering the form
"""
context = {'task_id': self.task_id}
context.update(data)
if self.task_id in ['celeba_encoder', 'mnist_encoder']:
context.update({'task_id': 'encoder'})
return self.view_template, context
@override
def process_data(self, input_id, input_data, output_data):
"""
Process one inference and return data to visualize
"""
data = output_data[output_data.keys()[0]].astype('float32')
if self.task_id == 'grid':
col_id = int(input_id) // self.grid_size
row_id = int(input_id) % self.grid_size
image_html = self.get_image_html(data)
img_size = data.shape[0]
if img_size == 28:
# MNIST
if not hasattr(self, 'animated_images'):
self.animated_images = [None] * (self.grid_size ** 2)
self.animated_images[row_id * self.grid_size + col_id] = data.astype('uint8')
elif img_size == 64:
# CelebA
if not hasattr(self, 'animated_images'):
self.animated_images = [None] * (4 * self.grid_size - 4)
print("animated: %s" % repr(self.animated_images))
if (col_id == 0 or row_id == 0 or col_id == (self.grid_size - 1) or row_id == (self.grid_size - 1)):
if row_id == 0:
idx = col_id
elif col_id == (self.grid_size - 1):
idx = self.grid_size - 1 + row_id
elif row_id == (self.grid_size - 1):
idx = 3 * self.grid_size - 3 - col_id
else:
idx = 4 * self.grid_size - 4 - row_id
self.animated_images[idx] = data.astype('uint8')
print("set idx %d " % idx)
else:
raise ValueError("Unhandled image size: %d" % img_size)
return {'image': image_html,
'col_id': col_id,
'row_id': row_id,
'key': input_id}
elif self.task_id == 'mnist_encoder':
self.z_dim = 100
z = data[:self.z_dim]
image = data[self.z_dim:].reshape(28, 28)
input_data = input_data.astype('float32')
input_data = input_data[:, :, np.newaxis]
image = image[:, :, np.newaxis]
image_input_html = self.get_image_html(input_data)
image_output_html = self.get_image_html(image)
return {'z': z,
'image_input': image_input_html,
'image_output': image_output_html,
'key': input_id}
elif self.task_id == 'celeba_encoder':
self.z_dim = 100
z = data[:self.z_dim]
image = data[self.z_dim:].reshape(64, 64, 3)
input_data = input_data.astype('float32')
image_input_html = self.get_image_html(input_data)
image_output_html = self.get_image_html(image)
return {'z': z,
'image_input': image_input_html,
'image_output': image_output_html,
'key': input_id}
elif self.task_id == 'animation':
image_html = self.get_image_html(data)
if not hasattr(self, 'animated_images'):
self.animated_images = []
self.animated_images.append(data.astype('uint8'))
return {'image': image_html,
'key': input_id}
elif self.task_id == 'attributes':
self.z_dim = 100
z = data[:self.z_dim]
input_data = input_data.astype('float32')
image_input_html = self.get_image_html(input_data)
image = data[self.z_dim:].reshape(64, 64, 3)
image_output_html = self.get_image_html(image)
with open(self.attributes_file, 'rb') as f:
attributes_z = pickle.load(f)
# inner_products = np.inner(z, attributes_z)
inner_products = np.empty((40))
for i in range(40):
if True:
attr = attributes_z[i]
inner_products[i] = np.inner(z, attr) / np.linalg.norm(attr)
else:
inner_products[i] = 0
top_5_indices = np.argsort(inner_products)[::-1][:5]
top_5 = [(CELEBA_ATTRIBUTES[idx], "%.2f" % inner_products[idx]) for idx in top_5_indices]
return {'image_input': image_input_html,
'image_output': image_output_html,
'top5': top_5}
else:
raise ValueError("Unknown task: %s" % self.task_id)
| DIGITS-master | plugins/view/gan/digitsViewPluginGan/view.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import os
from setuptools import setup, find_packages
from digits.extensions.view import GROUP as DIGITS_PLUGIN_GROUP
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="digits_image_gradients_view_plugin",
version="0.0.1",
author="Greg Heinrich",
description=("A view plugin for image gradients"),
long_description=read('README'),
license="Apache",
packages=find_packages(),
entry_points={
DIGITS_PLUGIN_GROUP: [
'class=digitsViewPluginImageGradients:Visualization',
]
},
include_package_data=True,
)
| DIGITS-master | plugins/view/imageGradients/setup.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .view import Visualization
__all__ = ['Visualization']
| DIGITS-master | plugins/view/imageGradients/digitsViewPluginImageGradients/__init__.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from digits import utils
from digits.utils import subclass
from flask_wtf import Form
import wtforms
from wtforms import validators
@subclass
class ConfigForm(Form):
"""
A form used to configure gradient visualization
"""
arrow_color = wtforms.SelectField(
'Arrow color',
choices=[
('red', 'Red'),
('green', 'Green'),
('blue', 'Blue'),
],
default='red',
)
arrow_size = utils.forms.IntegerField(
'Arrow size (%)',
validators=[
validators.DataRequired(),
validators.NumberRange(min=1),
],
default=80,
tooltip="Expressed as percentage of input image"
)
| DIGITS-master | plugins/view/imageGradients/digitsViewPluginImageGradients/forms.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import numpy as np
import os
import PIL.Image
import PIL.ImageDraw
import digits
from digits.utils import subclass, override
from digits.extensions.view.interface import VisualizationInterface
from .forms import ConfigForm
CONFIG_TEMPLATE = "templates/config_template.html"
VIEW_TEMPLATE = "templates/view_template.html"
@subclass
class Visualization(VisualizationInterface):
"""
A visualization extension to display image gradient magnitude and direction
"""
def __init__(self, dataset, **kwargs):
"""
Init
"""
# arrow config
arrow_color = kwargs['arrow_color']
if arrow_color == "red":
self.color = (255, 0, 0)
elif arrow_color == "green":
self.color = (0, 255, 0)
elif arrow_color == "blue":
self.color = (0, 0, 255)
else:
raise ValueError("unknown color: %s" % arrow_color)
self.arrow_size = float(kwargs['arrow_size'])
# image dimensions (HWC)
image_shape = dataset.get_feature_dims()
self.height = image_shape[0]
self.width = image_shape[1]
# memorize view template for later use
extension_dir = os.path.dirname(os.path.abspath(__file__))
self.view_template = open(
os.path.join(extension_dir, VIEW_TEMPLATE), "r").read()
@staticmethod
def get_config_form():
return ConfigForm()
@staticmethod
def get_config_template(form):
"""
parameters:
- form: form returned by get_config_form(). This may be populated
with values if the job was cloned
returns:
- (template, context) tuple
- template is a Jinja template to use for rendering config options
- context is a dictionary of context variables to use for rendering
the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(
os.path.join(extension_dir, CONFIG_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
def get_id():
return "image-gradients"
@staticmethod
def get_title():
return "Gradients"
@override
def get_view_template(self, data):
"""
parameters:
- data: data returned by process_data()
returns:
- (template, context) tuple
- template is a Jinja template to use for rendering config
options
- context is a dictionary of context variables to use for
rendering the form
"""
return self.view_template, {'gradients': data['gradients'], 'image': data['image']}
@override
def process_data(self, input_id, input_data, output_data):
"""
Process one inference and return data to visualize
"""
# assume only one output and grayscale input
output_vector = output_data[output_data.keys()[0]]
grad = np.array([
output_vector[0] * self.width,
output_vector[1] * self.height])
grad_rotated_90 = np.array([-grad[1], grad[0]])
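        # grad_rotated_90 is the gradient vector rotated by 90 degrees; it is used
        # below to offset the two tail points of the caret perpendicular to the arrow.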
center = np.array([self.width / 2, self.height / 2])
arrow = grad * (self.arrow_size / 100.)
arrow_tip = center + arrow / 2
arrow_tail = center - arrow / 2
# arrow tail (anticlockwise)
at_acw = arrow_tail + 0.1 * grad_rotated_90
# arrow tail (clockwise)
at_cw = arrow_tail - 0.1 * grad_rotated_90
# draw an oriented caret
image = PIL.Image.fromarray(input_data).convert('RGB')
draw = PIL.ImageDraw.Draw(image)
draw.line(
(at_acw[0], at_acw[1], arrow_tip[0], arrow_tip[1]),
fill=self.color)
draw.line(
(at_cw[0], at_cw[1], arrow_tip[0], arrow_tip[1]),
fill=self.color)
draw.line(
(at_acw[0], at_acw[1], at_cw[0], at_cw[1]),
fill=self.color)
image_html = digits.utils.image.embed_image_html(image)
return {'image': image_html,
'gradients': [output_vector[0], output_vector[1]]}
| DIGITS-master | plugins/view/imageGradients/digitsViewPluginImageGradients/view.py |
import os
from setuptools import setup, find_packages
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from digits.extensions.view import GROUP as DIGITS_PLUGIN_GROUP
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="digits_text_classification_view_plugin",
version="0.0.1",
author="Greg Heinrich",
description=("A view plugin for text classification"),
long_description=read('README'),
license="BSD",
packages=find_packages(),
entry_points={
DIGITS_PLUGIN_GROUP: [
'class=digitsViewPluginTextClassification:Visualization',
]},
include_package_data=True,
)
| DIGITS-master | plugins/view/textClassification/setup.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .view import Visualization
__all__ = ['Visualization']
| DIGITS-master | plugins/view/textClassification/digitsViewPluginTextClassification/__init__.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from digits import utils
from digits.utils import subclass
from flask_wtf import Form
from wtforms import validators
@subclass
class ConfigForm(Form):
"""
A form used to configure text classification visualization
"""
max_classes = utils.forms.IntegerField(
u'Number of Top classes to show',
default=5,
validators=[
validators.DataRequired(),
validators.NumberRange(min=1),
],
tooltip='Specify how many classes to show in classification'
)
| DIGITS-master | plugins/view/textClassification/digitsViewPluginTextClassification/forms.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import numpy as np
from digits.utils import subclass, override
from digits.extensions.view.interface import VisualizationInterface
from .forms import ConfigForm
CONFIG_TEMPLATE = "templates/config_template.html"
VIEW_TEMPLATE = "templates/view_template.html"
@subclass
class Visualization(VisualizationInterface):
"""
A visualization extension to display the output
of a text classification network
"""
def __init__(self, dataset, **kwargs):
"""
Init
"""
# memorize view template for later use
extension_dir = os.path.dirname(os.path.abspath(__file__))
self.view_template = open(
os.path.join(extension_dir, VIEW_TEMPLATE), "r").read()
# memorize class labels
if 'class_labels' in dataset.extension_userdata:
self.class_labels = dataset.extension_userdata['class_labels']
else:
self.class_labels = None
# memorize alphabet
if 'alphabet' in dataset.extension_userdata:
self.alphabet = dataset.extension_userdata['alphabet']
self.alphabet_len = len(self.alphabet)
else:
raise RuntimeError("No alphabet found in dataset")
# view options
self.max_classes = kwargs['max_classes']
@staticmethod
def get_config_form():
return ConfigForm()
@staticmethod
def get_config_template(form):
"""
parameters:
- form: form returned by get_config_form(). This may be populated
with values if the job was cloned
returns:
- (template, context) tuple
- template is a Jinja template to use for rendering config options
- context is a dictionary of context variables to use for rendering
the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(
os.path.join(extension_dir, CONFIG_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
def get_id():
return "text-classification"
@staticmethod
def get_title():
return "Text Classification"
@override
def get_view_template(self, data):
"""
parameters:
- data: data returned by process_data()
returns:
- (template, context) tuple
- template is a Jinja template to use for rendering config
options
- context is a dictionary of context variables to use for
rendering the form
"""
return self.view_template, {'input': data['input'],
'predictions': data['predictions']}
@override
def process_data(self, input_id, input_data, output_data):
"""
Process one inference and return data to visualize
"""
# convert input data to a string of characters
input_characters = []
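        # indices are 1-based positions into the dataset alphabet; anything outside
        # the alphabet range (e.g. padding) is rendered as '.'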
for idx in input_data[0]:
c = self.alphabet[idx - 1] if idx < self.alphabet_len else '.'
input_characters.append(c)
input_string = ''.join(input_characters)
# assume the only output is from a probability distribution
scores = output_data[output_data.keys()[0]].astype('float32')
if np.max(scores) < 0:
# terminal layer is a logsoftmax
scores = np.exp(scores)
indices = (-scores).argsort()
predictions = [(self.class_labels[i] if self.class_labels else '#%d' % i,
round(100.0 * scores[i], 2)) for i in indices[:self.max_classes]]
return {'input': input_string, 'predictions': predictions}
| DIGITS-master | plugins/view/textClassification/digitsViewPluginTextClassification/view.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
import os
from setuptools import setup, find_packages
from digits.extensions.data import GROUP as DIGITS_PLUGIN_GROUP
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="digits_gan_data_plugin",
version="0.0.1",
author="Greg Heinrich",
description=("A data ingestion plugin for GANs"),
long_description=read('README'),
license="Apache",
packages=find_packages(),
entry_points={
DIGITS_PLUGIN_GROUP: [
'class=digitsDataPluginGan:DataIngestion',
]},
include_package_data=True,
)
| DIGITS-master | plugins/data/gan/setup.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .data import DataIngestion
__all__ = ['DataIngestion']
| DIGITS-master | plugins/data/gan/digitsDataPluginGan/__init__.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from flask_wtf import Form
from wtforms import HiddenField, TextAreaField, validators
from digits import utils
from digits.utils import subclass
@subclass
class DatasetForm(Form):
"""
A form used to create a Sunnybrook dataset
"""
def validate_file_path(form, field):
if not field.data:
pass
else:
# make sure the filesystem path exists
if not os.path.exists(field.data) and not os.path.isdir(field.data):
raise validators.ValidationError(
'File does not exist or is not reachable')
else:
return True
def validate_folder_path(form, field):
if not field.data:
pass
else:
# make sure the filesystem path exists
if not os.path.exists(field.data) or not os.path.isdir(field.data):
raise validators.ValidationError(
'Folder does not exist or is not reachable')
else:
return True
file_list = utils.forms.StringField(
u'File list (with attributes) in CelebA format',
validators=[
validate_file_path,
],
tooltip="Provide file list in CelebA format"
)
image_folder = utils.forms.StringField(
u'Image folder',
validators=[
validators.DataRequired(),
validate_folder_path,
],
tooltip="Specify the path to a folder of images."
)
center_crop_size = utils.forms.IntegerField(
u'Center crop size',
default=108,
validators=[
validators.NumberRange(min=0)
],
tooltip="Specify center crop."
)
resize = utils.forms.IntegerField(
u'Resize after crop',
default=64,
tooltip="Resize after crop."
)
@subclass
class InferenceForm(Form):
"""
A form used to perform inference on a text classification dataset
"""
def __init__(self, attributes, editable_attribute_ids, **kwargs):
super(InferenceForm, self).__init__(**kwargs)
self.attributes = attributes
self.editable_attribute_ids = editable_attribute_ids
def validate_file_path(form, field):
if not field.data:
pass
else:
# make sure the filesystem path exists
if not os.path.exists(field.data) and not os.path.isdir(field.data):
raise validators.ValidationError(
'File does not exist or is not reachable')
else:
return True
def validate_folder_path(form, field):
if not field.data:
pass
else:
# make sure the filesystem path exists
if not os.path.exists(field.data) or not os.path.isdir(field.data):
raise validators.ValidationError(
'Folder does not exist or is not reachable')
else:
return True
row_count = utils.forms.IntegerField(
u'Rows',
default=10,
validators=[
validators.NumberRange(min=1)
],
tooltip="Rows to generate in output grid."
)
dataset_type = utils.forms.SelectField(
'Dataset',
choices=[
('mnist', 'MNIST'),
('celeba', 'CelebA'),
],
default='celeba',
tooltip="Select a dataset."
)
task_id = utils.forms.SelectField(
'Task ID',
choices=[
('class', 'MNIST - Class sweep'),
('style', 'MNIST - Style sweep'),
('genimg', 'Generate single image'),
('attributes', 'CelebA - add/remove attributes'),
('enclist', 'CelebA - Encode list of images'),
('analogy', 'CelebA - Analogy'),
('animation', 'CelebA - Animation'),
],
default='class',
tooltip="Select a task to execute."
)
class_z_vector = utils.forms.StringField(
u'Z vector (leave blank for random)',
)
style_z1_vector = utils.forms.StringField(
u'Z1 vector (leave blank for random)',
)
style_z2_vector = utils.forms.StringField(
u'Z2 vector (leave blank for random)',
)
genimg_z_vector = utils.forms.StringField(
u'Z vector (leave blank for random)',
)
genimg_class_id = utils.forms.IntegerField(
u'Class ID',
default=0,
validators=[
validators.NumberRange(min=0, max=9)
],
tooltip="Class of image to generate (leave blank for CelebA)."
)
attributes_z_vector = utils.forms.StringField(
u'Z vector (leave blank for random)',
)
attributes_file = utils.forms.StringField(
u'Attributes vector file',
validators=[
validate_file_path,
],
tooltip="Specify the path to a file that contains attributes vectors."
)
attributes_params = HiddenField()
enc_file_list = utils.forms.StringField(
u'File list',
validators=[
validate_file_path,
],
tooltip="Specify the path to a file that contains a list of files."
)
enc_image_folder = utils.forms.StringField(
u'Image folder',
validators=[
validate_folder_path,
],
tooltip="Specify the path to a folder of images."
)
enc_num_images = utils.forms.IntegerField(
u'Number of images to encode',
default=100,
validators=[
validators.NumberRange(min=0)
],
tooltip="Max number of images to encode."
)
attributes_z1_vector = utils.forms.StringField(
u'Source Z vector (leave blank for random)',
)
attributes_z2_vector = utils.forms.StringField(
u'First Sink Z vector (leave blank for random)',
)
attributes_z3_vector = utils.forms.StringField(
u'Second Sink Z vector (leave blank for random)',
)
animation_num_transitions = utils.forms.IntegerField(
u'Number of transitions per image',
default=10,
validators=[
validators.NumberRange(min=1, max=100)
],
tooltip="Number of transitions between each of the specified images"
)
animation_z_vectors = TextAreaField(
u'z vectors (one per line)',
)
| DIGITS-master | plugins/data/gan/digitsDataPluginGan/forms.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import pickle
import numpy as np
from digits.utils import constants, override, image, subclass
from digits.extensions.data.interface import DataIngestionInterface
from .forms import DatasetForm, InferenceForm
DATASET_TEMPLATE = "templates/dataset_template.html"
INFERENCE_TEMPLATE = "templates/inference_template.html"
CELEBA_ALL_ATTRIBUTES = """
5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs
Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair Bushy_Eyebrows
Chubby Double_Chin Eyeglasses Goatee Gray_Hair Heavy_Makeup High_Cheekbones
Male Mouth_Slightly_Open Mustache Narrow_Eyes No_Beard Oval_Face Pale_Skin
Pointy_Nose Receding_Hairline Rosy_Cheeks Sideburns Smiling Straight_Hair
Wavy_Hair Wearing_Earrings Wearing_Hat Wearing_Lipstick Wearing_Necklace
Wearing_Necktie Young
""".split()
CELEBA_EDITABLE_ATTRIBUTES = [
'Bald', 'Black_Hair', 'Blond_Hair', 'Male', 'Smiling', 'Wearing_Lipstick', 'Young'
]
CELEBA_EDITABLE_ATTRIBUTES_IDS = [CELEBA_ALL_ATTRIBUTES.index(attr) for attr in CELEBA_EDITABLE_ATTRIBUTES]
def one_hot(val, depth):
x = np.zeros(depth)
x[val] = 1
return x
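# e.g. one_hot(2, 5) -> array([0., 0., 1., 0., 0.])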
def slerp(val, low, high):
"""Spherical interpolation. val has a range of 0 to 1."""
if val <= 0:
return low
elif val >= 1:
return high
omega = np.arccos(np.dot(low/np.linalg.norm(low), high/np.linalg.norm(high)))
so = np.sin(omega)
return np.sin((1.0-val)*omega) / so * low + np.sin(val*omega)/so * high
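# For unit vectors, slerp(0.5, low, high) is the midpoint of the great-circle arc
# between low and high; the early returns above pin slerp(0, ...) to low and
# slerp(1, ...) to high exactly.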
def parse_lines_of_floats(s):
return [[float(val) for val in line.split()] for line in s.splitlines()]
@subclass
class DataIngestion(DataIngestionInterface):
"""
A data ingestion extension for GANs
"""
# CONFIG = "mnist"
CONFIG = "celeba"
# CONFIG = "celeba_cond"
def __init__(self, is_inference_db=False, **kwargs):
super(DataIngestion, self).__init__(**kwargs)
if 'dataset_type' in self.userdata:
self.CONFIG = self.userdata['dataset_type']
self.z_dim = 100
if self.CONFIG == "mnist":
self.y_dim = 10
elif self.CONFIG == "celeba":
self.y_dim = 0
elif self.CONFIG == "celeba_cond":
self.y_dim = 40
self.userdata['is_inference_db'] = is_inference_db
self.input_dim = self.z_dim + self.y_dim
@override
def encode_entry(self, entry):
if not self.userdata['is_inference_db']:
filename = entry[0]
label = entry[1]
feature = self.scale_image(filename)
label = np.array(label).reshape(1, 1, len(label))
else:
if self.userdata['task_id'] in ['style',
'class',
'genimg',
'attributes',
'analogy',
'animation']:
feature = entry
label = np.array([0])
elif self.userdata['task_id'] == 'enclist':
filename = entry[0]
label = entry[1]
feature = self.scale_image(filename)
label = np.array(label).reshape(1, 1, len(label))
else:
raise NotImplementedError
return feature, label
def encode_PIL_Image(self, image):
# convert to numpy array
image = np.array(image)
# add channel axis if input is grayscale image
if image.ndim == 2:
image = image[..., np.newaxis]
elif image.ndim != 3:
raise ValueError("Unhandled number of channels: %d" % image.ndim)
# transpose to CHW
image = image.transpose(2, 0, 1)
return image
@staticmethod
@override
def get_category():
return "Images"
@staticmethod
@override
def get_id():
return "image-gan"
@staticmethod
@override
def get_dataset_form():
return DatasetForm()
@staticmethod
@override
def get_dataset_template(form):
"""
parameters:
- form: form returned by get_dataset_form(). This may be populated
with values if the job was cloned
return:
- (template, context) tuple
- template is a Jinja template to use for rendering dataset creation
options
- context is a dictionary of context variables to use for rendering
the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, DATASET_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@override
def get_inference_form(self):
return InferenceForm(CELEBA_ALL_ATTRIBUTES, CELEBA_EDITABLE_ATTRIBUTES_IDS)
@staticmethod
@override
def get_inference_template(form):
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
@override
def get_title():
return "GAN"
@override
def itemize_entries(self, stage):
entries = []
if not self.userdata['is_inference_db']:
if stage == constants.TRAIN_DB:
# read file list
with open(self.userdata['file_list']) as f:
lines = f.read().splitlines()
# skip first 2 lines (header)
for line in lines[2:]:
fields = line.split()
filename = fields[0]
# add full path
filename = os.path.join(self.userdata['image_folder'], filename)
label = [int(field) for field in fields[1:]]
entries.append((filename, label))
elif stage == constants.TEST_DB:
if self.userdata['task_id'] == 'style':
if self.userdata['style_z1_vector']:
z1 = np.array([float(v) for v in self.userdata['style_z1_vector'].split()])
else:
z1 = np.random.normal(size=(100,))
if self.userdata['style_z2_vector']:
z2 = np.array([float(v) for v in self.userdata['style_z2_vector'].split()])
else:
z2 = np.random.normal(size=(100,))
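                # style sweep: each row interpolates (spherically) between z1 and z2
                # while the ten columns cycle through the ten MNIST class conditions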
for val in np.linspace(0, 1, self.userdata['row_count']):
for c in range(10):
z_ = slerp(val, z1, z2)
feature = np.append(z_, one_hot(c, self.y_dim)).reshape((1, 1, self.input_dim))
entries.append(feature)
elif self.userdata['task_id'] == 'class':
if self.userdata['class_z_vector']:
z = np.array([float(v) for v in self.userdata['class_z_vector'].split()])
else:
z = np.random.normal(size=(100,))
for val in np.linspace(0, 1, self.userdata['row_count']):
for i in range(10):
c_0 = i
c_1 = (i + 1) % 10
feature_0 = np.append(z, one_hot(c_0, self.y_dim))
feature_1 = np.append(z, one_hot(c_1, self.y_dim))
feature = slerp(val, feature_0, feature_1).reshape((1, 1, self.input_dim))
entries.append(feature)
elif self.userdata['task_id'] == 'genimg':
c = int(self.userdata['genimg_class_id'])
if self.userdata['genimg_z_vector']:
z = np.array([float(v) for v in self.userdata['genimg_z_vector'].split()])
else:
z = np.random.normal(size=(100,))
if self.y_dim > 0:
z = np.append(z, one_hot(c, self.y_dim))
feature = z.reshape((1, 1, self.input_dim))
entries.append(feature)
elif self.userdata['task_id'] == 'attributes':
if self.userdata['attributes_z_vector']:
z = np.array([float(v) for v in self.userdata['attributes_z_vector'].split()])
else:
z = np.random.normal(size=(100,))
with open(self.userdata['attributes_file'], 'rb') as f:
attributes_z = pickle.load(f)
params = parse_lines_of_floats(self.userdata['attributes_params'])
for img_params in params:
z_img = np.copy(z)
for i, coeff in enumerate(img_params):
z_img += coeff * attributes_z[CELEBA_EDITABLE_ATTRIBUTES_IDS[i]]
entries.append(z_img.reshape((1, 1, self.input_dim)))
elif self.userdata['task_id'] == 'enclist':
with open(self.userdata['enc_file_list']) as f:
lines = f.read().splitlines()
# skip first 2 lines (header)
max_images = self.userdata['enc_num_images']
for line in lines[2:max_images + 2]:
fields = line.split()
filename = fields[0]
# add full path
filename = os.path.join(self.userdata['enc_image_folder'], filename)
label = [int(field) for field in fields[1:]]
entries.append((filename, label))
elif self.userdata['task_id'] == 'analogy':
if self.userdata['attributes_z1_vector']:
z1 = np.array([float(v) for v in self.userdata['attributes_z1_vector'].split()])
else:
z1 = np.random.normal(size=(100,))
if self.userdata['attributes_z2_vector']:
z2 = np.array([float(v) for v in self.userdata['attributes_z2_vector'].split()])
else:
z2 = np.random.normal(size=(100,))
if self.userdata['attributes_z3_vector']:
z3 = np.array([float(v) for v in self.userdata['attributes_z3_vector'].split()])
else:
z3 = np.random.normal(size=(100,))
# create analogy vector
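                    # z4 completes the parallelogram spanned by z1->z2 and z1->z3,
                    # i.e. it applies the z1->z2 "difference" on top of z3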
z4 = z2 + z3 - z1
grid_size = self.userdata['row_count']
# now interpolate across columns
for row in xrange(grid_size):
row_k = row / float(grid_size - 1)
z_left = slerp(row_k, z1, z3)
z_right = slerp(row_k, z2, z4)
entries.append(z_left.reshape((1, 1, self.input_dim)))
for col in xrange(1, grid_size - 1):
col_k = col / float(grid_size - 1)
z = slerp(col_k, z_left, z_right)
entries.append(z.reshape((1, 1, self.input_dim)))
entries.append(z_right.reshape((1, 1, self.input_dim)))
elif self.userdata['task_id'] == 'animation':
zs = parse_lines_of_floats(self.userdata['animation_z_vectors'])
zs = [np.array(z) for z in zs]
num_transitions = self.userdata['animation_num_transitions']
for i, z in enumerate(zs):
z_next = zs[(i + 1) % len(zs)]
for k in xrange(num_transitions):
z_ = slerp(float(k) / num_transitions, z, z_next)
entries.append(z_.reshape((1, 1, self.input_dim)))
else:
raise ValueError("Unknown task: %s" % self.userdata['task_id'])
return entries
def scale_image(self, filename):
im = np.array(image.load_image(filename))
# center crop
if self.userdata['center_crop_size']:
crop_size = int(self.userdata['center_crop_size'])
width, height = im.shape[0:2]
i = (width // 2) - crop_size // 2
j = (height // 2) - crop_size // 2
im = im[i:i + crop_size, j:j + crop_size, :]
# resize
if self.userdata['resize']:
resize = int(self.userdata['resize'])
im = image.resize_image(im, resize, resize, resize_mode='squash')
# transpose to CHW
feature = im.transpose(2, 0, 1)
return feature
| DIGITS-master | plugins/data/gan/digitsDataPluginGan/data.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import os
from setuptools import setup, find_packages
from digits.extensions.data import GROUP as DIGITS_PLUGIN_GROUP
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="digits_sunnybrook_data_plugin",
version="0.0.1",
author="Greg Heinrich",
description=("A data ingestion plugin for the Sunnybrook cardiac dataset"),
long_description=read('README'),
license="Apache",
packages=find_packages(),
entry_points={
DIGITS_PLUGIN_GROUP: [
'class=digitsDataPluginSunnybrook:DataIngestion',
]
},
include_package_data=True,
install_requires=['pydicom'],
)
| DIGITS-master | plugins/data/sunnybrook/setup.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .data import DataIngestion
__all__ = ['DataIngestion']
| DIGITS-master | plugins/data/sunnybrook/digitsDataPluginSunnybrook/__init__.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from digits import utils
from digits.utils import subclass
from flask_wtf import Form
from wtforms import validators
@subclass
class DatasetForm(Form):
"""
A form used to create a Sunnybrook dataset
"""
def validate_folder_path(form, field):
if not field.data:
pass
else:
# make sure the filesystem path exists
if not os.path.exists(field.data) or not os.path.isdir(field.data):
raise validators.ValidationError(
'Folder does not exist or is not reachable')
else:
return True
image_folder = utils.forms.StringField(
u'Image folder',
validators=[
validators.DataRequired(),
validate_folder_path,
],
tooltip="Specify the path to the image folder"
)
contour_folder = utils.forms.StringField(
u'Contour folder',
validators=[
validators.DataRequired(),
validate_folder_path,
],
tooltip="Specify the path to the contour folder"
)
channel_conversion = utils.forms.SelectField(
'Channel conversion',
choices=[
('RGB', 'RGB'),
('L', 'Grayscale'),
],
default='L',
tooltip="Perform selected channel conversion."
)
folder_pct_val = utils.forms.IntegerField(
u'% for validation',
default=10,
validators=[
validators.NumberRange(min=0, max=100)
],
tooltip="You can choose to set apart a certain percentage of images "
"from the training images for the validation set."
)
@subclass
class InferenceForm(Form):
def validate_file_path(form, field):
if not field.data:
pass
else:
# make sure the filesystem path exists
if not os.path.exists(field.data) and not os.path.isdir(field.data):
raise validators.ValidationError(
'File does not exist or is not reachable')
else:
return True
"""
A form used to perform inference on a text classification dataset
"""
test_image_file = utils.forms.StringField(
u'Image file',
validators=[
validate_file_path,
],
tooltip="Provide the (server) path to an image."
)
validation_record = utils.forms.SelectField(
'Record from validation set',
choices=[
('none', '- select record -'),
],
default='none',
tooltip="Test a record from the validation set."
)
| DIGITS-master | plugins/data/sunnybrook/digitsDataPluginSunnybrook/forms.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import cv2
import fnmatch
import math
import os
import random
import re
import pydicom
import numpy as np
from digits.utils import subclass, override, constants
from digits.utils.constants import COLOR_PALETTE_ATTRIBUTE
from digits.extensions.data.interface import DataIngestionInterface
from .forms import DatasetForm, InferenceForm
DATASET_TEMPLATE = "templates/dataset_template.html"
INFERENCE_TEMPLATE = "templates/inference_template.html"
# This is the subset of SAX series to use for Left Ventricle segmentation
# in the challenge training dataset
SAX_SERIES = {
"SC-HF-I-1": "0004",
"SC-HF-I-2": "0106",
"SC-HF-I-4": "0116",
"SC-HF-I-40": "0134",
"SC-HF-NI-3": "0379",
"SC-HF-NI-4": "0501",
"SC-HF-NI-34": "0446",
"SC-HF-NI-36": "0474",
"SC-HYP-1": "0550",
"SC-HYP-3": "0650",
"SC-HYP-38": "0734",
"SC-HYP-40": "0755",
"SC-N-2": "0898",
"SC-N-3": "0915",
"SC-N-40": "0944",
}
#
# Utility functions
#
def shrink_case(case):
toks = case.split("-")
def shrink_if_number(x):
try:
cvt = int(x)
return str(cvt)
except ValueError:
return x
return "-".join([shrink_if_number(t) for t in toks])
class Contour(object):
def __init__(self, ctr_path):
self.ctr_path = ctr_path
match = re.search(r"/([^/]*)/contours-manual/IRCCI-expert/IM-0001-(\d{4})-icontour-manual.txt", ctr_path)
self.case = shrink_case(match.group(1))
self.img_no = int(match.group(2))
def __str__(self):
return "<Contour for case %s, image %d>" % (self.case, self.img_no)
__repr__ = __str__
def get_all_contours(contour_path):
# walk the directory structure for all the contour files
contours = [
os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(contour_path)
for f in fnmatch.filter(files, 'IM-0001-*-icontour-manual.txt')
]
extracted = map(Contour, contours)
return extracted
def load_contour(contour, img_path):
filename = "IM-%s-%04d.dcm" % (SAX_SERIES[contour.case], contour.img_no)
full_path = os.path.join(img_path, contour.case, filename)
img = load_image(full_path)
ctrs = np.loadtxt(contour.ctr_path, delimiter=" ").astype(np.int)
label = np.zeros_like(img, dtype="uint8")
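    # rasterize the expert-drawn contour polygon into a binary mask: pixels inside
    # the contour are labelled 1 (left ventricle), everything else stays 0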
cv2.fillPoly(label, [ctrs], 1)
return img, label
def load_image(full_path):
f = pydicom.dcmread(full_path)
return f.pixel_array.astype(np.int)
@subclass
class DataIngestion(DataIngestionInterface):
"""
A data ingestion extension for the Sunnybrook dataset
"""
def __init__(self, is_inference_db=False, **kwargs):
super(DataIngestion, self).__init__(**kwargs)
self.userdata['is_inference_db'] = is_inference_db
self.userdata['class_labels'] = ['background', 'left ventricle']
# get list of contours
if 'contours' not in self.userdata:
contours = get_all_contours(self.contour_folder)
random.shuffle(contours)
self.userdata['contours'] = contours
else:
contours = self.userdata['contours']
# get number of validation entries
pct_val = int(self.folder_pct_val)
self.userdata['n_val_entries'] = int(math.floor(len(contours) * pct_val / 100))
# label palette (0->black (background), 1->white (foreground), others->black)
palette = [0, 0, 0, 255, 255, 255] + [0] * (254 * 3)
self.userdata[COLOR_PALETTE_ATTRIBUTE] = palette
@override
def encode_entry(self, entry):
if isinstance(entry, basestring):
img = load_image(entry)
label = np.array([0])
else:
img, label = load_contour(entry, self.image_folder)
label = label[np.newaxis, ...]
if self.userdata['channel_conversion'] == 'L':
feature = img[np.newaxis, ...]
elif self.userdata['channel_conversion'] == 'RGB':
feature = np.empty(shape=(3, img.shape[0], img.shape[1]), dtype=img.dtype)
# just copy the same data over the three color channels
feature[0] = img
feature[1] = img
feature[2] = img
return feature, label
@staticmethod
@override
def get_category():
return "Images"
@staticmethod
@override
def get_id():
return "image-sunnybrook"
@staticmethod
@override
def get_dataset_form():
return DatasetForm()
@staticmethod
@override
def get_dataset_template(form):
"""
parameters:
- form: form returned by get_dataset_form(). This may be populated
with values if the job was cloned
return:
- (template, context) tuple
- template is a Jinja template to use for rendering dataset creation
options
- context is a dictionary of context variables to use for rendering
the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, DATASET_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@override
def get_inference_form(self):
n_val_entries = self.userdata['n_val_entries']
form = InferenceForm()
for idx, ctr in enumerate(self.userdata['contours'][:n_val_entries]):
form.validation_record.choices.append((str(idx), ctr.case))
return form
@staticmethod
@override
def get_inference_template(form):
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
@override
def get_title():
return "Sunnybrook LV Segmentation"
@override
def itemize_entries(self, stage):
ctrs = self.userdata['contours']
n_val_entries = self.userdata['n_val_entries']
entries = []
if not self.userdata['is_inference_db']:
if stage == constants.TRAIN_DB:
entries = ctrs[n_val_entries:]
elif stage == constants.VAL_DB:
entries = ctrs[:n_val_entries]
elif stage == constants.TEST_DB:
if self.userdata['validation_record'] != 'none':
if self.userdata['test_image_file']:
raise ValueError("Specify either an image or a record "
"from the validation set.")
# test record from validation set
entries = [ctrs[int(self.validation_record)]]
else:
# test image file
entries = [self.userdata['test_image_file']]
return entries
| DIGITS-master | plugins/data/sunnybrook/digitsDataPluginSunnybrook/data.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import os
from setuptools import setup, find_packages
from digits.extensions.data import GROUP as DIGITS_PLUGIN_GROUP
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="digits_image_gradients_data_plugin",
version="0.0.1",
author="Greg Heinrich",
description=("A data ingestion plugin for image gradients"),
long_description=read('README'),
license="Apache",
packages=find_packages(),
entry_points={
DIGITS_PLUGIN_GROUP: [
'class=digitsDataPluginImageGradients:DataIngestion',
]
},
include_package_data=True,
)
| DIGITS-master | plugins/data/imageGradients/setup.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .data import DataIngestion
__all__ = ['DataIngestion']
| DIGITS-master | plugins/data/imageGradients/digitsDataPluginImageGradients/__init__.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from digits import utils
from digits.utils import subclass
from digits.utils.forms import validate_required_iff
from flask_wtf import Form
import wtforms
from wtforms import validators
@subclass
class DatasetForm(Form):
"""
A form used to create an image gradient dataset
"""
train_image_count = utils.forms.IntegerField(
'Train Image count',
validators=[
validators.DataRequired(),
validators.NumberRange(min=1),
],
default=1000,
tooltip="Number of images to create in training set"
)
val_image_count = utils.forms.IntegerField(
'Validation Image count',
validators=[
validators.Optional(),
validators.NumberRange(min=0),
],
default=250,
tooltip="Number of images to create in validation set"
)
test_image_count = utils.forms.IntegerField(
'Test Image count',
validators=[
validators.Optional(),
validators.NumberRange(min=0),
],
default=0,
tooltip="Number of images to create in validation set"
)
image_width = wtforms.IntegerField(
u'Image Width',
default=50,
validators=[validators.DataRequired()]
)
image_height = wtforms.IntegerField(
u'Image Height',
default=50,
validators=[validators.DataRequired()]
)
@subclass
class InferenceForm(Form):
"""
A form used to perform inference on a gradient regression model
"""
gradient_x = utils.forms.FloatField(
'Gradient (x)',
validators=[
validate_required_iff(test_image_count=None),
validators.NumberRange(min=-0.5, max=0.5),
],
tooltip="Specify a number between -0.5 and 0.5"
)
gradient_y = utils.forms.FloatField(
'Gradient (y)',
validators=[
validate_required_iff(test_image_count=None),
validators.NumberRange(min=-0.5, max=0.5),
],
tooltip="Specify a number between -0.5 and 0.5"
)
test_image_count = utils.forms.IntegerField(
'Test Image count',
validators=[
validators.Optional(),
validators.NumberRange(min=0),
],
tooltip="Number of images to create in test set"
)
| DIGITS-master | plugins/data/imageGradients/digitsDataPluginImageGradients/forms.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from digits.utils import subclass, override, constants
from digits.extensions.data.interface import DataIngestionInterface
from .forms import DatasetForm, InferenceForm
import numpy as np
import os
TEMPLATE = "templates/template.html"
INFERENCE_TEMPLATE = "templates/inference_template.html"
@subclass
class DataIngestion(DataIngestionInterface):
"""
A data ingestion extension for an image gradient dataset
"""
def __init__(self, is_inference_db=False, **kwargs):
super(DataIngestion, self).__init__(**kwargs)
self.userdata['is_inference_db'] = is_inference_db
# Used to calculate the gradients later
self.yy, self.xx = np.mgrid[:self.image_height,
:self.image_width].astype('float')
@override
def encode_entry(self, entry):
xslope, yslope = entry
label = np.array([xslope, yslope])
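        # the feature is a linear ramp centred on mid-gray:
        #   pixel(x, y) = a * (x - w/2) + b * (y - h/2) + 127.5
        # so the regression target (xslope, yslope) is recoverable from the image gradient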
a = xslope * 255 / self.image_width
b = yslope * 255 / self.image_height
image = a * (self.xx - self.image_width / 2) + b * (self.yy - self.image_height / 2) + 127.5
image = image.astype('uint8')
# convert to 3D tensors
image = image[np.newaxis, ...]
label = label[np.newaxis, np.newaxis, ...]
return image, label
@staticmethod
@override
def get_category():
return "Images"
@staticmethod
@override
def get_id():
return "image-gradients"
@staticmethod
@override
def get_dataset_form():
return DatasetForm()
@staticmethod
@override
def get_dataset_template(form):
"""
parameters:
- form: form returned by get_dataset_form(). This may be populated
with values if the job was cloned
return:
- (template, context) tuple
- template is a Jinja template to use for rendering dataset creation
options
- context is a dictionary of context variables to use for rendering
the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@override
def get_inference_form(self):
return InferenceForm()
@staticmethod
@override
def get_inference_template(form):
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
@override
def get_title():
return "Gradients"
@override
def itemize_entries(self, stage):
count = 0
if self.userdata['is_inference_db']:
if stage == constants.TEST_DB:
if self.test_image_count:
count = self.test_image_count
else:
return [(self.gradient_x, self.gradient_y)]
else:
if stage == constants.TRAIN_DB:
count = self.train_image_count
elif stage == constants.VAL_DB:
count = self.val_image_count
elif stage == constants.TEST_DB:
count = self.test_image_count
return [np.random.random_sample(2) - 0.5 for i in xrange(count)] if count > 0 else []
| DIGITS-master | plugins/data/imageGradients/digitsDataPluginImageGradients/data.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
import os
from setuptools import setup, find_packages
from digits.extensions.data import GROUP as DIGITS_PLUGIN_GROUP
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="digits_bAbI_data_plugin",
version="0.0.1",
author="Greg Heinrich",
description=("A data ingestion plugin for the bAbI dataset"),
long_description=read('README'),
license="Apache",
packages=find_packages(),
entry_points={
DIGITS_PLUGIN_GROUP: [
'class=digitsDataPluginBAbI:DataIngestion',
]},
include_package_data=True,
)
| DIGITS-master | plugins/data/bAbI/setup.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .data import DataIngestion
__all__ = ['DataIngestion']
| DIGITS-master | plugins/data/bAbI/digitsDataPluginBAbI/__init__.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from digits import utils
from digits.utils import subclass
from flask_wtf import Form
from wtforms import validators
@subclass
class DatasetForm(Form):
"""
    A form used to create a bAbI dataset
"""
def validate_folder_path(form, field):
if not field.data:
pass
else:
# make sure the filesystem path exists
if not os.path.exists(field.data) or not os.path.isdir(field.data):
raise validators.ValidationError(
'Folder does not exist or is not reachable')
else:
return True
story_folder = utils.forms.StringField(
u'Story folder',
validators=[
validators.DataRequired(),
validate_folder_path,
],
tooltip="Specify the path to a folder of stories - filenames are "
"expected to have this format: qa[1-N]*[train|test].txt"
)
task_id = utils.forms.SelectField(
'Task ID',
choices=[
('all', 'All'),
('1', '1'),
('2', '2'),
('3', '3'),
('4', '4'),
('5', '5'),
('6', '6'),
('7', '7'),
('8', '8'),
('9', '9'),
('10', '10'),
('11', '11'),
('12', '12'),
('13', '13'),
('14', '14'),
('15', '15'),
('16', '16'),
('17', '17'),
('18', '18'),
('19', '19'),
('20', '20'),
],
default='1',
tooltip="Select a task to train on or 'all' to train a joint model "
"on all tasks."
)
pct_val = utils.forms.IntegerField(
u'% for validation',
default=10,
validators=[
validators.NumberRange(min=0, max=100)
],
tooltip="You can choose to set apart a certain percentage of images "
"from the training images for the validation set."
)
@subclass
class InferenceForm(Form):
def validate_file_path(form, field):
if not field.data:
pass
else:
# make sure the filesystem path exists
if not os.path.exists(field.data) and not os.path.isdir(field.data):
raise validators.ValidationError(
'File does not exist or is not reachable')
else:
return True
"""
    A form used to perform inference on a bAbI dataset
"""
snippet = utils.forms.TextAreaField(
u'Story/Question',
tooltip="Write all sentences there and end with a question"
)
| DIGITS-master | plugins/data/bAbI/digitsDataPluginBAbI/forms.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import copy
import os
import string
import numpy as np
def encode_field(field, word_map, sentence_size, story_size):
"""
return a 2-D array with shape (story_size, sentence_size)
"""
x = np.zeros((story_size, sentence_size))
for i, sentence in enumerate(field):
if i >= story_size:
raise ValueError("Field '%s' is longer than max (%d)" %
(field, story_size))
for j, word in enumerate(sentence):
if j >= sentence_size:
raise ValueError("Sentence '%s' is longer than max (%d)" %
(field, sentence_size))
try:
idx = word_map[word]
except:
# assign to last index
idx = len(word_map) + 1
x[i, j] = idx
return x
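# A minimal usage sketch for encode_field (hypothetical values, not taken from
# any dataset): words map to their 1-based indices in word_map and unknown
# words fall back to len(word_map) + 1.
#
#   word_map = {'mary': 1, 'went': 2, 'to': 3, 'the': 4, 'kitchen': 5}
#   x = encode_field([['mary', 'went', 'to', 'the', 'kitchen']],
#                    word_map, sentence_size=6, story_size=2)
#   # x.shape == (2, 6); x[0] == [1, 2, 3, 4, 5, 0]; x[1] is all zeros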
def encode_sample(sample, word_map, sentence_size, story_size):
"""
return an encoded (feature, label) tuple
"""
story = encode_field(sample['story'], word_map, sentence_size, story_size)
question = encode_field(sample['question'], word_map, sentence_size, story_size)
answer = encode_field(sample['answer'], word_map, sentence_size, story_size)
feature = np.zeros((2, story_size, sentence_size))
feature[0] = story
feature[1] = question
label = answer[np.newaxis, :]
return feature, label
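# Shape sketch for encode_sample (illustrative): with story_size=T and
# sentence_size=S, the returned feature has shape (2, T, S) - feature[0] is
# the encoded story and feature[1] the encoded question - while the label has
# shape (1, T, S), i.e. the encoded answer with an extra leading axis.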
def find_files(path, task_id, train):
"""
Find files in specified path with filenames that
match {task}*{phase}.txt where:
task="qa{task_id}_" or "" if task_id==None
phase="train" if train==True or "test" otherwise
"""
task = "qa{}_".format(task_id) if task_id else ""
phase = "train" if train else "test"
files = []
for dirpath, dirnames, filenames in os.walk(path, followlinks=True):
for filename in filenames:
if task in filename and phase in filename:
files.append(os.path.join(dirpath, filename))
return files
def get_stats(dataset):
"""
return dataset statistics
"""
fields = [field for sample in dataset for field in sample.values()]
sentences = [sentence for field in fields for sentence in field]
words = sorted(set([word for sentence in sentences for word in sentence]))
return {'word_map': dict((word, i) for i, word in enumerate(words, start=1)),
'sentence_size': max([len(sentence) for sentence in sentences]),
'story_size': max([len(story) for story in fields])}
def parse_folder_phase(path, task_id, train):
"""
Returns a list of samples for a phase by aggregating all samples
from matching files
"""
phase_data = []
files = find_files(path, task_id, train)
for file in files:
phase_data.extend(parse_file(file))
return phase_data
def parse_file(filename):
with open(filename) as f:
return parse_lines(f.readlines())
def parse_lines(lines):
"""
Returns a list of samples from a collection of lines where each sample
is a dictionary with 'story', 'question', 'answer' keys. Every key
value is a list of words without punctuation.
"""
data = []
print "lines are %s" % lines
story = None
for line in lines:
# convert to lower case
line = line.lower()
# find line ID (new stories start with line ID = 1)
line_id, line = line.split(' ', 1)
try:
if int(line_id) == 1:
# new story
story = []
except:
if not story:
story = []
            # this isn't a line id, so re-integrate it into the line
line = "%s %s" % (line_id, line)
# is this a question?
if '?' in line:
items = remove_punctuation(line).split('\t')
question = items[0]
if len(items) > 1:
answer = items[1]
else:
answer = ''
# add to data
data.append({
'story': copy.copy(story),
'question': [question.split()],
'answer': [answer.split()],
})
else:
story.append(remove_punctuation(line).split())
return data
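# Example of what parse_lines produces (hypothetical bAbI-style input):
#
#   lines = ["1 Mary went to the kitchen.",
#            "2 Where is Mary?\tkitchen"]
#   parse_lines(lines)
#   # -> [{'story': [['mary', 'went', 'to', 'the', 'kitchen']],
#   #      'question': [['where', 'is', 'mary']],
#   #      'answer': [['kitchen']]}]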
def remove_punctuation(s):
return s.translate(string.maketrans("", ""), string.punctuation)
| DIGITS-master | plugins/data/bAbI/digitsDataPluginBAbI/utils.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from digits.utils import subclass, override, constants
from digits.extensions.data.interface import DataIngestionInterface
from .forms import DatasetForm, InferenceForm
from . import utils
DATASET_TEMPLATE = "templates/dataset_template.html"
INFERENCE_TEMPLATE = "templates/inference_template.html"
@subclass
class DataIngestion(DataIngestionInterface):
"""
A data ingestion extension for the bAbI dataset
"""
def __init__(self, is_inference_db=False, **kwargs):
super(DataIngestion, self).__init__(**kwargs)
self.userdata['is_inference_db'] = is_inference_db
if 'train_text_data' not in self.userdata:
# get task ID
try:
task_id = int(self.task_id)
except:
task_id = None
self.userdata['task_id'] = task_id
# get data - this doesn't scale well to huge datasets but this makes it
            # straightforward to create a mapping of words to indices and figure out max
# dimensions of stories and sentences
self.userdata['train_text_data'] = utils.parse_folder_phase(
self.story_folder, task_id, train=True)
self.userdata['stats'] = utils.get_stats(self.userdata['train_text_data'])
@override
def encode_entry(self, entry):
stats = self.userdata['stats']
return utils.encode_sample(entry, stats['word_map'], stats['sentence_size'], stats['story_size'])
@staticmethod
@override
def get_category():
return "Text"
@staticmethod
@override
def get_id():
return "text-babi"
@staticmethod
@override
def get_dataset_form():
return DatasetForm()
@staticmethod
@override
def get_dataset_template(form):
"""
parameters:
- form: form returned by get_dataset_form(). This may be populated
with values if the job was cloned
return:
- (template, context) tuple
- template is a Jinja template to use for rendering dataset creation
options
- context is a dictionary of context variables to use for rendering
the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, DATASET_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@override
def get_inference_form(self):
return InferenceForm()
@staticmethod
@override
def get_inference_template(form):
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
@override
def get_title():
return "bAbI"
@override
def itemize_entries(self, stage):
entries = []
if not self.userdata['is_inference_db']:
data = self.userdata['train_text_data']
n_val_entries = int(len(data)*self.pct_val/100)
if stage == constants.TRAIN_DB:
entries = data[n_val_entries:]
elif stage == constants.VAL_DB:
entries = data[:n_val_entries]
elif stage == constants.TEST_DB:
if not bool(self.snippet):
raise ValueError("You must write a story and a question")
entries = utils.parse_lines(str(self.snippet).splitlines())
return entries
| DIGITS-master | plugins/data/bAbI/digitsDataPluginBAbI/data.py |
import os
from setuptools import setup, find_packages
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from digits.extensions.data import GROUP as DIGITS_PLUGIN_GROUP
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="digits_text_classification_data_plugin",
version="0.0.1",
author="Greg Heinrich",
description=("A data ingestion plugin for text classification"),
long_description=read('README'),
license="BSD",
packages=find_packages(),
entry_points={
DIGITS_PLUGIN_GROUP: [
'class=digitsDataPluginTextClassification:DataIngestion',
]},
include_package_data=True,
)
| DIGITS-master | plugins/data/textClassification/setup.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .data import DataIngestion
__all__ = ['DataIngestion']
| DIGITS-master | plugins/data/textClassification/digitsDataPluginTextClassification/__init__.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from digits import utils
from digits.utils import subclass
from flask_wtf import Form
from wtforms import validators
def validate_file_path(form, field):
if not field.data:
pass
else:
# make sure the filesystem path exists
if not os.path.exists(field.data) and not os.path.isdir(field.data):
raise validators.ValidationError(
'File does not exist or is not reachable')
else:
return True
@subclass
class DatasetForm(Form):
"""
A form used to create a text classification dataset
"""
train_data_file = utils.forms.StringField(
u'Training data (.csv)',
validators=[
validators.DataRequired(),
validate_file_path,
],
tooltip="Data file in .csv format. There should be one sample "
"per line. On each line, the first field should be the "
"numerical class label. All subsequent fields will be "
"concatenated to produce a single string of characters, "
"up to the specified limit"
)
val_data_file = utils.forms.StringField(
u'Validation data (.csv)',
validators=[
validate_file_path,
],
tooltip="Data file in .csv format. There should be one sample "
"per line. On each line, the first field should be the "
"numerical class label. All subsequent fields will be "
"concatenated to produce a single string of characters, "
"up to the specified limit."
)
alphabet = utils.forms.StringField(
u'Dictionary',
default="abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+ =<>()[]{}",
tooltip="Alphabet to use when converting characters to IDs "
"(1-based indexing). Unknown characters will be all be "
"assigned the same next available ID. "
)
class_labels_file = utils.forms.StringField(
u'Class labels',
validators=[
validate_file_path,
],
tooltip="The 'i'th line of the file should give the string label "
"associated with the '(i-1)'th numeric label. (E.g. the "
"string label for the numeric label 0 is supposed to be "
"on line 1.)"
)
max_chars_per_sample = utils.forms.IntegerField(
u'Number of characters per sample',
default=1024,
validators=[
validators.Optional(),
validators.NumberRange(min=1),
],
tooltip="Specify how many characters to retain per sample. "
"Shorter samples will be padded. Longer samples will "
"be truncated. Leave blank and disable 'Enforce same "
"shape' below to use sample size in data file "
)
@subclass
class InferenceForm(Form):
"""
A form used to perform inference on a text classification dataset
"""
snippet = utils.forms.TextAreaField(
u'Snippet',
tooltip="Test a single snippet"
)
test_data_file = utils.forms.StringField(
u'Test data (.csv)',
validators=[
validate_file_path,
],
tooltip="Data file in .csv format. There should be one sample "
"per line. On each line, the first field should be the "
"numerical class label. All subsequent fields will be "
"concatenated to produce a single string of characters, "
"up to the specified limit"
)
| DIGITS-master | plugins/data/textClassification/digitsDataPluginTextClassification/forms.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import csv
import os
import random
import numpy as np
from digits.utils import subclass, override, constants
from digits.extensions.data.interface import DataIngestionInterface
from .forms import DatasetForm, InferenceForm
DATASET_TEMPLATE = "templates/dataset_template.html"
INFERENCE_TEMPLATE = "templates/inference_template.html"
@subclass
class DataIngestion(DataIngestionInterface):
"""
A data ingestion extension for text classification
"""
def __init__(self, is_inference_db=False, **kwargs):
super(DataIngestion, self).__init__(**kwargs)
self.userdata['is_inference_db'] = is_inference_db
# create character dict
self.userdata['cdict'] = {}
for i, c in enumerate(self.alphabet):
self.userdata['cdict'][c] = i + 1 # indices start at 1
# assign unknown characters to the same next available ID
self.userdata['unknown_char_id'] = len(self.alphabet) + 1
if self.class_labels_file:
with open(self.class_labels_file) as f:
self.userdata['class_labels'] = f.read().splitlines()
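    # Illustrative character encoding (hypothetical alphabet, not the default
    # above): with alphabet="abc", cdict is {'a': 1, 'b': 2, 'c': 3} and any
    # other character (e.g. 'z' or '!') is encoded as unknown_char_id == 4.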
@override
def encode_entry(self, entry):
label = np.array([int(entry['class'])])
# convert characters to numbers
sample = []
count = 0
max_chars = self.max_chars_per_sample
for field in entry['fields']:
for char in field.lower():
                if not max_chars or count < max_chars:
if char in self.userdata['cdict']:
num = self.userdata['cdict'][char]
else:
num = self.userdata['unknown_char_id']
sample.append(num)
count += 1
else:
break
# convert to numpy array
sample = np.array(sample, dtype='uint8')
# pad if necessary
if max_chars and count < max_chars:
sample = np.append(sample, np.full(
(max_chars - count),
fill_value=self.userdata['unknown_char_id'],
dtype='uint8'))
# make it a 3-D array
sample = sample[np.newaxis, np.newaxis, :]
return sample, label
@staticmethod
@override
def get_category():
return "Text"
@staticmethod
@override
def get_id():
return "text-classification"
@staticmethod
@override
def get_dataset_form():
return DatasetForm()
@staticmethod
@override
def get_dataset_template(form):
"""
parameters:
- form: form returned by get_dataset_form(). This may be populated
with values if the job was cloned
return:
- (template, context) tuple
- template is a Jinja template to use for rendering dataset creation
options
- context is a dictionary of context variables to use for rendering
the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, DATASET_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
@override
def get_inference_form():
return InferenceForm()
@staticmethod
@override
def get_inference_template(form):
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
@override
def get_title():
return "Classification"
@override
def itemize_entries(self, stage):
if not self.userdata['is_inference_db']:
if stage == constants.TRAIN_DB:
entries = self.read_csv(self.train_data_file)
elif stage == constants.VAL_DB:
if self.val_data_file:
entries = self.read_csv(self.val_data_file)
else:
entries = []
else:
entries = []
else:
if stage == constants.TEST_DB:
if not (bool(self.test_data_file) ^ bool(self.snippet)):
raise ValueError("You must provide either a data file or a snippet")
if self.test_data_file:
entries = self.read_csv(self.test_data_file, False)
elif self.snippet:
entries = [{'class': '0', 'fields': [self.snippet]}]
else:
entries = []
return entries
def read_csv(self, filename, shuffle=True):
entries = []
with open(filename) as f:
reader = csv.DictReader(f, fieldnames=['class'], restkey='fields')
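            # With fieldnames=['class'] and restkey='fields', DictReader maps
            # an illustrative row "3,Some title,Some body" to
            # {'class': '3', 'fields': ['Some title', 'Some body']}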
for row in reader:
entries.append(row)
        if shuffle:
            random.shuffle(entries)
return entries
| DIGITS-master | plugins/data/textClassification/digitsDataPluginTextClassification/data.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os.path
import re
from digits import test_utils
test_utils.skipIfNotFramework('none')
class TestVersion():
DEV_REGEX = re.compile('^(0|[1-9]\d*)\.(0|[1-9]\d*)-dev$')
# Copyright (c) Sindre Sorhus <[email protected]> (sindresorhus.com)
# The MIT License (MIT)
# https://github.com/sindresorhus/semver-regex/blob/v1.0.0/index.js
STANDARD_SEMVER_REGEX = re.compile(
'^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)'
'(-[\da-z\-]+(\.[\da-z\-]+)*)?(\+[\da-z\-]+(\.[\da-z\-]+)*)?$')
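    # Illustrative matches: '6.1.1' and '1.2.3-rc.1+build.5' satisfy the
    # standard semver pattern, while a development version such as '6.0-dev'
    # only matches DEV_REGEX.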
def check_version(self, version):
standard_match = re.match(self.STANDARD_SEMVER_REGEX, version)
dev_match = re.match(self.DEV_REGEX, version)
assert (standard_match is not None or dev_match is not None), \
'Version string "%s" is ill-formatted' % version
def test_package_version(self):
import digits
self.check_version(digits.__version__)
def test_import_version(self):
import digits.version
self.check_version(digits.version.__version__)
# Test a programmatic and reliable way to check the version
# python -c "execfile('digits/version.py'); print __version__"
def test_execfile_version(self):
import digits
filename = os.path.join(os.path.dirname(digits.__file__), 'version.py')
file_locals = {}
execfile(filename, {}, file_locals)
assert file_locals.keys() == ['__version__'], \
'version.py should only declare a single variable'
self.check_version(file_locals['__version__'])
# Make sure somebody doesn't overwrite the version in __init__.py
def test_package_version_matches_import_version(self):
import digits
import digits.version
assert digits.__version__ == digits.version.__version__
| DIGITS-master | digits/test_version.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import logging
import os.path
import platform
import re
import signal
import subprocess
import time
import flask
import gevent.event
from . import utils
from .config import config_value
from .status import Status, StatusCls
import digits.log
# NOTE: Increment this every time the pickled version changes
PICKLE_VERSION = 1
class Task(StatusCls):
"""
Base class for Tasks
A Task is a compute-heavy operation that runs in a separate executable
Communication is done by processing the stdout of the executable
"""
def __init__(self, job_dir, parents=None):
super(Task, self).__init__()
self.pickver_task = PICKLE_VERSION
self.job_dir = job_dir
self.job_id = os.path.basename(job_dir)
if parents is None:
self.parents = None
elif isinstance(parents, (list, tuple)):
self.parents = parents
elif isinstance(parents, Task):
self.parents = [parents]
else:
raise TypeError('parents is %s' % type(parents))
self.exception = None
self.traceback = None
self.aborted = gevent.event.Event()
self.set_logger()
self.p = None # Subprocess object for training
def __getstate__(self):
d = self.__dict__.copy()
if 'aborted' in d:
del d['aborted']
if 'logger' in d:
del d['logger']
if 'p' in d:
# Subprocess object for training is not pickleable
del d['p']
return d
def __setstate__(self, state):
self.__dict__ = state
self.aborted = gevent.event.Event()
self.set_logger()
def set_logger(self):
self.logger = digits.log.JobIdLoggerAdapter(
logging.getLogger('digits.webapp'),
{'job_id': self.job_id},
)
def name(self):
"""
Returns a string
"""
raise NotImplementedError
def html_id(self):
"""
Returns a string
"""
return 'task-%s' % id(self)
def on_status_update(self):
"""
Called when StatusCls.status.setter is used
"""
from digits.webapp import app, socketio
# Send socketio updates
message = {
'task': self.html_id(),
'update': 'status',
'status': self.status.name,
'css': self.status.css,
'show': (self.status in [Status.RUN, Status.ERROR]),
'running': self.status.is_running(),
}
with app.app_context():
message['html'] = flask.render_template('status_updates.html',
updates=self.status_history,
exception=self.exception,
traceback=self.traceback,
)
socketio.emit('task update',
message,
namespace='/jobs',
room=self.job_id,
)
from digits.webapp import scheduler
job = scheduler.get_job(self.job_id)
if job:
job.on_status_update()
def path(self, filename, relative=False):
"""
Returns a path to the given file
Arguments:
filename -- the requested file
Keyword arguments:
relative -- If False, return an absolute path to the file
If True, return a path relative to the jobs directory
"""
if not filename:
return None
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self.job_dir, filename)
if relative:
path = os.path.relpath(path, config_value('jobs_dir'))
return str(path).replace("\\", "/")
def ready_to_queue(self):
"""
Returns True if all parents are done
"""
if not self.parents:
return True
for parent in self.parents:
if parent.status != Status.DONE:
return False
return True
def offer_resources(self, resources):
"""
Check the available resources and return a set of requested resources
Arguments:
resources -- a copy of scheduler.resources
"""
raise NotImplementedError
def task_arguments(self, resources, env):
"""
Returns args used by subprocess.Popen to execute the task
Returns False if the args cannot be set properly
Arguments:
resources -- the resources assigned by the scheduler for this task
environ -- os.environ instance to run process in
"""
raise NotImplementedError
def before_run(self):
"""
Called before run() executes
Raises exceptions
"""
pass
def run(self, resources):
"""
Execute the task
Arguments:
resources -- the resources assigned by the scheduler for this task
"""
self.before_run()
env = os.environ.copy()
args = self.task_arguments(resources, env)
if not args:
self.logger.error('Could not create the arguments for Popen')
self.status = Status.ERROR
return False
# Convert them all to strings
args = [str(x) for x in args]
self.logger.info('%s task started.' % self.name())
self.status = Status.RUN
unrecognized_output = []
import sys
env['PYTHONPATH'] = os.pathsep.join(['.', self.job_dir, env.get('PYTHONPATH', '')] + sys.path)
# https://docs.python.org/2/library/subprocess.html#converting-argument-sequence
if platform.system() == 'Windows':
args = ' '.join(args)
self.logger.info('Task subprocess args: "{}"'.format(args))
else:
self.logger.info('Task subprocess args: "%s"' % ' '.join(args))
self.p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.job_dir,
close_fds=False if platform.system() == 'Windows' else True,
env=env,
)
try:
sigterm_time = None # When was the SIGTERM signal sent
sigterm_timeout = 2 # When should the SIGKILL signal be sent
while self.p.poll() is None:
for line in utils.nonblocking_readlines(self.p.stdout):
if self.aborted.is_set():
if sigterm_time is None:
# Attempt graceful shutdown
self.p.send_signal(signal.SIGTERM)
sigterm_time = time.time()
self.status = Status.ABORT
break
if line is not None:
# Remove whitespace
line = line.strip()
if line:
if not self.process_output(line):
self.logger.warning('%s unrecognized output: %s' % (self.name(), line.strip()))
unrecognized_output.append(line)
else:
time.sleep(0.05)
if sigterm_time is not None and (time.time() - sigterm_time > sigterm_timeout):
self.p.send_signal(signal.SIGKILL)
self.logger.warning('Sent SIGKILL to task "%s"' % self.name())
time.sleep(0.1)
time.sleep(0.01)
except:
self.p.terminate()
self.after_run()
raise
self.after_run()
if self.status != Status.RUN:
return False
elif self.p.returncode != 0:
self.logger.error('%s task failed with error code %d' % (self.name(), self.p.returncode))
if self.exception is None:
self.exception = 'error code %d' % self.p.returncode
if unrecognized_output:
if self.traceback is None:
self.traceback = '\n'.join(unrecognized_output)
else:
self.traceback = self.traceback + ('\n'.join(unrecognized_output))
self.after_runtime_error()
self.status = Status.ERROR
return False
else:
self.logger.info('%s task completed.' % self.name())
self.status = Status.DONE
return True
def abort(self):
"""
Abort the Task
"""
if self.status.is_running():
self.aborted.set()
def preprocess_output_digits(self, line):
"""
Takes line of output and parses it according to DIGITS's log format
Returns (timestamp, level, message) or (None, None, None)
"""
# NOTE: This must change when the logging format changes
# YYYY-MM-DD HH:MM:SS [LEVEL] message
match = re.match(r'(\S{10} \S{8}) \[(\w+)\s*\] (.*)$', line)
if match:
timestr = match.group(1)
timestamp = time.mktime(time.strptime(timestr, digits.log.DATE_FORMAT))
level = match.group(2)
message = match.group(3)
if level.startswith('DEB'):
level = 'debug'
elif level.startswith('INF'):
level = 'info'
elif level.startswith('WAR'):
level = 'warning'
elif level.startswith('ERR'):
level = 'error'
elif level.startswith('CRI'):
level = 'critical'
return (timestamp, level, message)
else:
return (None, None, None)
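    # Illustrative parse, assuming a line in digits.log's format:
    #   '2017-01-01 12:00:00 [INFO ] Loading dataset'
    # returns (timestamp, 'info', 'Loading dataset'), where timestamp is the
    # epoch time for 2017-01-01 12:00:00 (local time).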
def process_output(self, line):
"""
Process a line of output from the task
Returns True if the output was able to be processed
Arguments:
line -- a line of output
"""
raise NotImplementedError
def est_done(self):
"""
Returns the estimated time in seconds until the task is done
"""
if self.status != Status.RUN or self.progress == 0:
return None
elapsed = time.time() - self.status_history[-1][1]
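        # Worked example: 60s elapsed at progress 0.25 gives an estimate of
        # (1 - 0.25) * 60 // 0.25 = 180 seconds remaining.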
return (1 - self.progress) * elapsed // self.progress
def after_run(self):
"""
Called after run() executes
"""
pass
def after_runtime_error(self):
"""
Called after a runtime error during run()
"""
pass
def emit_progress_update(self):
"""
Call socketio.emit for task progress update, and trigger job progress update.
"""
from digits.webapp import socketio
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'progress',
'percentage': int(round(100 * self.progress)),
'eta': utils.time_filters.print_time_diff(self.est_done()),
},
namespace='/jobs',
room=self.job_id,
)
from digits.webapp import scheduler
job = scheduler.get_job(self.job_id)
if job:
job.emit_progress_update()
| DIGITS-master | digits/task.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
"""
Utility functions used in other test files
"""
from __future__ import absolute_import
import os
import unittest
from digits.config import config_value
def skipTest(message):
raise unittest.SkipTest(message)
def skipIfNotFramework(framework):
"""
Raises SkipTest if DIGITS_TEST_FRAMEWORK is set
to something other than framework
"""
key = 'DIGITS_TEST_FRAMEWORK'
if (key in os.environ and os.environ[key] != framework):
raise unittest.SkipTest(
'Skipping because %s is "%s" and not "%s"'
% (key, os.environ[key], framework))
class DatasetMixin(object):
"""
Mixin for dataset tests - skip if framework is not "none"
"""
@classmethod
def setUpClass(cls):
skipIfNotFramework('none')
# Call super.setUpClass() unless we're the last in the class hierarchy
supercls = super(DatasetMixin, cls)
if hasattr(supercls, 'setUpClass'):
supercls.setUpClass()
class CaffeMixin(object):
"""
Mixin for caffe tests
"""
FRAMEWORK = 'caffe'
@classmethod
def setUpClass(cls):
skipIfNotFramework('caffe')
# Call super.setUpClass() unless we're the last in the class hierarchy
supercls = super(CaffeMixin, cls)
if hasattr(supercls, 'setUpClass'):
supercls.setUpClass()
class TorchMixin(object):
"""
Mixin for torch tests
"""
FRAMEWORK = 'torch'
@classmethod
def setUpClass(cls):
skipIfNotFramework('torch')
if cls.FRAMEWORK == 'torch' and not config_value('torch')['enabled']:
raise unittest.SkipTest('Torch not found')
# Call super.setUpClass() unless we're the last in the class hierarchy
supercls = super(TorchMixin, cls)
if hasattr(supercls, 'setUpClass'):
supercls.setUpClass()
class TensorflowMixin(object):
"""
Mixin for tensorflow tests
"""
FRAMEWORK = 'tensorflow'
@classmethod
def setUpClass(cls):
skipIfNotFramework('tensorflow')
if cls.FRAMEWORK == 'tensorflow' and not config_value('tensorflow')['enabled']:
raise unittest.SkipTest('Tensorflow not found')
# Call super.setUpClass() unless we're the last in the class hierarchy
supercls = super(TensorflowMixin, cls)
if hasattr(supercls, 'setUpClass'):
supercls.setUpClass()
| DIGITS-master | digits/test_utils.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
__version__ = '6.1.1'
| DIGITS-master | digits/version.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import logging
import logging.handlers
import sys
from digits.config import config_value
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
class JobIdLogger(logging.Logger):
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
"""
Customizing it to set a default value for extra['job_id']
"""
rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
if 'job_id' not in rv.__dict__:
rv.__dict__['job_id'] = ''
return rv
class JobIdLoggerAdapter(logging.LoggerAdapter):
"""
Accepts an optional keyword argument: 'job_id'
You can use this in 2 ways:
1. On class initialization
adapter = JobIdLoggerAdapter(logger, {'job_id': job_id})
adapter.debug(msg)
2. On method invocation
adapter = JobIdLoggerAdapter(logger, {})
adapter.debug(msg, job_id=id)
"""
def process(self, msg, kwargs):
if 'job_id' in kwargs:
if 'extra' not in kwargs:
kwargs['extra'] = {}
kwargs['extra']['job_id'] = ' [%s]' % kwargs['job_id']
del kwargs['job_id']
elif 'job_id' in self.extra:
if 'extra' not in kwargs:
kwargs['extra'] = {}
kwargs['extra']['job_id'] = ' [%s]' % self.extra['job_id']
return msg, kwargs
def setup_logging():
# Set custom logger
logging.setLoggerClass(JobIdLogger)
formatter = logging.Formatter(
fmt="%(asctime)s%(job_id)s [%(levelname)-5s] %(message)s",
datefmt=DATE_FORMAT,
)
# digits logger
main_logger = logging.getLogger('digits')
main_logger.setLevel(logging.DEBUG)
# Log to stdout
stdoutHandler = logging.StreamHandler(sys.stdout)
stdoutHandler.setFormatter(formatter)
stdoutHandler.setLevel(logging.DEBUG)
main_logger.addHandler(stdoutHandler)
# digits.webapp logger
logfile_filename = config_value('log_file')['filename']
logfile_level = config_value('log_file')['level']
if logfile_filename is not None:
webapp_logger = logging.getLogger('digits.webapp')
webapp_logger.setLevel(logging.DEBUG)
# Log to file
fileHandler = logging.handlers.RotatingFileHandler(
logfile_filename,
maxBytes=(1024 * 1024 * 10), # 10 MB
backupCount=10,
)
fileHandler.setFormatter(formatter)
fileHandler.setLevel(logfile_level)
webapp_logger.addHandler(fileHandler)
# Useful shortcut for the webapp, which may set job_id
return JobIdLoggerAdapter(webapp_logger, {})
else:
print 'WARNING: log_file config option not found - no log file is being saved'
return JobIdLoggerAdapter(main_logger, {})
# Do it when this module is loaded
logger = setup_logging()
| DIGITS-master | digits/log.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import os.path
import pickle
import shutil
import threading
import time
import flask
from .status import Status, StatusCls
from digits.config import config_value
from digits.utils import sizeof_fmt, filesystem as fs
# NOTE: Increment this every time the pickled object changes
PICKLE_VERSION = 2
class Job(StatusCls):
"""
Base class
"""
SAVE_FILE = 'status.pickle'
@classmethod
def load(cls, job_id):
"""
Loads a Job in the given job_id
Returns the Job or throws an exception
"""
from digits.model.tasks import TrainTask
job_dir = os.path.join(config_value('jobs_dir'), job_id)
filename = os.path.join(job_dir, cls.SAVE_FILE)
with open(filename, 'rb') as savefile:
job = pickle.load(savefile)
# Reset this on load
job._dir = job_dir
for task in job.tasks:
task.job_dir = job_dir
if isinstance(task, TrainTask):
# can't call this until the job_dir is set
task.detect_snapshots()
task.detect_timeline_traces()
return job
def __init__(self, name, username, group='', persistent=True):
"""
Arguments:
name -- name of this job
username -- creator of this job
"""
super(Job, self).__init__()
# create a unique ID
self._id = '%s-%s' % (time.strftime('%Y%m%d-%H%M%S'), os.urandom(2).encode('hex'))
self._dir = os.path.join(config_value('jobs_dir'), self._id)
self._name = name
self.group = group
self.username = username
self.pickver_job = PICKLE_VERSION
self.tasks = []
self.exception = None
self._notes = None
self.event = threading.Event()
self.persistent = persistent
os.mkdir(self._dir)
def __getstate__(self):
"""
Used when saving a pickle file
"""
d = self.__dict__.copy()
# Isn't linked to state
if '_dir' in d:
del d['_dir']
if 'event' in d:
del d['event']
return d
def __setstate__(self, state):
"""
Used when loading a pickle file
"""
if 'username' not in state:
state['username'] = None
if 'group' not in state:
state['group'] = ''
self.__dict__ = state
self.persistent = True
def json_dict(self, detailed=False):
"""
Returns a dict used for a JSON representation
"""
d = {
'id': self.id(),
'name': self.name(),
'status': self.status.name,
}
if detailed:
d.update({
'directory': self.dir(),
})
return d
def id(self):
"""getter for _id"""
return self._id
def dir(self):
"""getter for _dir"""
return self._dir
def path(self, filename, relative=False):
"""
Returns a path to the given file
Arguments:
filename -- the requested file
Keyword arguments:
relative -- If False, return an absolute path to the file
If True, return a path relative to the jobs directory
"""
if not filename:
return None
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._dir, filename)
if relative:
path = os.path.relpath(path, config_value('jobs_dir'))
return str(path).replace("\\", "/")
def path_is_local(self, path):
"""assert that a path is local to _dir"""
p = os.path.normpath(path)
if os.path.isabs(p):
return False
if p.startswith('..'):
return False
return True
def name(self):
return self._name
def notes(self):
if hasattr(self, '_notes'):
return self._notes
else:
return None
def job_type(self):
"""
String representation for this class
virtual function
"""
raise NotImplementedError('Implement me!')
def status_of_tasks(self):
"""
Returns the status of the job's tasks
"""
job_status = self.status
if job_status in [Status.ABORT, Status.ERROR]:
return job_status
task_statuses = [t.status for t in self.tasks]
important_statuses = [
Status(s) for s in [
# Sorted by importance
Status.ERROR,
Status.ABORT,
Status.RUN,
Status.WAIT,
Status.INIT,
Status.DONE,
]
]
for s in important_statuses:
# Return if any task matches
if s in task_statuses:
return s
return Status(Status.DONE)
def runtime_of_tasks(self):
"""
Returns the time (in sec) between when the first task started and when the last task stopped
NOTE: this may not be what you're expecting if there was some WAIT time in-between
"""
starts = []
stops = []
for task in self.tasks:
for start_status, start_timestamp in task.status_history:
if start_status == Status.RUN:
starts.append(start_timestamp)
# Only search for stops if the task was started at some point
for stop_status, stop_timestamp in task.status_history:
if stop_status in [Status.DONE, Status.ABORT, Status.ERROR]:
stops.append(stop_timestamp)
break
if len(starts):
min_start = min(starts)
if len(stops):
max_stop = max(stops)
return max_stop - min_start
else:
return time.time() - min_start
else:
return 0
def on_status_update(self):
"""
Called when StatusCls.status.setter is used
"""
from digits.webapp import app, socketio
message = {
'update': 'status',
'status': self.status_of_tasks().name,
'css': self.status_of_tasks().css,
'running': self.status.is_running(),
'job_id': self.id(),
}
with app.app_context():
message['html'] = flask.render_template('status_updates.html', updates=self.status_history)
socketio.emit('job update',
message,
namespace='/jobs',
room=self.id(),
)
# send message to job_management room as well
socketio.emit('job update',
message,
namespace='/jobs',
room='job_management',
)
if not self.status.is_running():
if hasattr(self, 'event'):
# release threads that are waiting for job to complete
self.event.set()
def abort(self):
"""
Abort a job and stop all running tasks
"""
if self.status.is_running():
self.status = Status.ABORT
for task in self.tasks:
task.abort()
def save(self):
"""
Saves the job to disk as a pickle file
Suppresses errors, but returns False if something goes wrong
"""
try:
# use tmpfile so we don't abort during pickle dump (leading to EOFErrors)
tmpfile_path = self.path(self.SAVE_FILE + '.tmp')
with open(tmpfile_path, 'wb') as tmpfile:
pickle.dump(self, tmpfile)
shutil.move(tmpfile_path, self.path(self.SAVE_FILE))
return True
except KeyboardInterrupt:
pass
except Exception as e:
print 'Caught %s while saving job %s: %s' % (type(e).__name__, self.id(), e)
return False
def disk_size_fmt(self):
"""
return string representing job disk size
"""
size = fs.get_tree_size(self._dir)
return sizeof_fmt(size)
def get_progress(self):
"""
Return job progress computed from task progress
"""
if len(self.tasks) == 0:
return 0.0
progress = 0.0
for task in self.tasks:
progress += task.progress
progress /= len(self.tasks)
return progress
def emit_progress_update(self):
"""
Call socketio.emit for task job update, by considering task progress.
"""
progress = self.get_progress()
from digits.webapp import socketio
socketio.emit('job update',
{
'job_id': self.id(),
'update': 'progress',
'percentage': int(round(100 * progress)),
},
namespace='/jobs',
room='job_management'
)
def emit_attribute_changed(self, attribute, value):
"""
Call socketio.emit for task job update
"""
from digits.webapp import socketio
socketio.emit('job update',
{
'job_id': self.id(),
'update': 'attribute',
'attribute': attribute,
'value': value,
},
namespace='/jobs',
room='job_management'
)
def wait_completion(self):
"""
Wait for the job to complete
"""
# if job was loaded from disk (which is the only case
# when the 'event' attribute should be missing) then
# assume it has completed already (done, errored or interrupted)
if hasattr(self, 'event'):
self.event.wait()
def is_persistent(self):
"""
Returns whether job is persistent
"""
return self.persistent
def is_read_only(self):
"""
Returns False if this job can be edited
"""
return not self.is_persistent()
| DIGITS-master | digits/job.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import flask
from flask_socketio import SocketIO
from gevent import monkey
monkey.patch_all()
from .config import config_value # noqa
from digits import utils # noqa
from digits.utils import filesystem as fs # noqa
from digits.utils.store import StoreCache # noqa
import digits.scheduler # noqa
# Create Flask, Scheduler and SocketIO objects
url_prefix = config_value('url_prefix')
app = flask.Flask(__name__, static_url_path=url_prefix+'/static')
app.config['DEBUG'] = True
# Disable CSRF checking in WTForms
app.config['WTF_CSRF_ENABLED'] = False
# This is still necessary for SocketIO
app.config['SECRET_KEY'] = os.urandom(12).encode('hex')
app.url_map.redirect_defaults = False
app.config['URL_PREFIX'] = url_prefix
socketio = SocketIO(app, async_mode='gevent', path=url_prefix+'/socket.io')
app.config['store_cache'] = StoreCache()
app.config['store_url_list'] = config_value('model_store')['url_list']
scheduler = digits.scheduler.Scheduler(config_value('gpu_list'), True)
# Register filters and views
app.jinja_env.globals['server_name'] = config_value('server_name')
app.jinja_env.globals['server_version'] = digits.__version__
app.jinja_env.globals['caffe_version'] = config_value('caffe')['version']
app.jinja_env.globals['caffe_flavor'] = config_value('caffe')['flavor']
app.jinja_env.globals['dir_hash'] = fs.dir_hash(
os.path.join(os.path.dirname(digits.__file__), 'static'))
app.jinja_env.filters['print_time'] = utils.time_filters.print_time
app.jinja_env.filters['print_time_diff'] = utils.time_filters.print_time_diff
app.jinja_env.filters['print_time_since'] = utils.time_filters.print_time_since
app.jinja_env.filters['sizeof_fmt'] = utils.sizeof_fmt
app.jinja_env.filters['has_permission'] = utils.auth.has_permission
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
import digits.views # noqa
app.register_blueprint(digits.views.blueprint,
url_prefix=url_prefix)
import digits.dataset.views # noqa
app.register_blueprint(digits.dataset.views.blueprint,
url_prefix=url_prefix+'/datasets')
import digits.dataset.generic.views # noqa
app.register_blueprint(digits.dataset.generic.views.blueprint,
url_prefix=url_prefix+'/datasets/generic')
import digits.dataset.images.views # noqa
app.register_blueprint(digits.dataset.images.views.blueprint,
url_prefix=url_prefix+'/datasets/images')
import digits.dataset.images.classification.views # noqa
app.register_blueprint(digits.dataset.images.classification.views.blueprint,
url_prefix=url_prefix+'/datasets/images/classification')
import digits.dataset.images.generic.views # noqa
app.register_blueprint(digits.dataset.images.generic.views.blueprint,
url_prefix=url_prefix+'/datasets/images/generic')
import digits.model.views # noqa
app.register_blueprint(digits.model.views.blueprint,
url_prefix=url_prefix+'/models')
import digits.model.images.views # noqa
app.register_blueprint(digits.model.images.views.blueprint,
url_prefix=url_prefix+'/models/images')
import digits.model.images.classification.views # noqa
app.register_blueprint(digits.model.images.classification.views.blueprint,
url_prefix=url_prefix+'/models/images/classification')
import digits.model.images.generic.views # noqa
app.register_blueprint(digits.model.images.generic.views.blueprint,
url_prefix=url_prefix+'/models/images/generic')
import digits.pretrained_model.views # noqa
app.register_blueprint(digits.pretrained_model.views.blueprint,
url_prefix=url_prefix+'/pretrained_models')
import digits.store.views # noqa
app.register_blueprint(digits.store.views.blueprint,
url_prefix=url_prefix+'/store')
def username_decorator(f):
from functools import wraps
@wraps(f)
def decorated(*args, **kwargs):
this_username = flask.request.cookies.get('username', None)
app.jinja_env.globals['username'] = this_username
return f(*args, **kwargs)
return decorated
for endpoint, function in app.view_functions.iteritems():
app.view_functions[endpoint] = username_decorator(function)
# Setup the environment
scheduler.load_past_jobs()
| DIGITS-master | digits/webapp.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .version import __version__
__all__ = ['__version__']
| DIGITS-master | digits/__init__.py |
#!/usr/bin/env python2
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import argparse
import ctypes
import platform
class c_cudaDeviceProp(ctypes.Structure):
"""
Passed to cudart.cudaGetDeviceProperties()
"""
_fields_ = [
('name', ctypes.c_char * 256),
('totalGlobalMem', ctypes.c_size_t),
('sharedMemPerBlock', ctypes.c_size_t),
('regsPerBlock', ctypes.c_int),
('warpSize', ctypes.c_int),
('memPitch', ctypes.c_size_t),
('maxThreadsPerBlock', ctypes.c_int),
('maxThreadsDim', ctypes.c_int * 3),
('maxGridSize', ctypes.c_int * 3),
('clockRate', ctypes.c_int),
('totalConstMem', ctypes.c_size_t),
('major', ctypes.c_int),
('minor', ctypes.c_int),
('textureAlignment', ctypes.c_size_t),
('texturePitchAlignment', ctypes.c_size_t),
('deviceOverlap', ctypes.c_int),
('multiProcessorCount', ctypes.c_int),
('kernelExecTimeoutEnabled', ctypes.c_int),
('integrated', ctypes.c_int),
('canMapHostMemory', ctypes.c_int),
('computeMode', ctypes.c_int),
('maxTexture1D', ctypes.c_int),
('maxTexture1DMipmap', ctypes.c_int),
('maxTexture1DLinear', ctypes.c_int),
('maxTexture2D', ctypes.c_int * 2),
('maxTexture2DMipmap', ctypes.c_int * 2),
('maxTexture2DLinear', ctypes.c_int * 3),
('maxTexture2DGather', ctypes.c_int * 2),
('maxTexture3D', ctypes.c_int * 3),
('maxTexture3DAlt', ctypes.c_int * 3),
('maxTextureCubemap', ctypes.c_int),
('maxTexture1DLayered', ctypes.c_int * 2),
('maxTexture2DLayered', ctypes.c_int * 3),
('maxTextureCubemapLayered', ctypes.c_int * 2),
('maxSurface1D', ctypes.c_int),
('maxSurface2D', ctypes.c_int * 2),
('maxSurface3D', ctypes.c_int * 3),
('maxSurface1DLayered', ctypes.c_int * 2),
('maxSurface2DLayered', ctypes.c_int * 3),
('maxSurfaceCubemap', ctypes.c_int),
('maxSurfaceCubemapLayered', ctypes.c_int * 2),
('surfaceAlignment', ctypes.c_size_t),
('concurrentKernels', ctypes.c_int),
('ECCEnabled', ctypes.c_int),
('pciBusID', ctypes.c_int),
('pciDeviceID', ctypes.c_int),
('pciDomainID', ctypes.c_int),
('tccDriver', ctypes.c_int),
('asyncEngineCount', ctypes.c_int),
('unifiedAddressing', ctypes.c_int),
('memoryClockRate', ctypes.c_int),
('memoryBusWidth', ctypes.c_int),
('l2CacheSize', ctypes.c_int),
('maxThreadsPerMultiProcessor', ctypes.c_int),
('streamPrioritiesSupported', ctypes.c_int),
('globalL1CacheSupported', ctypes.c_int),
('localL1CacheSupported', ctypes.c_int),
('sharedMemPerMultiprocessor', ctypes.c_size_t),
('regsPerMultiprocessor', ctypes.c_int),
('managedMemSupported', ctypes.c_int),
('isMultiGpuBoard', ctypes.c_int),
('multiGpuBoardGroupID', ctypes.c_int),
# Extra space for new fields in future toolkits
('__future_buffer', ctypes.c_int * 128),
# added later with cudart.cudaDeviceGetPCIBusId
# (needed by NVML)
('pciBusID_str', ctypes.c_char * 16),
]
class struct_c_nvmlDevice_t(ctypes.Structure):
"""
Handle to a device in NVML
"""
pass # opaque handle
c_nvmlDevice_t = ctypes.POINTER(struct_c_nvmlDevice_t)
class c_nvmlMemory_t(ctypes.Structure):
"""
Passed to nvml.nvmlDeviceGetMemoryInfo()
"""
_fields_ = [
('total', ctypes.c_ulonglong),
('free', ctypes.c_ulonglong),
('used', ctypes.c_ulonglong),
# Extra space for new fields in future toolkits
('__future_buffer', ctypes.c_ulonglong * 8),
]
class c_nvmlUtilization_t(ctypes.Structure):
"""
Passed to nvml.nvmlDeviceGetUtilizationRates()
"""
_fields_ = [
('gpu', ctypes.c_uint),
('memory', ctypes.c_uint),
# Extra space for new fields in future toolkits
('__future_buffer', ctypes.c_uint * 8),
]
def get_library(name):
"""
Returns a ctypes.CDLL or None
"""
try:
if platform.system() == 'Windows':
return ctypes.windll.LoadLibrary(name)
else:
return ctypes.cdll.LoadLibrary(name)
except OSError:
pass
return None
def get_cudart():
"""
Return the ctypes.DLL object for cudart or None
"""
if platform.system() == 'Windows':
arch = platform.architecture()[0]
for ver in range(90, 50, -5):
cudart = get_library('cudart%s_%d.dll' % (arch[:2], ver))
if cudart is not None:
return cudart
elif platform.system() == 'Darwin':
for major in xrange(9, 5, -1):
for minor in (5, 0):
cudart = get_library('libcudart.%d.%d.dylib' % (major, minor))
if cudart is not None:
return cudart
return get_library('libcudart.dylib')
else:
for major in xrange(9, 5, -1):
for minor in (5, 0):
cudart = get_library('libcudart.so.%d.%d' % (major, minor))
if cudart is not None:
return cudart
return get_library('libcudart.so')
return None
def get_nvml():
"""
    Return the ctypes.DLL object for NVML or None
"""
if platform.system() == 'Windows':
return get_library('nvml.dll')
else:
for name in (
'libnvidia-ml.so.1',
'libnvidia-ml.so',
'nvml.so'):
nvml = get_library(name)
if nvml is not None:
return nvml
return None
devices = None
def get_devices(force_reload=False):
"""
Returns a list of c_cudaDeviceProp's
Prints an error and returns None if something goes wrong
Keyword arguments:
force_reload -- if False, return the previously loaded list of devices
"""
global devices
if not force_reload and devices is not None:
# Only query CUDA once
return devices
devices = []
cudart = get_cudart()
if cudart is None:
return []
# check CUDA version
cuda_version = ctypes.c_int()
rc = cudart.cudaRuntimeGetVersion(ctypes.byref(cuda_version))
if rc != 0:
print 'cudaRuntimeGetVersion() failed with error #%s' % rc
return []
if cuda_version.value < 6050:
print 'ERROR: Cuda version must be >= 6.5, not "%s"' % cuda_version.value
return []
# get number of devices
num_devices = ctypes.c_int()
rc = cudart.cudaGetDeviceCount(ctypes.byref(num_devices))
if rc != 0:
print 'cudaGetDeviceCount() failed with error #%s' % rc
return []
# query devices
for x in xrange(num_devices.value):
properties = c_cudaDeviceProp()
rc = cudart.cudaGetDeviceProperties(ctypes.byref(properties), x)
if rc == 0:
pciBusID_str = ' ' * 16
# also save the string representation of the PCI bus ID
rc = cudart.cudaDeviceGetPCIBusId(ctypes.c_char_p(pciBusID_str), 16, x)
if rc == 0:
properties.pciBusID_str = pciBusID_str
devices.append(properties)
else:
print 'cudaGetDeviceProperties() failed with error #%s' % rc
del properties
return devices
def get_device(device_id):
"""
Returns a c_cudaDeviceProp
"""
return get_devices()[int(device_id)]
def get_nvml_info(device_id):
"""
Gets info from NVML for the given device
Returns a dict of dicts from different NVML functions
"""
device = get_device(device_id)
if device is None:
return None
nvml = get_nvml()
if nvml is None:
return None
rc = nvml.nvmlInit()
if rc != 0:
raise RuntimeError('nvmlInit() failed with error #%s' % rc)
try:
# get device handle
handle = c_nvmlDevice_t()
rc = nvml.nvmlDeviceGetHandleByPciBusId(ctypes.c_char_p(device.pciBusID_str), ctypes.byref(handle))
if rc != 0:
raise RuntimeError('nvmlDeviceGetHandleByPciBusId() failed with error #%s' % rc)
# Grab info for this device from NVML
info = {}
memory = c_nvmlMemory_t()
rc = nvml.nvmlDeviceGetMemoryInfo(handle, ctypes.byref(memory))
if rc == 0:
info['memory'] = {
'total': memory.total,
'used': memory.used,
'free': memory.free,
}
utilization = c_nvmlUtilization_t()
rc = nvml.nvmlDeviceGetUtilizationRates(handle, ctypes.byref(utilization))
if rc == 0:
info['utilization'] = {
'gpu': utilization.gpu,
'memory': utilization.memory, # redundant
}
temperature = ctypes.c_int()
rc = nvml.nvmlDeviceGetTemperature(handle, 0, ctypes.byref(temperature))
if rc == 0:
info['temperature'] = temperature.value
return info
finally:
rc = nvml.nvmlShutdown()
if rc != 0:
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='DIGITS Device Query')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
if not len(get_devices()):
print 'No devices found.'
for i, device in enumerate(get_devices()):
print 'Device #%d:' % i
print '>>> CUDA attributes:'
for name, t in device._fields_:
if name in ['__future_buffer']:
continue
if not args.verbose and name not in [
'name', 'totalGlobalMem', 'clockRate', 'major', 'minor', ]:
continue
if 'c_int_Array' in t.__name__:
val = ','.join(str(v) for v in getattr(device, name))
else:
val = getattr(device, name)
print ' %-28s %s' % (name, val)
info = get_nvml_info(i)
if info is not None:
print '>>> NVML attributes:'
nvml_fmt = ' %-28s %s'
if 'memory' in info:
print nvml_fmt % ('Total memory',
'%s MB' % (info['memory']['total'] / 2**20,))
print nvml_fmt % ('Used memory',
'%s MB' % (info['memory']['used'] / 2**20,))
if args.verbose:
print nvml_fmt % ('Free memory',
'%s MB' % (info['memory']['free'] / 2**20,))
if 'utilization' in info:
print nvml_fmt % ('Memory utilization',
'%s%%' % info['utilization']['memory'])
print nvml_fmt % ('GPU utilization',
'%s%%' % info['utilization']['gpu'])
if 'temperature' in info:
print nvml_fmt % ('Temperature',
'%s C' % info['temperature'])
print
| DIGITS-master | digits/device_query.py |
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import pickle
import tempfile
from .job import Job
from .status import Status
from digits import test_utils
test_utils.skipIfNotFramework('none')
class TestStatus():
def test_run_too_soon(self):
job = Job(name='testsuite-job', username='digits-testsuite')
job.status = Status.WAIT
job.status = Status.RUN
# Status.WAIT should be removed so the len should be 2 rather
# than 3.
assert len(job.status_history) == 2, 'history length should be 2'
def test_empty_history(self):
job = Job(name='testsuite-job', username='digits-testsuite')
job.status = Status.WAIT
job.status = Status.RUN
job.status_history = []
# An empty history should not happen, but if it did, the value
# should be Status.INIT.
assert job.status == Status.INIT, 'status should be Status.INIT'
def test_set_dict(self):
job = Job(name='testsuite-job', username='digits-testsuite')
# Testing some untested cases in set_dict()
job.status = Status.ERROR
assert job.status.css == 'danger', 'status.css should be "danger".'
job.status = '404'
assert job.status.css == 'default', 'status.css should be "default".'
def test_equality(self):
s = Status(Status.INIT)
# Testing __eq__
assert (s == Status.INIT), 'should be true.'
assert (s == 'I'), 'should be true.'
assert not (s == 7), 'should be false.'
assert not (s != Status.INIT), 'should be false.'
assert not (s != 'I'), 'should be false.'
assert (s != 7), 'should be true.'
def test_pickle(self):
# Testing __setstate__ and __getstate__
s = Status(Status.INIT)
s = Status.WAIT
loaded_status = None
tmpfile_fd, tmpfile_path = tempfile.mkstemp(suffix='.p')
with open(tmpfile_path, 'wb') as tmpfile:
pickle.dump(s, tmpfile)
with open(tmpfile_path, 'rb') as tmpfile:
loaded_status = pickle.load(tmpfile)
os.close(tmpfile_fd)
os.remove(tmpfile_path)
assert loaded_status == Status.WAIT, 'status should be WAIT'
def test_str(self):
# Testing __str__
s = Status(Status.INIT)
s = Status.WAIT
assert str(s) == 'W', 'should be W'
| DIGITS-master | digits/test_status.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from . import scheduler
from .config import config_value
from .job import Job
from .webapp import app
from digits import test_utils
from digits.utils import subclass, override
test_utils.skipIfNotFramework('none')
class TestScheduler():
def get_scheduler(self):
return scheduler.Scheduler(config_value('gpu_list'))
def test_add_before_start(self):
s = self.get_scheduler()
assert not s.add_job(None), 'add_job should fail'
def test_start_twice(self):
s = self.get_scheduler()
assert s.start(), 'failed to start'
assert s.start(), 'failed to start the second time'
assert s.stop(), 'failed to stop'
def test_stop_before_start(self):
s = self.get_scheduler()
assert s.stop(), 'failed to stop'
@subclass
class JobForTesting(Job):
@override
def job_type(self):
return 'Job For Testing'
class TestSchedulerFlow():
@classmethod
def setUpClass(cls):
cls.s = scheduler.Scheduler(config_value('gpu_list'))
assert cls.s.start(), 'failed to start'
@classmethod
def tearDownClass(cls):
assert cls.s.stop(), 'failed to stop'
def test_add_remove_job(self):
with app.test_request_context():
job = JobForTesting(name='testsuite-job', username='digits-testsuite')
assert self.s.add_job(job), 'failed to add job'
assert len(self.s.jobs) == 1, 'scheduler has %d jobs' % len(self.s.jobs)
assert self.s.delete_job(job), 'failed to delete job'
assert len(self.s.jobs) == 0, 'scheduler has %d jobs' % len(self.s.jobs)
| DIGITS-master | digits/test_scheduler.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import json
import time
import urllib
from urlparse import urlparse
from . import test_utils
from . import webapp
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(object):
"""
Abstract class with a Flask context and a Scheduler
Provides some other useful functions to children who inherit this
"""
@classmethod
def setUpClass(cls):
# Call super.setUpClass() unless we're the last in the class hierarchy
supercls = super(BaseViewsTest, cls)
if hasattr(supercls, 'setUpClass'):
supercls.setUpClass()
# Start up the server
assert webapp.scheduler.start(), "scheduler wouldn't start"
webapp.app.config['WTF_CSRF_ENABLED'] = False
webapp.app.config['TESTING'] = True
cls.app = webapp.app.test_client()
cls.created_datasets = []
cls.created_models = []
rv = cls.app.post('/login', data={
'username': 'digits-testsuite'})
assert rv.status_code == 302, 'Login failed with %s' % rv.status_code
@classmethod
def tearDownClass(cls):
# Remove all created jobs
for job_id in cls.created_models:
cls.delete_model(job_id)
for job_id in cls.created_datasets:
cls.delete_dataset(job_id)
rv = cls.app.post('/logout')
assert rv.status_code == 302, 'Logout failed with %s' % rv.status_code
@classmethod
def job_id_from_response(cls, rv):
"""
Extract the job_id from an HTTP response
"""
job_url = rv.headers['Location']
parsed_url = urlparse(job_url)
return parsed_url.path.split('/')[-1]
@classmethod
def job_exists(cls, job_id, job_type='jobs'):
"""
Test whether a job exists
"""
url = '/%s/%s' % (job_type, job_id)
rv = cls.app.get(url, follow_redirects=True)
assert rv.status_code in [200, 404], 'got status code "%s" from "%s"\n%s' % (rv.status_code, url, rv.data)
return rv.status_code == 200
@classmethod
def job_status(cls, job_id, job_type='jobs'):
"""
Get the status of a job
"""
url = '/%s/%s/status' % (job_type, job_id)
rv = cls.app.get(url)
assert rv.status_code == 200, 'Cannot get status of job %s. "%s" returned %s' % (job_id, url, rv.status_code)
status = json.loads(rv.data)
return status['status']
@classmethod
def job_info(cls, job_id, job_type='jobs'):
"""
Get job information (full JSON response)
"""
url = '/%s/%s/json' % (job_type, job_id)
rv = cls.app.get(url)
assert rv.status_code == 200, 'Cannot get info from job %s. "%s" returned %s' % (job_id, url, rv.status_code)
info = json.loads(rv.data)
return info
@classmethod
def job_info_html(cls, job_id, job_type='jobs'):
"""
Get job information (full HTML response)
"""
url = '/%s/%s' % (job_type, job_id)
rv = cls.app.get(url)
assert rv.status_code == 200, 'Cannot get info from job %s. "%s" returned %s' % (job_id, url, rv.status_code)
return rv.data
@classmethod
def abort_job(cls, job_id, job_type='jobs'):
"""
Abort a job
Returns the HTTP status code
"""
rv = cls.app.post('/%s/%s/abort' % (job_type, job_id))
return rv.status_code
@classmethod
def job_wait_completion(cls, job_id, timeout=10, polling_period=0.5, job_type='jobs'):
"""
Poll the job status until it completes
Returns the final status
Arguments:
job_id -- the job to wait for
Keyword arguments:
timeout -- maximum wait time (seconds)
polling_period -- how often to poll (seconds)
job_type -- [datasets|models]
"""
start_time = time.time()
while True:
status = cls.job_status(job_id, job_type=job_type)
if status in ['Done', 'Abort', 'Error']:
# make sure job appears in completed jobs
url = '/completed_jobs/json'
rv = cls.app.get(url)
assert rv.status_code == 200, 'Cannot get info from job %s. "%s" returned %s' % (
job_id, url, rv.status_code)
info = json.loads(rv.data)
dataset_ids = [job['id'] for job in info['datasets']]
model_ids = [job['id'] for job in info['models']]
assert job_id in dataset_ids or job_id in model_ids, "job %s not found in completed jobs" % job_id
# make sure job can be shown without error
url = '/jobs/%s' % job_id
rv = cls.app.get(url, follow_redirects=True)
assert rv.status_code == 200, 'Cannot get info from job %s. "%s" returned %s' % (
job_id, url, rv.status_code)
assert job_id in rv.data
return status
assert (time.time() - start_time) < timeout, 'Job %s did not complete within %s seconds' % (job_id, timeout)
time.sleep(polling_period)
@classmethod
def edit_job(cls, job_id, name=None, notes=None):
"""
Edit the name of a job
"""
data = {}
if name:
data['job_name'] = name
if notes:
data['job_notes'] = notes
rv = cls.app.put('/jobs/%s' % job_id, data=data)
return rv.status_code
@classmethod
def delete_job(cls, job_id, job_type='jobs'):
"""
Delete a job
Returns the HTTP status code
"""
rv = cls.app.delete('/%s/%s' % (job_type, job_id))
return rv.status_code
################################################################################
# Test classes
################################################################################
class TestViews(BaseViewsTest):
@classmethod
def setUpClass(cls):
test_utils.skipIfNotFramework('none')
super(TestViews, cls).setUpClass()
def test_homepage(self):
rv = self.app.get('/')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
for text in ['Home', 'Datasets', 'Models']:
assert text in rv.data, 'unexpected page format'
def test_invalid_page(self):
rv = self.app.get('/foo')
assert rv.status_code == 404, 'should return 404'
def test_autocomplete(self):
for absolute_path in (True, False):
yield self.check_autocomplete, absolute_path
def check_autocomplete(self, absolute_path):
path = '/' if absolute_path else './'
url = '/autocomplete/path?query=%s' % (urllib.quote(path, safe=''))
rv = self.app.get(url)
assert rv.status_code == 200
status = json.loads(rv.data)
assert 'suggestions' in status
| DIGITS-master | digits/test_views.py |
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import mock
import platform
import unittest
from . import device_query
from digits import test_utils
test_utils.skipIfNotFramework('none')
class TestGetDevices():
"""
tests for device_query.get_devices()
"""
@classmethod
def tearDownClass(cls):
# Reload the normal list of devices
device_query.get_devices(True)
@unittest.skipIf(platform.system() not in ['Linux', 'Darwin'],
'Platform not supported')
@mock.patch('digits.device_query.ctypes.cdll')
def test_no_cudart(self, mock_cdll):
mock_cdll.LoadLibrary.return_value = None
assert device_query.get_devices(True) == [], 'Devices found even when CUDA disabled!'
class TestGetNvmlInfo():
"""
tests for device_query.get_nvml_info()
"""
@classmethod
def setUpClass(cls):
if device_query.get_nvml() is None:
raise unittest.SkipTest('NVML not found')
@unittest.skipIf(len(device_query.get_devices(True)) == 0,
'No GPUs on system')
def test_memory_info_exists(self):
for index, device in enumerate(device_query.get_devices(True)):
assert 'memory' in device_query.get_nvml_info(
index), 'NVML should have memory information for "%s"' % device.name
| DIGITS-master | digits/test_device_query.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from collections import OrderedDict
import os
import shutil
import signal
import time
import traceback
import gevent
import gevent.event
import gevent.queue
from . import utils
from .config import config_value
from .dataset import DatasetJob
from .job import Job
from .log import logger
from .model import ModelJob
from .pretrained_model import PretrainedModelJob
from .status import Status
from digits.utils import errors
"""
This constant configures how long to wait before automatically
deleting completed non-persistent jobs
"""
NON_PERSISTENT_JOB_DELETE_TIMEOUT_SECONDS = 3600
class Resource(object):
"""
Stores information about which tasks are using a resource
"""
class ResourceAllocation(object):
"""
Marks that a task is using [part of] a resource
"""
def __init__(self, task, value):
"""
Arguments:
task -- which task is using the resource
value -- how much of the resource is being used
"""
self.task = task
self.value = value
def __init__(self, identifier=None, max_value=1):
"""
Keyword arguments:
identifier -- some way to identify this resource
max_value -- a numeric representation of the capacity of this resource
"""
if identifier is None:
self.identifier = id(self)
else:
self.identifier = identifier
self.max_value = max_value
self.allocations = []
def remaining(self):
"""
Returns the amount of this resource that is not being used
"""
return self.max_value - sum(a.value for a in self.allocations)
def allocate(self, task, value):
"""
A task is requesting to use this resource
"""
if self.remaining() - value < 0:
raise RuntimeError('Resource is already maxed out at %s/%s' % (
self.remaining(),
self.max_value)
)
self.allocations.append(self.ResourceAllocation(task, value))
def deallocate(self, task):
"""
The task has finished using this resource
"""
for i, a in enumerate(self.allocations):
if id(task) == id(a.task):
self.allocations.pop(i)
return True
return False
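# Example: a Resource with the default max_value=1 (as used for each GPU below)
# can be held by one task at a time; allocate() raises RuntimeError once
# remaining() would drop below zero, and deallocate() frees it again.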
class Scheduler:
"""
Coordinates execution of Jobs
"""
def __init__(self, gpu_list=None, verbose=False):
"""
Keyword arguments:
gpu_list -- a comma-separated string which is a list of GPU id's
verbose -- if True, print more errors
"""
self.jobs = OrderedDict()
self.verbose = verbose
# Keeps track of resource usage
self.resources = {
# TODO: break this into CPU cores, memory usage, IO usage, etc.
'parse_folder_task_pool': [Resource()],
'create_db_task_pool': [Resource(max_value=4)],
'analyze_db_task_pool': [Resource(max_value=4)],
'inference_task_pool': [Resource(max_value=4)],
'gpus': [Resource(identifier=index)
for index in gpu_list.split(',')] if gpu_list else [],
}
self.running = False
self.shutdown = gevent.event.Event()
def load_past_jobs(self):
"""
Look in the jobs directory and load all valid jobs
"""
loaded_jobs = []
failed_jobs = []
for dir_name in sorted(os.listdir(config_value('jobs_dir'))):
if os.path.isdir(os.path.join(config_value('jobs_dir'), dir_name)):
# Make sure it hasn't already been loaded
if dir_name in self.jobs:
continue
try:
job = Job.load(dir_name)
# The server might have crashed
if job.status.is_running():
job.status = Status.ABORT
for task in job.tasks:
if task.status.is_running():
task.status = Status.ABORT
# We might have changed some attributes here or in __setstate__
job.save()
loaded_jobs.append(job)
except Exception as e:
failed_jobs.append((dir_name, e))
# add DatasetJobs or PretrainedModelJobs
for job in loaded_jobs:
if isinstance(job, DatasetJob) or isinstance(job, PretrainedModelJob):
self.jobs[job.id()] = job
# add ModelJobs
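# (models are loaded after datasets so that load_dataset() below can
# resolve each model's dataset reference from self.jobs)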
for job in loaded_jobs:
if isinstance(job, ModelJob):
try:
# load the DatasetJob
job.load_dataset()
self.jobs[job.id()] = job
except Exception as e:
failed_jobs.append((job.id(), e))
logger.info('Loaded %d jobs.' % len(self.jobs))
if len(failed_jobs):
logger.warning('Failed to load %d jobs.' % len(failed_jobs))
if self.verbose:
for job_id, e in failed_jobs:
logger.debug('%s - %s: %s' % (job_id, type(e).__name__, str(e)))
def add_job(self, job):
"""
Add a job to self.jobs
"""
if not self.running:
logger.error('Scheduler not running. Cannot add job.')
return False
else:
self.jobs[job.id()] = job
# Need to fix this properly
# if True or flask._app_ctx_stack.top is not None:
from digits.webapp import app, socketio
with app.app_context():
# send message to job_management room that the job is added
socketio.emit('job update',
{
'update': 'added',
'job_id': job.id(),
},
namespace='/jobs',
room='job_management',
)
if 'DIGITS_MODE_TEST' not in os.environ:
# Let the scheduler do a little work before returning
time.sleep(utils.wait_time())
return True
def get_job(self, job_id):
"""
Look through self.jobs to try to find the Job
Returns None if not found
"""
if job_id is None:
return None
return self.jobs.get(job_id, None)
def get_related_jobs(self, job):
"""
Find the jobs related to the given job: for a model, its dataset and any
other models trained on that dataset; for a dataset, every model trained on it
"""
related_jobs = []
if isinstance(job, ModelJob):
datajob = job.dataset
related_jobs.append(datajob)
elif isinstance(job, DatasetJob):
datajob = job
else:
raise ValueError("Unhandled job type %s" % job.job_type())
for j in self.jobs.values():
# Any model that shares (this/the same) dataset should be added too:
if isinstance(j, ModelJob):
if datajob == j.train_task().dataset and j.id() != job.id():
related_jobs.append(j)
return related_jobs
def abort_job(self, job_id):
"""
Aborts a running Job
Returns True if the job was found and aborted
"""
job = self.get_job(job_id)
if job is None or not job.status.is_running():
return False
job.abort()
logger.info('Job aborted.', job_id=job_id)
return True
def delete_job(self, job):
"""
Deletes an entire job folder from disk
Returns True if the Job was found and deleted
"""
if isinstance(job, str) or isinstance(job, unicode):
job_id = str(job)
elif isinstance(job, Job):
job_id = job.id()
else:
raise ValueError('called delete_job with a %s' % type(job))
dependent_jobs = []
# try to find the job
job = self.jobs.get(job_id, None)
if job:
if isinstance(job, DatasetJob):
# check for dependencies
for j in self.jobs.values():
if isinstance(j, ModelJob) and j.dataset_id == job.id():
logger.error('Cannot delete "%s" (%s) because "%s" (%s) depends on it.' %
(job.name(), job.id(), j.name(), j.id()))
dependent_jobs.append(j.name())
if len(dependent_jobs) > 0:
error_message = 'Cannot delete "%s" because %d model%s depend%s on it: %s' % (
job.name(),
len(dependent_jobs),
('s' if len(dependent_jobs) != 1 else ''),
('s' if len(dependent_jobs) == 1 else ''),
', '.join(['"%s"' % j for j in dependent_jobs]))
raise errors.DeleteError(error_message)
self.jobs.pop(job_id, None)
job.abort()
if os.path.exists(job.dir()):
shutil.rmtree(job.dir())
logger.info('Job deleted.', job_id=job_id)
from digits.webapp import socketio
socketio.emit('job update',
{
'update': 'deleted',
'job_id': job.id()
},
namespace='/jobs',
room='job_management',
)
return True
# see if the folder exists on disk
path = os.path.join(config_value('jobs_dir'), job_id)
path = os.path.normpath(path)
if os.path.dirname(path) == config_value('jobs_dir') and os.path.exists(path):
shutil.rmtree(path)
return True
return False
def running_dataset_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, DatasetJob) and j.status.is_running()],
cmp=lambda x, y: cmp(y.id(), x.id())
)
def completed_dataset_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, DatasetJob) and not j.status.is_running()],
cmp=lambda x, y: cmp(y.id(), x.id())
)
def running_model_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, ModelJob) and j.status.is_running()],
cmp=lambda x, y: cmp(y.id(), x.id())
)
def completed_model_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, ModelJob) and not j.status.is_running()],
cmp=lambda x, y: cmp(y.id(), x.id())
)
def start(self):
"""
Start the Scheduler
Returns True on success
"""
if self.running:
return True
gevent.spawn(self.main_thread)
self.running = True
return True
def stop(self):
"""
Stop the Scheduler
Returns True if the shutdown was graceful
"""
self.shutdown.set()
wait_limit = 5
start = time.time()
while self.running:
if time.time() - start > wait_limit:
return False
time.sleep(0.1)
return True
def main_thread(self):
"""
Monitors the jobs in self.jobs, updates their statuses,
and puts their tasks in queues to be processed by other threads
"""
signal.signal(signal.SIGTERM, self.sigterm_handler)
try:
last_saved = None
while not self.shutdown.is_set():
# Iterate over a copy of the job list so jobs can be deleted during iteration
for job in self.jobs.values():
if job.status == Status.INIT:
def start_this_job(job):
if isinstance(job, ModelJob):
if job.dataset.status == Status.DONE:
job.status = Status.RUN
elif job.dataset.status in [Status.ABORT, Status.ERROR]:
job.abort()
else:
job.status = Status.WAIT
else:
job.status = Status.RUN
if 'DIGITS_MODE_TEST' in os.environ:
start_this_job(job)
else:
# Delay start by one second for initial page load
gevent.spawn_later(1, start_this_job, job)
if job.status == Status.WAIT:
if isinstance(job, ModelJob):
if job.dataset.status == Status.DONE:
job.status = Status.RUN
elif job.dataset.status in [Status.ABORT, Status.ERROR]:
job.abort()
else:
job.status = Status.RUN
if job.status == Status.RUN:
alldone = True
for task in job.tasks:
if task.status in [Status.INIT, Status.WAIT]:
alldone = False
# try to start the task
if task.ready_to_queue():
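# Ask the task which resources it needs; offer_resources() returns
# None if they are not available yet (keep waiting), otherwise a
# dict of reservations to claim before running the task.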
requested_resources = task.offer_resources(self.resources)
if requested_resources is None:
task.status = Status.WAIT
else:
if self.reserve_resources(task, requested_resources):
gevent.spawn(self.run_task,
task, requested_resources)
elif task.status == Status.RUN:
# job is not done
alldone = False
elif task.status in [Status.DONE, Status.ABORT]:
# job is done
pass
elif task.status == Status.ERROR:
# propagate error status up to job
job.status = Status.ERROR
alldone = False
break
else:
logger.warning('Unrecognized task status: "%s"', task.status, job_id=job.id())
if alldone:
job.status = Status.DONE
logger.info('Job complete.', job_id=job.id())
job.save()
# save running jobs every 15 seconds
if not last_saved or time.time() - last_saved > 15:
for job in self.jobs.values():
if job.status.is_running():
if job.is_persistent():
job.save()
elif (not job.is_persistent() and
(time.time() - job.status_history[-1][1] >
NON_PERSISTENT_JOB_DELETE_TIMEOUT_SECONDS)):
# job has been unclaimed for far too long => proceed to garbage collection
self.delete_job(job)
last_saved = time.time()
if 'DIGITS_MODE_TEST' not in os.environ:
time.sleep(utils.wait_time())
else:
time.sleep(0.05)
except KeyboardInterrupt:
pass
# Shutdown
for job in self.jobs.values():
job.abort()
job.save()
self.running = False
def sigterm_handler(self, signal, frame):
"""
Catch SIGTERM in addition to SIGINT
"""
self.shutdown.set()
def task_error(self, task, error):
"""
Handle an error while executing a task
"""
logger.error('%s: %s' % (type(error).__name__, error), job_id=task.job_id)
task.exception = error
task.traceback = traceback.format_exc()
task.status = Status.ERROR
def reserve_resources(self, task, resources):
"""
Reserve resources for a task
"""
try:
# reserve resources
for resource_type, requests in resources.iteritems():
for identifier, value in requests:
found = False
for resource in self.resources[resource_type]:
if resource.identifier == identifier:
resource.allocate(task, value)
self.emit_gpus_available()
found = True
break
if not found:
raise RuntimeError('Resource "%s" with identifier="%s" not found' % (
resource_type, identifier))
task.current_resources = resources
return True
except Exception as e:
self.task_error(task, e)
self.release_resources(task, resources)
return False
def release_resources(self, task, resources):
"""
Release resources previously reserved for a task
"""
# release resources
for resource_type, requests in resources.iteritems():
for identifier, value in requests:
for resource in self.resources[resource_type]:
if resource.identifier == identifier:
resource.deallocate(task)
self.emit_gpus_available()
task.current_resources = None
def run_task(self, task, resources):
"""
Executes a task
Arguments:
task -- the task to run
resources -- the resources allocated for this task
a dict mapping resource_type to lists of (identifier, value) tuples
"""
try:
task.run(resources)
except Exception as e:
self.task_error(task, e)
finally:
self.release_resources(task, resources)
def emit_gpus_available(self):
"""
Call socketio.emit gpu availability
"""
from digits.webapp import scheduler, socketio
socketio.emit('server update',
{
'update': 'gpus_available',
'total_gpu_count': len(self.resources['gpus']),
'remaining_gpu_count': sum(r.remaining() for r in scheduler.resources['gpus']),
},
namespace='/jobs',
room='job_management'
)
| DIGITS-master | digits/scheduler.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import os.path
import sys
# Update PATH to include the local DIGITS directory
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
found_parent_dir = False
for p in sys.path:
if os.path.abspath(p) == PARENT_DIR:
found_parent_dir = True
break
if not found_parent_dir:
sys.path.insert(0, PARENT_DIR)
def main():
parser = argparse.ArgumentParser(description='DIGITS server')
parser.add_argument(
'-p', '--port',
type=int,
default=5000,
help='Port to run app on (default 5000)'
)
parser.add_argument(
'-d', '--debug',
action='store_true',
help=('Run the application in debug mode (reloads when the source '
'changes and gives more detailed error messages)')
)
parser.add_argument(
'--version',
action='store_true',
help='Print the version number and exit'
)
args = vars(parser.parse_args())
import digits
if args['version']:
print digits.__version__
sys.exit()
print ' ___ ___ ___ ___ _____ ___'
print ' | \_ _/ __|_ _|_ _/ __|'
print ' | |) | | (_ || | | | \__ \\'
print ' |___/___\___|___| |_| |___/', digits.__version__
print
import digits.config
import digits.log
import digits.webapp
try:
if not digits.webapp.scheduler.start():
print 'ERROR: Scheduler would not start'
else:
digits.webapp.app.debug = args['debug']
digits.webapp.socketio.run(digits.webapp.app, '0.0.0.0', args['port'])
except KeyboardInterrupt:
pass
finally:
digits.webapp.scheduler.stop()
if __name__ == '__main__':
main()
| DIGITS-master | digits/__main__.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import glob
import json
import platform
import traceback
import os
import flask
from flask_socketio import join_room, leave_room
import werkzeug.exceptions
from .config import config_value
from .webapp import app, socketio, scheduler
import digits
from digits import dataset, extensions, model, utils, pretrained_model
from digits.log import logger
from digits.utils.routing import request_wants_json
blueprint = flask.Blueprint(__name__, __name__)
@blueprint.route('/index/json', methods=['GET'])
@blueprint.route('/', methods=['GET'])
def home(tab=2):
"""
DIGITS home page
Returns information about each job on the server
Returns JSON when requested:
{
datasets: [{id, name, status},...],
models: [{id, name, status},...]
}
"""
running_datasets = get_job_list(dataset.DatasetJob, True)
completed_datasets = get_job_list(dataset.DatasetJob, False)
running_models = get_job_list(model.ModelJob, True)
completed_models = get_job_list(model.ModelJob, False)
if request_wants_json():
data = {
'version': digits.__version__,
'jobs_dir': config_value('jobs_dir'),
'datasets': [j.json_dict()
for j in running_datasets + completed_datasets],
'models': [j.json_dict()
for j in running_models + completed_models],
}
if config_value('server_name'):
data['server_name'] = config_value('server_name')
return flask.jsonify(data)
else:
new_dataset_options = {
'Images': {
'image-classification': {
'title': 'Classification',
'url': flask.url_for(
'digits.dataset.images.classification.views.new'),
},
'image-other': {
'title': 'Other',
'url': flask.url_for(
'digits.dataset.images.generic.views.new'),
},
},
}
new_model_options = {
'Images': {
'image-classification': {
'title': 'Classification',
'url': flask.url_for(
'digits.model.images.classification.views.new'),
},
'image-other': {
'title': 'Other',
'url': flask.url_for(
'digits.model.images.generic.views.new'),
},
},
}
load_model_options = {
'Images': {
'pretrained-model': {
'title': 'Upload Pretrained Model',
'id': 'uploadPretrainedModel',
'url': flask.url_for(
'digits.pretrained_model.views.new'),
},
'access-model-store': {
'title': 'Retrieve from Model Store',
'id': 'retrieveModelStore',
'url': flask.url_for('digits.store.views.store'),
}
},
}
# add dataset options for known dataset extensions
data_extensions = extensions.data.get_extensions()
for extension in data_extensions:
ext_category = extension.get_category()
ext_title = extension.get_title()
ext_title = ext_title[:21] + ' ..' if len(ext_title) > 21 else ext_title
ext_id = extension.get_id()
if ext_category not in new_dataset_options:
new_dataset_options[ext_category] = {}
new_dataset_options[ext_category][ext_id] = {
'title': ext_title,
'url': flask.url_for(
'digits.dataset.generic.views.new',
extension_id=ext_id),
}
if ext_category not in new_model_options:
new_model_options[ext_category] = {}
new_model_options[ext_category][ext_id] = {
'title': ext_title,
'url': flask.url_for(
'digits.model.images.generic.views.new',
extension_id=ext_id),
}
return flask.render_template(
'home.html',
tab=tab,
new_dataset_options=new_dataset_options,
running_datasets=running_datasets,
completed_datasets=completed_datasets,
new_model_options=new_model_options,
running_models=running_models,
completed_models=completed_models,
load_model_options=load_model_options,
total_gpu_count=len(scheduler.resources['gpus']),
remaining_gpu_count=sum(r.remaining()
for r in scheduler.resources['gpus']),
)
def json_dict(job, model_output_fields):
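# Build the dictionary used by the front-page job tables. model_output_fields
# is an output parameter: any training-output column names encountered here
# are added to it so the caller can build a consistent table header.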
d = {
'id': job.id(),
'name': job.name(),
'group': job.group,
'status': job.status_of_tasks().name,
'status_css': job.status_of_tasks().css,
'submitted': job.status_history[0][1],
'elapsed': job.runtime_of_tasks(),
}
if 'train_db_task' in dir(job):
d.update({
'backend': job.train_db_task().backend,
})
if 'train_task' in dir(job):
d.update({
'framework': job.train_task().get_framework_id(),
})
for prefix, outputs in (('train', job.train_task().train_outputs),
('val', job.train_task().val_outputs)):
for key in outputs.keys():
data = outputs[key].data
if len(data) > 0:
key = '%s (%s) ' % (key, prefix)
model_output_fields.add(key + 'last')
model_output_fields.add(key + 'min')
model_output_fields.add(key + 'max')
d.update({key + 'last': data[-1]})
d.update({key + 'min': min(data)})
d.update({key + 'max': max(data)})
if (job.train_task().combined_graph_data() and
'columns' in job.train_task().combined_graph_data()):
d.update({
'sparkline': job.train_task().combined_graph_data()['columns'][0][1:],
})
if 'get_progress' in dir(job):
d.update({
'progress': int(round(100 * job.get_progress())),
})
if hasattr(job, 'dataset_id'):
d.update({
'dataset_id': job.dataset_id,
})
if hasattr(job, 'extension_id'):
d.update({
'extension': job.extension_id,
})
else:
if hasattr(job, 'dataset_id'):
ds = scheduler.get_job(job.dataset_id)
if ds and hasattr(ds, 'extension_id'):
d.update({
'extension': ds.extension_id,
})
if isinstance(job, dataset.DatasetJob):
d.update({'type': 'dataset'})
if isinstance(job, model.ModelJob):
d.update({'type': 'model'})
if isinstance(job, pretrained_model.PretrainedModelJob):
model_output_fields.add("has_labels")
model_output_fields.add("username")
d.update({
'type': 'pretrained_model',
'framework': job.framework,
'username': job.username,
'has_labels': job.has_labels_file()
})
return d
@blueprint.route('/completed_jobs/json', methods=['GET'])
def completed_jobs():
"""
Returns JSON
{
datasets: [{id, name, group, status, status_css, submitted, elapsed, badge}],
models: [{id, name, group, status, status_css, submitted, elapsed, badge}],
}
"""
completed_datasets = get_job_list(dataset.DatasetJob, False)
completed_models = get_job_list(model.ModelJob, False)
running_datasets = get_job_list(dataset.DatasetJob, True)
running_models = get_job_list(model.ModelJob, True)
pretrained_models = get_job_list(pretrained_model.PretrainedModelJob, False)
model_output_fields = set()
data = {
'running': [json_dict(j, model_output_fields) for j in running_datasets + running_models],
'datasets': [json_dict(j, model_output_fields) for j in completed_datasets],
'models': [json_dict(j, model_output_fields) for j in completed_models],
'pretrained_models': [json_dict(j, model_output_fields) for j in pretrained_models],
'model_output_fields': sorted(list(model_output_fields)),
}
return flask.jsonify(data)
@blueprint.route('/jobs/<job_id>/table_data/json', methods=['GET'])
def job_table_data(job_id):
"""
Get the job data for the front page tables
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
model_output_fields = set()
return flask.jsonify({'job': json_dict(job, model_output_fields)})
def get_job_list(cls, running):
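# Return all jobs of the given class whose running state matches `running`,
# newest (most recently submitted) first.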
return sorted(
[j for j in scheduler.jobs.values() if isinstance(j, cls) and j.status.is_running() == running],
key=lambda j: j.status_history[0][1],
reverse=True,
)
@blueprint.route('/group', methods=['GET', 'POST'])
def group():
"""
Assign the group for the listed jobs
"""
not_found = 0
forbidden = 0
group_name = utils.routing.get_request_arg('group_name').strip()
job_ids = flask.request.form.getlist('job_ids[]')
error = []
for job_id in job_ids:
try:
job = scheduler.get_job(job_id)
if job is None:
logger.warning('Job %s not found for group assignment.' % job_id)
not_found += 1
continue
if not utils.auth.has_permission(job, 'edit'):
logger.warning('Group assignment not permitted for job %s' % job_id)
forbidden += 1
continue
job.group = group_name
# update form data so updated name gets used when cloning job
if hasattr(job, 'form_data'):
job.form_data['form.group_name.data'] = job.group
job.emit_attribute_changed('group', job.group)
except Exception as e:
error.append(str(e))
if not_found:
error.append('%d job%s not found.' % (not_found, '' if not_found == 1 else 's'))
if forbidden:
error.append('%d job%s not permitted to be regrouped.' % (forbidden, '' if forbidden == 1 else 's'))
if len(error) > 0:
error = ' '.join(error)
raise werkzeug.exceptions.BadRequest(error)
return 'Jobs regrouped.'
# Authentication/login
@blueprint.route('/login', methods=['GET', 'POST'])
def login():
"""
Ask for a username (no password required)
Sets a cookie
"""
# Get the URL to redirect to after logging in
next_url = utils.routing.get_request_arg('next') or \
flask.request.referrer or flask.url_for('.home')
if flask.request.method == 'GET':
return flask.render_template('login.html', next=next_url)
# Validate username
username = utils.routing.get_request_arg('username').strip()
try:
utils.auth.validate_username(username)
except ValueError as e:
# Invalid username
flask.flash(e.message, 'danger')
return flask.render_template('login.html', next=next_url)
# Valid username
response = flask.make_response(flask.redirect(next_url))
response.set_cookie('username', username)
return response
@blueprint.route('/logout', methods=['GET', 'POST'])
def logout():
"""
Unset the username cookie
"""
next_url = utils.routing.get_request_arg('next') or \
flask.request.referrer or flask.url_for('.home')
response = flask.make_response(flask.redirect(next_url))
response.set_cookie('username', '', expires=0)
return response
# Jobs routes
@blueprint.route('/jobs/<job_id>', methods=['GET'])
def show_job(job_id):
"""
Redirects to the appropriate /datasets/ or /models/ page
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
if isinstance(job, dataset.DatasetJob):
return flask.redirect(flask.url_for('digits.dataset.views.show', job_id=job_id))
if isinstance(job, model.ModelJob):
return flask.redirect(flask.url_for('digits.model.views.show', job_id=job_id))
if isinstance(job, pretrained_model.PretrainedModelJob):
return flask.redirect(flask.url_for('digits.pretrained_model.views.show', job_id=job_id))
else:
raise werkzeug.exceptions.BadRequest('Invalid job type')
@blueprint.route('/jobs/<job_id>', methods=['PUT'])
@utils.auth.requires_login(redirect=False)
def edit_job(job_id):
"""
Edit a job's name and/or notes
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
if not utils.auth.has_permission(job, 'edit'):
raise werkzeug.exceptions.Forbidden()
# Edit name
if 'job_name' in flask.request.form:
name = flask.request.form['job_name'].strip()
if not name:
raise werkzeug.exceptions.BadRequest('name cannot be blank')
job._name = name
job.emit_attribute_changed('name', job.name())
# update form data so updated name gets used when cloning job
if 'form.dataset_name.data' in job.form_data:
job.form_data['form.dataset_name.data'] = name
elif 'form.model_name.data' in job.form_data:
job.form_data['form.model_name.data'] = name
else:
# we are utterly confused
raise werkzeug.exceptions.BadRequest('Unable to edit job type %s' % job.job_type())
logger.info('Set name to "%s".' % job.name(), job_id=job.id())
# Edit notes
if 'job_notes' in flask.request.form:
notes = flask.request.form['job_notes'].strip()
if not notes:
notes = None
job._notes = notes
logger.info('Updated notes.', job_id=job.id())
return '%s updated.' % job.job_type()
@blueprint.route('/datasets/<job_id>/status', methods=['GET'])
@blueprint.route('/models/<job_id>/status', methods=['GET'])
@blueprint.route('/jobs/<job_id>/status', methods=['GET'])
def job_status(job_id):
"""
Returns a JSON object representing the status of a job
"""
job = scheduler.get_job(job_id)
result = {}
if job is None:
result['error'] = 'Job not found.'
else:
result['error'] = None
result['status'] = job.status.name
result['name'] = job.name()
result['type'] = job.job_type()
return json.dumps(result)
@blueprint.route('/pretrained_models/<job_id>', methods=['DELETE'])
@blueprint.route('/datasets/<job_id>', methods=['DELETE'])
@blueprint.route('/models/<job_id>', methods=['DELETE'])
@blueprint.route('/jobs/<job_id>', methods=['DELETE'])
@utils.auth.requires_login(redirect=False)
def delete_job(job_id):
"""
Deletes a job
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
if not utils.auth.has_permission(job, 'delete'):
raise werkzeug.exceptions.Forbidden()
try:
if scheduler.delete_job(job_id):
return 'Job deleted.'
else:
raise werkzeug.exceptions.Forbidden('Job not deleted')
except utils.errors.DeleteError as e:
raise werkzeug.exceptions.Forbidden(str(e))
@blueprint.route('/jobs', methods=['DELETE'])
@utils.auth.requires_login(redirect=False)
def delete_jobs():
"""
Deletes a list of jobs
"""
not_found = 0
forbidden = 0
failed = 0
job_ids = flask.request.form.getlist('job_ids[]')
error = []
for job_id in job_ids:
try:
job = scheduler.get_job(job_id)
if job is None:
not_found += 1
continue
if not utils.auth.has_permission(job, 'delete'):
forbidden += 1
continue
if not scheduler.delete_job(job_id):
failed += 1
continue
except Exception as e:
error.append(str(e))
pass
if not_found:
error.append('%d job%s not found.' % (not_found, '' if not_found == 1 else 's'))
if forbidden:
error.append('%d job%s not permitted to be deleted.' % (forbidden, '' if forbidden == 1 else 's'))
if failed:
error.append('%d job%s failed to delete.' % (failed, '' if failed == 1 else 's'))
if len(error) > 0:
error = ' '.join(error)
raise werkzeug.exceptions.BadRequest(error)
return 'Jobs deleted.'
@blueprint.route('/abort_jobs', methods=['POST'])
@utils.auth.requires_login(redirect=False)
def abort_jobs():
"""
Aborts a list of jobs
"""
not_found = 0
forbidden = 0
failed = 0
errors = []
job_ids = flask.request.form.getlist('job_ids[]')
for job_id in job_ids:
try:
job = scheduler.get_job(job_id)
if job is None:
not_found += 1
continue
if not utils.auth.has_permission(job, 'abort'):
forbidden += 1
continue
if not scheduler.abort_job(job_id):
failed += 1
continue
except Exception as e:
errors.append(e)
pass
if not_found:
errors.append('%d job%s not found.' % (not_found, '' if not_found == 1 else 's'))
if forbidden:
errors.append('%d job%s not permitted to be aborted.' % (forbidden, '' if forbidden == 1 else 's'))
if failed:
errors.append('%d job%s failed to abort.' % (failed, '' if failed == 1 else 's'))
if len(errors) > 0:
raise werkzeug.exceptions.BadRequest(' '.join(errors))
return 'Jobs aborted.'
@blueprint.route('/datasets/<job_id>/abort', methods=['POST'])
@blueprint.route('/models/<job_id>/abort', methods=['POST'])
@blueprint.route('/jobs/<job_id>/abort', methods=['POST'])
@utils.auth.requires_login(redirect=False)
def abort_job(job_id):
"""
Aborts a running job
"""
job = scheduler.get_job(job_id)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
if scheduler.abort_job(job_id):
return 'Job aborted.'
else:
raise werkzeug.exceptions.Forbidden('Job not aborted')
@blueprint.route('/clone/<clone>', methods=['POST', 'GET'])
@utils.auth.requires_login
def clone_job(clone):
"""
Clones a job with the id <clone>, populating the creation page with data saved in <clone>
"""
# <clone> is the job_id to clone
job = scheduler.get_job(clone)
if job is None:
raise werkzeug.exceptions.NotFound('Job not found')
if isinstance(job, dataset.GenericDatasetJob):
return flask.redirect(
flask.url_for('digits.dataset.generic.views.new', extension_id=job.extension_id) + '?clone=' + clone)
if isinstance(job, dataset.ImageClassificationDatasetJob):
return flask.redirect(flask.url_for('digits.dataset.images.classification.views.new') + '?clone=' + clone)
if isinstance(job, dataset.GenericImageDatasetJob):
return flask.redirect(flask.url_for('digits.dataset.images.generic.views.new') + '?clone=' + clone)
if isinstance(job, model.ImageClassificationModelJob):
return flask.redirect(flask.url_for('digits.model.images.classification.views.new') + '?clone=' + clone)
if isinstance(job, model.GenericImageModelJob):
return flask.redirect(flask.url_for('digits.model.images.generic.views.new') + '?clone=' + clone)
else:
raise werkzeug.exceptions.BadRequest('Invalid job type')
# Error handling
@app.errorhandler(Exception)
def handle_error(e):
"""
Handle errors, formatting them as JSON if requested
"""
error_type = type(e).__name__
message = str(e)
trace = None
description = None
status_code = 500
if isinstance(e, werkzeug.exceptions.HTTPException):
status_code = e.code
description = e.description
if app.debug:
trace = traceback.format_exc()
if request_wants_json():
details = {
'message': message,
'type': error_type,
}
if description is not None:
details['description'] = description
if trace is not None:
details['trace'] = trace.split('\n')
return flask.jsonify({'error': details}), status_code
else:
message = message.replace('\\n', '<br />')
if isinstance(e, digits.frameworks.errors.NetworkVisualizationError):
trace = message
message = ''
return flask.render_template('error.html',
title=error_type,
message=message,
description=description,
trace=trace,
), status_code
# Register this handler for all error codes
# Necessary for flask<=0.10.1
# for code in HTTP_STATUS_CODES:
# if code not in [301]:
# app.register_error_handler(code, handle_error)
# File serving
@blueprint.route('/files/<path:path>', methods=['GET'])
def serve_file(path):
"""
Return a file in the jobs directory
If you install the nginx.site file, nginx will serve files instead
and this path will never be used
"""
jobs_dir = config_value('jobs_dir')
return flask.send_from_directory(jobs_dir, path)
# Path Completion
@blueprint.route('/autocomplete/path', methods=['GET'])
def path_autocomplete():
"""
Return a list of paths matching the specified preamble
"""
path = flask.request.args.get('query', '')
if not os.path.isabs(path):
# Only allow absolute paths by prepending forward slash
path = os.path.sep + path
suggestions = [os.path.abspath(p) for p in glob.glob(path + "*")]
if platform.system() == 'Windows':
# on Windows, replace backslashes with forward slashes
suggestions = [p.replace('\\', '/') for p in suggestions]
result = {
"suggestions": sorted(suggestions)
}
return json.dumps(result)
@blueprint.route('/extension-static/<extension_type>/<extension_id>/<path:filename>')
def extension_static(extension_type, extension_id, filename):
"""
Returns static files from an extension's static directory.
'/extension-static/view/image-segmentation/js/app.js'
would send the file
'digits/extensions/view/imageSegmentation/static/js/app.js'
"""
extension = None
if (extension_type == 'view'):
extension = extensions.view.get_extension(extension_id)
elif (extension_type == 'data'):
extension = extensions.data.get_extension(extension_id)
if extension is None:
raise ValueError("Unknown extension '%s'" % extension_id)
digits_root = os.path.dirname(os.path.abspath(digits.__file__))
rootdir = os.path.join(digits_root, 'extensions', extension_type, extension.get_dirname(), 'static')
return flask.send_from_directory(rootdir, filename)
# SocketIO functions
# /home
@socketio.on('connect', namespace='/home')
def on_connect_home():
"""
Somebody connected to the homepage
"""
pass
@socketio.on('disconnect', namespace='/home')
def on_disconnect_home():
"""
Somebody disconnected from the homepage
"""
pass
# /jobs
@socketio.on('connect', namespace='/jobs')
def on_connect_jobs():
"""
Somebody connected to a jobs page
"""
pass
@socketio.on('disconnect', namespace='/jobs')
def on_disconnect_jobs():
"""
Somebody disconnected from a jobs page
"""
pass
@socketio.on('join', namespace='/jobs')
def on_join_jobs(data):
"""
Somebody joined a room
"""
room = data['room']
join_room(room)
flask.session['room'] = room
@socketio.on('leave', namespace='/jobs')
def on_leave_jobs():
"""
Somebody left a room
"""
if 'room' in flask.session:
room = flask.session['room']
del flask.session['room']
# print '>>> Somebody left room %s' % room
leave_room(room)
| DIGITS-master | digits/views.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import time
class Status():
"""
A little class to store the state of Jobs and Tasks
It's pickle-able!
"""
# Enum-like attributes
INIT = 'I'
WAIT = 'W'
RUN = 'R'
DONE = 'D'
ABORT = 'A'
ERROR = 'E'
def __init__(self, val):
self.set_dict(val)
def __str__(self):
return self.val
# Pickling
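# Only the one-character value is pickled; set_dict() rebuilds the
# derived name/css attributes when the object is loaded again.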
def __getstate__(self):
return self.val
def __setstate__(self, state):
self.set_dict(state)
# Operators
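# Comparisons work against another Status or against the raw one-character
# code, so `status == Status.RUN` and `status == 'R'` are equivalent.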
def __eq__(self, other):
if type(other) == type(self):
return self.val == other.val
elif type(other) == str:
return self.val == other
else:
return False
def __ne__(self, other):
if type(other) == type(self):
return self.val != other.val
elif type(other) == str:
return self.val != other
else:
return True
# Member functions
def set_dict(self, val):
self.val = val
if val == self.INIT:
self.name = 'Initialized'
self.css = 'warning'
elif val == self.WAIT:
self.name = 'Waiting'
self.css = 'warning'
elif val == self.RUN:
self.name = 'Running'
self.css = 'info'
elif val == self.DONE:
self.name = 'Done'
self.css = 'success'
elif val == self.ABORT:
self.name = 'Aborted'
self.css = 'warning'
elif val == self.ERROR:
self.name = 'Error'
self.css = 'danger'
else:
self.name = '?'
self.css = 'default'
def is_running(self):
return self.val in (self.INIT, self.WAIT, self.RUN)
class StatusCls(object):
"""
A class that stores a history of Status updates
Child classes can declare the on_status_update() callback
"""
def __init__(self):
self.progress = 0
self.status_history = []
self.status = Status.INIT
@property
def status(self):
if len(self.status_history) > 0:
return self.status_history[-1][0]
else:
return Status.INIT
@status.setter
def status(self, value):
if isinstance(value, str):
value = Status(value)
assert isinstance(value, Status)
if self.status_history and value == self.status_history[-1][0]:
return
self.status_history.append((value, time.time()))
# Remove WAIT status if waited for less than 1 second
if value == Status.RUN and len(self.status_history) >= 2:
curr = self.status_history[-1]
prev = self.status_history[-2]
if prev[0] == Status.WAIT and (curr[1] - prev[1]) < 1:
self.status_history.pop(-2)
# If the status is Done, then force the progress to 100%
if value == Status.DONE:
self.progress = 1.0
if hasattr(self, 'emit_progress_update'):
self.emit_progress_update()
# Don't invoke callback for INIT
if value != Status.INIT:
if hasattr(self, 'on_status_update'):
self.on_status_update()
| DIGITS-master | digits/status.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from digits.job import Job
from digits.utils import subclass, override
from digits.pretrained_model.tasks import CaffeUploadTask, TorchUploadTask, TensorflowUploadTask
@subclass
class PretrainedModelJob(Job):
"""
A Job that uploads a pretrained model
"""
def __init__(self, weights_path, model_def_path, labels_path=None, framework="caffe",
image_type="3", resize_mode="Squash", width=224, height=224, **kwargs):
super(PretrainedModelJob, self).__init__(persistent=False, **kwargs)
self.framework = framework
self.image_info = {
"image_type": image_type,
"resize_mode": resize_mode,
"width": width,
"height": height
}
self.tasks = []
taskKwargs = {
"weights_path": weights_path,
"model_def_path": model_def_path,
"image_info": self.image_info,
"labels_path": labels_path,
"job_dir": self.dir()
}
if self.framework == "caffe":
self.tasks.append(CaffeUploadTask(**taskKwargs))
elif self.framework == "torch":
self.tasks.append(TorchUploadTask(**taskKwargs))
elif self.framework == "tensorflow":
self.tasks.append(TensorflowUploadTask(**taskKwargs))
else:
raise Exception("framework of type " + self.framework + " is not supported")
def get_weights_path(self):
return self.tasks[0].get_weights_path()
def get_model_def_path(self):
return self.tasks[0].get_model_def_path()
def get_python_layer_path(self):
tmp_dir = os.path.dirname(self.tasks[0].get_model_def_path())
python_layer_file_name = 'digits_python_layers.py'
if os.path.exists(os.path.join(tmp_dir, python_layer_file_name)):
return os.path.join(tmp_dir, python_layer_file_name)
elif os.path.exists(os.path.join(tmp_dir, python_layer_file_name + 'c')):
return os.path.join(tmp_dir, python_layer_file_name + 'c')
else:
return None
def has_labels_file(self):
return os.path.isfile(self.tasks[0].get_labels_path())
@override
def is_persistent(self):
return True
@override
def job_type(self):
return "Pretrained Model"
@override
def __getstate__(self):
fields_to_save = ['_id', '_name', 'username', 'tasks', 'status_history', 'framework', 'image_info']
full_state = super(PretrainedModelJob, self).__getstate__()
state_to_save = {}
for field in fields_to_save:
state_to_save[field] = full_state[field]
return state_to_save
@override
def __setstate__(self, state):
super(PretrainedModelJob, self).__setstate__(state)
| DIGITS-master | digits/pretrained_model/job.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .job import PretrainedModelJob
__all__ = ['PretrainedModelJob']
| DIGITS-master | digits/pretrained_model/__init__.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import json
import os
import tempfile
import io
import tarfile
from bs4 import BeautifulSoup
import digits.webapp
import digits.dataset.images.classification.test_views
import digits.model.images.classification.test_views
from digits import test_utils
import digits.test_views
# May be too short on a slow system
TIMEOUT_DATASET = 45
TIMEOUT_MODEL = 60
class BaseTestUpload(digits.model.images.classification.test_views.BaseViewsTestWithModel):
"""
Tests uploading Pretrained Models
"""
def test_upload_manual(self):
job = digits.webapp.scheduler.get_job(self.model_id)
if job is None:
raise AssertionError('Failed To Create Job')
# Collect the job's stats; they are used to fill in the upload form below
info = job.json_dict(verbose=False, epoch=-1)
task = job.train_task()
snapshot_filename = task.get_snapshot(-1)
weights_file = open(snapshot_filename, 'r')
model_def_file = open(os.path.join(job.dir(), task.model_file), 'r')
labels_file = open(os.path.join(task.dataset.dir(), info["labels file"]), 'r')
rv = self.app.post(
'/pretrained_models/new',
data={
'weights_file': weights_file,
'model_def_file': model_def_file,
'labels_file': labels_file,
'framework': info['framework'],
'image_type': info["image dimensions"][2],
'resize_mode': info["image resize mode"],
'width': info["image dimensions"][0],
'height': info["image dimensions"][1],
'job_name': 'test_create_pretrained_model_job'
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 302, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_upload_archive(self):
job = digits.webapp.scheduler.get_job(self.model_id)
if job is None:
raise AssertionError('Failed To Create Job')
info = json.dumps(job.json_dict(verbose=False, epoch=-1), sort_keys=True, indent=4, separators=(',', ': '))
info_io = io.BytesIO()
info_io.write(info)
tmp = tempfile.NamedTemporaryFile()
tf = tarfile.open(fileobj=tmp, mode='w:')
for path, name in job.download_files(-1):
tf.add(path, arcname=name)
tf_info = tarfile.TarInfo("info.json")
tf_info.size = len(info_io.getvalue())
info_io.seek(0)
tf.addfile(tf_info, info_io)
tf.close()
tmp.flush()
tmp.seek(0)
rv = self.app.post(
'/pretrained_models/upload_archive',
data={
'archive': tmp
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
tmp.close()
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
class TestCaffeUpload(BaseTestUpload, test_utils.CaffeMixin):
pass
class TestTorchUpload(BaseTestUpload, test_utils.TorchMixin):
pass
| DIGITS-master | digits/pretrained_model/test_views.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import flask
import tempfile
import tarfile
import zipfile
import json
import os
import shutil
from digits import utils
from digits.webapp import scheduler
from digits.pretrained_model import PretrainedModelJob
import werkzeug.exceptions
blueprint = flask.Blueprint(__name__, __name__)
def get_tempfile(f, suffix):
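# Save an uploaded file (a werkzeug FileStorage) to a named temporary file
# on disk and return the path to it.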
temp = tempfile.mkstemp(suffix=suffix)
f.save(temp[1])
path = temp[1]
os.close(temp[0])
return path
def validate_caffe_files(files):
"""
Validate an uploaded Caffe model (weights and model definition files)
"""
# Validate model weights:
if str(files['weights_file'].filename) == '':
raise werkzeug.exceptions.BadRequest('Missing weights file')
elif files['weights_file'].filename.rsplit('.', 1)[1] != "caffemodel":
raise werkzeug.exceptions.BadRequest('Weights must be a .caffemodel file')
# Validate model definition:
if str(files['model_def_file'].filename) == '':
raise werkzeug.exceptions.BadRequest('Missing model definition file')
elif files['model_def_file'].filename.rsplit('.', 1)[1] != "prototxt":
raise werkzeug.exceptions.BadRequest('Model definition must be .prototxt file')
weights_path = get_tempfile(flask.request.files['weights_file'], ".caffemodel")
model_def_path = get_tempfile(flask.request.files['model_def_file'], ".prototxt")
return (weights_path, model_def_path)
def validate_torch_files(files):
"""
Validate an uploaded Torch model (weights and model definition files)
"""
# Validate model weights:
if str(files['weights_file'].filename) == '':
raise werkzeug.exceptions.BadRequest('Missing weights file')
elif files['weights_file'].filename.rsplit('.', 1)[1] != "t7":
raise werkzeug.exceptions.BadRequest('Weights must be a .t7 file')
# Validate model definition:
if str(files['model_def_file'].filename) == '':
raise werkzeug.exceptions.BadRequest('Missing model definition file')
elif files['model_def_file'].filename.rsplit('.', 1)[1] != "lua":
raise werkzeug.exceptions.BadRequest('Model definition must be .lua file')
weights_path = get_tempfile(flask.request.files['weights_file'], ".t7")
model_def_path = get_tempfile(flask.request.files['model_def_file'], ".lua")
return (weights_path, model_def_path)
def validate_archive_keys(info):
"""
Validate keys stored in the info.json file
"""
keys = ["snapshot file", "framework", "name"]
for key in keys:
if key not in info:
return (False, key)
return (True, 0)
@utils.auth.requires_login
@blueprint.route('/upload_archive', methods=['POST'])
def upload_archive():
"""
Upload archive
"""
files = flask.request.files
archive_file = get_tempfile(files["archive"], ".archive")
if tarfile.is_tarfile(archive_file):
archive = tarfile.open(archive_file, 'r')
names = archive.getnames()
elif zipfile.is_zipfile(archive_file):
archive = zipfile.ZipFile(archive_file, 'r')
names = archive.namelist()
else:
return flask.jsonify({"status": "Incorrect Archive Type"}), 500
if "info.json" in names:
# Create a temp directory to store the extracted archive
tempdir = tempfile.mkdtemp()
labels_file = None
archive.extractall(path=tempdir)
with open(os.path.join(tempdir, "info.json")) as data_file:
info = json.load(data_file)
valid, key = validate_archive_keys(info)
if valid is False:
return flask.jsonify({"status": "Missing Key '" + key + "' in info.json"}), 500
# Get path to files needed to be uploaded in directory
weights_file = os.path.join(tempdir, info["snapshot file"])
if "model file" in info:
model_file = os.path.join(tempdir, info["model file"])
elif "network file" in info:
model_file = os.path.join(tempdir, info["network file"])
else:
return flask.jsonify({"status": "Missing model definition in info.json"}), 500
if "labels file" in info:
labels_file = os.path.join(tempdir, info["labels file"])
# Upload the Model:
job = PretrainedModelJob(
weights_file,
model_file,
labels_file,
info["framework"],
username=utils.auth.get_username(),
name=info["name"]
)
scheduler.add_job(job)
job.wait_completion()
# Delete temp directory
shutil.rmtree(tempdir, ignore_errors=True)
return flask.jsonify({"status": "success"}), 200
else:
return flask.jsonify({"status": "Missing or Incorrect json file"}), 500
@utils.auth.requires_login
@blueprint.route('/new', methods=['POST'])
def new():
"""
Upload a pretrained model
"""
labels_path = None
framework = None
form = flask.request.form
files = flask.request.files
if 'framework' not in form:
framework = "caffe"
else:
framework = form['framework']
if 'job_name' not in flask.request.form:
raise werkzeug.exceptions.BadRequest('Missing job name')
elif str(flask.request.form['job_name']) == '':
raise werkzeug.exceptions.BadRequest('Missing job name')
if framework == "caffe":
weights_path, model_def_path = validate_caffe_files(files)
else:
weights_path, model_def_path = validate_torch_files(files)
if str(flask.request.files['labels_file'].filename) != '':
labels_path = get_tempfile(flask.request.files['labels_file'], ".txt")
job = PretrainedModelJob(
weights_path,
model_def_path,
labels_path,
framework,
form["image_type"],
form["resize_mode"],
form["width"],
form["height"],
username=utils.auth.get_username(),
name=flask.request.form['job_name']
)
scheduler.add_job(job)
return flask.redirect(flask.url_for('digits.views.home', tab=3)), 302
| DIGITS-master | digits/pretrained_model/views.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .upload_pretrained import UploadPretrainedModelTask
from .caffe_upload import CaffeUploadTask
from .torch_upload import TorchUploadTask
from .tensorflow_upload import TensorflowUploadTask
__all__ = [
'UploadPretrainedModelTask',
'CaffeUploadTask',
'TorchUploadTask',
'TensorflowUploadTask'
]
| DIGITS-master | digits/pretrained_model/tasks/__init__.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from digits.utils import subclass, override
from digits.status import Status
from digits.pretrained_model.tasks import UploadPretrainedModelTask
@subclass
class CaffeUploadTask(UploadPretrainedModelTask):
def __init__(self, **kwargs):
super(CaffeUploadTask, self).__init__(**kwargs)
@override
def name(self):
return 'Upload Pretrained Caffe Model'
@override
def get_model_def_path(self):
"""
Get path to model definition
"""
return os.path.join(self.job_dir, "original.prototxt")
@override
def get_weights_path(self):
"""
Get path to model weights
"""
return os.path.join(self.job_dir, "model.caffemodel")
@override
def __setstate__(self, state):
super(CaffeUploadTask, self).__setstate__(state)
@override
def run(self, resources):
self.move_file(self.weights_path, "model.caffemodel")
self.move_file(self.model_def_path, "original.prototxt")
if self.labels_path is not None:
self.move_file(self.labels_path, "labels.txt")
tmp_dir = os.path.dirname(self.weights_path)
python_layer_file_name = 'digits_python_layers.py'
if os.path.exists(os.path.join(tmp_dir, python_layer_file_name)):
self.move_file(os.path.join(tmp_dir, python_layer_file_name), python_layer_file_name)
elif os.path.exists(os.path.join(tmp_dir, python_layer_file_name + 'c')):
self.move_file(os.path.join(tmp_dir, python_layer_file_name + 'c'), python_layer_file_name + 'c')
self.status = Status.DONE
| DIGITS-master | digits/pretrained_model/tasks/caffe_upload.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from digits.utils import subclass, override
from digits.status import Status
from digits.pretrained_model.tasks import UploadPretrainedModelTask
@subclass
class TensorflowUploadTask(UploadPretrainedModelTask):
def __init__(self, **kwargs):
super(TensorflowUploadTask, self).__init__(**kwargs)
@override
def name(self):
return 'Upload Pretrained Tensorflow Model'
@override
def get_model_def_path(self):
"""
Get path to model definition
"""
return os.path.join(self.job_dir, "network.py")
@override
def get_weights_path(self):
"""
Get path to model weights
"""
return os.path.join(self.job_dir, "snapshot.ckpt")
@override
def __setstate__(self, state):
super(TensorflowUploadTask, self).__setstate__(state)
@override
def run(self, resources):
self.move_file(self.weights_path + ".data-00000-of-00001", "snapshot.ckpt.data-00000-of-00001")
self.move_file(self.weights_path + ".index", "snapshot.ckpt.index")
self.move_file(self.weights_path + ".meta", "snapshot.ckpt.meta")
self.move_file(self.model_def_path, "network.py")
if self.labels_path is not None:
self.move_file(self.labels_path, "labels.txt")
self.status = Status.DONE
| DIGITS-master | digits/pretrained_model/tasks/tensorflow_upload.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import shutil
from digits.task import Task
from digits.utils import subclass, override
@subclass
class UploadPretrainedModelTask(Task):
"""
A task for uploading pretrained models
"""
def __init__(self, **kwargs):
"""
Arguments:
weights_path -- path to model weights (**.caffemodel or ***.t7)
model_def_path -- path to model definition (**.prototxt or ***.lua)
image_info -- a dictionary containing image_type, resize_mode, width, and height
labels_path -- path to text file containing list of labels
        framework -- framework of this job (e.g. caffe, torch or tensorflow)
"""
self.weights_path = kwargs.pop('weights_path', None)
self.model_def_path = kwargs.pop('model_def_path', None)
self.image_info = kwargs.pop('image_info', None)
self.labels_path = kwargs.pop('labels_path', None)
self.framework = kwargs.pop('framework', None)
# resources
self.gpu = None
super(UploadPretrainedModelTask, self).__init__(**kwargs)
@override
def name(self):
return 'Upload Pretrained Model'
@override
def __setstate__(self, state):
super(UploadPretrainedModelTask, self).__setstate__(state)
@override
def process_output(self, line):
return True
@override
def offer_resources(self, resources):
reserved_resources = {}
# we need one CPU resource from inference_task_pool
cpu_key = 'inference_task_pool'
if cpu_key not in resources:
return None
for resource in resources[cpu_key]:
if resource.remaining() >= 1:
reserved_resources[cpu_key] = [(resource.identifier, 1)]
return reserved_resources
return None
def move_file(self, input_file, output):
shutil.copy(input_file, os.path.join(self.job_dir, output))
def get_labels_path(self):
"""
Get path to label file
"""
return os.path.join(self.job_dir, "labels.txt")
def get_model_def_path(self):
"""
Get path to model definition
"""
raise NotImplementedError('Please implement me')
def get_weights_path(self):
"""
Get path to model weights
"""
raise NotImplementedError('Please implement me')
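# Note: concrete subclasses (CaffeUploadTask, TorchUploadTask and
# TensorflowUploadTask) override get_model_def_path(), get_weights_path() and
# run() with framework-specific file names.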
| DIGITS-master | digits/pretrained_model/tasks/upload_pretrained.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from digits.utils import subclass, override
from digits.status import Status
from digits.pretrained_model.tasks import UploadPretrainedModelTask
@subclass
class TorchUploadTask(UploadPretrainedModelTask):
def __init__(self, **kwargs):
super(TorchUploadTask, self).__init__(**kwargs)
@override
def name(self):
return 'Upload Pretrained Torch Model'
@override
def get_model_def_path(self):
"""
Get path to model definition
"""
return os.path.join(self.job_dir, "original.lua")
@override
def get_weights_path(self):
"""
Get path to model weights
"""
return os.path.join(self.job_dir, "_Model.t7")
@override
def __setstate__(self, state):
super(TorchUploadTask, self).__setstate__(state)
@override
def run(self, resources):
self.move_file(self.weights_path, "_Model.t7")
self.move_file(self.model_def_path, "original.lua")
if self.labels_path is not None:
self.move_file(self.labels_path, "labels.txt")
self.status = Status.DONE
| DIGITS-master | digits/pretrained_model/tasks/torch_upload.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import itertools
import os
import shutil
import tempfile
import mock
from nose.tools import raises, assert_raises
import numpy as np
import PIL.Image
from . import parse_folder
from digits import test_utils
test_utils.skipIfNotFramework('none')
class TestUnescape():
def test_hello(self):
assert parse_folder.unescape('hello') == 'hello'
def test_space(self):
assert parse_folder.unescape('%20') == ' '
class TestValidateFolder():
@classmethod
def setUpClass(cls):
cls.tmpdir = tempfile.mkdtemp()
_handle, cls.tmpfile = tempfile.mkstemp(dir=cls.tmpdir)
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree(cls.tmpdir)
        except OSError:
            pass
def test_dir(self):
assert parse_folder.validate_folder(self.tmpdir) is True
def test_file(self):
assert parse_folder.validate_folder(self.tmpfile) is False
def test_nonexistent_dir(self):
assert parse_folder.validate_folder(os.path.abspath('not-a-directory')) is False
def test_nonexistent_url(self):
assert parse_folder.validate_folder('http://localhost/not-a-url') is False
class TestValidateOutputFile():
@classmethod
def setUpClass(cls):
cls.tmpdir = tempfile.mkdtemp()
_handle, cls.tmpfile = tempfile.mkstemp(dir=cls.tmpdir)
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree(cls.tmpdir)
        except OSError:
            pass
def test_missing_file(self):
assert parse_folder.validate_output_file(None) is True, 'all new files should be valid'
def test_file(self):
assert parse_folder.validate_output_file(os.path.join(self.tmpdir, 'output.txt')) is True
@mock.patch('os.access')
def test_local_file(self, mock_access):
mock_access.return_value = True
assert parse_folder.validate_output_file('not-a-file.txt') is True, 'relative paths should be accepted'
@mock.patch('os.access')
def test_not_writeable(self, mock_access):
mock_access.return_value = False
assert parse_folder.validate_output_file(self.tmpfile) is False, 'should not succeed without write permission'
def test_existing_file(self):
assert parse_folder.validate_output_file(self.tmpfile) is False
def test_nonexistent_dir(self):
assert parse_folder.validate_output_file(
os.path.join(
os.path.abspath('not-a-dir'),
'output.txt'
)
) is False
class TestValidateInputFile():
@classmethod
def setUpClass(cls):
_handle, cls.tmpfile = tempfile.mkstemp()
os.close(_handle)
@classmethod
def tearDownClass(cls):
os.remove(cls.tmpfile)
def test_missing_file(self):
assert parse_folder.validate_input_file('not-a-file.txt') is False, 'should not pass on missing file'
@mock.patch('os.access')
def test_not_readable(self, mock_access):
mock_access.return_value = False
assert parse_folder.validate_input_file(self.tmpfile) is False, 'should not succeed without read permission'
class TestValidateRange():
def test_no_range(self):
assert parse_folder.validate_range(0) is True
def test_min_less(self):
assert parse_folder.validate_range(-1, min_value=0) is False
def test_min_equal(self):
assert parse_folder.validate_range(0, min_value=0) is True
def test_min_more(self):
assert parse_folder.validate_range(1, min_value=0) is True
def test_max_less(self):
assert parse_folder.validate_range(9, max_value=10) is True
def test_max_equal(self):
assert parse_folder.validate_range(10, max_value=10) is True
def test_max_more(self):
assert parse_folder.validate_range(11, max_value=10) is False
def test_allow_none_true(self):
assert parse_folder.validate_range(None, allow_none=True) is True
def test_allow_none_false(self):
assert parse_folder.validate_range(None, allow_none=False) is False
def test_string(self):
assert parse_folder.validate_range('foo') is False
@mock.patch('digits.tools.parse_folder.validate_output_file')
@mock.patch('digits.tools.parse_folder.validate_input_file')
class TestCalculatePercentages():
@raises(AssertionError)
def test_making_0(self, mock_input, mock_output):
parse_folder.calculate_percentages(None, None, None, None, None, None, None)
def test_making_1(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
expected_outputs = [
('train_file', (100, 0, 0)),
('val_file', (0, 100, 0)),
('test_file', (0, 0, 100))
]
for supplied, expected in expected_outputs:
args = {k: None for k in ['labels_file', 'train_file', 'percent_train',
'val_file', 'percent_val', 'test_file', 'percent_test']}
args.update({supplied: ''})
output = parse_folder.calculate_percentages(**args)
            assert output == expected, 'expected output of {}, got {}'.format(expected, output)
def test_making_2(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
permutes = itertools.combinations(['train', 'val', 'test'], 2)
expected_outputs = itertools.izip(permutes, itertools.repeat((32, 68)))
for supplied, expected in expected_outputs:
args = {k: None for k in ['labels_file', 'train_file', 'percent_train',
'val_file', 'percent_val', 'test_file', 'percent_test']}
args.update({k + '_file': '' for k in supplied})
args.update({'percent_' + k: v for k, v in itertools.izip(supplied, expected)})
# Tricky line. itertools returns combinations in sorted order, always.
# The order of the returned non-zero values should always be correct.
output = [x for x in parse_folder.calculate_percentages(**args) if x != 0]
            assert output == list(expected), 'expected output of {}, got {}'.format(expected, output)
def test_making_3_all_given(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
expected = (25, 30, 45)
assert parse_folder.calculate_percentages(
labels_file='not-a-file.txt',
train_file='not-a-file.txt', percent_train=25,
val_file='not-a-file.txt', percent_val=30,
test_file='not-a-file.txt', percent_test=45
) == expected, 'Calculate percentages should return identical values of {}'.format(expected)
def test_making_3_2_given(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
expected = 45
assert parse_folder.calculate_percentages(
labels_file='not-a-file.txt',
train_file='not-a-file.txt', percent_train=25,
val_file='not-a-file.txt', percent_val=30,
test_file='not-a-file.txt', percent_test=None
)[2] == expected, 'Calculate percentages should calculate third value of {}'.format(expected)
@raises(AssertionError)
def test_making_out_of_range(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
# should raise AssertionError because percentages not between 0-100 are invalid
parse_folder.calculate_percentages(
labels_file='not-a-file.txt',
train_file='not-a-file.txt', percent_train=-1,
val_file=None, percent_val=None,
test_file=None, percent_test=None
)
class TestParseWebListing():
def test_non_url(self):
for url in ['not-a-url', 'http://not-a-url', 'https://not-a-url']:
yield self.check_url_raises, url
def check_url_raises(self, url):
assert_raises(Exception, parse_folder.parse_web_listing, url)
def test_mock_url(self):
for content, dirs, files in [
# Nothing
('', [], []),
# Apache 2.2.22
(
'<head></head><body><table>\n \
<tr><td><a href="/home/">Parent</a></td></tr>\n \
<tr><td><a href="cat1/">cat1/</a></td><td>01-Jan-2015 12:34</td><td> - </td></tr>\n \
<tr><td><a href="cat2/">cat2/</a></td><td>02-Feb-2015 23:45</td><td> - </td></tr>\n \
<tr><td><a href="cat.jpg">cat.jpg</a></td><td>03-Mar-2015 1:23</td><td> 1 </td></tr>\n \
</table</body>\n',
['cat1/', 'cat2/'],
['cat.jpg'],
),
# Apache 2.4.7
(
'<html><head></head><body><table>\n \
<tr><td><a href="/home/">Parent</a></td></tr>\n \
<tr><td><a href="dog/">dog/</a></td><td>01-01-2015 12:34</td><td> - </td></tr>\n \
<tr><td><a href="dog1.jpeg">dog1.jpeg</a></td><td>02-02-2015 23:45</td><td> 1 </td></tr>\n \
<tr><td><a href="dog2.png">dog2.png</a></td><td>03-03-2015 1:23</td><td> 2 </td></tr>\n \
</table</body></html>\n',
['dog/'],
['dog1.jpeg', 'dog2.png'],
),
# Nginx
(
'<html><head></head><body>\n \
<a href="bird.jpg">bird.jpg</a> 01-Jan-1999 01:23 1\n \
<a href="birds/">birds/</a> 02-Feb-1999 12:34 -',
['birds/'],
['bird.jpg'],
),
]:
with mock.patch('digits.tools.parse_folder.requests') as mock_requests:
response = mock.Mock()
response.status_code = mock_requests.codes.ok
response.content = content
mock_requests.get.return_value = response
yield self.check_listing, (dirs, files)
def check_listing(self, rc):
assert parse_folder.parse_web_listing('any_url') == rc
class TestSplitIndices():
def test_indices(self):
for size in [5, 22, 32]:
for percent_b in range(0, 100, 31):
for percent_c in range(0, 100 - percent_b, 41):
yield self.check_split, size, percent_b, percent_c
def check_split(self, size, pct_b, pct_c):
ideala = size * float(100 - pct_b - pct_c) / 100.0
idealb = size * float(100 - pct_c) / 100.0
idxa, idxb = parse_folder.three_way_split_indices(size, pct_b, pct_c)
assert abs(ideala - idxa) <= 2, 'split should be close to {}, is {}'.format(ideala, idxa)
assert abs(idealb - idxb) <= 2, 'split should be close to {}, is {}'.format(idealb, idxb)
class TestParseFolder():
def test_all_train(self):
tmpdir = tempfile.mkdtemp()
img = PIL.Image.fromarray(np.zeros((10, 10, 3), dtype='uint8'))
classes = ['A', 'B', 'C']
for cls in classes:
os.makedirs(os.path.join(tmpdir, cls))
img.save(os.path.join(tmpdir, cls, 'image1.png'))
img.save(os.path.join(tmpdir, cls, 'image2.jpg'))
labels_file = os.path.join(tmpdir, 'labels.txt')
train_file = os.path.join(tmpdir, 'train.txt')
parse_folder.parse_folder(tmpdir, labels_file, train_file=train_file,
percent_train=100, percent_val=0, percent_test=0)
with open(labels_file) as infile:
parsed_classes = [line.strip() for line in infile]
assert parsed_classes == classes, '%s != %s' % (parsed_classes, classes)
shutil.rmtree(tmpdir)
| DIGITS-master | digits/tools/test_parse_folder.py |
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
import os.path
import shutil
import tempfile
import lmdb
import numpy as np
from . import analyze_db
from digits import test_utils
# Must import after importing digits.config
import caffe.io
test_utils.skipIfNotFramework('none')
class BaseTestWithDB(object):
SAME_SHAPE = True
PASS_DEFAULTS = True
PASS_FORCE = True
PASS_COUNT = True
@classmethod
def setUpClass(cls):
cls._data_dir = tempfile.mkdtemp()
cls.db = lmdb.open(os.path.join(cls._data_dir, 'db'))
for i in xrange(2):
if cls.SAME_SHAPE:
width = 10
else:
width = 10 + i
datum = cls.create_datum(10, width, 3)
with cls.db.begin(write=True) as txn:
txn.put(str(i), datum.SerializeToString())
@classmethod
def tearDownClass(cls):
cls.db.close()
shutil.rmtree(cls._data_dir)
@staticmethod
def create_datum(*shape):
"""
Creates a datum with an image of the given shape
"""
image = np.ones(shape, dtype='uint8')
return caffe.io.array_to_datum(image)
def test_defaults(self):
assert analyze_db.analyze_db(self.db.path()) == self.PASS_DEFAULTS
def test_force_shape(self):
assert analyze_db.analyze_db(self.db.path(), force_same_shape=True) == self.PASS_FORCE
class TestSameShape(BaseTestWithDB):
pass
class TestDifferentShape(BaseTestWithDB):
SAME_SHAPE = False
PASS_FORCE = False
| DIGITS-master | digits/tools/test_analyze_db.py |
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
import mock
import tempfile
from . import resize_image
from digits import test_utils
test_utils.skipIfNotFramework('none')
class TestOutputValidation():
def test_no_filename(self):
assert resize_image.validate_output_file(None), 'All new files should be valid'
@mock.patch('os.access')
def test_not_writable(self, mock_access):
mock_access.return_value = False
with tempfile.NamedTemporaryFile('r') as f:
assert not resize_image.validate_output_file(f.name), 'validation should not pass on unwritable file'
def test_normal(self):
with tempfile.NamedTemporaryFile('r') as f:
assert resize_image.validate_output_file(f.name), 'validation should pass on temporary file'
class TestInputValidation():
def test_does_not_exist(self):
assert not resize_image.validate_input_file(''), 'validation should not pass on missing file'
@mock.patch('os.access')
def test_unreadable_file(self, mock_access):
mock_access.return_value = False
with tempfile.NamedTemporaryFile('r') as f:
assert not resize_image.validate_input_file(f.name), 'validation should not pass on unreadable file'
class TestRangeValidation():
def test_number_none_and_not_allowed(self):
assert not resize_image.validate_range(
None, allow_none=False), 'number=None should not be allowed with allow_none=False'
def test_number_not_float_compatible(self):
value = 'a'
assert not resize_image.validate_range(value), 'number=%s should not be accepted' % value
def test_number_below_min(self):
assert not resize_image.validate_range(0, min_value=1), 'validation should not pass with number < min_value'
def test_number_above_max(self):
assert not resize_image.validate_range(2, max_value=1), 'validation should not pass with number > max_value'
def test_range(self):
assert resize_image.validate_range(
5, min_value=0, max_value=255), 'validation should pass with 5 in range (0, 255)'
| DIGITS-master | digits/tools/test_resize_image.py |
#!/usr/bin/env python2
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import logging
import os
import random
import requests
import re
import sys
import time
import urllib
from s3_walker import S3Walker
# Add path for DIGITS package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from digits import utils, log # noqa
logger = logging.getLogger('digits.tools.parse_s3')
local_prefix = 's3_tmp'
def unescape(s):
return urllib.unquote(s)
def validate_s3(walker, bucket, path):
try:
walker.connect()
if not path.endswith('/'):
path = path + '/'
walker.head(bucket, path)
digits = walker.listbucket(bucket, prefix=path, with_prefix=True)
for digit in digits:
walker.listbucket(bucket, prefix=digit, max_size=10)
except Exception:
logger.exception('failed to validate s3: %s', str(sys.exc_info()[0]))
return False
return True
def validate_output_file(filename):
if filename is None:
return True
if os.path.exists(filename):
logger.error('output file "%s" already exists!' % filename)
return False
output_dir = os.path.dirname(filename)
if not output_dir:
output_dir = '.'
if not os.path.exists(output_dir):
logger.error('output directory "%s" does not exist!' % output_dir)
return False
if not os.access(output_dir, os.W_OK):
logger.error('you do not have write access to output directory "%s"!' % output_dir)
return False
return True
def validate_input_file(filename):
if not os.path.exists(filename) or not os.path.isfile(filename):
logger.error('input file "%s" does not exist!' % filename)
return False
if not os.access(filename, os.R_OK):
logger.error('you do not have read access to "%s"!' % filename)
return False
return True
def validate_range(number, min_value=None, max_value=None, allow_none=False):
if number is None:
if allow_none:
return True
else:
logger.error('invalid value %s' % number)
return False
try:
float(number)
except ValueError:
logger.error('invalid value %s' % number)
return False
if min_value is not None and number < min_value:
logger.error('invalid value %s' % number)
return False
if max_value is not None and number > max_value:
logger.error('invalid value %s' % number)
return False
return True
def calculate_percentages(labels_file,
train_file, percent_train,
val_file, percent_val,
test_file, percent_test,
**kwargs):
"""
Returns (percent_train, percent_val, percent_test)
Throws exception on errors
"""
# reject any percentages not between 0-100
assert all(x is None or 0 <= x <= 100
for x in [percent_train, percent_val, percent_test]), \
'all percentages must be 0-100 inclusive or not specified'
# return values
pt = None
pv = None
ps = None
# making these sets
mt = False
mv = False
ms = False
if train_file is not None:
pt = percent_train
mt = True
if val_file is not None:
pv = percent_val
mv = True
if test_file is not None:
ps = percent_test
ms = True
making = sum([mt, mv, ms])
assert making > 0, 'must specify at least one of train_file, val_file and test_file'
if train_file is not None:
assert validate_output_file(labels_file)
else:
assert validate_input_file(labels_file)
if making == 1:
if mt:
return (100, 0, 0)
elif mv:
return (0, 100, 0)
else:
return (0, 0, 100)
elif making == 2:
if mt and mv:
assert not (pt is None and pv is None), 'must give percent_train or percent_val'
if pt is not None and pv is not None:
assert (pt + pv) == 100, 'percentages do not sum to 100'
return (pt, pv, 0)
elif pt is not None:
return (pt, 100 - pt, 0)
else:
return (100 - pv, pv, 0)
elif mt and ms:
assert not (pt is None and ps is None), 'must give percent_train or percent_test'
if pt is not None and ps is not None:
assert (pt + ps) == 100, 'percentages do not sum to 100'
return (pt, 0, ps)
elif pt is not None:
return (pt, 0, 100 - pt)
else:
return (100 - ps, 0, ps)
elif mv and ms:
assert not (pv is None and ps is None), 'must give percent_val or percent_test'
if pv is not None and ps is not None:
assert (pv + ps) == 100, 'percentages do not sum to 100'
return (0, pv, ps)
elif pv is not None:
return (0, pv, 100 - pv)
else:
return (0, 100 - ps, ps)
elif making == 3:
specified = sum([pt is not None, pv is not None, ps is not None])
assert specified >= 2, 'must specify two of percent_train, percent_val, and percent_test'
if specified == 3:
assert (pt + pv + ps) == 100, 'percentages do not sum to 100'
return (pt, pv, ps)
elif specified == 2:
if pt is None:
assert (pv + ps) <= 100, 'percentages cannot exceed 100'
return (100 - (pv + ps), pv, ps)
elif pv is None:
assert (pt + ps) <= 100, 'percentages cannot exceed 100'
return (pt, 100 - (pt + ps), ps)
elif ps is None:
assert (pt + pv) <= 100, 'percentages cannot exceed 100'
return (pt, pv, 100 - (pt + pv))
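# Illustrative example (not part of the original file): with only train_file and
# val_file given and percent_train=80 (percent_val left as None),
# calculate_percentages() infers the remaining split and returns (80, 20, 0).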
def parse_web_listing(url):
"""Utility for parse_folder()
Parses an autoindexed folder into directories and files
Returns (dirs, files)
"""
dirs = []
files = []
r = requests.get(url, timeout=3.05)
if r.status_code != requests.codes.ok:
raise Exception('HTTP Status Code %s' % r.status_code)
for line in r.content.split('\n'):
line = line.strip()
# Matches nginx and apache's autoindex formats
match = re.match(
r'^.*\<a.+href\=[\'\"]([^\'\"]+)[\'\"].*\>.*(\w{1,4}-\w{1,4}-\w{1,4})', line, flags=re.IGNORECASE)
if match:
if match.group(1).endswith('/'):
dirs.append(match.group(1))
elif match.group(1).lower().endswith(utils.image.SUPPORTED_EXTENSIONS):
files.append(match.group(1))
return (dirs, files)
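# Illustrative note (added): for an autoindex line such as
#   <a href="cat1/">cat1/</a> 01-Jan-2015 12:34
# the regex above captures 'cat1/'; hrefs ending in '/' are returned as dirs and
# hrefs with a supported image extension as files.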
def web_listing_all_files(url, count=0, max_count=None):
"""Utility for parse_folder()
Gets all files from a url by parsing the directory and all subdirectories looking for image files
Returns (urls, count)
(recursive)
"""
urls = []
dirs, files = parse_web_listing(url)
for f in files:
urls.append(url + f)
count += 1
if max_count is not None and count >= max_count:
logger.warning('Reached maximum limit for this category')
return urls, count
for d in dirs:
new_urls, count = web_listing_all_files(url + d, count, max_count)
urls += new_urls
if max_count is not None and count >= max_count:
break
return urls, count
def three_way_split_indices(size, pct_b, pct_c):
"""
Utility for splitting an array
Returns (a, b) where a and b are indices for splitting the array into 3 pieces
Arguments:
size -- the size of the array
pct_b -- the percent of the array that should be used for group b
pct_c -- the percent of the array that should be used for group c
"""
assert 0 <= pct_b <= 100
assert 0 <= pct_c <= 100
pct_a = 100 - (pct_b + pct_c)
assert 0 <= pct_a <= 100
if pct_a >= 100:
return size, size
elif pct_b >= 100:
return 0, size
elif pct_c >= 100:
return 0, 0
else:
a = int(round(float(size) * pct_a / 100))
if pct_a and not a:
a = 1
b = int(round(float(size) * pct_b / 100))
if a + b > size:
b = size - a
if pct_b and not b:
if a > 1:
a -= 1
b = 1
elif a != size:
b = 1
c = size - (a + b)
if pct_c and not c:
if b > 1:
b -= 1
c = 1
elif a > 1:
a -= 1
c = 1
assert a + b + c == size
return a, a + b
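# Worked example (illustrative, not part of the original file):
# three_way_split_indices(10, 20, 10) returns (7, 9), i.e. items [0:7] go to
# group a, [7:9] to group b and [9:10] to group c.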
def parse_s3(walker, bucket, path, labels_file,
train_file=None, percent_train=None,
val_file=None, percent_val=None,
test_file=None, percent_test=None,
min_per_category=2,
max_per_category=None):
"""
    Parses images stored under an S3 path into three textfiles
Returns True on success
Arguments:
walker -- S3Walker object
    bucket -- bucket name
path -- a path to images
labels_file -- file for labels
Keyword Arguments:
train_file -- output file for training images
    percent_train -- percentage of images to use in the training set
val_file -- output file for validation images
percent_val -- percentage of images to use in the validation set
test_file -- output file for test images
percent_test -- percentage of images to use in the test set
min_per_category -- minimum number of images per category
max_per_category -- maximum number of images per category
"""
create_labels = (percent_train > 0)
labels = []
# Read the labels from labels_file
if not create_labels:
with open(labels_file) as infile:
for line in infile:
line = line.strip()
if line:
labels.append(line)
# Verify that at least two category folders exist
walker.connect()
if not path.endswith('/'):
path = path + '/'
subdirs = []
digits = walker.listbucket(bucket, prefix=path, with_prefix=True)
for digit in digits:
subdirs.append(digit[len(path):])
subdirs.sort()
if len(subdirs) < 2:
logger.error('folder must contain at least two subdirectories')
return False
# Parse the folder
train_count = 0
val_count = 0
test_count = 0
if percent_train:
train_outfile = open(train_file, 'w')
if percent_val:
val_outfile = open(val_file, 'w')
if percent_test:
test_outfile = open(test_file, 'w')
subdir_index = 0
label_index = 0
for subdir in subdirs:
# Use the directory name as the label
label_name = subdir
if label_name.endswith('/'):
# Remove trailing slash
label_name = label_name[0:-1]
if create_labels:
labels.append(label_name)
label_index = len(labels) - 1
else:
found = False
for i, l in enumerate(labels):
if label_name == l:
found = True
label_index = i
break
if not found:
logger.warning('Category "%s" not found in labels_file. Skipping.' % label_name)
continue
logger.debug('Category - %s' % label_name)
lines = []
# Read all images under the path
max_size = 100
files = walker.listbucket(bucket, prefix=path+subdir, max_size=max_size)
for file in files:
if file.lower().endswith(utils.image.SUPPORTED_EXTENSIONS):
parent_folder = os.path.join(local_prefix, path, subdir)
if not os.path.exists(parent_folder):
os.makedirs(parent_folder)
localfile = os.path.join(parent_folder, file[file.rindex('/')+1:])
walker.get(bucket, file, localfile)
lines.append('%s %d' % (localfile, label_index))
if max_per_category is not None and len(lines) >= max_per_category:
break
if max_per_category is not None and len(lines) >= max_per_category:
logger.warning('Reached maximum limit for this category')
break
# Split up the lines
train_lines = []
val_lines = []
test_lines = []
required_categories = 0
if percent_train > 0:
required_categories += 1
if percent_val > 0:
required_categories += 1
if percent_test > 0:
required_categories += 1
        if not lines or len(lines) < required_categories or len(lines) < min_per_category:
            logger.warning('Not enough images for this category')
            if create_labels:
                # only discard the label that was appended for this skipped category
                labels.pop()
else:
random.shuffle(lines)
a, b = three_way_split_indices(len(lines), percent_val, percent_test)
train_lines = lines[:a]
val_lines = lines[a:b]
test_lines = lines[b:]
if train_lines:
train_outfile.write('\n'.join(train_lines) + '\n')
train_count += len(train_lines)
if val_lines:
val_outfile.write('\n'.join(val_lines) + '\n')
val_count += len(val_lines)
if test_lines:
test_outfile.write('\n'.join(test_lines) + '\n')
test_count += len(test_lines)
subdir_index += 1
logger.debug('Progress: %0.2f' % (float(subdir_index) / len(subdirs)))
if percent_train:
train_outfile.close()
if percent_val:
val_outfile.close()
if percent_test:
test_outfile.close()
if create_labels:
if len(labels) < 2:
logger.error('Did not find two valid categories')
return False
else:
with open(labels_file, 'w') as labels_outfile:
labels_outfile.write('\n'.join(labels) + '\n')
logger.info('Found %d images in %d categories.' % (train_count + val_count + test_count, len(labels)))
logger.info('Selected %d for training.' % train_count)
logger.info('Selected %d for validation.' % val_count)
logger.info('Selected %d for testing.' % test_count)
return True
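# Illustrative invocation (endpoint, bucket, path and file names below are
# hypothetical placeholders):
#   python parse_s3.py http://127.0.0.1:9000 mybucket images/ ACCESSKEY SECRETKEY \
#       labels.txt -t train.txt -T 80 -v val.txt -V 20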
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse-S3 tool - DIGITS')
# Positional arguments
parser.add_argument(
'endpoint',
help='An S3 Endpoint URL'
)
parser.add_argument(
'bucket',
help='A Bucket Name'
)
parser.add_argument(
'path',
help='A Path To Images'
)
parser.add_argument(
'accesskey',
help='An Access Key'
)
parser.add_argument(
'secretkey',
help='A Secret Key'
)
parser.add_argument(
'labels_file',
help=('The file containing labels. If train_file is set, this file '
'will be generated (output). Otherwise, this file will be read (input).')
)
# Optional arguments
parser.add_argument(
'-t', '--train_file',
help='The output file for training images'
)
parser.add_argument(
'-T', '--percent_train', type=float,
help='Percent of images used for the training set (constant across all categories)'
)
parser.add_argument(
'-v', '--val_file',
help='The output file for validation images'
)
parser.add_argument(
'-V', '--percent_val', type=float,
help='Percent of images used for the validation set (constant across all categories)'
)
parser.add_argument(
'-s', '--test_file',
help='The output file for test images'
)
parser.add_argument(
'-S', '--percent_test', type=float,
help='Percent of images used for the test set (constant across all categories)'
)
parser.add_argument(
        '--min', type=int, metavar='MIN_PER_CATEGORY', default=2,
help=("What is the minimum allowable number of images per category? "
"(categories which don't meet this criteria will be ignored) [default=2]")
)
parser.add_argument(
'--max', type=int, metavar='MAX_PER_CATEGORY',
help=("What is the maximum limit of images per category? "
"(categories which exceed this limit will be trimmed down) [default=None]")
)
args = vars(parser.parse_args())
walker = S3Walker(args['endpoint'], args['accesskey'], args['secretkey'])
for valid in [
validate_s3(walker, args['bucket'], args['path']),
validate_range(args['percent_train'],
min_value=0, max_value=100, allow_none=True),
validate_output_file(args['train_file']),
validate_range(args['percent_val'],
min_value=0, max_value=100, allow_none=True),
validate_output_file(args['val_file']),
validate_range(args['percent_test'],
min_value=0, max_value=100, allow_none=True),
validate_output_file(args['test_file']),
validate_range(args['min'], min_value=1),
validate_range(args['max'], min_value=1, allow_none=True),
]:
if not valid:
sys.exit(1)
try:
percent_train, percent_val, percent_test = calculate_percentages(**args)
except Exception as e:
logger.error('%s: %s' % (type(e).__name__, e))
sys.exit(1)
start_time = time.time()
if parse_s3(walker, args['bucket'], args['path'], args['labels_file'],
train_file=args['train_file'],
percent_train=percent_train,
val_file=args['val_file'],
percent_val=percent_val,
test_file=args['test_file'],
percent_test=percent_test,
min_per_category=args['min'],
max_per_category=args['max']):
logger.info('Done after %d seconds.' % (time.time() - start_time))
sys.exit(0)
else:
sys.exit(1)
| DIGITS-master | digits/tools/parse_s3.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.s3.prefix import Prefix
import logging
from digits import utils, log # noqa
logger = logging.getLogger('digits.tools.s3_walker')
class S3Walker(object):
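    """
    Thin wrapper around boto's S3Connection used by the DIGITS S3 tools
    (e.g. parse_s3.py) to head, get, put and list objects in a bucket.
    """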
def __init__(self, endpoint, accesskey, secretkey):
self.is_secure = endpoint.find('https://') > -1
self.host = endpoint[endpoint.index('://')+3:]
has_port = self.host.find(':') > -1
if has_port:
self.port = int(self.host[self.host.index(':')+1:])
self.host = self.host[:self.host.index(':')]
else:
if self.is_secure:
self.port = 443
else:
self.port = 80
self.accesskey = accesskey
self.secretkey = secretkey
self.conn = None
logger.info('Host %s is secure: %s port: %s.' % (self.host, str(self.is_secure), str(self.port)))
def connect(self):
self.conn = S3Connection(aws_access_key_id=self.accesskey, aws_secret_access_key=self.secretkey,
is_secure=self.is_secure, host=self.host, port=self.port)
def head(self, bucket, key):
b = self.conn.get_bucket(bucket)
return b.get_key(key)
def get(self, bucket, key, filename):
k = self.head(bucket, key)
k.get_contents_to_filename(filename)
def get_as_string(self, bucket, key):
k = self.head(bucket, key)
return k.get_contents_as_string()
def get_meta(self, bucket, key, meta):
value = None
k = self.head(bucket, key)
if k is not None:
value = k.get_metadata(meta)
return value
def put(self, bucket, key, filename):
b = self.conn.get_bucket(bucket)
k = Key(b)
k.key = key
k.set_contents_from_filename(filename)
def listbucket(self, bucket, prefix='', max_size=1000, marker='', with_prefix=False):
logger.info('listing bucket with prefix = ' + prefix + ', with_prefix = ' + str(with_prefix))
b = self.conn.get_bucket(bucket)
resultset = b.list(prefix=prefix, delimiter='/', marker=marker)
keys = []
for key in resultset:
# returns only Keys
if isinstance(key, Key):
keys.append(key.key)
elif isinstance(key, Prefix) and with_prefix:
keys.append(key.name)
if len(keys) >= max_size:
break
if len(keys) > 0:
logger.info('retrieved ' + str(len(keys)) + ' keys from ' + keys[0] + ' to ' + keys[-1])
return keys
def create_bucket(self, bucket):
return self.conn.create_bucket(bucket)
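# Example usage (illustrative sketch only; the endpoint, credentials, bucket and
# prefix below are hypothetical placeholders):
#
#   walker = S3Walker('http://127.0.0.1:9000', 'ACCESSKEY', 'SECRETKEY')
#   walker.connect()
#   for key in walker.listbucket('mybucket', prefix='images/', max_size=100):
#       walker.get('mybucket', key, key.split('/')[-1])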
| DIGITS-master | digits/tools/s3_walker.py |
#!/usr/bin/env python2
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import argparse
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import lmdb
import logging
import numpy as np
import os
import PIL.Image
import Queue
import sys
import threading
# Add path for DIGITS package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import digits.config # noqa
from digits import extensions, log # noqa
from digits.job import Job # noqa
# Import digits.config first to set the path to Caffe
import caffe.io # noqa
import caffe_pb2 # noqa
logger = logging.getLogger('digits.tools.create_dataset')
class DbWriter(threading.Thread):
"""
Abstract class for writing to databases
"""
def __init__(self, output_dir, total_batches):
self._dir = output_dir
self.write_queue = Queue.Queue(10)
# sequence number
self.seqn = 0
self.total_batches = total_batches
self.processed_batches = 0
self.done = False
threading.Thread.__init__(self)
def write_batch_threadsafe(self, batch):
"""
This function writes a batch of data into the database
This may be called from multiple threads
"""
self.write_queue.put(batch)
def set_done(self):
"""
Instructs writer thread to complete after queue becomes empty
"""
self.done = True
def run(self):
"""
DB Writer thread entry point
"""
while True:
try:
batch = self.write_queue.get(timeout=0.1)
except Queue.Empty:
if self.done:
# break out of main loop and terminate
break
else:
# just keep looping
continue
self.write_batch_threadunsafe(batch)
class LmdbWriter(DbWriter):
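    """
    DbWriter that stores encoded feature datums in an LMDB database for the
    given stage, creating a separate label LMDB only when labels are not scalar.
    """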
def __init__(self,
dataset_dir,
stage,
feature_encoding,
label_encoding,
**kwargs):
self.stage = stage
db_dir = os.path.join(dataset_dir, stage)
if not os.path.exists(db_dir):
os.makedirs(db_dir)
super(LmdbWriter, self).__init__(dataset_dir, **kwargs)
# create LMDB for features
self.feature_db = self.create_lmdb("features")
# will create LMDB for labels later if necessary
self.label_db = None
# encoding
self.feature_encoding = feature_encoding
self.label_encoding = label_encoding
def create_lmdb(self, db_type):
sub_dir = os.path.join(self.stage, db_type)
db_dir = os.path.join(self._dir, sub_dir)
db = lmdb.open(
db_dir,
map_async=True,
max_dbs=0)
logger.info('Created %s db for stage %s in %s' % (db_type,
self.stage,
sub_dir))
return db
def array_to_datum(self, data, scalar_label, encoding):
if data.ndim != 3:
raise ValueError('Invalid number of dimensions: %d' % data.ndim)
if encoding == 'none':
if data.shape[0] == 3:
# RGB to BGR
# XXX see issue #59
data = data[[2, 1, 0], ...]
datum = caffe.io.array_to_datum(data, scalar_label)
else:
# Transpose to (height, width, channel)
data = data.transpose((1, 2, 0))
datum = caffe_pb2.Datum()
datum.height = data.shape[0]
datum.width = data.shape[1]
datum.channels = data.shape[2]
datum.label = scalar_label
if data.shape[2] == 1:
# grayscale
data = data[:, :, 0]
s = StringIO()
if encoding == 'png':
PIL.Image.fromarray(data).save(s, format='PNG')
elif encoding == 'jpg':
PIL.Image.fromarray(data).save(s, format='JPEG', quality=90)
else:
raise ValueError('Invalid encoding type')
datum.data = s.getvalue()
datum.encoded = True
return datum
def write_batch(self, batch):
"""
encode data into datum objects
this may be called from multiple encoder threads
"""
datums = []
for (feature, label) in batch:
# restrict features to 3D data (Caffe Datum objects)
if feature.ndim != 3:
raise ValueError("LMDB/Caffe expect 3D data - ndim=%d" % feature.ndim)
# restrict labels to 3D data (Caffe Datum objects) or scalars
if not (label.ndim == 3 or label.size == 1):
raise ValueError("LMDB/Caffe expect 3D or scalar label - ndim=%d" % label.ndim)
if label.size > 1:
label_datum = self.array_to_datum(
label,
0,
self.label_encoding)
# setting label to 0 - it will be unused as there is
# a dedicated label DB
label = 0
else:
label = label[0]
label_datum = None
feature_datum = self.array_to_datum(
feature,
label,
self.feature_encoding)
datums.append(
(feature_datum.SerializeToString(),
label_datum.SerializeToString() if label_datum else None))
self.write_batch_threadsafe(datums)
def write_batch_threadunsafe(self, batch):
"""
        Write batch to DB; this must only be called from the writer thread
"""
feature_datums = []
label_datums = []
for (feature, label) in batch:
key = "%09d" % self.seqn
if label is not None:
if self.label_db is None:
self.label_db = self.create_lmdb("labels")
label_datums.append((key, label))
feature_datums.append((key, feature))
self.seqn += 1
self.write_datums(self.feature_db, feature_datums)
if len(label_datums) > 0:
self.write_datums(self.label_db, label_datums)
self.processed_batches += 1
logger.info('Processed %d/%d' % (self.processed_batches, self.total_batches))
def write_datums(self, db, batch):
try:
with db.begin(write=True) as lmdb_txn:
for key, datum in batch:
lmdb_txn.put(key, datum)
except lmdb.MapFullError:
# double the map_size
curr_limit = db.info()['map_size']
new_limit = curr_limit * 2
try:
db.set_mapsize(new_limit) # double it
except AttributeError as e:
version = tuple(int(x) for x in lmdb.__version__.split('.'))
if version < (0, 87):
raise ValueError('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__)
else:
raise e
# try again
self.write_datums(db, batch)
class Encoder(threading.Thread):
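    """
    Worker thread that pulls batches of entry IDs from a queue, encodes them
    into (feature, label) arrays via the data extension, accumulates the
    feature sum used for the mean file, and forwards encoded data to the writer.
    """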
def __init__(self, queue, writer, extension, error_queue, force_same_shape):
self.extension = extension
self.queue = queue
self.writer = writer
self.label_shape = None
self.feature_shape = None
self.feature_sum = None
self.processed_count = 0
self.sample_count = 0
self.error_queue = error_queue
self.force_same_shape = force_same_shape
threading.Thread.__init__(self)
def run(self):
data = []
while True:
# get entry ID
# don't block- if the queue is empty then we're done
try:
batch = self.queue.get_nowait()
except Queue.Empty:
# break out of main loop and terminate
break
try:
data = []
for entry_id in batch:
# call into extension to format entry into number arrays
entry_value = self.extension.encode_entry(entry_id)
# entry_value is either a list of (feature, label) tuples
# or a single tuple
if not isinstance(entry_value, list):
entry_value = [entry_value] # convert to list
for feature, label in entry_value:
# check feature and label shapes
if self.feature_shape is None:
self.feature_shape = feature.shape
if self.label_shape is None:
self.label_shape = label.shape
if self.force_same_shape:
if self.feature_shape != feature.shape:
raise ValueError("Feature shape mismatch (last:%s, previous:%s)"
% (repr(feature.shape), repr(self.feature_shape)))
if self.label_shape != label.shape:
raise ValueError("Label shape mismatch (last:%s, previous:%s)"
% (repr(label.shape), repr(self.label_shape)))
if self.feature_sum is None:
self.feature_sum = np.zeros(self.feature_shape, dtype=np.float64)
# accumulate sum for mean file calculation
self.feature_sum += feature
# aggregate data
data.append((feature, label))
self.sample_count += 1
self.processed_count += 1
                if len(data) > 0:
# write data
self.writer.write_batch(data)
except Exception as e:
self.error_queue.put('%s: %s' % (type(e).__name__, e.message))
raise
class DbCreator(object):
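    """
    Drives dataset creation for one stage: fills the encoder queue, spawns
    Encoder threads and an LmdbWriter, aggregates their results and saves the
    mean file.
    """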
def create_db(self,
extension,
stage,
dataset_dir,
batch_size,
num_threads,
feature_encoding,
label_encoding,
force_same_shape):
# retrieve itemized list of entries
entry_ids = extension.itemize_entries(stage)
entry_count = len(entry_ids)
if entry_count > 0:
# create a queue to write errors to
error_queue = Queue.Queue()
# create and fill encoder queue
encoder_queue = Queue.Queue()
batch_indices = xrange(0, len(entry_ids), batch_size)
for batch in [entry_ids[start:start+batch_size] for start in batch_indices]:
# queue this batch
encoder_queue.put(batch)
# create db writer
writer = LmdbWriter(
dataset_dir,
stage,
total_batches=len(batch_indices),
feature_encoding=feature_encoding,
label_encoding=label_encoding)
writer.daemon = True
writer.start()
# create encoder threads
encoders = []
for _ in xrange(num_threads):
encoder = Encoder(encoder_queue, writer, extension, error_queue, force_same_shape)
encoder.daemon = True
encoder.start()
encoders.append(encoder)
# wait for all encoder threads to complete and aggregate data
feature_sum = None
processed_count = 0
sample_count = 0
feature_shape = None
label_shape = None
for encoder in encoders:
encoder.join()
# catch errors that may have occurred in reader thread
if not error_queue.empty():
while not error_queue.empty():
err = error_queue.get()
logger.error(err)
raise Exception(err)
if feature_shape is None:
feature_shape = encoder.feature_shape
logger.info('Feature shape for stage %s: %s' % (stage, repr(feature_shape)))
if label_shape is None:
label_shape = encoder.label_shape
logger.info('Label shape for stage %s: %s' % (stage, repr(label_shape)))
if force_same_shape:
if encoder.feature_shape and feature_shape != encoder.feature_shape:
raise ValueError("Feature shape mismatch (last:%s, previous:%s)"
% (repr(feature_shape), repr(encoder.feature_shape)))
if encoder.label_shape and label_shape != encoder.label_shape:
raise ValueError("Label shape mismatch (last:%s, previous:%s)"
% (repr(label_shape), repr(encoder.label_shape)))
if feature_sum is None:
feature_sum = encoder.feature_sum
elif encoder.feature_sum is not None:
feature_sum += encoder.feature_sum
processed_count += encoder.processed_count
sample_count += encoder.sample_count
# write mean file
if feature_sum is not None:
self.save_mean(feature_sum, sample_count, dataset_dir, stage)
# wait for writer thread to complete
writer.set_done()
writer.join()
if processed_count != entry_count:
# TODO: handle this more gracefully
raise ValueError('Number of processed entries (%d) does not match entry count (%d)'
% (processed_count, entry_count))
logger.info('Found %d entries for stage %s' % (sample_count, stage))
def save_mean(self, feature_sum, entry_count, dataset_dir, stage):
"""
Save mean to file
"""
data = np.around(feature_sum / entry_count).astype(np.uint8)
mean_file = os.path.join(stage, 'mean.binaryproto')
# Transform to caffe's format requirements
if data.ndim == 3:
if data.shape[0] == 3:
# channel swap
# XXX see issue #59
data = data[[2, 1, 0], ...]
elif data.ndim == 2:
# Add a channels axis
data = data[np.newaxis, :, :]
blob = caffe_pb2.BlobProto()
blob.num = 1
blob.channels, blob.height, blob.width = data.shape
blob.data.extend(data.astype(float).flat)
with open(os.path.join(dataset_dir, mean_file), 'wb') as outfile:
outfile.write(blob.SerializeToString())
logger.info('Created mean file for stage %s in %s' % (stage, mean_file))
def create_generic_db(jobs_dir, dataset_id, stage):
"""
Create a generic DB
"""
# job directory defaults to that defined in DIGITS config
if jobs_dir == 'none':
jobs_dir = digits.config.config_value('jobs_dir')
# load dataset job
dataset_dir = os.path.join(jobs_dir, dataset_id)
if not os.path.isdir(dataset_dir):
raise IOError("Dataset dir %s does not exist" % dataset_dir)
dataset = Job.load(dataset_dir)
# create instance of extension
extension_id = dataset.extension_id
extension_class = extensions.data.get_extension(extension_id)
extension = extension_class(**dataset.extension_userdata)
# encoding
feature_encoding = dataset.feature_encoding
label_encoding = dataset.label_encoding
batch_size = dataset.batch_size
num_threads = dataset.num_threads
force_same_shape = dataset.force_same_shape
# create main DB creator object and execute main method
db_creator = DbCreator()
db_creator.create_db(
extension,
stage,
dataset_dir,
batch_size,
num_threads,
feature_encoding,
label_encoding,
force_same_shape)
logger.info('Generic DB creation Done')
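# Illustrative invocation (the job ID below is a hypothetical placeholder):
#   python create_generic_db.py 20170101-000000-abcd -s val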
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='DB creation tool - DIGITS')
# Positional arguments
parser.add_argument(
'dataset',
help='Dataset Job ID')
# Optional arguments
parser.add_argument(
'-j',
'--jobs_dir',
default='none',
help='Jobs directory (default: from DIGITS config)',
)
parser.add_argument(
'-s',
'--stage',
default='train',
help='Stage (train, val, test)',
)
args = vars(parser.parse_args())
try:
create_generic_db(
args['jobs_dir'],
args['dataset'],
args['stage']
)
except Exception as e:
logger.error('%s: %s' % (type(e).__name__, e.message))
raise
| DIGITS-master | digits/tools/create_generic_db.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from collections import Counter
import os.path
import shutil
import tempfile
import Queue
import nose.tools
import numpy as np
import PIL.Image
from . import create_db
from digits import test_utils
test_utils.skipIfNotFramework('none')
class BaseTest():
"""
Provides some helpful files and utilities
"""
@classmethod
def setUpClass(cls):
cls.empty_file = tempfile.mkstemp()
cls.empty_dir = tempfile.mkdtemp()
# Create one good textfile
cls.good_file = tempfile.mkstemp()
# Create a color image
cls.color_image_file = tempfile.mkstemp(suffix='.png')
cls.numpy_image_color = np.ones((8, 10, 3), dtype='uint8')
cls.pil_image_color = PIL.Image.fromarray(cls.numpy_image_color)
cls.pil_image_color.save(cls.color_image_file[1])
# Create a grayscale image
cls.gray_image_file = tempfile.mkstemp(suffix='.png')
cls.numpy_image_gray = np.ones((8, 10), dtype='uint8')
cls.pil_image_gray = PIL.Image.fromarray(cls.numpy_image_gray)
cls.pil_image_gray.save(cls.gray_image_file[1])
cls.image_count = 0
for i in xrange(3):
for j in xrange(3):
os.write(cls.good_file[0], '%s %s\n' % (cls.color_image_file[1], i))
os.write(cls.good_file[0], '%s %s\n' % (cls.gray_image_file[1], i))
cls.image_count += 2
@classmethod
def tearDownClass(cls):
for f in cls.empty_file, cls.good_file, cls.color_image_file, cls.gray_image_file:
try:
os.close(f[0])
os.remove(f[1])
except OSError:
pass
try:
shutil.rmtree(cls.empty_dir)
except OSError:
raise
class TestFillLoadQueue(BaseTest):
def test_valid_file(self):
for shuffle in True, False:
yield self.check_valid_file, shuffle
def check_valid_file(self, shuffle):
queue = Queue.Queue()
result = create_db._fill_load_queue(self.good_file[1], queue, shuffle)
assert result == self.image_count, 'lines not added'
assert queue.qsize() == self.image_count, 'queue not full'
def test_empty_file(self):
for shuffle in True, False:
yield self.check_empty_file, shuffle
def check_empty_file(self, shuffle):
queue = Queue.Queue()
nose.tools.assert_raises(
create_db.BadInputFileError,
create_db._fill_load_queue,
self.empty_file[1], queue, shuffle)
class TestParseLine():
def test_good_lines(self):
for label, line in [
(0, '/path/image.jpg 0'),
(1, 'image.jpg 1'),
(2, 'image.jpg 2\n'),
(3, 'image.jpg 3'),
(4, 'spaces in filename.jpg 4'),
]:
yield self.check_good_line, line, label
def check_good_line(self, line, label):
c = Counter()
p, l = create_db._parse_line(line, c)
assert l == label, 'parsed label wrong'
assert c[l] == 1, 'distribution is wrong'
def test_bad_lines(self):
for line in [
'nolabel.jpg',
'non-number.jpg five',
'negative.jpg -1',
]:
yield self.check_bad_line, line
def check_bad_line(self, line):
nose.tools.assert_raises(
create_db.ParseLineError,
create_db._parse_line,
line, Counter()
)
class TestCalculateBatchSize():
def test(self):
for count, batch_size in [
(1, 1),
(50, 50),
(100, 100),
(200, 100),
]:
yield self.check, count, batch_size
def check(self, count, batch_size):
assert create_db._calculate_batch_size(count) == batch_size
class TestCalculateNumThreads():
def test(self):
for batch_size, shuffle, num in [
(1000, True, 10),
(1000, False, 1),
(100, True, 10),
(100, False, 1),
(50, True, 7),
(4, True, 2),
(1, True, 1),
]:
yield self.check, batch_size, shuffle, num
def check(self, batch_size, shuffle, num):
assert create_db._calculate_num_threads(
batch_size, shuffle) == num
class TestInitialImageSum():
def test_color(self):
s = create_db._initial_image_sum(10, 10, 3)
assert s.shape == (10, 10, 3)
assert s.dtype == 'float64'
def test_grayscale(self):
s = create_db._initial_image_sum(10, 10, 1)
assert s.shape == (10, 10)
assert s.dtype == 'float64'
class TestImageToDatum(BaseTest):
def test(self):
for compression in None, 'png', 'jpg':
yield self.check_color, compression
yield self.check_grayscale, compression
def check_color(self, compression):
d = create_db._array_to_datum(self.numpy_image_color, 1, compression)
assert d.height == self.numpy_image_color.shape[0]
assert d.width == self.numpy_image_color.shape[1]
assert d.channels == 3
assert d.encoded == bool(compression)
def check_grayscale(self, compression):
d = create_db._array_to_datum(self.numpy_image_gray, 1, compression)
assert d.height == self.numpy_image_gray.shape[0]
assert d.width == self.numpy_image_gray.shape[1]
assert d.channels == 1
assert d.encoded == bool(compression)
class TestSaveMeans():
def test(self):
for color in True, False:
d = tempfile.mkdtemp()
for filename in 'mean.jpg', 'mean.png', 'mean.npy', 'mean.binaryproto':
yield self.check, d, filename, color
shutil.rmtree(d)
def check(self, directory, filename, color):
filename = os.path.join(directory, filename)
if color:
s = np.ones((8, 10, 3), dtype='float64')
else:
s = np.ones((8, 10), dtype='float64')
create_db._save_means(s, 2, [filename])
assert os.path.exists(filename)
class BaseCreationTest(BaseTest):
def test_image_sizes(self):
for width in 8, 12:
for channels in 1, 3:
yield self.check_image_sizes, width, channels, False
def check_image_sizes(self, width, channels, shuffle):
create_db.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
width, 10, channels, self.BACKEND)
def test_no_shuffle(self):
create_db.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
10, 10, 1, self.BACKEND, shuffle=False)
def test_means(self):
mean_files = []
for suffix in 'jpg', 'npy', 'png', 'binaryproto':
mean_files.append(os.path.join(self.empty_dir, 'mean.%s' % suffix))
create_db.create_db(self.good_file[1], os.path.join(self.empty_dir, 'db'),
10, 10, 1, self.BACKEND, mean_files=mean_files)
class TestLmdbCreation(BaseCreationTest):
BACKEND = 'lmdb'
class TestHdf5Creation(BaseCreationTest):
BACKEND = 'hdf5'
def test_dset_limit(self):
db_dir = os.path.join(self.empty_dir, 'db')
create_db.create_db(self.good_file[1], db_dir,
10, 10, 1, 'hdf5', hdf5_dset_limit=10 * 10)
with open(os.path.join(db_dir, 'list.txt')) as infile:
lines = infile.readlines()
assert len(lines) == self.image_count, '%d != %d' % (len(lines), self.image_count)
| DIGITS-master | digits/tools/test_create_db.py |
| DIGITS-master | digits/tools/__init__.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import itertools
import os
import shutil
import tempfile
import mock
from nose.tools import raises, assert_raises
try:
from . import parse_s3
from digits.tools.mock_s3_walker import MockS3Walker
import_failed = False
except ImportError:
import_failed = True
from digits import test_utils
test_utils.skipIfNotFramework('none')
class TestUnescape():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_hello(self):
assert parse_s3.unescape('hello') == 'hello'
def test_space(self):
assert parse_s3.unescape('%20') == ' '
class TestValidateS3():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
cls.mock_walker = MockS3Walker()
def test_non_existent_bucket(self):
result = parse_s3.validate_s3(self.mock_walker, 'nonexistentbucket', '')
assert not result
def test_empty_bucket(self):
result = parse_s3.validate_s3(self.mock_walker, 'emptybucket', '')
assert not result
def test_valid_endpoint(self):
result = parse_s3.validate_s3(self.mock_walker, 'validbucket', '')
assert result
class TestValidateOutputFile():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
cls.tmpdir = tempfile.mkdtemp()
_handle, cls.tmpfile = tempfile.mkstemp(dir=cls.tmpdir)
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree(cls.tmpdir)
except IOError:
pass
def test_missing_file(self):
assert parse_s3.validate_output_file(None) is True, 'all new files should be valid'
def test_file(self):
assert parse_s3.validate_output_file(os.path.join(self.tmpdir, 'output.txt')) is True
@mock.patch('os.access')
def test_local_file(self, mock_access):
mock_access.return_value = True
assert parse_s3.validate_output_file('not-a-file.txt') is True, 'relative paths should be accepted'
@mock.patch('os.access')
def test_not_writeable(self, mock_access):
mock_access.return_value = False
assert parse_s3.validate_output_file(self.tmpfile) is False, 'should not succeed without write permission'
def test_existing_file(self):
assert parse_s3.validate_output_file(self.tmpfile) is False
def test_nonexistent_dir(self):
assert parse_s3.validate_output_file(
os.path.join(
os.path.abspath('not-a-dir'),
'output.txt'
)
) is False
class TestValidateInputFile():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
_handle, cls.tmpfile = tempfile.mkstemp()
os.close(_handle)
@classmethod
def tearDownClass(cls):
os.remove(cls.tmpfile)
def test_missing_file(self):
assert parse_s3.validate_input_file('not-a-file.txt') is False, 'should not pass on missing file'
@mock.patch('os.access')
def test_not_readable(self, mock_access):
mock_access.return_value = False
assert parse_s3.validate_input_file(self.tmpfile) is False, 'should not succeed without read permission'
class TestValidateRange():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_no_range(self):
assert parse_s3.validate_range(0) is True
def test_min_less(self):
assert parse_s3.validate_range(-1, min_value=0) is False
def test_min_equal(self):
assert parse_s3.validate_range(0, min_value=0) is True
def test_min_more(self):
assert parse_s3.validate_range(1, min_value=0) is True
def test_max_less(self):
assert parse_s3.validate_range(9, max_value=10) is True
def test_max_equal(self):
assert parse_s3.validate_range(10, max_value=10) is True
def test_max_more(self):
assert parse_s3.validate_range(11, max_value=10) is False
def test_allow_none_true(self):
assert parse_s3.validate_range(None, allow_none=True) is True
def test_allow_none_false(self):
assert parse_s3.validate_range(None, allow_none=False) is False
def test_string(self):
assert parse_s3.validate_range('foo') is False
@mock.patch('digits.tools.parse_s3.validate_output_file')
@mock.patch('digits.tools.parse_s3.validate_input_file')
class TestCalculatePercentages():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
@raises(AssertionError)
def test_making_0(self, mock_input, mock_output):
parse_s3.calculate_percentages(None, None, None, None, None, None, None)
def test_making_1(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
expected_outputs = [
('train_file', (100, 0, 0)),
('val_file', (0, 100, 0)),
('test_file', (0, 0, 100))
]
for supplied, expected in expected_outputs:
args = {k: None for k in ['labels_file', 'train_file', 'percent_train',
'val_file', 'percent_val', 'test_file', 'percent_test']}
args.update({supplied: ''})
output = parse_s3.calculate_percentages(**args)
            assert output == expected, 'expected output of {}, got {}'.format(expected, output)
def test_making_2(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
permutes = itertools.combinations(['train', 'val', 'test'], 2)
expected_outputs = itertools.izip(permutes, itertools.repeat((32, 68)))
for supplied, expected in expected_outputs:
args = {k: None for k in ['labels_file', 'train_file', 'percent_train',
'val_file', 'percent_val', 'test_file', 'percent_test']}
args.update({k + '_file': '' for k in supplied})
args.update({'percent_' + k: v for k, v in itertools.izip(supplied, expected)})
# Tricky line. itertools returns combinations in sorted order, always.
# The order of the returned non-zero values should always be correct.
output = [x for x in parse_s3.calculate_percentages(**args) if x != 0]
            assert output == list(expected), 'expected output of {}, got {}'.format(expected, output)
def test_making_3_all_given(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
expected = (25, 30, 45)
assert parse_s3.calculate_percentages(
labels_file='not-a-file.txt',
train_file='not-a-file.txt', percent_train=25,
val_file='not-a-file.txt', percent_val=30,
test_file='not-a-file.txt', percent_test=45
) == expected, 'Calculate percentages should return identical values of {}'.format(expected)
def test_making_3_2_given(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
expected = 45
assert parse_s3.calculate_percentages(
labels_file='not-a-file.txt',
train_file='not-a-file.txt', percent_train=25,
val_file='not-a-file.txt', percent_val=30,
test_file='not-a-file.txt', percent_test=None
)[2] == expected, 'Calculate percentages should calculate third value of {}'.format(expected)
@raises(AssertionError)
def test_making_out_of_range(self, mock_input, mock_output):
mock_input.return_value = True
mock_output.return_value = True
# should raise AssertionError because percentages not between 0-100 are invalid
parse_s3.calculate_percentages(
labels_file='not-a-file.txt',
train_file='not-a-file.txt', percent_train=-1,
val_file=None, percent_val=None,
test_file=None, percent_test=None
)
class TestParseWebListing():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_non_url(self):
for url in ['not-a-url', 'http://not-a-url', 'https://not-a-url']:
yield self.check_url_raises, url
def check_url_raises(self, url):
assert_raises(Exception, parse_s3.parse_web_listing, url)
def test_mock_url(self):
for content, dirs, files in [
# Nothing
('', [], []),
# Apache 2.2.22
(
'<head></head><body><table>\n \
<tr><td><a href="/home/">Parent</a></td></tr>\n \
<tr><td><a href="cat1/">cat1/</a></td><td>01-Jan-2015 12:34</td><td> - </td></tr>\n \
<tr><td><a href="cat2/">cat2/</a></td><td>02-Feb-2015 23:45</td><td> - </td></tr>\n \
<tr><td><a href="cat.jpg">cat.jpg</a></td><td>03-Mar-2015 1:23</td><td> 1 </td></tr>\n \
</table</body>\n',
['cat1/', 'cat2/'],
['cat.jpg'],
),
# Apache 2.4.7
(
'<html><head></head><body><table>\n \
<tr><td><a href="/home/">Parent</a></td></tr>\n \
<tr><td><a href="dog/">dog/</a></td><td>01-01-2015 12:34</td><td> - </td></tr>\n \
<tr><td><a href="dog1.jpeg">dog1.jpeg</a></td><td>02-02-2015 23:45</td><td> 1 </td></tr>\n \
<tr><td><a href="dog2.png">dog2.png</a></td><td>03-03-2015 1:23</td><td> 2 </td></tr>\n \
</table</body></html>\n',
['dog/'],
['dog1.jpeg', 'dog2.png'],
),
# Nginx
(
'<html><head></head><body>\n \
<a href="bird.jpg">bird.jpg</a> 01-Jan-1999 01:23 1\n \
<a href="birds/">birds/</a> 02-Feb-1999 12:34 -',
['birds/'],
['bird.jpg'],
),
]:
with mock.patch('digits.tools.parse_s3.requests') as mock_requests:
response = mock.Mock()
response.status_code = mock_requests.codes.ok
response.content = content
mock_requests.get.return_value = response
yield self.check_listing, (dirs, files)
def check_listing(self, rc):
assert parse_s3.parse_web_listing('any_url') == rc
class TestSplitIndices():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_indices(self):
for size in [5, 22, 32]:
for percent_b in range(0, 100, 31):
for percent_c in range(0, 100 - percent_b, 41):
yield self.check_split, size, percent_b, percent_c
def check_split(self, size, pct_b, pct_c):
ideala = size * float(100 - pct_b - pct_c) / 100.0
idealb = size * float(100 - pct_c) / 100.0
idxa, idxb = parse_s3.three_way_split_indices(size, pct_b, pct_c)
assert abs(ideala - idxa) <= 2, 'split should be close to {}, is {}'.format(ideala, idxa)
assert abs(idealb - idxb) <= 2, 'split should be close to {}, is {}'.format(idealb, idxb)
class TestParseS3():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import parse_s3, most likely cause is Boto not installed')
def test_all_train(self):
classes = range(10)
mock_walker = MockS3Walker(classes)
try:
tmpdir = tempfile.mkdtemp()
labels_file = tempfile.mkstemp(dir=tmpdir)
train_file = tempfile.mkstemp(dir=tmpdir)
parse_s3.parse_s3(mock_walker, 'validbucket', 'train/', labels_file[1],
percent_train=100, train_file=train_file[1], percent_val=0, percent_test=0)
with open(labels_file[1]) as infile:
parsed_classes = [line.strip() for line in infile]
expected_classes = [str(i) for i in classes]
            assert parsed_classes == expected_classes, '%s != %s' % (parsed_classes, expected_classes)
finally:
shutil.rmtree(tmpdir)
def test_neg_all_train(self):
try:
classes = range(1)
mock_walker = MockS3Walker(classes)
tmpdir = tempfile.mkdtemp()
labels_file = tempfile.mkstemp(dir=tmpdir)
train_file = tempfile.mkstemp(dir=tmpdir)
assert not parse_s3.parse_s3(mock_walker, 'invalidbucket', 'train/', labels_file[1], percent_train=100,
train_file=train_file[1], percent_val=0, percent_test=0)
finally:
shutil.rmtree(tmpdir)
| DIGITS-master | digits/tools/test_parse_s3.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import ConfigParser
from s3_walker import S3Walker
config = ConfigParser.RawConfigParser()
config.read('upload_config.cfg')
endpoint = config.get('S3 Config', 'endpoint')
accesskey = config.get('S3 Config', 'accesskey')
secretkey = config.get('S3 Config', 'secretkey')
bucket_name = config.get('S3 Config', 'bucket')
path_prefix = config.get('S3 Config', 'prefix')
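# Illustrative sketch (not part of the original script) of what upload_config.cfg is
# expected to contain, based on the config.get() calls above; all values below are
# placeholders. S3Walker expects the endpoint to include the http:// or https:// scheme.
#
#   [S3 Config]
#   endpoint = http://127.0.0.1:9000
#   accesskey = YOUR_ACCESS_KEY
#   secretkey = YOUR_SECRET_KEY
#   bucket = mybucket
#   prefix = mnist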
if not path_prefix.endswith('/'):
    path_prefix += '/'
# mnist
# - train
# -- 0 ... 9
# --- XXX.png
try:
mnist_folder = sys.argv[1]
except IndexError:
print('mnist folder should be passed')
sys.exit(1)
walker = S3Walker(endpoint, accesskey, secretkey)
walker.connect()
# Create bucket
print('Creating bucket')
walker.create_bucket(bucket_name)
mnist_train_folder = os.path.join(mnist_folder, 'train')
digits = os.listdir(mnist_train_folder)
for digit in digits:
digit_folder = os.path.join(mnist_train_folder, digit)
if os.path.isfile(digit_folder):
continue
files = os.listdir(digit_folder)
for f in files:
if not f.endswith('.png'):
continue
file = os.path.join(digit_folder, f)
key = path_prefix + file[file.index('train'):]
walker.put(bucket_name, key, file)
print('uploaded ' + file + ' ==> ' + key)
| DIGITS-master | digits/tools/upload_s3_data.py |
#!/usr/bin/env python2
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import base64
import h5py
import logging
import numpy as np
import PIL.Image
import os
import sys
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Add path for DIGITS package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import digits.config # noqa
from digits import utils, log # noqa
from digits.inference.errors import InferenceError # noqa
from digits.job import Job # noqa
from digits.utils.lmdbreader import DbReader # noqa
# Import digits.config before caffe to set the path
import caffe_pb2 # noqa
logger = logging.getLogger('digits.tools.inference')
"""
Perform inference on a list of images using the specified model
"""
def infer(input_list,
output_dir,
jobs_dir,
model_id,
epoch,
batch_size,
layers,
gpu,
input_is_db,
resize):
"""
Perform inference on a list of images using the specified model
"""
# job directory defaults to that defined in DIGITS config
if jobs_dir == 'none':
jobs_dir = digits.config.config_value('jobs_dir')
# load model job
model_dir = os.path.join(jobs_dir, model_id)
assert os.path.isdir(model_dir), "Model dir %s does not exist" % model_dir
model = Job.load(model_dir)
# load dataset job
dataset_dir = os.path.join(jobs_dir, model.dataset_id)
assert os.path.isdir(dataset_dir), "Dataset dir %s does not exist" % dataset_dir
dataset = Job.load(dataset_dir)
for task in model.tasks:
task.dataset = dataset
# retrieve snapshot file
task = model.train_task()
snapshot_filename = None
epoch = float(epoch)
if epoch == -1 and len(task.snapshots):
# use last epoch
epoch = task.snapshots[-1][1]
snapshot_filename = task.snapshots[-1][0]
else:
for f, e in task.snapshots:
if e == epoch:
snapshot_filename = f
break
if not snapshot_filename:
raise InferenceError("Unable to find snapshot for epoch=%s" % repr(epoch))
# retrieve image dimensions and resize mode
image_dims = dataset.get_feature_dims()
height = image_dims[0]
width = image_dims[1]
channels = image_dims[2]
resize_mode = dataset.resize_mode if hasattr(dataset, 'resize_mode') else 'squash'
n_input_samples = 0 # number of samples we were able to load
input_ids = [] # indices of samples within file list
input_data = [] # sample data
if input_is_db:
# load images from database
reader = DbReader(input_list)
for key, value in reader.entries():
datum = caffe_pb2.Datum()
datum.ParseFromString(value)
if datum.encoded:
s = StringIO()
s.write(datum.data)
s.seek(0)
img = PIL.Image.open(s)
img = np.array(img)
else:
import caffe.io
arr = caffe.io.datum_to_array(datum)
# CHW -> HWC
arr = arr.transpose((1, 2, 0))
if arr.shape[2] == 1:
# HWC -> HW
arr = arr[:, :, 0]
elif arr.shape[2] == 3:
# BGR -> RGB
# XXX see issue #59
arr = arr[:, :, [2, 1, 0]]
img = arr
input_ids.append(key)
input_data.append(img)
n_input_samples = n_input_samples + 1
else:
# load paths from file
paths = None
with open(input_list) as infile:
paths = infile.readlines()
# load and resize images
for idx, path in enumerate(paths):
path = path.strip()
try:
image = utils.image.load_image(path.strip())
if resize:
image = utils.image.resize_image(
image,
height,
width,
channels=channels,
resize_mode=resize_mode)
else:
image = utils.image.image_to_array(
image,
channels=channels)
input_ids.append(idx)
input_data.append(image)
n_input_samples = n_input_samples + 1
except utils.errors.LoadImageError as e:
print e
# perform inference
visualizations = None
if n_input_samples == 0:
raise InferenceError("Unable to load any image from file '%s'" % repr(input_list))
elif n_input_samples == 1:
# single image inference
outputs, visualizations = model.train_task().infer_one(
input_data[0],
snapshot_epoch=epoch,
layers=layers,
gpu=gpu,
resize=resize)
else:
if layers != 'none':
raise InferenceError("Layer visualization is not supported for multiple inference")
outputs = model.train_task().infer_many(
input_data,
snapshot_epoch=epoch,
gpu=gpu,
resize=resize)
# write to hdf5 file
db_path = os.path.join(output_dir, 'inference.hdf5')
db = h5py.File(db_path, 'w')
# write input paths and images to database
db.create_dataset("input_ids", data=input_ids)
db.create_dataset("input_data", data=input_data)
# write outputs to database
db_outputs = db.create_group("outputs")
for output_id, output_name in enumerate(outputs.keys()):
output_data = outputs[output_name]
output_key = base64.urlsafe_b64encode(str(output_name))
dset = db_outputs.create_dataset(output_key, data=output_data)
# add ID attribute so outputs can be sorted in
# the order they appear in here
dset.attrs['id'] = output_id
# write visualization data
if visualizations is not None and len(visualizations) > 0:
db_layers = db.create_group("layers")
for idx, layer in enumerate(visualizations):
vis = layer['vis'] if layer['vis'] is not None else np.empty(0)
dset = db_layers.create_dataset(str(idx), data=vis)
dset.attrs['name'] = layer['name']
dset.attrs['vis_type'] = layer['vis_type']
if 'param_count' in layer:
dset.attrs['param_count'] = layer['param_count']
if 'layer_type' in layer:
dset.attrs['layer_type'] = layer['layer_type']
dset.attrs['shape'] = layer['data_stats']['shape']
dset.attrs['mean'] = layer['data_stats']['mean']
dset.attrs['stddev'] = layer['data_stats']['stddev']
dset.attrs['histogram_y'] = layer['data_stats']['histogram'][0]
dset.attrs['histogram_x'] = layer['data_stats']['histogram'][1]
dset.attrs['histogram_ticks'] = layer['data_stats']['histogram'][2]
db.close()
logger.info('Saved data to %s', db_path)
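# Illustrative sketch (not part of this tool) of reading the file written above with
# h5py; dataset names under 'outputs' are urlsafe-base64-encoded layer names, mirroring
# how infer() writes them:
#
#   import base64
#   import h5py
#   with h5py.File('inference.hdf5', 'r') as db:
#       print(db['input_ids'][...])
#       for name, dset in db['outputs'].items():
#           print(base64.urlsafe_b64decode(str(name)), dset.shape, dset.attrs['id'])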
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Inference tool - DIGITS')
# Positional arguments
parser.add_argument(
'input_list',
help='An input file containing paths to input data')
parser.add_argument(
'output_dir',
help='Directory to write outputs to')
parser.add_argument(
'model',
help='Model ID')
# Optional arguments
parser.add_argument(
'-e',
'--epoch',
default='-1',
help="Epoch (-1 for last)"
)
parser.add_argument(
'-j',
'--jobs_dir',
default='none',
help='Jobs directory (default: from DIGITS config)',
)
parser.add_argument(
'-l',
'--layers',
default='none',
help='Which layers to write to output ("none" [default] or "all")',
)
parser.add_argument(
'-b',
'--batch_size',
type=int,
default=1,
help='Batch size',
)
parser.add_argument(
'-g',
'--gpu',
type=int,
default=None,
help='GPU to use (as in nvidia-smi output, default: None)',
)
parser.add_argument(
'--db',
action='store_true',
help='Input file is a database',
)
parser.add_argument(
'--resize',
dest='resize',
action='store_true')
parser.add_argument(
'--no-resize',
dest='resize',
action='store_false')
parser.set_defaults(resize=True)
args = vars(parser.parse_args())
try:
infer(
args['input_list'],
args['output_dir'],
args['jobs_dir'],
args['model'],
args['epoch'],
args['batch_size'],
args['layers'],
args['gpu'],
args['db'],
args['resize']
)
except Exception as e:
logger.error('%s: %s' % (type(e).__name__, e.message))
raise
| DIGITS-master | digits/tools/inference.py |
from s3_walker import S3Walker
class MockS3Walker(S3Walker):
def __init__(self, classes=range(10)): # endpoint, accesskey, secretkey):
self.s3_dict = {}
self.s3_dict['emptybucket'] = []
data = []
for prefix in ['train/', 'test/']:
for i in classes:
for j in range(1, 10):
data.append(prefix+str(i)+'/image'+str(j)+'.png')
self.s3_dict['validbucket'] = data
invalid_data = []
for i in classes:
for j in range(1, 10):
invalid_data.append('train/'+str(i)+'/image'+str(j)+'.png')
self.s3_dict['invalidbucket'] = invalid_data
# not needed #
def connect(self):
pass
# not needed #
def head(self, bucket, key):
pass
# not needed #
def get(self, bucket, key, filename):
pass
# not needed #
def get_as_string(self, bucket, key):
pass
# not needed #
def get_meta(self, bucket, key, meta):
pass
# not needed #
def put(self, bucket, key, filename):
pass
# not needed #
def listbucket(self, bucket, prefix='', max_size=1000, marker='', with_prefix=False):
if len(self.s3_dict[bucket]) == 0:
raise Exception('No keys in specified bucket')
prefix_filtered_list = [k for k in self.s3_dict[bucket] if k.startswith(prefix)]
if not with_prefix:
return prefix_filtered_list
result = []
for full_name in prefix_filtered_list: # train/1/image1.zip
file_name = full_name[len(prefix):] # 1/image1.zip
label = file_name.split('/')[0] # 1
candidate_result = prefix + label # train/1
if candidate_result not in result:
result.append(candidate_result)
return result
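# Illustrative usage sketch (mirroring how parse_s3 drives the real S3Walker).
# With the default classes=range(10):
#
#   walker = MockS3Walker()
#   walker.listbucket('validbucket', prefix='train/', with_prefix=True)
#   # -> ['train/0', 'train/1', ..., 'train/9']
#   walker.listbucket('emptybucket')  # raises Exception('No keys in specified bucket')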
| DIGITS-master | digits/tools/mock_s3_walker.py |
#!/usr/bin/env python2
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
import argparse
from collections import Counter
import logging
import operator
import os.path
import sys
import time
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import lmdb
import numpy as np
import PIL.Image
# Add path for DIGITS package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import digits.config # noqa
from digits import log # noqa
# Import digits.config first to set path to Caffe
import caffe.io # noqa
import caffe_pb2 # noqa
logger = logging.getLogger('digits.tools.analyze_db')
np.set_printoptions(suppress=True, precision=3)
class DbReader(object):
"""
Reads a database
"""
def __init__(self, location):
"""
Arguments:
location -- where is the database
"""
self._db = lmdb.open(location,
                             map_size=1024**3,  # 1GB
readonly=True, lock=False)
with self._db.begin() as txn:
self.total_entries = txn.stat()['entries']
def entries(self):
"""
Generator returning all entries in the DB
"""
with self._db.begin() as txn:
cursor = txn.cursor()
for item in cursor:
yield item
def validate_database_path(database):
"""
Returns a valid database path
Throws ValueErrors
"""
p = os.path.abspath(database)
if not os.path.exists(p):
raise ValueError('No such file or directory')
if os.path.isfile(p):
p = os.path.dirname(p)
if not os.path.isdir(p):
raise ValueError('Not a directory')
return p
def print_datum(datum):
"""
Utility for printing a datum
"""
logger.debug('\tWxHxC: %sx%sx%s' % (datum.width, datum.height, datum.channels))
logger.debug('\tLabel: %s' % (datum.label if datum.HasField('label') else 'None',))
logger.debug('\tEncoded: %s' % datum.encoded)
def analyze_db(database,
only_count=False,
force_same_shape=False,
print_data=False,
):
"""
Looks at the data in a prebuilt database and verifies it
Also prints out some information about it
Returns True if all entries are valid
Arguments:
database -- path to the database
Keyword arguments:
only_count -- only count the entries, don't inspect them
force_same_shape -- throw an error if not all images have the same shape
print_data -- print the array for each datum
"""
start_time = time.time()
# Open database
try:
database = validate_database_path(database)
except ValueError as e:
logger.error(e.message)
return False
reader = DbReader(database)
logger.info('Total entries: %s' % reader.total_entries)
unique_shapes = Counter()
count = 0
update_time = None
for key, value in reader.entries():
datum = caffe_pb2.Datum()
datum.ParseFromString(value)
if print_data:
array = caffe.io.datum_to_array(datum)
print '>>> Datum #%d (shape=%s)' % (count, array.shape)
print array
if (not datum.HasField('height') or datum.height == 0 or
not datum.HasField('width') or datum.width == 0):
if datum.encoded:
if force_same_shape or not len(unique_shapes.keys()):
# Decode datum to learn the shape
s = StringIO()
s.write(datum.data)
s.seek(0)
img = PIL.Image.open(s)
width, height = img.size
channels = len(img.split())
else:
# We've already decoded one image, don't bother reading the rest
width = '?'
height = '?'
channels = '?'
else:
errstr = 'Shape is not set and datum is not encoded'
logger.error(errstr)
raise ValueError(errstr)
else:
width, height, channels = datum.width, datum.height, datum.channels
shape = '%sx%sx%s' % (width, height, channels)
unique_shapes[shape] += 1
if force_same_shape and len(unique_shapes.keys()) > 1:
logger.error("Images with different shapes found: %s and %s" % tuple(unique_shapes.keys()))
return False
count += 1
# Send update every 2 seconds
if update_time is None or (time.time() - update_time) > 2:
logger.debug('>>> Key %s' % key)
print_datum(datum)
logger.debug('Progress: %s/%s' % (count, reader.total_entries))
update_time = time.time()
if only_count:
# quit after reading one
count = reader.total_entries
logger.info('Assuming all entries have same shape ...')
unique_shapes[unique_shapes.keys()[0]] = count
break
if count != reader.total_entries:
logger.warning('LMDB reported %s total entries, but only read %s' % (reader.total_entries, count))
for key, val in sorted(unique_shapes.items(), key=operator.itemgetter(1), reverse=True):
logger.info('%s entries found with shape %s (WxHxC)' % (val, key))
logger.info('Completed in %s seconds.' % (time.time() - start_time,))
return True
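# Example invocations (illustrative paths only):
#
#   ./analyze_db.py /path/to/train_db
#   ./analyze_db.py /path/to/train_db --only-count
#   ./analyze_db.py /path/to/train_db --force-same-shape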
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Analyze-Db tool - DIGITS')
# Positional arguments
parser.add_argument('database',
help='Path to the database')
# Optional arguments
parser.add_argument('--only-count',
action="store_true",
help="Only print the number of entries, don't analyze the data")
parser.add_argument('--force-same-shape',
action="store_true",
help='Throw an error if not all entries have the same shape')
parser.add_argument('--print-data',
action="store_true",
help='Print the array for each datum (best used with --only-count)')
args = vars(parser.parse_args())
if analyze_db(args['database'],
only_count=args['only_count'],
force_same_shape=args['force_same_shape'],
print_data=args['print_data'],
):
sys.exit(0)
else:
sys.exit(1)
| DIGITS-master | digits/tools/analyze_db.py |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import shutil
import tempfile
import unittest
from . import create_generic_db
from digits import extensions
from digits import test_utils
from digits.utils import constants
test_utils.skipIfNotFramework('none')
class BaseTest(object):
FEATURE_ENCODING = "png"
LABEL_ENCODING = "none"
BATCH_SIZE = 256
NUM_THREADS = 2
"""
Provides some helpful files and utilities
"""
@classmethod
def setUpClass(cls):
if extensions.data.get_extension(cls.EXTENSION_ID) is None:
raise unittest.SkipTest('Extension "%s" is not installed' % cls.EXTENSION_ID)
cls.dataset_dir = tempfile.mkdtemp()
cls.extension_class = extensions.data.get_extension(cls.EXTENSION_ID)
cls.extension = cls.extension_class(**cls.EXTENSION_PARAMS)
@classmethod
def tearDownClass(cls):
try:
shutil.rmtree(cls.dataset_dir)
except OSError:
raise
class BaseTestGradientsExtension(BaseTest):
"""
Create databases for the gradient extension
"""
EXTENSION_ID = "image-gradients"
EXTENSION_PARAMS = {
"train_image_count": 10000,
"val_image_count": 50,
"test_image_count": 10,
"image_width": 256,
"image_height": 128
}
FORCE_SAME_SHAPE = True
def create_db(self, stage):
# create main DB creator object and execute main method
db_creator = create_generic_db.DbCreator()
db_creator.create_db(
self.extension,
stage,
self.dataset_dir,
self.BATCH_SIZE,
self.NUM_THREADS,
self.FEATURE_ENCODING,
self.LABEL_ENCODING,
force_same_shape=self.FORCE_SAME_SHAPE)
def test_create_stages(self):
for stage in (constants.TRAIN_DB, constants.VAL_DB, constants.TEST_DB):
yield self.create_db, stage
class TestGradientsExtension(BaseTestGradientsExtension):
FORCE_SAME_SHAPE = True
class TestGradientsExtensionDontForceSameShape(BaseTestGradientsExtension):
FORCE_SAME_SHAPE = False
| DIGITS-master | digits/tools/test_create_generic_db.py |
#!/usr/bin/env python2
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import logging
import os
import random
import requests
import re
import sys
import time
import urllib
# Add path for DIGITS package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import digits.config # noqa
from digits import utils, log # noqa
logger = logging.getLogger('digits.tools.parse_folder')
def unescape(s):
return urllib.unquote(s)
def validate_folder(folder):
if utils.is_url(folder):
try:
r = requests.head(folder, timeout=utils.HTTP_TIMEOUT)
if r.status_code not in [requests.codes.ok, requests.codes.moved, requests.codes.found]:
logger.error('"%s" returned status_code %s' % (folder, r.status_code))
return False
except Exception as e:
logger.error('%s: %s' % (type(e).__name__, e))
return False
return True
if not os.path.exists(folder):
logger.error('folder "%s" does not exist' % folder)
return False
if not os.path.isdir(folder):
logger.error('"%s" is not a directory' % folder)
return False
if not os.access(folder, os.R_OK):
logger.error('you do not have read access to folder "%s"' % folder)
return False
return True
def validate_output_file(filename):
if filename is None:
return True
if os.path.exists(filename):
logger.error('output file "%s" already exists!' % filename)
return False
output_dir = os.path.dirname(filename)
if not output_dir:
output_dir = '.'
if not os.path.exists(output_dir):
logger.error('output directory "%s" does not exist!' % output_dir)
return False
if not os.access(output_dir, os.W_OK):
logger.error('you do not have write access to output directory "%s"!' % output_dir)
return False
return True
def validate_input_file(filename):
if not os.path.exists(filename) or not os.path.isfile(filename):
logger.error('input file "%s" does not exist!' % filename)
return False
if not os.access(filename, os.R_OK):
logger.error('you do not have read access to "%s"!' % filename)
return False
return True
def validate_range(number, min_value=None, max_value=None, allow_none=False):
if number is None:
if allow_none:
return True
else:
logger.error('invalid value %s' % number)
return False
try:
float(number)
except ValueError:
logger.error('invalid value %s' % number)
return False
if min_value is not None and number < min_value:
logger.error('invalid value %s' % number)
return False
if max_value is not None and number > max_value:
logger.error('invalid value %s' % number)
return False
return True
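# Illustrative behaviour of the check above (values mirror the analogous unit tests):
#
#   validate_range(0, min_value=0)            # -> True
#   validate_range(-1, min_value=0)           # -> False (logs an error)
#   validate_range(10, max_value=10)          # -> True
#   validate_range(None, allow_none=True)     # -> True
#   validate_range('foo')                     # -> False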
def calculate_percentages(labels_file,
train_file, percent_train,
val_file, percent_val,
test_file, percent_test,
**kwargs):
"""
Returns (percent_train, percent_val, percent_test)
Throws exception on errors
"""
# reject any percentages not between 0-100
assert all(x is None or 0 <= x <= 100
for x in [percent_train, percent_val, percent_test]), \
'all percentages must be 0-100 inclusive or not specified'
# return values
pt = None
pv = None
ps = None
# making these sets
mt = False
mv = False
ms = False
if train_file is not None:
pt = percent_train
mt = True
if val_file is not None:
pv = percent_val
mv = True
if test_file is not None:
ps = percent_test
ms = True
making = sum([mt, mv, ms])
assert making > 0, 'must specify at least one of train_file, val_file and test_file'
if train_file is not None:
assert validate_output_file(labels_file)
else:
assert validate_input_file(labels_file)
if making == 1:
if mt:
return (100, 0, 0)
elif mv:
return (0, 100, 0)
else:
return (0, 0, 100)
elif making == 2:
if mt and mv:
assert not (pt is None and pv is None), 'must give percent_train or percent_val'
if pt is not None and pv is not None:
assert (pt + pv) == 100, 'percentages do not sum to 100'
return (pt, pv, 0)
elif pt is not None:
return (pt, 100 - pt, 0)
else:
return (100 - pv, pv, 0)
elif mt and ms:
assert not (pt is None and ps is None), 'must give percent_train or percent_test'
if pt is not None and ps is not None:
assert (pt + ps) == 100, 'percentages do not sum to 100'
return (pt, 0, ps)
elif pt is not None:
return (pt, 0, 100 - pt)
else:
return (100 - ps, 0, ps)
elif mv and ms:
assert not (pv is None and ps is None), 'must give percent_val or percent_test'
if pv is not None and ps is not None:
assert (pv + ps) == 100, 'percentages do not sum to 100'
return (0, pv, ps)
elif pv is not None:
return (0, pv, 100 - pv)
else:
return (0, 100 - ps, ps)
elif making == 3:
specified = sum([pt is not None, pv is not None, ps is not None])
assert specified >= 2, 'must specify two of percent_train, percent_val, and percent_test'
if specified == 3:
assert (pt + pv + ps) == 100, 'percentages do not sum to 100'
return (pt, pv, ps)
elif specified == 2:
if pt is None:
assert (pv + ps) <= 100, 'percentages cannot exceed 100'
return (100 - (pv + ps), pv, ps)
elif pv is None:
assert (pt + ps) <= 100, 'percentages cannot exceed 100'
return (pt, 100 - (pt + ps), ps)
elif ps is None:
assert (pt + pv) <= 100, 'percentages cannot exceed 100'
return (pt, pv, 100 - (pt + pv))
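# Illustrative results (file names are placeholders, assuming they pass the
# validate_* checks above):
#
#   calculate_percentages(labels_file='labels.txt', train_file='train.txt',
#                         percent_train=None, val_file=None, percent_val=None,
#                         test_file=None, percent_test=None)
#   # -> (100, 0, 0)   only one output file requested, so it gets everything
#
#   calculate_percentages(labels_file='labels.txt', train_file='train.txt',
#                         percent_train=25, val_file='val.txt', percent_val=30,
#                         test_file='test.txt', percent_test=None)
#   # -> (25, 30, 45)  the missing percentage is inferred so the total is 100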
def parse_web_listing(url):
"""Utility for parse_folder()
Parses an autoindexed folder into directories and files
Returns (dirs, files)
"""
dirs = []
files = []
r = requests.get(url, timeout=3.05)
if r.status_code != requests.codes.ok:
raise Exception('HTTP Status Code %s' % r.status_code)
for line in r.content.split('\n'):
line = line.strip()
# Matches nginx and apache's autoindex formats
match = re.match(
r'^.*\<a.+href\=[\'\"]([^\'\"]+)[\'\"].*\>.*(\w{1,4}-\w{1,4}-\w{1,4})', line, flags=re.IGNORECASE)
if match:
if match.group(1).endswith('/'):
dirs.append(match.group(1))
elif match.group(1).lower().endswith(utils.image.SUPPORTED_EXTENSIONS):
files.append(match.group(1))
return (dirs, files)
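# Illustrative sketch of what the regular expression above extracts from one line of an
# Apache autoindex page (example line adapted from the parser unit tests):
#
#   '<tr><td><a href="cat1/">cat1/</a></td><td>01-Jan-2015 12:34</td><td> - </td></tr>'
#   # group(1) == 'cat1/', which ends with '/' -> appended to dirs
#   # the "Parent" row has no date column, so the pattern does not match and it is skipped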
def web_listing_all_files(url, count=0, max_count=None):
"""Utility for parse_folder()
Gets all files from a url by parsing the directory and all subdirectories looking for image files
Returns (urls, count)
(recursive)
"""
urls = []
dirs, files = parse_web_listing(url)
for f in files:
urls.append(url + f)
count += 1
if max_count is not None and count >= max_count:
logger.warning('Reached maximum limit for this category')
return urls, count
for d in dirs:
new_urls, count = web_listing_all_files(url + d, count, max_count)
urls += new_urls
if max_count is not None and count >= max_count:
break
return urls, count
def three_way_split_indices(size, pct_b, pct_c):
"""
Utility for splitting an array
Returns (a, b) where a and b are indices for splitting the array into 3 pieces
Arguments:
size -- the size of the array
pct_b -- the percent of the array that should be used for group b
pct_c -- the percent of the array that should be used for group c
"""
assert 0 <= pct_b <= 100
assert 0 <= pct_c <= 100
pct_a = 100 - (pct_b + pct_c)
assert 0 <= pct_a <= 100
if pct_a >= 100:
return size, size
elif pct_b >= 100:
return 0, size
elif pct_c >= 100:
return 0, 0
else:
a = int(round(float(size) * pct_a / 100))
if pct_a and not a:
a = 1
b = int(round(float(size) * pct_b / 100))
if a + b > size:
b = size - a
if pct_b and not b:
if a > 1:
a -= 1
b = 1
elif a != size:
b = 1
c = size - (a + b)
if pct_c and not c:
if b > 1:
b -= 1
c = 1
elif a > 1:
a -= 1
c = 1
assert a + b + c == size
return a, a + b
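# Illustrative example (splitting a list the way parse_folder() does below):
#
#   a, b = three_way_split_indices(10, 30, 20)   # -> (5, 8)
#   # lines[:a] -> group a (50%), lines[a:b] -> group b (30%), lines[b:] -> group c (20%)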
def parse_folder(folder, labels_file,
train_file=None, percent_train=None,
val_file=None, percent_val=None,
test_file=None, percent_test=None,
min_per_category=2,
max_per_category=None,
):
"""
Parses a folder of images into three textfiles
Returns True on success
Arguments:
folder -- a folder containing folders of images (can be a filesystem path or a url)
labels_file -- file for labels
Keyword Arguments:
train_file -- output file for training images
    percent_train -- percentage of images to use in the training set
val_file -- output file for validation images
percent_val -- percentage of images to use in the validation set
test_file -- output file for test images
percent_test -- percentage of images to use in the test set
min_per_category -- minimum number of images per category
max_per_category -- maximum number of images per category
"""
create_labels = (percent_train > 0)
labels = []
# Read the labels from labels_file
if not create_labels:
with open(labels_file) as infile:
for line in infile:
line = line.strip()
if line:
labels.append(line)
# Verify that at least two category folders exist
folder_is_url = utils.is_url(folder)
if folder_is_url:
if not folder.endswith('/'):
folder += '/'
subdirs, _ = parse_web_listing(folder)
else:
if os.path.exists(folder) and os.path.isdir(folder):
subdirs = []
for filename in os.listdir(folder):
subdir = os.path.join(folder, filename)
if os.path.isdir(subdir):
subdirs.append(subdir)
else:
logger.error('folder does not exist')
return False
subdirs.sort()
if len(subdirs) < 2:
logger.error('folder must contain at least two subdirectories')
return False
# Parse the folder
train_count = 0
val_count = 0
test_count = 0
if percent_train:
train_outfile = open(train_file, 'w')
if percent_val:
val_outfile = open(val_file, 'w')
if percent_test:
test_outfile = open(test_file, 'w')
subdir_index = 0
label_index = 0
for subdir in subdirs:
# Use the directory name as the label
label_name = subdir
if folder_is_url:
label_name = unescape(label_name)
else:
label_name = os.path.basename(label_name)
label_name = label_name.replace('_', ' ')
if label_name.endswith('/'):
# Remove trailing slash
label_name = label_name[0:-1]
if create_labels:
labels.append(label_name)
label_index = len(labels) - 1
else:
found = False
for i, l in enumerate(labels):
if label_name == l:
found = True
label_index = i
break
if not found:
logger.warning('Category "%s" not found in labels_file. Skipping.' % label_name)
continue
logger.debug('Category - %s' % label_name)
lines = []
# Read all images in the folder
if folder_is_url:
urls, _ = web_listing_all_files(folder + subdir, max_count=max_per_category)
for url in urls:
lines.append('%s %d' % (url, label_index))
else:
for dirpath, dirnames, filenames in os.walk(os.path.join(folder, subdir), followlinks=True):
for filename in filenames:
if filename.lower().endswith(utils.image.SUPPORTED_EXTENSIONS):
lines.append('%s %d' % (os.path.join(folder, subdir, dirpath, filename), label_index))
if max_per_category is not None and len(lines) >= max_per_category:
break
if max_per_category is not None and len(lines) >= max_per_category:
logger.warning('Reached maximum limit for this category')
break
# Split up the lines
train_lines = []
val_lines = []
test_lines = []
required_categories = 0
if percent_train > 0:
required_categories += 1
if percent_val > 0:
required_categories += 1
if percent_test > 0:
required_categories += 1
        if not lines or len(lines) < required_categories or len(lines) < min_per_category:
            logger.warning('Not enough images for this category')
            if create_labels:
                # only remove the label that was just appended for this skipped category
                labels.pop()
else:
random.shuffle(lines)
a, b = three_way_split_indices(len(lines), percent_val, percent_test)
train_lines = lines[:a]
val_lines = lines[a:b]
test_lines = lines[b:]
if train_lines:
train_outfile.write('\n'.join(train_lines) + '\n')
train_count += len(train_lines)
if val_lines:
val_outfile.write('\n'.join(val_lines) + '\n')
val_count += len(val_lines)
if test_lines:
test_outfile.write('\n'.join(test_lines) + '\n')
test_count += len(test_lines)
subdir_index += 1
logger.debug('Progress: %0.2f' % (float(subdir_index) / len(subdirs)))
if percent_train:
train_outfile.close()
if percent_val:
val_outfile.close()
if percent_test:
test_outfile.close()
if create_labels:
if len(labels) < 2:
logger.error('Did not find two valid categories')
return False
else:
with open(labels_file, 'w') as labels_outfile:
labels_outfile.write('\n'.join(labels) + '\n')
logger.info('Found %d images in %d categories.' % (train_count + val_count + test_count, len(labels)))
logger.info('Selected %d for training.' % train_count)
logger.info('Selected %d for validation.' % val_count)
logger.info('Selected %d for testing.' % test_count)
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse-Folder tool - DIGITS')
# Positional arguments
parser.add_argument(
'folder',
help='A filesystem path or url to the folder of images'
)
parser.add_argument(
'labels_file',
help=('The file containing labels. If train_file is set, this file '
'will be generated (output). Otherwise, this file will be read (input).')
)
# Optional arguments
parser.add_argument(
'-t', '--train_file',
help='The output file for training images'
)
parser.add_argument(
'-T', '--percent_train', type=float,
help='Percent of images used for the training set (constant across all categories)'
)
parser.add_argument(
'-v', '--val_file',
help='The output file for validation images'
)
parser.add_argument(
'-V', '--percent_val', type=float,
help='Percent of images used for the validation set (constant across all categories)'
)
parser.add_argument(
'-s', '--test_file',
help='The output file for test images'
)
parser.add_argument(
'-S', '--percent_test', type=float,
help='Percent of images used for the test set (constant across all categories)'
)
parser.add_argument(
        '--min', type=int, metavar='MIN_PER_CATEGORY', default=2,
        help=("What is the minimum allowable number of images per category? "
              "(categories which don't meet this criterion will be ignored) [default=2]")
)
parser.add_argument(
'--max', type=int, metavar='MAX_PER_CATEGORY',
help=("What is the maximum limit of images per category? "
"(categories which exceed this limit will be trimmed down) [default=None]")
)
args = vars(parser.parse_args())
for valid in [
validate_folder(args['folder']),
validate_range(args['percent_train'],
min_value=0, max_value=100, allow_none=True),
validate_output_file(args['train_file']),
validate_range(args['percent_val'],
min_value=0, max_value=100, allow_none=True),
validate_output_file(args['val_file']),
validate_range(args['percent_test'],
min_value=0, max_value=100, allow_none=True),
validate_output_file(args['test_file']),
validate_range(args['min'], min_value=1),
validate_range(args['max'], min_value=1, allow_none=True),
]:
if not valid:
sys.exit(1)
try:
percent_train, percent_val, percent_test = calculate_percentages(**args)
except Exception as e:
logger.error('%s: %s' % (type(e).__name__, e))
sys.exit(1)
start_time = time.time()
if parse_folder(args['folder'], args['labels_file'],
train_file=args['train_file'],
percent_train=percent_train,
val_file=args['val_file'],
percent_val=percent_val,
test_file=args['test_file'],
percent_test=percent_test,
min_per_category=args['min'],
max_per_category=args['max'],
):
logger.info('Done after %d seconds.' % (time.time() - start_time))
sys.exit(0)
else:
sys.exit(1)
| DIGITS-master | digits/tools/parse_folder.py |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from digits import test_utils
import os.path
import tempfile
try:
from s3_walker import S3Walker
from boto.s3.bucket import Bucket
from boto.s3.key import Key
import_failed = False
except ImportError:
import_failed = True
global data, bucketData
data = 'content'
put_content = ''
if not import_failed:
bucketData = [Key(name='key0'), Key(name='key1'), Key(name='key2')]
def mock_get_bucket(bucketname):
bucket = Bucket(bucketname)
bucket.get_key = mock_get_key
bucket.list = mock_list_bucket
return bucket
def mock_get_key(keyname):
key = Key(name=keyname)
key.set_contents_from_string('content')
key.set_metadata('metadata_name', 'metadata_val')
return key
def mock_set_contents_from_string(self, content):
global data
data = content
def mock_get_contents_as_string(self):
return data
def mock_list_bucket(prefix='', delimiter='', marker=''):
return bucketData
def mock_set_contents_from_filename(self, filename):
    global put_content
    with open(filename, 'r') as infile:
        put_content = infile.read()
if not import_failed:
Key.set_contents_from_string = mock_set_contents_from_string
Key.get_contents_as_string = mock_get_contents_as_string
Key.set_contents_from_filename = mock_set_contents_from_filename
Bucket.list = mock_list_bucket
test_utils.skipIfNotFramework('none')
class TestInit():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import s3_walker, most likely cause is Boto not installed')
def test_valid_endpoint(self):
walker = S3Walker('http://endpoint.com', 'accesskey', 'secretkey')
assert walker.host == 'endpoint.com'
assert walker.accesskey == 'accesskey'
assert walker.secretkey == 'secretkey'
def test_http_https_endpoint(self):
# Test that HTTP endpoint is parsed properly and defaults to port 80
http_walker = S3Walker('http://endpoint.com', 'accesskey', 'secretkey')
assert http_walker.host == 'endpoint.com'
assert http_walker.port == 80
# Test that HTTPS endpoint is parsed properly and defaults to port 443
https_walker = S3Walker('https://endpoint.com', 'accesskey', 'secretkey')
assert https_walker.host == 'endpoint.com'
assert https_walker.port == 443
def test_port(self):
# Validate port is parsed properly
walker = S3Walker('http://endpoint.com:81', 'accesskey', 'secretkey')
assert walker.port == 81
def test_invalid_endpoint(self):
# Validate exception is thrown for invalid endpoint (no http:// or https://)
try:
S3Walker('endpoint.com', 'accesskey', 'secretkey')
except ValueError:
return
assert False
class TestGetMethods():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import s3_walker, most likely cause is Boto not installed')
cls.walker = S3Walker('http://endpoint.com', 'accesskey', 'secretkey')
cls.walker.connect()
cls.walker.conn.get_bucket = mock_get_bucket
def test_head(self):
# test head operation to confirm S3Walker requests correct key to S3 endpoint
key = self.walker.head('bucket', 'key')
assert key.name == 'key'
def test_get_to_filename(self):
# test get operation to confirm that key is properly stored to file
filename = tempfile.mkstemp()
self.walker.get('bucket', 'key', filename[1])
assert os.path.isfile(filename[1])
os.remove(filename[1])
def test_get_as_string(self):
# test get as string operation to confirm key is properly returned as string
assert self.walker.get_as_string('bucket', 'key') == 'content'
def test_get_meta(self):
# test get metadata operation to confirm metadata is properly returned from key
assert self.walker.get_meta('bucket', 'key', 'metadata_name') == 'metadata_val'
def test_list_bucket(self):
# test list bucket to confirm list of keys is returned from bucket
keys = self.walker.listbucket('bucket')
assert len(keys) == 3
count = 0
for key in keys:
assert key == 'key' + str(count)
count += 1
class TestPut():
@classmethod
def setUpClass(cls):
if import_failed:
test_utils.skipTest('Could not import s3_walker, most likely cause is Boto not installed')
cls.walker = S3Walker('http://endpoint.com', 'accesskey', 'secretkey')
cls.walker.connect()
cls.walker.conn.get_bucket = mock_get_bucket
def test_put(self):
putData = tempfile.mkstemp()
        expected_data = 'this is the data for test put'
        with open(putData[1], 'w') as outfile:
            outfile.write(expected_data)
self.walker.put('bucket', 'key', putData[1])
assert put_content == expected_data
os.remove(putData[1])
| DIGITS-master | digits/tools/test_s3_walker.py |
#!/usr/bin/env python2
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import argparse
from collections import Counter
import logging
import math
import os
import Queue
import random
import re
import shutil
import sys
import threading
import time
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import h5py
import lmdb
import numpy as np
import PIL.Image
# Add path for DIGITS package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import digits.config # noqa
from digits import utils, log # noqa
# Import digits.config first to set the path to Caffe
import caffe.io # noqa
import caffe_pb2 # noqa
if digits.config.config_value('tensorflow')['enabled']:
import tensorflow as tf
else:
tf = None
logger = logging.getLogger('digits.tools.create_db')
class Error(Exception):
pass
class BadInputFileError(Error):
"""Input file is empty"""
pass
class ParseLineError(Error):
"""Failed to parse a line in the input file"""
pass
class LoadError(Error):
"""Failed to load image[s]"""
pass
class WriteError(Error):
"""Failed to write image[s]"""
pass
class Hdf5DatasetExtendError(Error):
"""Failed to extend an hdf5 dataset"""
pass
class DbWriter(object):
"""
Abstract class for writing to databases
"""
def __init__(self, output_dir, image_height, image_width, image_channels):
self._dir = output_dir
os.makedirs(output_dir)
self._image_height = image_height
self._image_width = image_width
self._image_channels = image_channels
self._count = 0
def write_batch(self, batch):
raise NotImplementedError
def count(self):
return self._count
class LmdbWriter(DbWriter):
# TODO
pass
class Hdf5Writer(DbWriter):
"""
A class for writing to HDF5 files
"""
LIST_FILENAME = 'list.txt'
DTYPE = 'float32'
def __init__(self, **kwargs):
"""
Keyword arguments:
compression -- the type of dataset compression
dset_limit -- the dataset size limit
"""
self._compression = kwargs.pop('compression', None)
self._dset_limit = kwargs.pop('dset_limit', None)
super(Hdf5Writer, self).__init__(**kwargs)
self._db = None
if self._dset_limit is not None:
self._max_count = self._dset_limit / (
self._image_height * self._image_width * self._image_channels)
else:
self._max_count = None
def write_batch(self, batch):
# convert batch to numpy arrays
if batch[0][0].ndim == 2:
# add channel axis for grayscale images
data_batch = np.array([i[0][..., np.newaxis] for i in batch])
else:
data_batch = np.array([i[0] for i in batch])
# Transpose to (channels, height, width)
data_batch = data_batch.transpose((0, 3, 1, 2))
label_batch = np.array([i[1] for i in batch])
# first batch
if self._db is None:
self._create_new_file(len(batch))
self._db['data'][:] = data_batch
self._db['label'][:] = label_batch
self._count += len(batch)
return
current_count = self._db['data'].len()
# will fit in current dataset
if current_count + len(batch) <= self._max_count:
self._db['data'].resize(current_count + len(batch), axis=0)
self._db['label'].resize(current_count + len(batch), axis=0)
self._db['data'][-len(batch):] = data_batch
self._db['label'][-len(batch):] = label_batch
self._count += len(batch)
return
# calculate how many will fit in current dataset
split = self._max_count - current_count
if split > 0:
# put what we can into the current dataset
self._db['data'].resize(self._max_count, axis=0)
self._db['label'].resize(self._max_count, axis=0)
self._db['data'][-split:] = data_batch[:split]
self._db['label'][-split:] = label_batch[:split]
self._count += split
self._create_new_file(len(batch) - split)
self._db['data'][:] = data_batch[split:]
self._db['label'][:] = label_batch[split:]
self._count += len(batch) - split
def _create_new_file(self, initial_count):
assert self._max_count is None or initial_count <= self._max_count, \
'Your batch size is too large for your dataset limit - %d vs %d' % \
(initial_count, self._max_count)
# close the old file
if self._db is not None:
self._db.close()
mode = 'a'
else:
mode = 'w'
# get the filename
filename = self._new_filename()
logger.info('Creating HDF5 database at "%s" ...' %
os.path.join(*filename.split(os.sep)[-2:]))
# update the list
with open(self._list_filename(), mode) as outfile:
outfile.write('%s\n' % filename)
# create the new file
self._db = h5py.File(os.path.join(self._dir, filename), 'w')
# initialize the datasets
self._db.create_dataset('data',
(initial_count, self._image_channels,
self._image_height, self._image_width),
maxshape=(self._max_count, self._image_channels,
self._image_height, self._image_width),
chunks=True, compression=self._compression, dtype=self.DTYPE)
self._db.create_dataset('label',
(initial_count,),
maxshape=(self._max_count,),
chunks=True, compression=self._compression, dtype=self.DTYPE)
def _list_filename(self):
return os.path.join(self._dir, self.LIST_FILENAME)
def _new_filename(self):
return '%s.h5' % self.count()
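# Illustrative sketch (hypothetical paths/values) of how Hdf5Writer is used by
# _create_hdf5() below:
#
#   writer = Hdf5Writer(output_dir='/tmp/example_hdf5', image_height=28,
#                       image_width=28, image_channels=1)
#   writer.write_batch([(np.zeros((28, 28)), 0), (np.ones((28, 28)), 1)])
#   assert writer.count() == 2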
def create_db(input_file, output_dir,
image_width, image_height, image_channels,
backend,
resize_mode=None,
image_folder=None,
shuffle=True,
mean_files=None,
delete_files=False,
**kwargs):
"""
Create a database of images from a list of image paths
Raises exceptions on errors
Arguments:
input_file -- a textfile containing labelled image paths
output_dir -- the location to store the created database
image_width -- image resize width
image_height -- image resize height
image_channels -- image channels
    backend -- the DB format (lmdb/hdf5/tfrecords)
Keyword arguments:
resize_mode -- passed to utils.image.resize_image()
shuffle -- if True, shuffle the images in the list before creating
mean_files -- a list of mean files to save
delete_files -- if True, delete raw images after creation of database
"""
# Validate arguments
if not os.path.exists(input_file):
raise ValueError('input_file does not exist')
if os.path.exists(output_dir):
logger.warning('removing existing database')
if os.path.isdir(output_dir):
shutil.rmtree(output_dir, ignore_errors=True)
else:
os.remove(output_dir)
if image_width <= 0:
raise ValueError('invalid image width')
if image_height <= 0:
raise ValueError('invalid image height')
if image_channels not in [1, 3]:
raise ValueError('invalid number of channels')
if resize_mode not in [None, 'crop', 'squash', 'fill', 'half_crop']:
raise ValueError('invalid resize_mode')
if image_folder is not None and not os.path.exists(image_folder):
raise ValueError('image_folder does not exist')
if mean_files:
for mean_file in mean_files:
if os.path.exists(mean_file):
logger.warning('overwriting existing mean file "%s"!' % mean_file)
else:
dirname = os.path.dirname(mean_file)
if not dirname:
dirname = '.'
if not os.path.exists(dirname):
raise ValueError('Cannot save mean file at "%s"' % mean_file)
compute_mean = bool(mean_files)
# Load lines from input_file into a load_queue
load_queue = Queue.Queue()
image_count = _fill_load_queue(input_file, load_queue, shuffle)
# Start some load threads
batch_size = _calculate_batch_size(image_count,
bool(backend == 'hdf5'), kwargs.get('hdf5_dset_limit'),
image_channels, image_height, image_width)
num_threads = _calculate_num_threads(batch_size, shuffle)
write_queue = Queue.Queue(2 * batch_size)
summary_queue = Queue.Queue()
for _ in xrange(num_threads):
p = threading.Thread(target=_load_thread,
args=(load_queue, write_queue, summary_queue,
image_width, image_height, image_channels,
resize_mode, image_folder, compute_mean),
kwargs={'backend': backend,
'encoding': kwargs.get('encoding', None)},
)
p.daemon = True
p.start()
start = time.time()
if backend == 'lmdb':
_create_lmdb(image_count, write_queue, batch_size, output_dir,
summary_queue, num_threads,
mean_files, **kwargs)
elif backend == 'hdf5':
_create_hdf5(image_count, write_queue, batch_size, output_dir,
image_width, image_height, image_channels,
summary_queue, num_threads,
mean_files, **kwargs)
    elif backend == 'tfrecords':
        _create_tfrecords(image_count, write_queue, batch_size, output_dir,
                          summary_queue, num_threads,
                          mean_files, **kwargs)
    else:
        raise ValueError('invalid backend')
if delete_files:
# delete files
deleted_files = 0
distribution = Counter()
with open(input_file) as infile:
for line in infile:
try:
# delete file
[path, label] = _parse_line(line, distribution)
os.remove(path)
deleted_files += 1
except ParseLineError:
pass
logger.info("Deleted " + str(deleted_files) + " files")
logger.info('Database created after %d seconds.' % (time.time() - start))
def _create_tfrecords(image_count, write_queue, batch_size, output_dir,
summary_queue, num_threads,
mean_files=None,
encoding=None,
lmdb_map_size=None,
**kwargs):
"""
Creates the TFRecords database(s)
"""
LIST_FILENAME = 'list.txt'
if not tf:
raise ValueError("Can't create TFRecords as support for Tensorflow "
"is not enabled.")
wait_time = time.time()
threads_done = 0
images_loaded = 0
images_written = 0
image_sum = None
compute_mean = bool(mean_files)
os.makedirs(output_dir)
# We need shards to achieve good mixing properties because TFRecords
# is a sequential/streaming reader, and has no random access.
num_shards = 16 # @TODO(tzaman) put some logic behind this
writers = []
with open(os.path.join(output_dir, LIST_FILENAME), 'w') as outfile:
for shard_id in xrange(num_shards):
shard_name = 'SHARD_%03d.tfrecords' % (shard_id)
filename = os.path.join(output_dir, shard_name)
writers.append(tf.python_io.TFRecordWriter(filename))
outfile.write('%s\n' % (filename))
shard_id = 0
while (threads_done < num_threads) or not write_queue.empty():
# Send update every 2 seconds
if time.time() - wait_time > 2:
logger.debug('Processed %d/%d' % (images_written, image_count))
wait_time = time.time()
processed_something = False
if not summary_queue.empty():
result_count, result_sum = summary_queue.get()
images_loaded += result_count
# Update total_image_sum
if compute_mean and result_count > 0 and result_sum is not None:
if image_sum is None:
image_sum = result_sum
else:
image_sum += result_sum
threads_done += 1
processed_something = True
if not write_queue.empty():
writers[shard_id].write(write_queue.get())
shard_id += 1
if shard_id >= num_shards:
shard_id = 0
images_written += 1
processed_something = True
if not processed_something:
time.sleep(0.2)
if images_loaded == 0:
raise LoadError('no images loaded from input file')
logger.debug('%s images loaded' % images_loaded)
if images_written == 0:
raise WriteError('no images written to database')
logger.info('%s images written to database' % images_written)
if compute_mean:
_save_means(image_sum, images_written, mean_files)
for writer in writers:
writer.close()
def _create_lmdb(image_count, write_queue, batch_size, output_dir,
summary_queue, num_threads,
mean_files=None,
encoding=None,
lmdb_map_size=None,
**kwargs):
"""
Create an LMDB
Keyword arguments:
encoding -- image encoding format
lmdb_map_size -- the initial LMDB map size
"""
wait_time = time.time()
threads_done = 0
images_loaded = 0
images_written = 0
image_sum = None
batch = []
compute_mean = bool(mean_files)
db = lmdb.open(output_dir,
map_size=lmdb_map_size,
map_async=True,
max_dbs=0)
while (threads_done < num_threads) or not write_queue.empty():
# Send update every 2 seconds
if time.time() - wait_time > 2:
logger.debug('Processed %d/%d' % (images_written, image_count))
wait_time = time.time()
processed_something = False
if not summary_queue.empty():
result_count, result_sum = summary_queue.get()
images_loaded += result_count
# Update total_image_sum
if compute_mean and result_count > 0 and result_sum is not None:
if image_sum is None:
image_sum = result_sum
else:
image_sum += result_sum
threads_done += 1
processed_something = True
if not write_queue.empty():
datum = write_queue.get()
batch.append(datum)
if len(batch) == batch_size:
_write_batch_lmdb(db, batch, images_written)
images_written += len(batch)
batch = []
processed_something = True
if not processed_something:
time.sleep(0.2)
if len(batch) > 0:
_write_batch_lmdb(db, batch, images_written)
images_written += len(batch)
if images_loaded == 0:
raise LoadError('no images loaded from input file')
logger.debug('%s images loaded' % images_loaded)
if images_written == 0:
raise WriteError('no images written to database')
logger.info('%s images written to database' % images_written)
if compute_mean:
_save_means(image_sum, images_written, mean_files)
db.close()
def _create_hdf5(image_count, write_queue, batch_size, output_dir,
image_width, image_height, image_channels,
summary_queue, num_threads,
mean_files=None,
compression=None,
hdf5_dset_limit=None,
**kwargs):
"""
Create an HDF5 file
Keyword arguments:
compression -- dataset compression format
"""
wait_time = time.time()
threads_done = 0
images_loaded = 0
images_written = 0
image_sum = None
batch = []
compute_mean = bool(mean_files)
writer = Hdf5Writer(
output_dir=output_dir,
image_height=image_height,
image_width=image_width,
image_channels=image_channels,
dset_limit=hdf5_dset_limit,
compression=compression,
)
while (threads_done < num_threads) or not write_queue.empty():
# Send update every 2 seconds
if time.time() - wait_time > 2:
logger.debug('Processed %d/%d' % (images_written, image_count))
wait_time = time.time()
processed_something = False
if not summary_queue.empty():
result_count, result_sum = summary_queue.get()
images_loaded += result_count
# Update total_image_sum
if compute_mean and result_count > 0 and result_sum is not None:
if image_sum is None:
image_sum = result_sum
else:
image_sum += result_sum
threads_done += 1
processed_something = True
if not write_queue.empty():
batch.append(write_queue.get())
if len(batch) == batch_size:
writer.write_batch(batch)
images_written += len(batch)
batch = []
processed_something = True
if not processed_something:
time.sleep(0.2)
if len(batch) > 0:
writer.write_batch(batch)
images_written += len(batch)
assert images_written == writer.count()
if images_loaded == 0:
raise LoadError('no images loaded from input file')
logger.debug('%s images loaded' % images_loaded)
if images_written == 0:
raise WriteError('no images written to database')
logger.info('%s images written to database' % images_written)
if compute_mean:
_save_means(image_sum, images_written, mean_files)
def _fill_load_queue(filename, queue, shuffle):
"""
Fill the queue with data from the input file
Print the category distribution
Returns the number of lines added to the queue
NOTE: This can be slow on a large input file, but we need the total image
count in order to report the progress, so we might as well read it all
"""
total_lines = 0
valid_lines = 0
distribution = Counter()
with open(filename) as infile:
if shuffle:
lines = infile.readlines() # less memory efficient
random.shuffle(lines)
for line in lines:
total_lines += 1
try:
result = _parse_line(line, distribution)
valid_lines += 1
queue.put(result)
except ParseLineError:
pass
else:
for line in infile: # more memory efficient
total_lines += 1
try:
result = _parse_line(line, distribution)
valid_lines += 1
queue.put(result)
except ParseLineError:
pass
logger.debug('%s total lines in file' % total_lines)
if valid_lines == 0:
raise BadInputFileError('No valid lines in input file')
logger.info('%s valid lines in file' % valid_lines)
for key in sorted(distribution):
logger.debug('Category %s has %d images.' % (key, distribution[key]))
return valid_lines
def _parse_line(line, distribution):
"""
Parse a line in the input file into (path, label)
"""
line = line.strip()
if not line:
raise ParseLineError
# Expect format - [/]path/to/file.jpg 123
match = re.match(r'(.+)\s+(\d+)\s*$', line)
if match is None:
raise ParseLineError
path = match.group(1)
label = int(match.group(2))
distribution[label] += 1
return path, label
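# Illustrative example of the expected line format:
#
#   distribution = Counter()
#   _parse_line('images/cat/001.jpg 4', distribution)   # -> ('images/cat/001.jpg', 4)
#   _parse_line('images/cat/001.jpg', distribution)     # raises ParseLineError (no label)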
def _calculate_batch_size(image_count, is_hdf5=False, hdf5_dset_limit=None,
image_channels=None, image_height=None, image_width=None):
"""
Calculates an appropriate batch size for creating this database
"""
if is_hdf5 and hdf5_dset_limit is not None:
return min(100, image_count, hdf5_dset_limit / (image_channels * image_height * image_width))
else:
return min(100, image_count)
def _calculate_num_threads(batch_size, shuffle):
"""
Calculates an appropriate number of threads for creating this database
"""
if shuffle:
return min(10, int(round(math.sqrt(batch_size))))
else:
# XXX This is the only way to preserve order for now
# This obviously hurts performance considerably
return 1
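# Illustrative values for the two helpers above:
#
#   _calculate_batch_size(5000)                    # -> 100
#   _calculate_num_threads(100, shuffle=True)      # -> 10  (min(10, sqrt(100)))
#   _calculate_num_threads(100, shuffle=False)     # -> 1   (single thread preserves order)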
def _load_thread(load_queue, write_queue, summary_queue,
image_width, image_height, image_channels,
resize_mode, image_folder, compute_mean,
backend=None, encoding=None):
"""
Consumes items in load_queue
Produces items to write_queue
Stores cumulative results in summary_queue
"""
images_added = 0
if compute_mean:
image_sum = _initial_image_sum(image_width, image_height, image_channels)
else:
image_sum = None
while not load_queue.empty():
try:
path, label = load_queue.get(True, 0.05)
except Queue.Empty:
continue
# prepend path with image_folder, if appropriate
if not utils.is_url(path) and image_folder and not os.path.isabs(path):
path = os.path.join(image_folder, path)
try:
image = utils.image.load_image(path)
except utils.errors.LoadImageError as e:
logger.warning('[%s %s] %s: %s' % (path, label, type(e).__name__, e))
continue
image = utils.image.resize_image(image,
image_height, image_width,
channels=image_channels,
resize_mode=resize_mode,
)
if compute_mean:
image_sum += image
if backend == 'lmdb':
datum = _array_to_datum(image, label, encoding)
write_queue.put(datum)
elif backend == 'tfrecords':
tf_example = _array_to_tf_feature(image, label, encoding)
write_queue.put(tf_example)
else:
write_queue.put((image, label))
images_added += 1
summary_queue.put((images_added, image_sum))
def _initial_image_sum(width, height, channels):
"""
Returns an array of zeros that will be used to store the accumulated sum of images
"""
if channels == 1:
return np.zeros((height, width), np.float64)
else:
return np.zeros((height, width, channels), np.float64)
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _array_to_tf_feature(image, label, encoding):
"""
Creates a tensorflow Example from a numpy.ndarray
"""
if not encoding:
image_raw = image.tostring()
encoding_id = 0
else:
s = StringIO()
if encoding == 'png':
PIL.Image.fromarray(image).save(s, format='PNG')
encoding_id = 1
elif encoding == 'jpg':
PIL.Image.fromarray(image).save(s, format='JPEG', quality=90)
encoding_id = 2
else:
raise ValueError('Invalid encoding type')
image_raw = s.getvalue()
depth = image.shape[2] if len(image.shape) > 2 else 1
example = tf.train.Example(
features=tf.train.Features(
feature={
'height': _int64_feature(image.shape[0]),
'width': _int64_feature(image.shape[1]),
'depth': _int64_feature(depth),
'label': _int64_feature(label),
'image_raw': _bytes_feature(image_raw),
'encoding': _int64_feature(encoding_id),
# @TODO(tzaman) - add bitdepth flag?
}
))
return example.SerializeToString()
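# Hedged sketch (not part of the original tool): decoding a record produced by
# _array_to_tf_feature() with the TensorFlow 1.x API, assuming encoding=None
# (raw bytes). The feature names mirror the dict above.
def _example_parse_tf_record(serialized_example):
    features = tf.parse_single_example(
        serialized_example,
        features={
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'depth': tf.FixedLenFeature([], tf.int64),
            'label': tf.FixedLenFeature([], tf.int64),
            'image_raw': tf.FixedLenFeature([], tf.string),
            'encoding': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    shape = tf.cast(tf.stack([features['height'], features['width'], features['depth']]), tf.int32)
    image = tf.reshape(image, shape)
    return image, features['label']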
def _array_to_datum(image, label, encoding):
"""
Create a caffe Datum from a numpy.ndarray
"""
if not encoding:
# Transform to caffe's format requirements
if image.ndim == 3:
# Transpose to (channels, height, width)
image = image.transpose((2, 0, 1))
if image.shape[0] == 3:
# channel swap
# XXX see issue #59
image = image[[2, 1, 0], ...]
elif image.ndim == 2:
# Add a channels axis
image = image[np.newaxis, :, :]
else:
raise Exception('Image has unrecognized shape: "%s"' % image.shape)
datum = caffe.io.array_to_datum(image, label)
else:
datum = caffe_pb2.Datum()
if image.ndim == 3:
datum.channels = image.shape[2]
else:
datum.channels = 1
datum.height = image.shape[0]
datum.width = image.shape[1]
datum.label = label
s = StringIO()
if encoding == 'png':
PIL.Image.fromarray(image).save(s, format='PNG')
elif encoding == 'jpg':
PIL.Image.fromarray(image).save(s, format='JPEG', quality=90)
else:
raise ValueError('Invalid encoding type')
datum.data = s.getvalue()
datum.encoded = True
return datum
def _write_batch_lmdb(db, batch, image_count):
"""
Write a batch to an LMDB database
"""
try:
with db.begin(write=True) as lmdb_txn:
for i, datum in enumerate(batch):
key = '%08d_%d' % (image_count + i, datum.label)
lmdb_txn.put(key, datum.SerializeToString())
except lmdb.MapFullError:
# double the map_size
curr_limit = db.info()['map_size']
new_limit = curr_limit * 2
try:
db.set_mapsize(new_limit) # double it
except AttributeError as e:
version = tuple(int(x) for x in lmdb.__version__.split('.'))
if version < (0, 87):
raise Error('py-lmdb is out of date (%s vs 0.87)' % lmdb.__version__)
else:
raise e
# try again
_write_batch_lmdb(db, batch, image_count)
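# Hedged sketch (illustrative, not part of the original tool): one way the `db` handle
# consumed by _write_batch_lmdb() could be opened. The path and initial map size are
# assumptions; the real tool derives map_size from the --lmdb_map_size argument and
# relies on the MapFullError handler above to grow it on demand.
def _example_open_lmdb(output_dir, map_size_mb=1024):
    return lmdb.open(output_dir,
                     map_size=map_size_mb << 20,  # megabytes -> bytes
                     map_async=True,
                     max_dbs=0)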
def _save_means(image_sum, image_count, mean_files):
"""
Save mean[s] to file
"""
mean = np.around(image_sum / image_count).astype(np.uint8)
for mean_file in mean_files:
if mean_file.lower().endswith('.npy'):
np.save(mean_file, mean)
elif mean_file.lower().endswith('.binaryproto'):
data = mean
# Transform to caffe's format requirements
if data.ndim == 3:
# Transpose to (channels, height, width)
data = data.transpose((2, 0, 1))
if data.shape[0] == 3:
# channel swap
# XXX see issue #59
data = data[[2, 1, 0], ...]
elif mean.ndim == 2:
# Add a channels axis
data = data[np.newaxis, :, :]
blob = caffe_pb2.BlobProto()
blob.num = 1
blob.channels, blob.height, blob.width = data.shape
blob.data.extend(data.astype(float).flat)
with open(mean_file, 'wb') as outfile:
outfile.write(blob.SerializeToString())
elif mean_file.lower().endswith(('.jpg', '.jpeg', '.png')):
image = PIL.Image.fromarray(mean)
image.save(mean_file)
else:
logger.warning('Unrecognized file extension for mean file: "%s"' % mean_file)
continue
logger.info('Mean saved at "%s"' % mean_file)
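# Hedged sketch (not part of the original tool): reading back a mean image written by
# _save_means() in .binaryproto format. The reshape mirrors the (channels, height, width)
# layout written above; the default file name is an assumption.
def _example_load_binaryproto_mean(mean_file='mean.binaryproto'):
    blob = caffe_pb2.BlobProto()
    with open(mean_file, 'rb') as infile:
        blob.ParseFromString(infile.read())
    data = np.array(blob.data, dtype=np.float32)
    return data.reshape(blob.channels, blob.height, blob.width)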
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create-Db tool - DIGITS')
# Positional arguments
parser.add_argument('input_file',
help='An input file of labeled images')
parser.add_argument('output_dir',
help='Path to the output database')
parser.add_argument('width',
type=int,
help='width of resized images'
)
parser.add_argument('height',
type=int,
help='height of resized images'
)
# Optional arguments
parser.add_argument('-c', '--channels',
type=int,
default=3,
help='channels of resized images (1 for grayscale, 3 for color [default])'
)
parser.add_argument('-r', '--resize_mode',
help='resize mode for images (must be "crop", "squash" [default], "fill" or "half_crop")'
)
parser.add_argument('-m', '--mean_file', action='append',
help="location to output the image mean (doesn't save mean if not specified)")
parser.add_argument('-f', '--image_folder',
help='folder containing the images (if the paths in input_file are not absolute)')
parser.add_argument('-s', '--shuffle',
action='store_true',
help='Shuffle images before saving'
)
parser.add_argument('-e', '--encoding',
help='Image encoding format (jpg/png)'
)
parser.add_argument('-C', '--compression',
help='Database compression format (gzip)'
)
parser.add_argument('-b', '--backend',
default='lmdb',
help='The database backend - lmdb[default], hdf5 or tfrecords')
parser.add_argument('--lmdb_map_size',
type=int,
help='The initial map size for LMDB (in MB)')
parser.add_argument('--hdf5_dset_limit',
type=int,
default=2**31,
                        help='The size limit for HDF5 datasets (in bytes)')
parser.add_argument('--delete_files',
action='store_true',
                        help='Specifies whether to delete files after creation of dataset')
args = vars(parser.parse_args())
if args['lmdb_map_size']:
# convert from MB to B
args['lmdb_map_size'] <<= 20
try:
create_db(args['input_file'], args['output_dir'],
args['width'], args['height'], args['channels'],
args['backend'],
resize_mode=args['resize_mode'],
image_folder=args['image_folder'],
shuffle=args['shuffle'],
mean_files=args['mean_file'],
encoding=args['encoding'],
compression=args['compression'],
lmdb_map_size=args['lmdb_map_size'],
hdf5_dset_limit=args['hdf5_dset_limit'],
delete_files=args['delete_files']
)
except Exception as e:
logger.error('%s: %s' % (type(e).__name__, e.message))
raise
| DIGITS-master | digits/tools/create_db.py |
#!/usr/bin/env python2
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import logging
import os
import sys
import PIL.Image
# Add path for DIGITS package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import digits.config # noqa
from digits import utils, log # noqa
logger = logging.getLogger('digits.tools.resize_image')
def validate_output_file(filename):
if filename is None:
return True
if os.path.exists(filename):
if not os.access(filename, os.W_OK):
logger.error('cannot overwrite existing output file "%s"' % filename)
return False
output_dir = os.path.dirname(filename)
if not output_dir:
output_dir = '.'
if not os.path.exists(output_dir):
logger.error('output directory "%s" does not exist' % output_dir)
return False
if not os.access(output_dir, os.W_OK):
logger.error('you do not have write access to output directory "%s"' % output_dir)
return False
return True
def validate_input_file(filename):
if not os.path.exists(filename) or not os.path.isfile(filename):
logger.error('input file "%s" does not exist' % filename)
return False
if not os.access(filename, os.R_OK):
logger.error('you do not have read access to "%s"' % filename)
return False
return True
def validate_range(number, min_value=None, max_value=None, allow_none=False):
if number is None:
if allow_none:
return True
else:
logger.error('invalid value %s' % number)
return False
try:
float(number)
except ValueError:
logger.error('invalid value %s' % number)
return False
if min_value is not None and number < min_value:
logger.error('invalid value %s' % number)
return False
if max_value is not None and number > max_value:
logger.error('invalid value %s' % number)
return False
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Resize-Image tool - DIGITS')
# Positional arguments
parser.add_argument('image',
help='A filesystem path or url to the image'
)
parser.add_argument('output',
help='The location to output the image'
)
parser.add_argument('width',
type=int,
help='The new width'
)
parser.add_argument('height',
type=int,
help='The new height'
)
# Optional arguments
parser.add_argument('-c', '--channels',
type=int,
help='The new number of channels [default is to remain unchanged]'
)
parser.add_argument('-m', '--mode',
default='squash',
help='Resize mode (squash/crop/fill/half_crop) [default is squash]'
)
args = vars(parser.parse_args())
for valid in [
validate_range(args['width'], min_value=1),
validate_range(args['height'], min_value=1),
validate_range(args['channels'],
min_value=1, max_value=3, allow_none=True),
validate_output_file(args['output']),
]:
if not valid:
sys.exit(1)
# load image
image = utils.image.load_image(args['image'])
# resize image
image = utils.image.resize_image(image, args['height'], args['width'],
channels=args['channels'],
resize_mode=args['mode'],
)
image = PIL.Image.fromarray(image)
try:
image.save(args['output'])
except KeyError:
logger.error('Unable to save file to "%s"' % args['output'])
sys.exit(1)
| DIGITS-master | digits/tools/resize_image.py |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# This document should comply with PEP-8 Style Guide
# Linter: pylint
"""
Class for generating Caffe-style learning rates using different policies.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO)
class LRPolicy(object):
"""This class contains details of learning rate policies that are used in caffe.
Calculates and returns the current learning rate. The currently implemented learning rate
policies are as follows:
        - fixed: always return base_lr.
        - step: return base_lr * gamma ^ (floor(iter / step))
        - exp: return base_lr * gamma ^ iter
        - inv: return base_lr * (1 + gamma * iter) ^ (- power)
        - multistep: similar to step, but it allows non-uniform steps defined by
          stepvalue
        - poly: the effective learning rate follows a polynomial decay, reaching
          zero at max_steps. return base_lr * (1 - iter/max_steps) ^ power
        - sigmoid: the effective learning rate follows a sigmoid decay.
          return base_lr * (1 / (1 + exp(-gamma * (iter - stepsize))))
"""
def __init__(self, policy, base_rate, gamma, power, max_steps, step_values):
"""Initialize a learning rate policy
Args:
policy: Learning rate policy
base_rate: Base learning rate
gamma: parameter to compute learning rate
power: parameter to compute learning rate
max_steps: parameter to compute learning rate
            step_values: parameter(s) to compute learning rate; should be a string of comma-separated values
Returns:
-
"""
self.policy = policy
self.base_rate = base_rate
self.gamma = gamma
self.power = power
self.max_steps = max_steps
self.step_values = step_values
if self.step_values:
self.stepvalues_list = map(float, step_values.split(','))
else:
self.stepvalues_list = []
if (self.max_steps < len(self.stepvalues_list)):
self.policy = 'step'
self.stepvalues_list[0] = 1
logging.info("Maximum iterations (i.e., %s) is less than provided step values count "
"(i.e, %s), so learning rate policy is reset to (%s) policy with the "
"step value (%s).",
self.max_steps, len(self.stepvalues_list),
self.policy,
self.stepvalues_list[0])
else: # Converting stepsize percentages into values
for i in range(len(self.stepvalues_list)):
self.stepvalues_list[i] = round(self.max_steps * self.stepvalues_list[i] / 100)
# Avoids 'nan' values during learning rate calculation
if self.stepvalues_list[i] == 0:
self.stepvalues_list[i] = 1
if (self.policy == 'step') or (self.policy == 'sigmoid'):
# If the policy is not multistep, then even though multiple step values
# are provided as input, we will consider only the first value.
self.step_size = self.stepvalues_list[0]
elif (self.policy == 'multistep'):
            self.current_step = 0  # This counter is needed to take arbitrary (non-uniform) steps
self.stepvalue_size = len(self.stepvalues_list)
def get_learning_rate(self, step):
"""Initialize a learning rate policy
Args:
step: the current step for which the learning rate should be computed
Returns:
rate: the learning rate for the requested step
"""
rate = 0
progress = 100 * (step / self.max_steps) # expressed in percent units
if self.policy == "fixed":
rate = self.base_rate
elif self.policy == "step":
current_step = math.floor(step/self.step_size)
rate = self.base_rate * math.pow(self.gamma, current_step)
elif self.policy == "exp":
rate = self.base_rate * math.pow(self.gamma, progress)
elif self.policy == "inv":
rate = self.base_rate * math.pow(1 + self.gamma * progress, - self.power)
elif self.policy == "multistep":
if ((self.current_step < self.stepvalue_size) and (step > self.stepvalues_list[self.current_step])):
self.current_step = self.current_step + 1
rate = self.base_rate * math.pow(self.gamma, self.current_step)
elif self.policy == "poly":
rate = self.base_rate * math.pow(1.0 - (step / self.max_steps), self.power)
elif self.policy == "sigmoid":
rate = self.base_rate * \
(1.0 / (1.0 + math.exp(self.gamma * (progress - 100 * self.step_size / self.max_steps))))
else:
logging.error("Unknown learning rate policy: %s", self.policy)
exit(-1)
return rate
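# Hedged usage sketch (not part of the original module): a "step" policy with base rate
# 0.01, gamma 0.1 and a single step value at 33% of 300 steps. The step size resolves to
# round(300 * 33 / 100) = 99, so the rate drops by 10x at steps 99 and 198.
def _example_step_policy():
    policy = LRPolicy('step', base_rate=0.01, gamma=0.1, power=0.75,
                      max_steps=300, step_values='33')
    # -> [0.01, 0.01, 0.001, 0.001, 0.0001]
    return [policy.get_learning_rate(step) for step in (0, 50, 99, 150, 250)]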
| DIGITS-master | digits/tools/tensorflow/lr_policy.py |
import time
import numpy as np
import wx
# This has been set up to optionally use wx.BufferedDC:
# if USE_BUFFERED_DC is True, it will be used. Otherwise, it uses the raw
# wx.MemoryDC, etc.
# USE_BUFFERED_DC = False
USE_BUFFERED_DC = True
myEVT = wx.NewEventType()
DISPLAY_GRID_EVT = wx.PyEventBinder(myEVT, 1)
class MyEvent(wx.PyCommandEvent):
"""Event to signal that a count value is ready"""
def __init__(self, etype, eid, value=None):
"""Creates the event object"""
wx.PyCommandEvent.__init__(self, etype, eid)
self._value = value
def GetValue(self):
"""Returns the value from the event.
@return: the value of this event
"""
return self._value
class BufferedWindow(wx.Window):
"""
A Buffered window class.
To use it, subclass it and define a Draw(DC) method that takes a DC
to draw to. In that method, put the code needed to draw the picture
you want. The window will automatically be double buffered, and the
screen will be automatically updated when a Paint event is received.
    When the drawing needs to change, your app needs to call the
UpdateDrawing() method. Since the drawing is stored in a bitmap, you
can also save the drawing to file by calling the
SaveToFile(self, file_name, file_type) method.
"""
def __init__(self, *args, **kwargs):
# make sure the NO_FULL_REPAINT_ON_RESIZE style flag is set.
kwargs['style'] = kwargs.setdefault('style', wx.NO_FULL_REPAINT_ON_RESIZE) | wx.NO_FULL_REPAINT_ON_RESIZE
wx.Window.__init__(self, *args, **kwargs)
wx.EVT_PAINT(self, self.OnPaint)
wx.EVT_SIZE(self, self.OnSize)
# OnSize called to make sure the buffer is initialized.
# This might result in OnSize getting called twice on some
# platforms at initialization, but little harm done.
self.OnSize(None)
self.paint_count = 0
def Draw(self, dc):
# just here as a place holder.
# This method should be over-ridden when subclassed
pass
def OnPaint(self, event):
# All that is needed here is to draw the buffer to screen
if USE_BUFFERED_DC:
dc = wx.BufferedPaintDC(self, self._Buffer)
else:
dc = wx.PaintDC(self)
dc.DrawBitmap(self._Buffer, 0, 0)
def OnSize(self, event):
# The Buffer init is done here, to make sure the buffer is always
# the same size as the Window
# Size = self.GetClientSizeTuple()
Size = self.ClientSize
# Make new offscreen bitmap: this bitmap will always have the
# current drawing in it, so it can be used to save the image to
# a file, or whatever.
self._Buffer = wx.EmptyBitmap(*Size)
self.UpdateDrawing()
def SaveToFile(self, FileName, FileType=wx.BITMAP_TYPE_PNG):
# This will save the contents of the buffer
# to the specified file. See the wxWindows docs for
# wx.Bitmap::SaveFile for the details
self._Buffer.SaveFile(FileName, FileType)
def UpdateDrawing(self):
"""
This would get called if the drawing needed to change, for whatever reason.
The idea here is that the drawing is based on some data generated
elsewhere in the system. If that data changes, the drawing needs to
be updated.
This code re-draws the buffer, then calls Update, which forces a paint event.
"""
dc = wx.MemoryDC()
dc.SelectObject(self._Buffer)
self.Draw(dc)
del dc # need to get rid of the MemoryDC before Update() is called.
self.Refresh()
self.Update()
class DrawWindow(BufferedWindow):
def __init__(self, *args, **kwargs):
# Any data the Draw() function needs must be initialized before
# calling BufferedWindow.__init__, as it will call the Draw function.
self.DrawData = {}
BufferedWindow.__init__(self, *args, **kwargs)
def Draw(self, dc):
dc.SetBackground(wx.Brush("White"))
dc.Clear() # make sure you clear the bitmap!
# Here's the actual drawing code.
for key, data in self.DrawData.items():
if key == "text":
dc.DrawText(data, 0, 0)
elif key == "np":
data = data.astype('uint8')
img_count = data.shape[0]
height = data.shape[1]
width = data.shape[2]
grid_size = int(np.sqrt(img_count))
size = (grid_size * width, grid_size * height)
if True: # self.size != size:
self.size = size
self.SetSize(size)
image = wx.EmptyImage(width, height)
for i in xrange(img_count):
x = width * (i // grid_size)
y = height * (i % grid_size)
s = data[i].tostring()
image.SetData(s)
wxBitmap = image.ConvertToBitmap()
dc.DrawBitmap(wxBitmap, x=x, y=y)
class TestFrame(wx.Frame):
SLIDER_WIDTH = 100
SLIDER_BORDER = 50
STATUS_HEIGHT = 20
def __init__(self, parent=None, grid_size=640, attributes=[]):
wx.Frame.__init__(self, parent,
size=(grid_size + self.SLIDER_WIDTH + self.SLIDER_BORDER, grid_size + self.STATUS_HEIGHT),
title="GAN Demo",
style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
# Set up the MenuBar
MenuBar = wx.MenuBar()
file_menu = wx.Menu()
item = file_menu.Append(wx.ID_EXIT, text="&Exit")
self.Bind(wx.EVT_MENU, self.OnQuit, item)
MenuBar.Append(file_menu, "&File")
self.SetMenuBar(MenuBar)
self.statusbar = self.CreateStatusBar()
self.statusbar.SetStatusText('Initialising...')
# Set up UI elements
panel = wx.Panel(self)
self.Window = DrawWindow(panel, size=(grid_size, grid_size))
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add(self.Window, 1, wx.ALIGN_LEFT)
# Sliders
vbox = wx.BoxSizer(wx.VERTICAL)
self.speed_slider = wx.Slider(panel, -1, value=5, minValue=0, maxValue=10, pos=wx.DefaultPosition,
size=(self.SLIDER_WIDTH, -1),
style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL | wx.SL_LABELS)
slider_text = wx.StaticText(panel, label='Speed')
vbox.Add(slider_text, 0, wx.ALIGN_CENTRE)
vbox.Add(self.speed_slider, 0, wx.ALIGN_CENTRE)
self.attribute_sliders = []
for attribute in attributes:
slider_text = wx.StaticText(panel, label=attribute)
slider = wx.Slider(panel, -1, value=0, minValue=-100, maxValue=100, pos=wx.DefaultPosition,
size=(self.SLIDER_WIDTH, -1),
style=wx.SL_AUTOTICKS | wx.SL_HORIZONTAL | wx.SL_LABELS)
vbox.Add(slider_text, 0, wx.ALIGN_CENTRE)
vbox.Add(slider, 0, wx.ALIGN_CENTRE)
self.attribute_sliders.append(slider)
hbox.Add(vbox, 0, wx.ALIGN_RIGHT)
panel.SetSizer(hbox)
self.Window.DrawData = {'text': u'Initialising...'}
self.Window.UpdateDrawing()
# to measure frames per second
self.last_frame_timestamp = None
self.last_fps_update = None
# add panel to frame
frameSizer = wx.BoxSizer(wx.VERTICAL)
frameSizer.Add(panel, 0, wx.EXPAND | wx.ALIGN_LEFT)
self.SetSizer(frameSizer)
self.Show()
self.Fit()
self.Bind(DISPLAY_GRID_EVT, self.OnDisplayCell)
def OnQuit(self, event):
self.Close(True)
def OnDisplayCell(self, evt):
array = evt.GetValue()
self.Window.DrawData = {'np': array}
self.Window.UpdateDrawing()
if self.last_frame_timestamp is not None:
fps = 1. / (time.time() - self.last_frame_timestamp)
if (self.last_fps_update is None) or (time.time() - self.last_fps_update > 0.5):
self.statusbar.SetStatusText('%.1ffps' % fps)
self.last_fps_update = time.time()
self.last_frame_timestamp = time.time()
class DemoApp(wx.App):
def __init__(self, arg, grid_size, attributes):
self.gan_grid_size = grid_size
self.attributes = attributes
super(DemoApp, self).__init__(arg)
def OnInit(self):
self.frame = TestFrame(grid_size=self.gan_grid_size, attributes=self.attributes)
self.SetTopWindow(self.frame)
return True
def DisplayCell(self, array):
evt = MyEvent(myEVT, -1, array)
wx.PostEvent(self.frame, evt)
def GetSpeed(self):
return self.frame.speed_slider.GetValue()
def GetAttributes(self):
return [s.GetValue() for s in self.frame.attribute_sliders]
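# Hedged usage sketch (not part of the original module): driving the demo window from a
# background thread with random frames. DemoApp expects an (N, H, W, C) uint8 batch via
# DisplayCell(); the grid size, attribute names and frame shape below are hypothetical.
def _example_run_demo():
    import threading
    app = DemoApp(0, grid_size=256, attributes=['Smiling', 'Male'])

    def produce_frames():
        while True:
            time.sleep(max(0.05, 1.0 / (app.GetSpeed() + 1)))
            frame = np.random.randint(0, 256, size=(16, 64, 64, 3)).astype(np.uint8)
            app.DisplayCell(frame)

    worker = threading.Thread(target=produce_frames)
    worker.daemon = True
    worker.start()
    app.MainLoop()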
| DIGITS-master | digits/tools/tensorflow/gandisplay.py |