======================================================================================================================
SOURCE CODE FILE: deform_conv.py
LINES: 2
SIZE: 7.02 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\deform_conv.py
ENCODING: utf-8
```py
import math
from typing import Optional, Tuple
import torch
from torch import nn, Tensor
from torch.nn import init
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
def deform_conv2d(
input: Tensor,
offset: Tensor,
weight: Tensor,
bias: Optional[Tensor] = None,
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
dilation: Tuple[int, int] = (1, 1),
mask: Optional[Tensor] = None,
) -> Tensor:
r"""
Performs Deformable Convolution v2, described in
`Deformable ConvNets v2: More Deformable, Better Results
<https://arxiv.org/abs/1811.11168>`__ if :attr:`mask` is not ``None`` and
performs Deformable Convolution, described in
`Deformable Convolutional Networks
<https://arxiv.org/abs/1703.06211>`__ if :attr:`mask` is ``None``.
Args:
input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor
offset (Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width, out_height, out_width]):
offsets to be applied for each position in the convolution kernel.
weight (Tensor[out_channels, in_channels // groups, kernel_height, kernel_width]): convolution weights,
split into groups of size (in_channels // groups)
bias (Tensor[out_channels]): optional bias of shape (out_channels,). Default: None
stride (int or Tuple[int, int]): distance between convolution centers. Default: 1
padding (int or Tuple[int, int]): height/width of padding of zeroes around
each image. Default: 0
dilation (int or Tuple[int, int]): the spacing between kernel elements. Default: 1
mask (Tensor[batch_size, offset_groups * kernel_height * kernel_width, out_height, out_width]):
masks to be applied for each position in the convolution kernel. Default: None
Returns:
Tensor[batch_sz, out_channels, out_h, out_w]: result of convolution
Examples::
>>> input = torch.rand(4, 3, 10, 10)
>>> kh, kw = 3, 3
>>> weight = torch.rand(5, 3, kh, kw)
>>> # offset and mask should have the same spatial size as the output
>>> # of the convolution. In this case, for an input of 10, stride of 1
>>> # and kernel size of 3, without padding, the output size is 8
>>> offset = torch.rand(4, 2 * kh * kw, 8, 8)
>>> mask = torch.rand(4, kh * kw, 8, 8)
>>> out = deform_conv2d(input, offset, weight, mask=mask)
>>> print(out.shape)
>>> # returns
>>> torch.Size([4, 5, 8, 8])
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(deform_conv2d)
_assert_has_ops()
out_channels = weight.shape[0]
use_mask = mask is not None
if mask is None:
mask = torch.zeros((input.shape[0], 1), device=input.device, dtype=input.dtype)
if bias is None:
bias = torch.zeros(out_channels, device=input.device, dtype=input.dtype)
stride_h, stride_w = _pair(stride)
pad_h, pad_w = _pair(padding)
dil_h, dil_w = _pair(dilation)
weights_h, weights_w = weight.shape[-2:]
_, n_in_channels, _, _ = input.shape
n_offset_grps = offset.shape[1] // (2 * weights_h * weights_w)
n_weight_grps = n_in_channels // weight.shape[1]
if n_offset_grps == 0:
raise RuntimeError(
"the shape of the offset tensor at dimension 1 is not valid. It should "
"be a multiple of 2 * weight.size[2] * weight.size[3].\n"
f"Got offset.shape[1]={offset.shape[1]}, while 2 * weight.size[2] * weight.size[3]={2 * weights_h * weights_w}"
)
return torch.ops.torchvision.deform_conv2d(
input,
weight,
offset,
mask,
bias,
stride_h,
stride_w,
pad_h,
pad_w,
dil_h,
dil_w,
n_weight_grps,
n_offset_grps,
use_mask,
)
class DeformConv2d(nn.Module):
"""
See :func:`deform_conv2d`.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
dilation: int = 1,
groups: int = 1,
bias: bool = True,
):
super().__init__()
_log_api_usage_once(self)
if in_channels % groups != 0:
raise ValueError("in_channels must be divisible by groups")
if out_channels % groups != 0:
raise ValueError("out_channels must be divisible by groups")
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.weight = Parameter(
torch.empty(out_channels, in_channels // groups, self.kernel_size[0], self.kernel_size[1])
)
if bias:
self.bias = Parameter(torch.empty(out_channels))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor, offset: Tensor, mask: Optional[Tensor] = None) -> Tensor:
"""
Args:
input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor
offset (Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width, out_height, out_width]):
offsets to be applied for each position in the convolution kernel.
mask (Tensor[batch_size, offset_groups * kernel_height * kernel_width, out_height, out_width]):
masks to be applied for each position in the convolution kernel.
"""
return deform_conv2d(
input,
offset,
self.weight,
self.bias,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
mask=mask,
)
def __repr__(self) -> str:
s = (
f"{self.__class__.__name__}("
f"{self.in_channels}"
f", {self.out_channels}"
f", kernel_size={self.kernel_size}"
f", stride={self.stride}"
)
s += f", padding={self.padding}" if self.padding != (0, 0) else ""
s += f", dilation={self.dilation}" if self.dilation != (1, 1) else ""
s += f", groups={self.groups}" if self.groups != 1 else ""
s += ", bias=False" if self.bias is None else ""
s += ")"
return s
```
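Not part of the file above: a minimal usage sketch for the ``DeformConv2d`` module, mirroring the functional example in the docstring. It needs the compiled torchvision C++ ops; the input size, padding, and the sigmoid on the mask are illustrative choices, not requirements of the API.
```py
import torch
from torchvision.ops import DeformConv2d

kh, kw = 3, 3
# one offset group: offset has 2 * kh * kw channels, mask has kh * kw channels
conv = DeformConv2d(in_channels=3, out_channels=5, kernel_size=kh, padding=1)
x = torch.rand(4, 3, 10, 10)
# offset/mask must match the output's spatial size; with stride 1 and padding 1
# the output keeps the 10x10 resolution
offset = torch.rand(4, 2 * kh * kw, 10, 10)
mask = torch.sigmoid(torch.rand(4, kh * kw, 10, 10))  # modulation weights in [0, 1]
out = conv(x, offset, mask)
print(out.shape)  # torch.Size([4, 5, 10, 10])
```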
====================================================================================================================
SOURCE CODE FILE: diou_loss.py
LINES: 2
SIZE: 3.38 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\diou_loss.py
ENCODING: utf-8
```py
from typing import Tuple
import torch
from ..utils import _log_api_usage_once
from ._utils import _loss_inter_union, _upcast_non_float
def distance_box_iou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Gradient-friendly IoU loss with an additional penalty that is non-zero when the
distance between boxes' centers isn't zero. Indeed, for two exactly overlapping
boxes, the distance IoU is the same as the IoU loss.
This loss is symmetric, so the boxes1 and boxes2 arguments are interchangeable.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
``0 <= x1 < x2`` and ``0 <= y1 < y2``, and the two boxes should have the
same dimensions.
Args:
boxes1 (Tensor[N, 4]): first set of boxes
boxes2 (Tensor[N, 4]): second set of boxes
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: No reduction will be
applied to the output. ``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``
eps (float, optional): small number to prevent division by zero. Default: 1e-7
Returns:
Tensor: Loss tensor with the reduction option applied.
Reference:
Zhaohui Zheng et al.: Distance Intersection over Union Loss:
https://arxiv.org/abs/1911.08287
"""
# Original Implementation from https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/losses.py
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(distance_box_iou_loss)
boxes1 = _upcast_non_float(boxes1)
boxes2 = _upcast_non_float(boxes2)
loss, _ = _diou_iou_loss(boxes1, boxes2, eps)
# Check reduction option and return loss accordingly
if reduction == "none":
pass
elif reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
else:
raise ValueError(
f"Invalid Value for arg 'reduction': '{reduction} \n Supported reduction modes: 'none', 'mean', 'sum'"
)
return loss
def _diou_iou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
eps: float = 1e-7,
) -> Tuple[torch.Tensor, torch.Tensor]:
intsct, union = _loss_inter_union(boxes1, boxes2)
iou = intsct / (union + eps)
# smallest enclosing box
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
# The diagonal distance of the smallest enclosing box squared
diagonal_distance_squared = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g = (x1g + x2g) / 2
y_g = (y1g + y2g) / 2
# The distance between boxes' centers squared.
centers_distance_squared = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# The distance IoU is the IoU penalized by a normalized
# distance between boxes' centers squared.
loss = 1 - iou + (centers_distance_squared / diagonal_distance_squared)
return loss, iou
```
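Not part of the file above: a small, illustrative call to ``distance_box_iou_loss``. The box values are arbitrary; the first pair is identical (loss close to 0), the second is offset so both the IoU term and the center-distance penalty contribute.
```py
import torch
from torchvision.ops import distance_box_iou_loss

boxes1 = torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]])
boxes2 = torch.tensor([[0.0, 0.0, 10.0, 10.0], [0.0, 0.0, 10.0, 10.0]])
# per-box losses: ~0 for the identical pair, > 0 for the shifted pair
print(distance_box_iou_loss(boxes1, boxes2, reduction="none"))
# a single scalar when a reduction is requested
print(distance_box_iou_loss(boxes1, boxes2, reduction="mean"))
```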
=====================================================================================================================
SOURCE CODE FILE: drop_block.py
LINES: 1
SIZE: 5.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\drop_block.py
ENCODING: utf-8
```py
import torch
import torch.fx
import torch.nn.functional as F
from torch import nn, Tensor
from ..utils import _log_api_usage_once
def drop_block2d(
input: Tensor, p: float, block_size: int, inplace: bool = False, eps: float = 1e-06, training: bool = True
) -> Tensor:
"""
Implements DropBlock2d from `"DropBlock: A regularization method for convolutional networks"
<https://arxiv.org/abs/1810.12890>`.
Args:
input (Tensor[N, C, H, W]): The input tensor of 4 dimensions, with the first one
being its batch, i.e. a batch with ``N`` rows.
p (float): Probability of an element to be dropped.
block_size (int): Size of the block to drop.
inplace (bool): If set to ``True``, will do this operation in-place. Default: ``False``.
eps (float): A value added to the denominator for numerical stability. Default: 1e-6.
training (bool): apply dropblock if ``True``. Default: ``True``.
Returns:
Tensor[N, C, H, W]: The randomly zeroed tensor after dropblock.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(drop_block2d)
if p < 0.0 or p > 1.0:
raise ValueError(f"drop probability has to be between 0 and 1, but got {p}.")
if input.ndim != 4:
raise ValueError(f"input should be 4 dimensional. Got {input.ndim} dimensions.")
if not training or p == 0.0:
return input
N, C, H, W = input.size()
block_size = min(block_size, W, H)
# compute the gamma of Bernoulli distribution
gamma = (p * H * W) / ((block_size**2) * ((H - block_size + 1) * (W - block_size + 1)))
noise = torch.empty((N, C, H - block_size + 1, W - block_size + 1), dtype=input.dtype, device=input.device)
noise.bernoulli_(gamma)
noise = F.pad(noise, [block_size // 2] * 4, value=0)
noise = F.max_pool2d(noise, stride=(1, 1), kernel_size=(block_size, block_size), padding=block_size // 2)
noise = 1 - noise
normalize_scale = noise.numel() / (eps + noise.sum())
if inplace:
input.mul_(noise).mul_(normalize_scale)
else:
input = input * noise * normalize_scale
return input
def drop_block3d(
input: Tensor, p: float, block_size: int, inplace: bool = False, eps: float = 1e-06, training: bool = True
) -> Tensor:
"""
Implements DropBlock3d from `"DropBlock: A regularization method for convolutional networks"
<https://arxiv.org/abs/1810.12890>`.
Args:
input (Tensor[N, C, D, H, W]): The input tensor of 5 dimensions, with the first one
being its batch, i.e. a batch with ``N`` rows.
p (float): Probability of an element to be dropped.
block_size (int): Size of the block to drop.
inplace (bool): If set to ``True``, will do this operation in-place. Default: ``False``.
eps (float): A value added to the denominator for numerical stability. Default: 1e-6.
training (bool): apply dropblock if ``True``. Default: ``True``.
Returns:
Tensor[N, C, D, H, W]: The randomly zeroed tensor after dropblock.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(drop_block3d)
if p < 0.0 or p > 1.0:
raise ValueError(f"drop probability has to be between 0 and 1, but got {p}.")
if input.ndim != 5:
raise ValueError(f"input should be 5 dimensional. Got {input.ndim} dimensions.")
if not training or p == 0.0:
return input
N, C, D, H, W = input.size()
block_size = min(block_size, D, H, W)
# compute the gamma of Bernoulli distribution
gamma = (p * D * H * W) / ((block_size**3) * ((D - block_size + 1) * (H - block_size + 1) * (W - block_size + 1)))
noise = torch.empty(
(N, C, D - block_size + 1, H - block_size + 1, W - block_size + 1), dtype=input.dtype, device=input.device
)
noise.bernoulli_(gamma)
noise = F.pad(noise, [block_size // 2] * 6, value=0)
noise = F.max_pool3d(
noise, stride=(1, 1, 1), kernel_size=(block_size, block_size, block_size), padding=block_size // 2
)
noise = 1 - noise
normalize_scale = noise.numel() / (eps + noise.sum())
if inplace:
input.mul_(noise).mul_(normalize_scale)
else:
input = input * noise * normalize_scale
return input
torch.fx.wrap("drop_block2d")
class DropBlock2d(nn.Module):
"""
See :func:`drop_block2d`.
"""
def __init__(self, p: float, block_size: int, inplace: bool = False, eps: float = 1e-06) -> None:
super().__init__()
self.p = p
self.block_size = block_size
self.inplace = inplace
self.eps = eps
def forward(self, input: Tensor) -> Tensor:
"""
Args:
input (Tensor): Input feature map on which some areas will be randomly
dropped.
Returns:
Tensor: The tensor after DropBlock layer.
"""
return drop_block2d(input, self.p, self.block_size, self.inplace, self.eps, self.training)
def __repr__(self) -> str:
s = f"{self.__class__.__name__}(p={self.p}, block_size={self.block_size}, inplace={self.inplace})"
return s
torch.fx.wrap("drop_block3d")
class DropBlock3d(DropBlock2d):
"""
See :func:`drop_block3d`.
"""
def __init__(self, p: float, block_size: int, inplace: bool = False, eps: float = 1e-06) -> None:
super().__init__(p, block_size, inplace, eps)
def forward(self, input: Tensor) -> Tensor:
"""
Args:
input (Tensor): Input feature map on which some areas will be randomly
dropped.
Returns:
Tensor: The tensor after DropBlock layer.
"""
return drop_block3d(input, self.p, self.block_size, self.inplace, self.eps, self.training)
```
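Not part of the file above: an illustrative use of the ``DropBlock2d`` module with arbitrary tensor sizes, highlighting the train/eval behaviour implemented by ``drop_block2d``.
```py
import torch
from torchvision.ops import DropBlock2d

layer = DropBlock2d(p=0.2, block_size=3)
x = torch.rand(2, 8, 16, 16)
layer.train()
y = layer(x)  # contiguous 3x3 blocks are zeroed and the rest rescaled
layer.eval()
assert torch.equal(layer(x), x)  # no-op when not training (or when p == 0)
```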
==================================================================================================================================
SOURCE CODE FILE: feature_pyramid_network.py
LINES: 1
SIZE: 8.74 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\feature_pyramid_network.py
ENCODING: utf-8
```py
from collections import OrderedDict
from typing import Callable, Dict, List, Optional, Tuple
import torch.nn.functional as F
from torch import nn, Tensor
from ..ops.misc import Conv2dNormActivation
from ..utils import _log_api_usage_once
class ExtraFPNBlock(nn.Module):
"""
Base class for the extra block in the FPN.
Args:
results (List[Tensor]): the result of the FPN
x (List[Tensor]): the original feature maps
names (List[str]): the names for each one of the
original feature maps
Returns:
results (List[Tensor]): the extended set of results
of the FPN
names (List[str]): the extended set of names for the results
"""
def forward(
self,
results: List[Tensor],
x: List[Tensor],
names: List[str],
) -> Tuple[List[Tensor], List[str]]:
pass
class FeaturePyramidNetwork(nn.Module):
"""
Module that adds an FPN on top of a set of feature maps. This is based on
`"Feature Pyramid Networks for Object Detection" <https://arxiv.org/abs/1612.03144>`_.
The feature maps are currently supposed to be in increasing depth
order.
The input to the model is expected to be an OrderedDict[Tensor], containing
the feature maps on top of which the FPN will be added.
Args:
in_channels_list (list[int]): number of channels for each feature map that
is passed to the module
out_channels (int): number of channels of the FPN representation
extra_blocks (ExtraFPNBlock or None): if provided, extra operations will
be performed. It is expected to take the fpn features, the original
features and the names of the original features as input, and returns
a new list of feature maps and their corresponding names
norm_layer (callable, optional): Module specifying the normalization layer to use. Default: None
Examples::
>>> m = torchvision.ops.FeaturePyramidNetwork([10, 20, 30], 5)
>>> # get some dummy data
>>> x = OrderedDict()
>>> x['feat0'] = torch.rand(1, 10, 64, 64)
>>> x['feat2'] = torch.rand(1, 20, 16, 16)
>>> x['feat3'] = torch.rand(1, 30, 8, 8)
>>> # compute the FPN on top of x
>>> output = m(x)
>>> print([(k, v.shape) for k, v in output.items()])
>>> # returns
>>> [('feat0', torch.Size([1, 5, 64, 64])),
>>> ('feat2', torch.Size([1, 5, 16, 16])),
>>> ('feat3', torch.Size([1, 5, 8, 8]))]
"""
_version = 2
def __init__(
self,
in_channels_list: List[int],
out_channels: int,
extra_blocks: Optional[ExtraFPNBlock] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
):
super().__init__()
_log_api_usage_once(self)
self.inner_blocks = nn.ModuleList()
self.layer_blocks = nn.ModuleList()
for in_channels in in_channels_list:
if in_channels == 0:
raise ValueError("in_channels=0 is currently not supported")
inner_block_module = Conv2dNormActivation(
in_channels, out_channels, kernel_size=1, padding=0, norm_layer=norm_layer, activation_layer=None
)
layer_block_module = Conv2dNormActivation(
out_channels, out_channels, kernel_size=3, norm_layer=norm_layer, activation_layer=None
)
self.inner_blocks.append(inner_block_module)
self.layer_blocks.append(layer_block_module)
# initialize parameters now to avoid modifying the initialization of top_blocks
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, a=1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if extra_blocks is not None:
if not isinstance(extra_blocks, ExtraFPNBlock):
raise TypeError(f"extra_blocks should be of type ExtraFPNBlock not {type(extra_blocks)}")
self.extra_blocks = extra_blocks
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if version is None or version < 2:
num_blocks = len(self.inner_blocks)
for block in ["inner_blocks", "layer_blocks"]:
for i in range(num_blocks):
for type in ["weight", "bias"]:
old_key = f"{prefix}{block}.{i}.{type}"
new_key = f"{prefix}{block}.{i}.0.{type}"
if old_key in state_dict:
state_dict[new_key] = state_dict.pop(old_key)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def get_result_from_inner_blocks(self, x: Tensor, idx: int) -> Tensor:
"""
This is equivalent to self.inner_blocks[idx](x),
but torchscript doesn't support this yet
"""
num_blocks = len(self.inner_blocks)
if idx < 0:
idx += num_blocks
out = x
for i, module in enumerate(self.inner_blocks):
if i == idx:
out = module(x)
return out
def get_result_from_layer_blocks(self, x: Tensor, idx: int) -> Tensor:
"""
This is equivalent to self.layer_blocks[idx](x),
but torchscript doesn't support this yet
"""
num_blocks = len(self.layer_blocks)
if idx < 0:
idx += num_blocks
out = x
for i, module in enumerate(self.layer_blocks):
if i == idx:
out = module(x)
return out
def forward(self, x: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""
Computes the FPN for a set of feature maps.
Args:
x (OrderedDict[Tensor]): feature maps for each feature level.
Returns:
results (OrderedDict[Tensor]): feature maps after FPN layers.
They are ordered from the highest resolution first.
"""
# unpack OrderedDict into two lists for easier handling
names = list(x.keys())
x = list(x.values())
last_inner = self.get_result_from_inner_blocks(x[-1], -1)
results = []
results.append(self.get_result_from_layer_blocks(last_inner, -1))
for idx in range(len(x) - 2, -1, -1):
inner_lateral = self.get_result_from_inner_blocks(x[idx], idx)
feat_shape = inner_lateral.shape[-2:]
inner_top_down = F.interpolate(last_inner, size=feat_shape, mode="nearest")
last_inner = inner_lateral + inner_top_down
results.insert(0, self.get_result_from_layer_blocks(last_inner, idx))
if self.extra_blocks is not None:
results, names = self.extra_blocks(results, x, names)
# make it back an OrderedDict
out = OrderedDict([(k, v) for k, v in zip(names, results)])
return out
class LastLevelMaxPool(ExtraFPNBlock):
"""
Applies a max_pool2d (not actual max_pool2d, we just subsample) on top of the last feature map
"""
def forward(
self,
x: List[Tensor],
y: List[Tensor],
names: List[str],
) -> Tuple[List[Tensor], List[str]]:
names.append("pool")
# Use max pooling to simulate stride 2 subsampling
x.append(F.max_pool2d(x[-1], kernel_size=1, stride=2, padding=0))
return x, names
class LastLevelP6P7(ExtraFPNBlock):
"""
This module is used in RetinaNet to generate extra layers, P6 and P7.
"""
def __init__(self, in_channels: int, out_channels: int):
super().__init__()
self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
for module in [self.p6, self.p7]:
nn.init.kaiming_uniform_(module.weight, a=1)
nn.init.constant_(module.bias, 0)
self.use_P5 = in_channels == out_channels
def forward(
self,
p: List[Tensor],
c: List[Tensor],
names: List[str],
) -> Tuple[List[Tensor], List[str]]:
p5, c5 = p[-1], c[-1]
x = p5 if self.use_P5 else c5
p6 = self.p6(x)
p7 = self.p7(F.relu(p6))
p.extend([p6, p7])
names.extend(["p6", "p7"])
return p, names
```
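Not part of the file above: a runnable variant of the docstring example that also passes ``LastLevelMaxPool`` as ``extra_blocks``, so an extra ``'pool'`` level is appended. Channel counts and spatial sizes are arbitrary.
```py
import torch
from collections import OrderedDict
from torchvision.ops import FeaturePyramidNetwork
from torchvision.ops.feature_pyramid_network import LastLevelMaxPool

m = FeaturePyramidNetwork([10, 20, 30], 5, extra_blocks=LastLevelMaxPool())
x = OrderedDict()
x["feat0"] = torch.rand(1, 10, 64, 64)
x["feat2"] = torch.rand(1, 20, 16, 16)
x["feat3"] = torch.rand(1, 30, 8, 8)
output = m(x)
print([(k, v.shape) for k, v in output.items()])
# [('feat0', torch.Size([1, 5, 64, 64])),
#  ('feat2', torch.Size([1, 5, 16, 16])),
#  ('feat3', torch.Size([1, 5, 8, 8])),
#  ('pool', torch.Size([1, 5, 4, 4]))]  # stride-2 subsampling of the last map
```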
=====================================================================================================================
SOURCE CODE FILE: focal_loss.py
LINES: 2
SIZE: 2.42 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\focal_loss.py
ENCODING: utf-8
```py
import torch
import torch.nn.functional as F
from ..utils import _log_api_usage_once
def sigmoid_focal_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
alpha: float = 0.25,
gamma: float = 2,
reduction: str = "none",
) -> torch.Tensor:
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs (Tensor): A float tensor of arbitrary shape.
The predictions for each example.
targets (Tensor): A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
alpha (float): Weighting factor in range [0, 1] to balance
positive vs negative examples or -1 for ignore. Default: ``0.25``.
gamma (float): Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples. Default: ``2``.
reduction (string): ``'none'`` | ``'mean'`` | ``'sum'``
``'none'``: No reduction will be applied to the output.
``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``.
Returns:
Loss tensor with the reduction option applied.
"""
# Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py
if not (0 <= alpha <= 1) and alpha != -1:
raise ValueError(f"Invalid alpha value: {alpha}. alpha must be in the range [0,1] or -1 for ignore.")
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(sigmoid_focal_loss)
p = torch.sigmoid(inputs)
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = p * targets + (1 - p) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
# Check reduction option and return loss accordingly
if reduction == "none":
pass
elif reduction == "mean":
loss = loss.mean()
elif reduction == "sum":
loss = loss.sum()
else:
raise ValueError(
f"Invalid Value for arg 'reduction': '{reduction} \n Supported reduction modes: 'none', 'mean', 'sum'"
)
return loss
```
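Not part of the file above: a minimal ``sigmoid_focal_loss`` call on made-up logits and binary targets, showing the expected input convention (raw logits, not probabilities).
```py
import torch
from torchvision.ops import sigmoid_focal_loss

logits = torch.randn(8, 4)                     # raw scores; the loss applies the sigmoid itself
targets = torch.randint(0, 2, (8, 4)).float()  # binary labels with the same shape
per_element = sigmoid_focal_loss(logits, targets)  # default reduction='none' -> shape (8, 4)
mean_loss = sigmoid_focal_loss(logits, targets, alpha=0.25, gamma=2.0, reduction="mean")
print(per_element.shape, mean_loss)
```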
====================================================================================================================
SOURCE CODE FILE: giou_loss.py
LINES: 2
SIZE: 2.71 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\giou_loss.py
ENCODING: utf-8
```py
import torch
from ..utils import _log_api_usage_once
from ._utils import _loss_inter_union, _upcast_non_float
def generalized_box_iou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Gradient-friendly IoU loss with an additional penalty that is non-zero when the
boxes do not overlap and scales with the size of their smallest enclosing box.
This loss is symmetric, so the boxes1 and boxes2 arguments are interchangeable.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
``0 <= x1 < x2`` and ``0 <= y1 < y2``, and the two boxes should have the
same dimensions.
Args:
boxes1 (Tensor[N, 4] or Tensor[4]): first set of boxes
boxes2 (Tensor[N, 4] or Tensor[4]): second set of boxes
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: No reduction will be
applied to the output. ``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``
eps (float): small number to prevent division by zero. Default: 1e-7
Returns:
Tensor: Loss tensor with the reduction option applied.
Reference:
Hamid Rezatofighi et al.: Generalized Intersection over Union:
A Metric and A Loss for Bounding Box Regression:
https://arxiv.org/abs/1902.09630
"""
# Original implementation from https://github.com/facebookresearch/fvcore/blob/bfff2ef/fvcore/nn/giou_loss.py
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(generalized_box_iou_loss)
boxes1 = _upcast_non_float(boxes1)
boxes2 = _upcast_non_float(boxes2)
intsctk, unionk = _loss_inter_union(boxes1, boxes2)
iouk = intsctk / (unionk + eps)
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
# smallest enclosing box
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
area_c = (xc2 - xc1) * (yc2 - yc1)
miouk = iouk - ((area_c - unionk) / (area_c + eps))
loss = 1 - miouk
# Check reduction option and return loss accordingly
if reduction == "none":
pass
elif reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
else:
raise ValueError(
f"Invalid Value for arg 'reduction': '{reduction} \n Supported reduction modes: 'none', 'mean', 'sum'"
)
return loss
```
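Not part of the file above: an illustrative ``generalized_box_iou_loss`` call with arbitrary boxes. The second pair is disjoint, so the enclosing-box penalty pushes the loss above 1.
```py
import torch
from torchvision.ops import generalized_box_iou_loss

boxes1 = torch.tensor([[0.0, 0.0, 10.0, 10.0], [20.0, 20.0, 30.0, 30.0]])
boxes2 = torch.tensor([[0.0, 0.0, 10.0, 10.0], [40.0, 40.0, 50.0, 50.0]])
loss = generalized_box_iou_loss(boxes1, boxes2, reduction="none")
print(loss)  # ~0 for the identical pair, > 1 for the disjoint pair
```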
===============================================================================================================
SOURCE CODE FILE: misc.py
LINES: 1
SIZE: 13.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\misc.py
ENCODING: utf-8
```py
import warnings
from typing import Callable, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor
from ..utils import _log_api_usage_once, _make_ntuple
interpolate = torch.nn.functional.interpolate
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed
Args:
num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)``
eps (float): a value added to the denominator for numerical stability. Default: 1e-5
"""
def __init__(
self,
num_features: int,
eps: float = 1e-5,
):
super().__init__()
_log_api_usage_once(self)
self.eps = eps
self.register_buffer("weight", torch.ones(num_features))
self.register_buffer("bias", torch.zeros(num_features))
self.register_buffer("running_mean", torch.zeros(num_features))
self.register_buffer("running_var", torch.ones(num_features))
def _load_from_state_dict(
self,
state_dict: dict,
prefix: str,
local_metadata: dict,
strict: bool,
missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str],
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x: Tensor) -> Tensor:
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
scale = w * (rv + self.eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps})"
class ConvNormActivation(torch.nn.Sequential):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, ...]] = 3,
stride: Union[int, Tuple[int, ...]] = 1,
padding: Optional[Union[int, Tuple[int, ...], str]] = None,
groups: int = 1,
norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
dilation: Union[int, Tuple[int, ...]] = 1,
inplace: Optional[bool] = True,
bias: Optional[bool] = None,
conv_layer: Callable[..., torch.nn.Module] = torch.nn.Conv2d,
) -> None:
if padding is None:
if isinstance(kernel_size, int) and isinstance(dilation, int):
padding = (kernel_size - 1) // 2 * dilation
else:
_conv_dim = len(kernel_size) if isinstance(kernel_size, Sequence) else len(dilation)
kernel_size = _make_ntuple(kernel_size, _conv_dim)
dilation = _make_ntuple(dilation, _conv_dim)
padding = tuple((kernel_size[i] - 1) // 2 * dilation[i] for i in range(_conv_dim))
if bias is None:
bias = norm_layer is None
layers = [
conv_layer(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=dilation,
groups=groups,
bias=bias,
)
]
if norm_layer is not None:
layers.append(norm_layer(out_channels))
if activation_layer is not None:
params = {} if inplace is None else {"inplace": inplace}
layers.append(activation_layer(**params))
super().__init__(*layers)
_log_api_usage_once(self)
self.out_channels = out_channels
if self.__class__ == ConvNormActivation:
warnings.warn(
"Don't use ConvNormActivation directly, please use Conv2dNormActivation and Conv3dNormActivation instead."
)
class Conv2dNormActivation(ConvNormActivation):
"""
Configurable block used for Convolution2d-Normalization-Activation blocks.
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
kernel_size: (int, optional): Size of the convolving kernel. Default: 3
stride (int, optional): Stride of the convolution. Default: 1
padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm2d``
activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
dilation (int): Spacing between kernel elements. Default: 1
inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int]] = 3,
stride: Union[int, Tuple[int, int]] = 1,
padding: Optional[Union[int, Tuple[int, int], str]] = None,
groups: int = 1,
norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
dilation: Union[int, Tuple[int, int]] = 1,
inplace: Optional[bool] = True,
bias: Optional[bool] = None,
) -> None:
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups,
norm_layer,
activation_layer,
dilation,
inplace,
bias,
torch.nn.Conv2d,
)
class Conv3dNormActivation(ConvNormActivation):
"""
Configurable block used for Convolution3d-Normalization-Activation blocks.
Args:
in_channels (int): Number of channels in the input video.
out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
kernel_size: (int, optional): Size of the convolving kernel. Default: 3
stride (int, optional): Stride of the convolution. Default: 1
padding (int, tuple or str, optional): Padding added to all sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm3d``
activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
dilation (int): Spacing between kernel elements. Default: 1
inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, Tuple[int, int, int]] = 3,
stride: Union[int, Tuple[int, int, int]] = 1,
padding: Optional[Union[int, Tuple[int, int, int], str]] = None,
groups: int = 1,
norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm3d,
activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
dilation: Union[int, Tuple[int, int, int]] = 1,
inplace: Optional[bool] = True,
bias: Optional[bool] = None,
) -> None:
super().__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups,
norm_layer,
activation_layer,
dilation,
inplace,
bias,
torch.nn.Conv3d,
)
class SqueezeExcitation(torch.nn.Module):
"""
This block implements the Squeeze-and-Excitation block from https://arxiv.org/abs/1709.01507 (see Fig. 1).
Parameters ``activation`` and ``scale_activation`` correspond to ``delta`` and ``sigma`` in eq. 3.
Args:
input_channels (int): Number of channels in the input image
squeeze_channels (int): Number of squeeze channels
activation (Callable[..., torch.nn.Module], optional): ``delta`` activation. Default: ``torch.nn.ReLU``
scale_activation (Callable[..., torch.nn.Module]): ``sigma`` activation. Default: ``torch.nn.Sigmoid``
"""
def __init__(
self,
input_channels: int,
squeeze_channels: int,
activation: Callable[..., torch.nn.Module] = torch.nn.ReLU,
scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
) -> None:
super().__init__()
_log_api_usage_once(self)
self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1)
self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1)
self.activation = activation()
self.scale_activation = scale_activation()
def _scale(self, input: Tensor) -> Tensor:
scale = self.avgpool(input)
scale = self.fc1(scale)
scale = self.activation(scale)
scale = self.fc2(scale)
return self.scale_activation(scale)
def forward(self, input: Tensor) -> Tensor:
scale = self._scale(input)
return scale * input
class MLP(torch.nn.Sequential):
"""This block implements the multi-layer perceptron (MLP) module.
Args:
in_channels (int): Number of channels of the input
hidden_channels (List[int]): List of the hidden channel dimensions
norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the linear layer. If ``None`` this layer won't be used. Default: ``None``
activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the linear layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
inplace (bool, optional): Parameter for the activation layer, which can optionally do the operation in-place.
Default is ``None``, which uses the respective default values of the ``activation_layer`` and Dropout layer.
bias (bool): Whether to use bias in the linear layer. Default ``True``
dropout (float): The probability for the dropout layer. Default: 0.0
"""
def __init__(
self,
in_channels: int,
hidden_channels: List[int],
norm_layer: Optional[Callable[..., torch.nn.Module]] = None,
activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
inplace: Optional[bool] = None,
bias: bool = True,
dropout: float = 0.0,
):
# The addition of `norm_layer` is inspired from the implementation of TorchMultimodal:
# https://github.com/facebookresearch/multimodal/blob/5dec8a/torchmultimodal/modules/layers/mlp.py
params = {} if inplace is None else {"inplace": inplace}
layers = []
in_dim = in_channels
for hidden_dim in hidden_channels[:-1]:
layers.append(torch.nn.Linear(in_dim, hidden_dim, bias=bias))
if norm_layer is not None:
layers.append(norm_layer(hidden_dim))
layers.append(activation_layer(**params))
layers.append(torch.nn.Dropout(dropout, **params))
in_dim = hidden_dim
layers.append(torch.nn.Linear(in_dim, hidden_channels[-1], bias=bias))
layers.append(torch.nn.Dropout(dropout, **params))
super().__init__(*layers)
_log_api_usage_once(self)
class Permute(torch.nn.Module):
"""This module returns a view of the tensor input with its dimensions permuted.
Args:
dims (List[int]): The desired ordering of dimensions
"""
def __init__(self, dims: List[int]):
super().__init__()
self.dims = dims
def forward(self, x: Tensor) -> Tensor:
return torch.permute(x, self.dims)
```
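Not part of the file above: a short sketch that chains a few of these building blocks (``Conv2dNormActivation``, ``SqueezeExcitation``, ``MLP``). The channel counts and sizes are arbitrary.
```py
import torch
from torchvision.ops.misc import MLP, Conv2dNormActivation, SqueezeExcitation

block = Conv2dNormActivation(3, 16, kernel_size=3, stride=2)   # Conv2d + BatchNorm2d + ReLU
se = SqueezeExcitation(input_channels=16, squeeze_channels=4)  # channel re-weighting
head = MLP(16, [32, 10], dropout=0.1)                          # Linear-ReLU-Dropout-Linear-Dropout

x = torch.rand(2, 3, 32, 32)
y = se(block(x))                    # (2, 16, 16, 16): stride 2 halves the resolution
logits = head(y.mean(dim=(2, 3)))   # global average pool, then the MLP -> (2, 10)
print(y.shape, logits.shape)
```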
==================================================================================================================
SOURCE CODE FILE: poolers.py
LINES: 1
SIZE: 11.96 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\poolers.py
ENCODING: utf-8
```py
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.fx
import torchvision
from torch import nn, Tensor
from torchvision.ops.boxes import box_area
from ..utils import _log_api_usage_once
from .roi_align import roi_align
# copying result_idx_in_level to a specific index in result[]
# is not supported by ONNX tracing yet.
# _onnx_merge_levels() is an implementation supported by ONNX
# that merges the levels to the right indices
@torch.jit.unused
def _onnx_merge_levels(levels: Tensor, unmerged_results: List[Tensor]) -> Tensor:
first_result = unmerged_results[0]
dtype, device = first_result.dtype, first_result.device
res = torch.zeros(
(levels.size(0), first_result.size(1), first_result.size(2), first_result.size(3)), dtype=dtype, device=device
)
for level in range(len(unmerged_results)):
index = torch.where(levels == level)[0].view(-1, 1, 1, 1)
index = index.expand(
index.size(0),
unmerged_results[level].size(1),
unmerged_results[level].size(2),
unmerged_results[level].size(3),
)
res = res.scatter(0, index, unmerged_results[level])
return res
# TODO: (eellison) T54974082 https://github.com/pytorch/pytorch/issues/26744
def initLevelMapper(
k_min: int,
k_max: int,
canonical_scale: int = 224,
canonical_level: int = 4,
eps: float = 1e-6,
):
return LevelMapper(k_min, k_max, canonical_scale, canonical_level, eps)
class LevelMapper:
"""Determine which FPN level each RoI in a set of RoIs should map to based
on the heuristic in the FPN paper.
Args:
k_min (int)
k_max (int)
canonical_scale (int)
canonical_level (int)
eps (float)
"""
def __init__(
self,
k_min: int,
k_max: int,
canonical_scale: int = 224,
canonical_level: int = 4,
eps: float = 1e-6,
):
self.k_min = k_min
self.k_max = k_max
self.s0 = canonical_scale
self.lvl0 = canonical_level
self.eps = eps
def __call__(self, boxlists: List[Tensor]) -> Tensor:
"""
Args:
boxlists (list[BoxList])
"""
# Compute level ids
s = torch.sqrt(torch.cat([box_area(boxlist) for boxlist in boxlists]))
# Eqn.(1) in FPN paper
target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0) + torch.tensor(self.eps, dtype=s.dtype))
target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)
return (target_lvls.to(torch.int64) - self.k_min).to(torch.int64)
def _convert_to_roi_format(boxes: List[Tensor]) -> Tensor:
concat_boxes = torch.cat(boxes, dim=0)
device, dtype = concat_boxes.device, concat_boxes.dtype
ids = torch.cat(
[torch.full_like(b[:, :1], i, dtype=dtype, layout=torch.strided, device=device) for i, b in enumerate(boxes)],
dim=0,
)
rois = torch.cat([ids, concat_boxes], dim=1)
return rois
def _infer_scale(feature: Tensor, original_size: List[int]) -> float:
# assumption: the scale is of the form 2 ** (-k), with k integer
size = feature.shape[-2:]
possible_scales: List[float] = []
for s1, s2 in zip(size, original_size):
approx_scale = float(s1) / float(s2)
scale = 2 ** float(torch.tensor(approx_scale).log2().round())
possible_scales.append(scale)
return possible_scales[0]
@torch.fx.wrap
def _setup_scales(
features: List[Tensor], image_shapes: List[Tuple[int, int]], canonical_scale: int, canonical_level: int
) -> Tuple[List[float], LevelMapper]:
if not image_shapes:
raise ValueError("images list should not be empty")
max_x = 0
max_y = 0
for shape in image_shapes:
max_x = max(shape[0], max_x)
max_y = max(shape[1], max_y)
original_input_shape = (max_x, max_y)
scales = [_infer_scale(feat, original_input_shape) for feat in features]
# get the levels in the feature map by leveraging the fact that the network always
# downsamples by a factor of 2 at each level.
lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
map_levels = initLevelMapper(
int(lvl_min),
int(lvl_max),
canonical_scale=canonical_scale,
canonical_level=canonical_level,
)
return scales, map_levels
@torch.fx.wrap
def _filter_input(x: Dict[str, Tensor], featmap_names: List[str]) -> List[Tensor]:
x_filtered = []
for k, v in x.items():
if k in featmap_names:
x_filtered.append(v)
return x_filtered
@torch.fx.wrap
def _multiscale_roi_align(
x_filtered: List[Tensor],
boxes: List[Tensor],
output_size: List[int],
sampling_ratio: int,
scales: Optional[List[float]],
mapper: Optional[LevelMapper],
) -> Tensor:
"""
Args:
x_filtered (List[Tensor]): List of input tensors.
boxes (List[Tensor[N, 4]]): boxes to be used to perform the pooling operation, in
(x1, y1, x2, y2) format and in the image reference size, not the feature map
reference. The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
output_size (Union[List[Tuple[int, int]], List[int]]): size of the output
sampling_ratio (int): sampling ratio for ROIAlign
scales (Optional[List[float]]): If None, scales will be automatically inferred. Default value is None.
mapper (Optional[LevelMapper]): If none, mapper will be automatically inferred. Default value is None.
Returns:
result (Tensor)
"""
if scales is None or mapper is None:
raise ValueError("scales and mapper should not be None")
num_levels = len(x_filtered)
rois = _convert_to_roi_format(boxes)
if num_levels == 1:
return roi_align(
x_filtered[0],
rois,
output_size=output_size,
spatial_scale=scales[0],
sampling_ratio=sampling_ratio,
)
levels = mapper(boxes)
num_rois = len(rois)
num_channels = x_filtered[0].shape[1]
dtype, device = x_filtered[0].dtype, x_filtered[0].device
result = torch.zeros(
(
num_rois,
num_channels,
)
+ output_size,
dtype=dtype,
device=device,
)
tracing_results = []
for level, (per_level_feature, scale) in enumerate(zip(x_filtered, scales)):
idx_in_level = torch.where(levels == level)[0]
rois_per_level = rois[idx_in_level]
result_idx_in_level = roi_align(
per_level_feature,
rois_per_level,
output_size=output_size,
spatial_scale=scale,
sampling_ratio=sampling_ratio,
)
if torchvision._is_tracing():
tracing_results.append(result_idx_in_level.to(dtype))
else:
# result and result_idx_in_level's dtypes are based on dtypes of different
# elements in x_filtered. x_filtered contains tensors output by different
# layers. When autocast is active, it may choose different dtypes for
# different layers' outputs. Therefore, we defensively match result's dtype
# before copying elements from result_idx_in_level in the following op.
# We need to cast manually (can't rely on autocast to cast for us) because
# the op acts on result in-place, and autocast only affects out-of-place ops.
result[idx_in_level] = result_idx_in_level.to(result.dtype)
if torchvision._is_tracing():
result = _onnx_merge_levels(levels, tracing_results)
return result
class MultiScaleRoIAlign(nn.Module):
"""
Multi-scale RoIAlign pooling, which is useful for detection with or without FPN.
It infers the scale of the pooling via the heuristics specified in eq. 1
of the `Feature Pyramid Network paper <https://arxiv.org/abs/1612.03144>`_.
The keyword-only parameters ``canonical_scale`` and ``canonical_level``
correspond respectively to ``224`` and ``k0=4`` in eq. 1, and
have the following meaning: ``canonical_level`` is the target level of the pyramid from
which to pool a region of interest with ``w x h = canonical_scale x canonical_scale``.
Args:
featmap_names (List[str]): the names of the feature maps that will be used
for the pooling.
output_size (List[Tuple[int, int]] or List[int]): output size for the pooled region
sampling_ratio (int): sampling ratio for ROIAlign
canonical_scale (int, optional): canonical_scale for LevelMapper
canonical_level (int, optional): canonical_level for LevelMapper
Examples::
>>> m = torchvision.ops.MultiScaleRoIAlign(['feat1', 'feat3'], 3, 2)
>>> i = OrderedDict()
>>> i['feat1'] = torch.rand(1, 5, 64, 64)
>>> i['feat2'] = torch.rand(1, 5, 32, 32) # this feature won't be used in the pooling
>>> i['feat3'] = torch.rand(1, 5, 16, 16)
>>> # create some random bounding boxes
>>> boxes = torch.rand(6, 4) * 256; boxes[:, 2:] += boxes[:, :2]
>>> # original image size, before computing the feature maps
>>> image_sizes = [(512, 512)]
>>> output = m(i, [boxes], image_sizes)
>>> print(output.shape)
>>> torch.Size([6, 5, 3, 3])
"""
__annotations__ = {"scales": Optional[List[float]], "map_levels": Optional[LevelMapper]}
def __init__(
self,
featmap_names: List[str],
output_size: Union[int, Tuple[int], List[int]],
sampling_ratio: int,
*,
canonical_scale: int = 224,
canonical_level: int = 4,
):
super().__init__()
_log_api_usage_once(self)
if isinstance(output_size, int):
output_size = (output_size, output_size)
self.featmap_names = featmap_names
self.sampling_ratio = sampling_ratio
self.output_size = tuple(output_size)
self.scales = None
self.map_levels = None
self.canonical_scale = canonical_scale
self.canonical_level = canonical_level
def forward(
self,
x: Dict[str, Tensor],
boxes: List[Tensor],
image_shapes: List[Tuple[int, int]],
) -> Tensor:
"""
Args:
x (OrderedDict[Tensor]): feature maps for each level. They are assumed to have
all the same number of channels, but they can have different sizes.
boxes (List[Tensor[N, 4]]): boxes to be used to perform the pooling operation, in
(x1, y1, x2, y2) format and in the image reference size, not the feature map
reference. The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
image_shapes (List[Tuple[height, width]]): the sizes of each image before they
have been fed to a CNN to obtain feature maps. This allows us to infer the
scale factor for each one of the levels to be pooled.
Returns:
result (Tensor)
"""
x_filtered = _filter_input(x, self.featmap_names)
if self.scales is None or self.map_levels is None:
self.scales, self.map_levels = _setup_scales(
x_filtered, image_shapes, self.canonical_scale, self.canonical_level
)
return _multiscale_roi_align(
x_filtered,
boxes,
self.output_size,
self.sampling_ratio,
self.scales,
self.map_levels,
)
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}(featmap_names={self.featmap_names}, "
f"output_size={self.output_size}, sampling_ratio={self.sampling_ratio})"
)
```
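Not part of the file above: a runnable version of the docstring example for ``MultiScaleRoIAlign``. The feature-map names, strides, and image size are arbitrary; ``'feat2'`` is present but ignored because it is not listed in ``featmap_names``.
```py
import torch
from collections import OrderedDict
from torchvision.ops import MultiScaleRoIAlign

pooler = MultiScaleRoIAlign(["feat1", "feat3"], output_size=3, sampling_ratio=2)
feats = OrderedDict()
feats["feat1"] = torch.rand(1, 5, 64, 64)   # inferred scale 1/8 for a 512x512 image
feats["feat2"] = torch.rand(1, 5, 32, 32)   # not pooled: missing from featmap_names
feats["feat3"] = torch.rand(1, 5, 16, 16)   # inferred scale 1/32
boxes = torch.rand(6, 4) * 256
boxes[:, 2:] += boxes[:, :2]                # ensure x2 > x1 and y2 > y1
out = pooler(feats, [boxes], [(512, 512)])
print(out.shape)                            # torch.Size([6, 5, 3, 3])
```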
=======================================================================================================================
SOURCE CODE FILE: ps_roi_align.py
LINES: 1
SIZE: 3.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\ps_roi_align.py
ENCODING: utf-8
```py
import torch
import torch.fx
from torch import nn, Tensor
from torch.nn.modules.utils import _pair
from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format
@torch.fx.wrap
def ps_roi_align(
input: Tensor,
boxes: Tensor,
output_size: int,
spatial_scale: float = 1.0,
sampling_ratio: int = -1,
) -> Tensor:
"""
Performs Position-Sensitive Region of Interest (RoI) Align operator
mentioned in Light-Head R-CNN.
Args:
input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element
contains ``C`` feature maps of dimensions ``H x W``.
boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
format where the regions will be taken from.
The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
If a single Tensor is passed, then the first column should
contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.
If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i
in the batch.
output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling
is performed, as (height, width).
spatial_scale (float): a scaling factor that maps the box coordinates to
the input coordinates. For example, if your boxes are defined on the scale
of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
the original image), you'll want to set this to 0.5. Default: 1.0
sampling_ratio (int): number of sampling points in the interpolation grid
used to compute the output value of each pooled output bin. If > 0,
then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If
<= 0, then an adaptive number of grid points are used (computed as
``ceil(roi_width / output_width)``, and likewise for height). Default: -1
Returns:
Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(ps_roi_align)
_assert_has_ops()
check_roi_boxes_shape(boxes)
rois = boxes
output_size = _pair(output_size)
if not isinstance(rois, torch.Tensor):
rois = convert_boxes_to_roi_format(rois)
output, _ = torch.ops.torchvision.ps_roi_align(
input, rois, spatial_scale, output_size[0], output_size[1], sampling_ratio
)
return output
class PSRoIAlign(nn.Module):
"""
See :func:`ps_roi_align`.
"""
def __init__(
self,
output_size: int,
spatial_scale: float,
sampling_ratio: int,
):
super().__init__()
_log_api_usage_once(self)
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def forward(self, input: Tensor, rois: Tensor) -> Tensor:
return ps_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio)
def __repr__(self) -> str:
s = (
f"{self.__class__.__name__}("
f"output_size={self.output_size}"
f", spatial_scale={self.spatial_scale}"
f", sampling_ratio={self.sampling_ratio}"
f")"
)
return s
```
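Not part of the file above: a minimal ``ps_roi_align`` call (it requires the compiled torchvision ops). The key constraint illustrated is that the number of input channels must be a multiple of ``output_size[0] * output_size[1]``; the sizes and box values are otherwise arbitrary.
```py
import torch
from torchvision.ops import ps_roi_align

feat = torch.rand(1, 18, 32, 32)                    # 18 channels = 2 * (3 * 3) output bins
rois = torch.tensor([[0.0, 4.0, 4.0, 20.0, 20.0]])  # (batch_index, x1, y1, x2, y2)
out = ps_roi_align(feat, rois, output_size=3, spatial_scale=1.0, sampling_ratio=2)
print(out.shape)  # torch.Size([1, 2, 3, 3]): C / (3 * 3) = 2 output channels
```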
======================================================================================================================
SOURCE CODE FILE: ps_roi_pool.py
LINES: 1
SIZE: 2.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\ps_roi_pool.py
ENCODING: utf-8
```py
import torch
import torch.fx
from torch import nn, Tensor
from torch.nn.modules.utils import _pair
from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format
@torch.fx.wrap
def ps_roi_pool(
input: Tensor,
boxes: Tensor,
output_size: int,
spatial_scale: float = 1.0,
) -> Tensor:
"""
Performs Position-Sensitive Region of Interest (RoI) Pool operator
described in R-FCN
Args:
input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element
contains ``C`` feature maps of dimensions ``H x W``.
boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
format where the regions will be taken from.
The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
If a single Tensor is passed, then the first column should
contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.
If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i
in the batch.
output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling
is performed, as (height, width).
spatial_scale (float): a scaling factor that maps the box coordinates to
the input coordinates. For example, if your boxes are defined on the scale
of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
the original image), you'll want to set this to 0.5. Default: 1.0
Returns:
Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(ps_roi_pool)
_assert_has_ops()
check_roi_boxes_shape(boxes)
rois = boxes
output_size = _pair(output_size)
if not isinstance(rois, torch.Tensor):
rois = convert_boxes_to_roi_format(rois)
output, _ = torch.ops.torchvision.ps_roi_pool(input, rois, spatial_scale, output_size[0], output_size[1])
return output
class PSRoIPool(nn.Module):
"""
See :func:`ps_roi_pool`.
"""
def __init__(self, output_size: int, spatial_scale: float):
super().__init__()
_log_api_usage_once(self)
self.output_size = output_size
self.spatial_scale = spatial_scale
def forward(self, input: Tensor, rois: Tensor) -> Tensor:
return ps_roi_pool(input, rois, self.output_size, self.spatial_scale)
def __repr__(self) -> str:
s = f"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})"
return s
```
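Not part of the file above: the same kind of sketch for ``ps_roi_pool`` (also backed by the compiled torchvision ops), with shapes following the same channel constraint as ``ps_roi_align``.
```py
import torch
from torchvision.ops import ps_roi_pool

feat = torch.rand(1, 18, 32, 32)                    # 18 channels = 2 * (3 * 3) output bins
rois = torch.tensor([[0.0, 4.0, 4.0, 20.0, 20.0]])  # (batch_index, x1, y1, x2, y2)
out = ps_roi_pool(feat, rois, output_size=3, spatial_scale=1.0)
print(out.shape)  # torch.Size([1, 2, 3, 3])
```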
====================================================================================================================
SOURCE CODE FILE: roi_align.py
LINES: 1
SIZE: 11.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\roi_align.py
ENCODING: utf-8
```py
import functools
from typing import List, Union
import torch
import torch.fx
from torch import nn, Tensor
from torch._dynamo.utils import is_compile_supported
from torch.jit.annotations import BroadcastingList2
from torch.nn.modules.utils import _pair
from torchvision.extension import _assert_has_ops, _has_ops
from ..utils import _log_api_usage_once
from ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format
def lazy_compile(**compile_kwargs):
"""Lazily wrap a function with torch.compile on the first call
This avoids eagerly importing dynamo.
"""
def decorate_fn(fn):
@functools.wraps(fn)
def compile_hook(*args, **kwargs):
compiled_fn = torch.compile(fn, **compile_kwargs)
globals()[fn.__name__] = functools.wraps(fn)(compiled_fn)
return compiled_fn(*args, **kwargs)
return compile_hook
return decorate_fn
# NB: all inputs are tensors
def _bilinear_interpolate(
input, # [N, C, H, W]
roi_batch_ind, # [K]
y, # [K, PH, IY]
x, # [K, PW, IX]
ymask, # [K, IY]
xmask, # [K, IX]
):
_, channels, height, width = input.size()
# deal with inverse element out of feature map boundary
y = y.clamp(min=0)
x = x.clamp(min=0)
y_low = y.int()
x_low = x.int()
y_high = torch.where(y_low >= height - 1, height - 1, y_low + 1)
y_low = torch.where(y_low >= height - 1, height - 1, y_low)
y = torch.where(y_low >= height - 1, y.to(input.dtype), y)
x_high = torch.where(x_low >= width - 1, width - 1, x_low + 1)
x_low = torch.where(x_low >= width - 1, width - 1, x_low)
x = torch.where(x_low >= width - 1, x.to(input.dtype), x)
ly = y - y_low
lx = x - x_low
hy = 1.0 - ly
hx = 1.0 - lx
# do bilinear interpolation, but respect the masking!
# TODO: It's possible the masking here is unnecessary if y and
# x were clamped appropriately; hard to tell
def masked_index(
y, # [K, PH, IY]
x, # [K, PW, IX]
):
if ymask is not None:
assert xmask is not None
y = torch.where(ymask[:, None, :], y, 0)
x = torch.where(xmask[:, None, :], x, 0)
return input[
roi_batch_ind[:, None, None, None, None, None],
torch.arange(channels, device=input.device)[None, :, None, None, None, None],
y[:, None, :, None, :, None], # prev [K, PH, IY]
x[:, None, None, :, None, :], # prev [K, PW, IX]
] # [K, C, PH, PW, IY, IX]
v1 = masked_index(y_low, x_low)
v2 = masked_index(y_low, x_high)
v3 = masked_index(y_high, x_low)
v4 = masked_index(y_high, x_high)
# all ws preemptively [K, C, PH, PW, IY, IX]
def outer_prod(y, x):
return y[:, None, :, None, :, None] * x[:, None, None, :, None, :]
w1 = outer_prod(hy, hx)
w2 = outer_prod(hy, lx)
w3 = outer_prod(ly, hx)
w4 = outer_prod(ly, lx)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
return val
# TODO: this doesn't actually cache
# TODO: main library should make this easier to do
def maybe_cast(tensor):
if torch.is_autocast_enabled() and tensor.is_cuda and tensor.dtype != torch.double:
return tensor.float()
else:
return tensor
# This is a pure Python and differentiable implementation of roi_align. When
# run in eager mode, it uses a lot of memory, but when compiled it has
# acceptable memory usage. The main point of this implementation is that
# its backwards is deterministic.
# It is transcribed directly off of the roi_align CUDA kernel, see
# https://dev-discuss.pytorch.org/t/a-pure-python-implementation-of-roi-align-that-looks-just-like-its-cuda-kernel/1266
@lazy_compile(dynamic=True)
def _roi_align(input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
orig_dtype = input.dtype
input = maybe_cast(input)
rois = maybe_cast(rois)
_, _, height, width = input.size()
ph = torch.arange(pooled_height, device=input.device) # [PH]
pw = torch.arange(pooled_width, device=input.device) # [PW]
# input: [N, C, H, W]
# rois: [K, 5]
roi_batch_ind = rois[:, 0].int() # [K]
offset = 0.5 if aligned else 0.0
roi_start_w = rois[:, 1] * spatial_scale - offset # [K]
roi_start_h = rois[:, 2] * spatial_scale - offset # [K]
roi_end_w = rois[:, 3] * spatial_scale - offset # [K]
roi_end_h = rois[:, 4] * spatial_scale - offset # [K]
roi_width = roi_end_w - roi_start_w # [K]
roi_height = roi_end_h - roi_start_h # [K]
if not aligned:
roi_width = torch.clamp(roi_width, min=1.0) # [K]
roi_height = torch.clamp(roi_height, min=1.0) # [K]
bin_size_h = roi_height / pooled_height # [K]
bin_size_w = roi_width / pooled_width # [K]
exact_sampling = sampling_ratio > 0
roi_bin_grid_h = sampling_ratio if exact_sampling else torch.ceil(roi_height / pooled_height) # scalar or [K]
roi_bin_grid_w = sampling_ratio if exact_sampling else torch.ceil(roi_width / pooled_width) # scalar or [K]
"""
iy, ix = dims(2)
"""
if exact_sampling:
count = max(roi_bin_grid_h * roi_bin_grid_w, 1) # scalar
iy = torch.arange(roi_bin_grid_h, device=input.device) # [IY]
ix = torch.arange(roi_bin_grid_w, device=input.device) # [IX]
ymask = None
xmask = None
else:
count = torch.clamp(roi_bin_grid_h * roi_bin_grid_w, min=1) # [K]
# When doing adaptive sampling, the number of samples we need to do
# is data-dependent based on how big the ROIs are. This is a bit
# awkward because first-class dims can't actually handle this.
# So instead, we inefficiently suppose that we needed to sample ALL
# the points and mask out things that turned out to be unnecessary
iy = torch.arange(height, device=input.device) # [IY]
ix = torch.arange(width, device=input.device) # [IX]
ymask = iy[None, :] < roi_bin_grid_h[:, None] # [K, IY]
xmask = ix[None, :] < roi_bin_grid_w[:, None] # [K, IX]
def from_K(t):
return t[:, None, None]
y = (
from_K(roi_start_h)
+ ph[None, :, None] * from_K(bin_size_h)
+ (iy[None, None, :] + 0.5).to(input.dtype) * from_K(bin_size_h / roi_bin_grid_h)
) # [K, PH, IY]
x = (
from_K(roi_start_w)
+ pw[None, :, None] * from_K(bin_size_w)
+ (ix[None, None, :] + 0.5).to(input.dtype) * from_K(bin_size_w / roi_bin_grid_w)
) # [K, PW, IX]
val = _bilinear_interpolate(input, roi_batch_ind, y, x, ymask, xmask) # [K, C, PH, PW, IY, IX]
# Mask out samples that weren't actually adaptively needed
if not exact_sampling:
val = torch.where(ymask[:, None, None, None, :, None], val, 0)
val = torch.where(xmask[:, None, None, None, None, :], val, 0)
output = val.sum((-1, -2)) # remove IY, IX ~> [K, C, PH, PW]
if isinstance(count, torch.Tensor):
output /= count[:, None, None, None]
else:
output /= count
output = output.to(orig_dtype)
return output
@torch.fx.wrap
def roi_align(
input: Tensor,
boxes: Union[Tensor, List[Tensor]],
output_size: BroadcastingList2[int],
spatial_scale: float = 1.0,
sampling_ratio: int = -1,
aligned: bool = False,
) -> Tensor:
"""
Performs Region of Interest (RoI) Align operator with average pooling, as described in Mask R-CNN.
Args:
input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element
contains ``C`` feature maps of dimensions ``H x W``.
If the tensor is quantized, we expect a batch size of ``N == 1``.
boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
format where the regions will be taken from.
The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
If a single Tensor is passed, then the first column should
contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.
If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i
in the batch.
output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling
is performed, as (height, width).
spatial_scale (float): a scaling factor that maps the box coordinates to
the input coordinates. For example, if your boxes are defined on the scale
of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
the original image), you'll want to set this to 0.5. Default: 1.0
sampling_ratio (int): number of sampling points in the interpolation grid
used to compute the output value of each pooled output bin. If > 0,
then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If
            <= 0, then an adaptive number of grid points is used (computed as
            ``ceil(roi_width / output_width)``, and likewise for height). Default: -1
        aligned (bool): If False, use the legacy implementation.
            If True, pixel shift the box coordinates by -0.5 for a better alignment with the two
            neighboring pixel indices. This version is used in Detectron2.
Returns:
Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(roi_align)
check_roi_boxes_shape(boxes)
rois = boxes
output_size = _pair(output_size)
if not isinstance(rois, torch.Tensor):
rois = convert_boxes_to_roi_format(rois)
if not torch.jit.is_scripting():
if (
not _has_ops()
or (torch.are_deterministic_algorithms_enabled() and (input.is_cuda or input.is_mps or input.is_xpu))
) and is_compile_supported(input.device.type):
return _roi_align(input, rois, spatial_scale, output_size[0], output_size[1], sampling_ratio, aligned)
_assert_has_ops()
return torch.ops.torchvision.roi_align(
input, rois, spatial_scale, output_size[0], output_size[1], sampling_ratio, aligned
)
class RoIAlign(nn.Module):
"""
See :func:`roi_align`.
"""
def __init__(
self,
output_size: BroadcastingList2[int],
spatial_scale: float,
sampling_ratio: int,
aligned: bool = False,
):
super().__init__()
_log_api_usage_once(self)
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
self.aligned = aligned
def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor:
return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned)
def __repr__(self) -> str:
s = (
f"{self.__class__.__name__}("
f"output_size={self.output_size}"
f", spatial_scale={self.spatial_scale}"
f", sampling_ratio={self.sampling_ratio}"
f", aligned={self.aligned}"
f")"
)
return s
```
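A usage sketch of `roi_align` with illustrative shapes; `spatial_scale=0.125` assumes an 8x downsampled feature map. When the compiled ops are unavailable, or deterministic algorithms are enabled on CUDA/MPS/XPU and compilation is supported, the call dispatches to the pure-Python `_roi_align` above instead.

```py
import torch
from torchvision.ops import roi_align

feats = torch.rand(2, 256, 25, 25)                       # N=2 feature maps
boxes = torch.tensor([[0.0, 10.0, 10.0, 100.0, 100.0],   # (batch_index, x1, y1, x2, y2)
                      [1.0, 20.0, 30.0, 150.0, 170.0]])  # in original-image coordinates
out = roi_align(feats, boxes, output_size=(7, 7), spatial_scale=0.125,
                sampling_ratio=2, aligned=True)
print(out.shape)                                         # torch.Size([2, 256, 7, 7])
```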
|
===================================================================================================================
SOURCE CODE FILE: roi_pool.py
LINES: 1
SIZE: 2.94 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\roi_pool.py
ENCODING: utf-8
```py
from typing import List, Union
import torch
import torch.fx
from torch import nn, Tensor
from torch.jit.annotations import BroadcastingList2
from torch.nn.modules.utils import _pair
from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format
@torch.fx.wrap
def roi_pool(
input: Tensor,
boxes: Union[Tensor, List[Tensor]],
output_size: BroadcastingList2[int],
spatial_scale: float = 1.0,
) -> Tensor:
"""
Performs Region of Interest (RoI) Pool operator described in Fast R-CNN
Args:
input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element
contains ``C`` feature maps of dimensions ``H x W``.
boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
format where the regions will be taken from.
The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
If a single Tensor is passed, then the first column should
contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.
If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i
in the batch.
output_size (int or Tuple[int, int]): the size of the output after the cropping
is performed, as (height, width)
spatial_scale (float): a scaling factor that maps the box coordinates to
the input coordinates. For example, if your boxes are defined on the scale
of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
the original image), you'll want to set this to 0.5. Default: 1.0
Returns:
Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(roi_pool)
_assert_has_ops()
check_roi_boxes_shape(boxes)
rois = boxes
output_size = _pair(output_size)
if not isinstance(rois, torch.Tensor):
rois = convert_boxes_to_roi_format(rois)
output, _ = torch.ops.torchvision.roi_pool(input, rois, spatial_scale, output_size[0], output_size[1])
return output
class RoIPool(nn.Module):
"""
See :func:`roi_pool`.
"""
def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float):
super().__init__()
_log_api_usage_once(self)
self.output_size = output_size
self.spatial_scale = spatial_scale
def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor:
return roi_pool(input, rois, self.output_size, self.spatial_scale)
def __repr__(self) -> str:
s = f"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})"
return s
```
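A sketch of `roi_pool` using the list-of-tensors box format, which `convert_boxes_to_roi_format` flattens into the `[K, 5]` layout with a leading batch index; shapes are illustrative.

```py
import torch
from torchvision.ops import roi_pool

feats = torch.rand(2, 64, 16, 16)
boxes = [                                      # List[Tensor[L, 4]], one entry per image
    torch.tensor([[0.0, 0.0, 8.0, 8.0]]),
    torch.tensor([[2.0, 2.0, 14.0, 14.0],
                  [4.0, 4.0, 12.0, 12.0]]),
]
out = roi_pool(feats, boxes, output_size=(3, 3), spatial_scale=1.0)
print(out.shape)                               # torch.Size([3, 64, 3, 3]); K = 1 + 2 boxes
```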
|
===========================================================================================================================
SOURCE CODE FILE: stochastic_depth.py
LINES: 1
SIZE: 2.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\ops\stochastic_depth.py
ENCODING: utf-8
```py
import torch
import torch.fx
from torch import nn, Tensor
from ..utils import _log_api_usage_once
def stochastic_depth(input: Tensor, p: float, mode: str, training: bool = True) -> Tensor:
"""
Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth"
<https://arxiv.org/abs/1603.09382>`_ used for randomly dropping residual
branches of residual architectures.
Args:
        input (Tensor[N, ...]): The input tensor of arbitrary dimensions with the first one
            being its batch, i.e. a batch with ``N`` rows.
        p (float): probability of the input being zeroed.
mode (str): ``"batch"`` or ``"row"``.
``"batch"`` randomly zeroes the entire input, ``"row"`` zeroes
randomly selected rows from the batch.
        training: apply stochastic depth if ``True``. Default: ``True``
Returns:
Tensor[N, ...]: The randomly zeroed tensor.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(stochastic_depth)
if p < 0.0 or p > 1.0:
raise ValueError(f"drop probability has to be between 0 and 1, but got {p}")
if mode not in ["batch", "row"]:
raise ValueError(f"mode has to be either 'batch' or 'row', but got {mode}")
if not training or p == 0.0:
return input
survival_rate = 1.0 - p
if mode == "row":
size = [input.shape[0]] + [1] * (input.ndim - 1)
else:
size = [1] * input.ndim
noise = torch.empty(size, dtype=input.dtype, device=input.device)
noise = noise.bernoulli_(survival_rate)
if survival_rate > 0.0:
noise.div_(survival_rate)
return input * noise
torch.fx.wrap("stochastic_depth")
class StochasticDepth(nn.Module):
"""
See :func:`stochastic_depth`.
"""
def __init__(self, p: float, mode: str) -> None:
super().__init__()
_log_api_usage_once(self)
self.p = p
self.mode = mode
def forward(self, input: Tensor) -> Tensor:
return stochastic_depth(input, self.p, self.mode, self.training)
def __repr__(self) -> str:
s = f"{self.__class__.__name__}(p={self.p}, mode={self.mode})"
return s
```
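A small sketch of the two modes (pure PyTorch, no compiled extension needed): `"row"` drops each sample independently and rescales survivors by `1 / (1 - p)`, `"batch"` keeps or drops the whole tensor, and the module is a no-op outside of training.

```py
import torch
from torchvision.ops import StochasticDepth, stochastic_depth

x = torch.ones(4, 3, 8, 8)
out = stochastic_depth(x, p=0.5, mode="row", training=True)
print(out[:, 0, 0, 0])           # each entry is either 0.0 or 2.0 (= 1 / (1 - 0.5))

layer = StochasticDepth(p=0.2, mode="batch")
layer.eval()
assert torch.equal(layer(x), x)  # identity when not training or p == 0
```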
|
=========================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.05 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\__init__.py
ENCODING: utf-8
```py
from . import models, transforms, tv_tensors, utils
```
|
==================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.63 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\__init__.py
ENCODING: utf-8
```py
try:
import torchdata
except ModuleNotFoundError:
raise ModuleNotFoundError(
"`torchvision.prototype.datasets` depends on PyTorch's `torchdata` (https://github.com/pytorch/data). "
"You can install it with `pip install --pre torchdata --extra-index-url https://download.pytorch.org/whl/nightly/cpu"
) from None
from . import utils
from ._home import home
# Load this last, since some parts depend on the above being loaded first
from ._api import list_datasets, info, load, register_info, register_dataset # usort: skip
from ._folder import from_data_folder, from_image_folder
from ._builtin import *
```
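A hedged usage sketch of this prototype API: it needs `torchdata` (as the guard above enforces), and iterating a dataset downloads its resources on first use. The sample keys shown are those produced by the CIFAR-10 dataset later in this package.

```py
from torchvision.prototype import datasets

print(datasets.list_datasets())                    # ['caltech101', 'caltech256', 'celeba', ...]
dataset = datasets.load("cifar10", split="train")  # stored under home() / "cifar10" unless root= is given
sample = next(iter(dataset))
print(sorted(sample))                              # ['image', 'label']
```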
|
==============================================================================================================================
SOURCE CODE FILE: _api.py
LINES: 1
SIZE: 1.79 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_api.py
ENCODING: utf-8
```py
import pathlib
from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union
from torchvision.prototype.datasets import home
from torchvision.prototype.datasets.utils import Dataset
from torchvision.prototype.utils._internal import add_suggestion
T = TypeVar("T")
D = TypeVar("D", bound=Type[Dataset])
BUILTIN_INFOS: Dict[str, Dict[str, Any]] = {}
def register_info(name: str) -> Callable[[Callable[[], Dict[str, Any]]], Callable[[], Dict[str, Any]]]:
def wrapper(fn: Callable[[], Dict[str, Any]]) -> Callable[[], Dict[str, Any]]:
BUILTIN_INFOS[name] = fn()
return fn
return wrapper
BUILTIN_DATASETS = {}
def register_dataset(name: str) -> Callable[[D], D]:
def wrapper(dataset_cls: D) -> D:
BUILTIN_DATASETS[name] = dataset_cls
return dataset_cls
return wrapper
def list_datasets() -> List[str]:
return sorted(BUILTIN_DATASETS.keys())
def find(dct: Dict[str, T], name: str) -> T:
name = name.lower()
try:
return dct[name]
except KeyError as error:
raise ValueError(
add_suggestion(
f"Unknown dataset '{name}'.",
word=name,
possibilities=dct.keys(),
alternative_hint=lambda _: (
"You can use torchvision.datasets.list_datasets() to get a list of all available datasets."
),
)
) from error
def info(name: str) -> Dict[str, Any]:
return find(BUILTIN_INFOS, name)
def load(name: str, *, root: Optional[Union[str, pathlib.Path]] = None, **config: Any) -> Dataset:
dataset_cls = find(BUILTIN_DATASETS, name)
if root is None:
root = pathlib.Path(home()) / name
return dataset_cls(root, **config)
```
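The registries above are plain module-level dictionaries keyed by dataset name, so custom datasets can participate in `load`/`info` as well. A rough sketch, assuming the abstract `Dataset` base class only requires `_resources`, `_datapipe`, and `__len__` (as the builtin datasets in this package suggest); the name and datapipe below are made up.

```py
from typing import Any, Dict, List

from torchdata.datapipes.iter import IterableWrapper, IterDataPipe
from torchvision.prototype.datasets import load, register_dataset, register_info
from torchvision.prototype.datasets.utils import Dataset, OnlineResource

NAME = "toy"  # hypothetical dataset name


@register_info(NAME)
def _toy_info() -> Dict[str, Any]:
    return dict(categories=["foo", "bar"])


@register_dataset(NAME)
class Toy(Dataset):
    def _resources(self) -> List[OnlineResource]:
        return []  # nothing to download

    def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
        return IterableWrapper([dict(label=0), dict(label=1)])

    def __len__(self) -> int:
        return 2


dataset = load(NAME, root=".")  # find() looks the name up in BUILTIN_DATASETS
print(list(dataset))            # [{'label': 0}, {'label': 1}]
```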
|
===========================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\__init__.py
ENCODING: utf-8
```py
from .caltech import Caltech101, Caltech256
from .celeba import CelebA
from .cifar import Cifar10, Cifar100
from .clevr import CLEVR
from .coco import Coco
from .country211 import Country211
from .cub200 import CUB200
from .dtd import DTD
from .eurosat import EuroSAT
from .fer2013 import FER2013
from .food101 import Food101
from .gtsrb import GTSRB
from .imagenet import ImageNet
from .mnist import EMNIST, FashionMNIST, KMNIST, MNIST, QMNIST
from .oxford_iiit_pet import OxfordIIITPet
from .pcam import PCAM
from .sbd import SBD
from .semeion import SEMEION
from .stanford_cars import StanfordCars
from .svhn import SVHN
from .usps import USPS
from .voc import VOC
```
|
==========================================================================================================================================
SOURCE CODE FILE: caltech.py
LINES: 1
SIZE: 6.91 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\caltech.py
ENCODING: utf-8
```py
import pathlib
import re
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
import torch
from torchdata.datapipes.iter import Filter, IterDataPipe, IterKeyZipper, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
read_categories_file,
read_mat,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
@register_info("caltech101")
def _caltech101_info() -> Dict[str, Any]:
return dict(categories=read_categories_file("caltech101"))
@register_dataset("caltech101")
class Caltech101(Dataset):
"""
- **homepage**: https://data.caltech.edu/records/20086
- **dependencies**:
        - `scipy <https://scipy.org/>`_
"""
def __init__(
self,
root: Union[str, pathlib.Path],
skip_integrity_check: bool = False,
) -> None:
self._categories = _caltech101_info()["categories"]
super().__init__(
root,
dependencies=("scipy",),
skip_integrity_check=skip_integrity_check,
)
def _resources(self) -> List[OnlineResource]:
images = GDriveResource(
"137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp",
file_name="101_ObjectCategories.tar.gz",
sha256="af6ece2f339791ca20f855943d8b55dd60892c0a25105fcd631ee3d6430f9926",
preprocess="decompress",
)
anns = GDriveResource(
"175kQy3UsZ0wUEHZjqkUDdNVssr7bgh_m",
file_name="Annotations.tar",
sha256="1717f4e10aa837b05956e3f4c94456527b143eec0d95e935028b30aff40663d8",
)
return [images, anns]
_IMAGES_NAME_PATTERN = re.compile(r"image_(?P<id>\d+)[.]jpg")
_ANNS_NAME_PATTERN = re.compile(r"annotation_(?P<id>\d+)[.]mat")
_ANNS_CATEGORY_MAP = {
"Faces_2": "Faces",
"Faces_3": "Faces_easy",
"Motorbikes_16": "Motorbikes",
"Airplanes_Side_2": "airplanes",
}
def _is_not_background_image(self, data: Tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return path.parent.name != "BACKGROUND_Google"
def _is_ann(self, data: Tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return bool(self._ANNS_NAME_PATTERN.match(path.name))
def _images_key_fn(self, data: Tuple[str, Any]) -> Tuple[str, str]:
path = pathlib.Path(data[0])
category = path.parent.name
id = self._IMAGES_NAME_PATTERN.match(path.name).group("id") # type: ignore[union-attr]
return category, id
def _anns_key_fn(self, data: Tuple[str, Any]) -> Tuple[str, str]:
path = pathlib.Path(data[0])
category = path.parent.name
if category in self._ANNS_CATEGORY_MAP:
category = self._ANNS_CATEGORY_MAP[category]
id = self._ANNS_NAME_PATTERN.match(path.name).group("id") # type: ignore[union-attr]
return category, id
def _prepare_sample(
self, data: Tuple[Tuple[str, str], Tuple[Tuple[str, BinaryIO], Tuple[str, BinaryIO]]]
) -> Dict[str, Any]:
key, (image_data, ann_data) = data
category, _ = key
image_path, image_buffer = image_data
ann_path, ann_buffer = ann_data
image = EncodedImage.from_file(image_buffer)
ann = read_mat(ann_buffer)
return dict(
label=Label.from_category(category, categories=self._categories),
image_path=image_path,
image=image,
ann_path=ann_path,
bounding_boxes=BoundingBoxes(
ann["box_coord"].astype(np.int64).squeeze()[[2, 0, 3, 1]],
format="xyxy",
spatial_size=image.spatial_size,
),
contour=torch.as_tensor(ann["obj_contour"].T),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, anns_dp = resource_dps
images_dp = Filter(images_dp, self._is_not_background_image)
images_dp = hint_shuffling(images_dp)
images_dp = hint_sharding(images_dp)
anns_dp = Filter(anns_dp, self._is_ann)
dp = IterKeyZipper(
images_dp,
anns_dp,
key_fn=self._images_key_fn,
ref_key_fn=self._anns_key_fn,
buffer_size=INFINITE_BUFFER_SIZE,
keep_key=True,
)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 8677
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, self._is_not_background_image)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
@register_info("caltech256")
def _caltech256_info() -> Dict[str, Any]:
return dict(categories=read_categories_file("caltech256"))
@register_dataset("caltech256")
class Caltech256(Dataset):
"""
- **homepage**: https://data.caltech.edu/records/20087
"""
def __init__(
self,
root: Union[str, pathlib.Path],
skip_integrity_check: bool = False,
) -> None:
self._categories = _caltech256_info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
GDriveResource(
"1r6o0pSROcV1_VwT4oSjA2FBUSCWGuxLK",
file_name="256_ObjectCategories.tar",
sha256="08ff01b03c65566014ae88eb0490dbe4419fc7ac4de726ee1163e39fd809543e",
)
]
def _is_not_rogue_file(self, data: Tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return path.name != "RENAME2"
def _prepare_sample(self, data: Tuple[str, BinaryIO]) -> Dict[str, Any]:
path, buffer = data
return dict(
path=path,
image=EncodedImage.from_file(buffer),
label=Label(int(pathlib.Path(path).parent.name.split(".", 1)[0]) - 1, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, self._is_not_rogue_file)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 30607
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dir_names = {pathlib.Path(path).parent.name for path, _ in dp}
return [name.split(".")[1] for name in sorted(dir_names)]
```
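For orientation, a sketch of what one sample of `caltech101` looks like through the prototype API; first use downloads the archives from Google Drive and needs `scipy` for the `.mat` annotations.

```py
from torchvision.prototype import datasets

caltech = datasets.load("caltech101")
sample = next(iter(caltech))
print(sorted(sample))  # ['ann_path', 'bounding_boxes', 'contour', 'image', 'image_path', 'label']
```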
|
=========================================================================================================================================
SOURCE CODE FILE: celeba.py
LINES: 1
SIZE: 7.16 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\celeba.py
ENCODING: utf-8
```py
import csv
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Optional, Sequence, Tuple, Union
import torch
from torchdata.datapipes.iter import Filter, IterDataPipe, IterKeyZipper, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
csv.register_dialect("celeba", delimiter=" ", skipinitialspace=True)
class CelebACSVParser(IterDataPipe[Tuple[str, Dict[str, str]]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[Any, BinaryIO]],
*,
fieldnames: Optional[Sequence[str]] = None,
) -> None:
self.datapipe = datapipe
self.fieldnames = fieldnames
def __iter__(self) -> Iterator[Tuple[str, Dict[str, str]]]:
for _, file in self.datapipe:
try:
lines = (line.decode() for line in file)
if self.fieldnames:
fieldnames = self.fieldnames
else:
# The first row is skipped, because it only contains the number of samples
next(lines)
# Empty field names are filtered out, because some files have an extra white space after the header
                    # line, which is recognized as an extra column
fieldnames = [name for name in next(csv.reader([next(lines)], dialect="celeba")) if name]
# Some files do not include a label for the image ID column
if fieldnames[0] != "image_id":
fieldnames.insert(0, "image_id")
for line in csv.DictReader(lines, fieldnames=fieldnames, dialect="celeba"):
yield line.pop("image_id"), line
finally:
file.close()
NAME = "celeba"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict()
@register_dataset(NAME)
class CelebA(Dataset):
"""
- **homepage**: https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
splits = GDriveResource(
"0B7EVK8r0v71pY0NSMzRuSXJEVkk",
sha256="fc955bcb3ef8fbdf7d5640d9a8693a8431b5f2ee291a5c1449a1549e7e073fe7",
file_name="list_eval_partition.txt",
)
images = GDriveResource(
"0B7EVK8r0v71pZjFTYXZWM3FlRnM",
sha256="46fb89443c578308acf364d7d379fe1b9efb793042c0af734b6112e4fd3a8c74",
file_name="img_align_celeba.zip",
)
identities = GDriveResource(
"1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS",
sha256="c6143857c3e2630ac2da9f782e9c1232e5e59be993a9d44e8a7916c78a6158c0",
file_name="identity_CelebA.txt",
)
attributes = GDriveResource(
"0B7EVK8r0v71pblRyaVFSWGxPY0U",
sha256="f0e5da289d5ccf75ffe8811132694922b60f2af59256ed362afa03fefba324d0",
file_name="list_attr_celeba.txt",
)
bounding_boxes = GDriveResource(
"0B7EVK8r0v71pbThiMVRxWXZ4dU0",
sha256="7487a82e57c4bb956c5445ae2df4a91ffa717e903c5fa22874ede0820c8ec41b",
file_name="list_bbox_celeba.txt",
)
landmarks = GDriveResource(
"0B7EVK8r0v71pd0FJY3Blby1HUTQ",
sha256="6c02a87569907f6db2ba99019085697596730e8129f67a3d61659f198c48d43b",
file_name="list_landmarks_align_celeba.txt",
)
return [splits, images, identities, attributes, bounding_boxes, landmarks]
def _filter_split(self, data: Tuple[str, Dict[str, str]]) -> bool:
split_id = {
"train": "0",
"val": "1",
"test": "2",
}[self._split]
return data[1]["split_id"] == split_id
def _prepare_sample(
self,
data: Tuple[
Tuple[str, Tuple[Tuple[str, List[str]], Tuple[str, BinaryIO]]],
Tuple[
Tuple[str, Dict[str, str]],
Tuple[str, Dict[str, str]],
Tuple[str, Dict[str, str]],
Tuple[str, Dict[str, str]],
],
],
) -> Dict[str, Any]:
split_and_image_data, ann_data = data
_, (_, image_data) = split_and_image_data
path, buffer = image_data
image = EncodedImage.from_file(buffer)
(_, identity), (_, attributes), (_, bounding_boxes), (_, landmarks) = ann_data
return dict(
path=path,
image=image,
identity=Label(int(identity["identity"])),
attributes={attr: value == "1" for attr, value in attributes.items()},
bounding_boxes=BoundingBoxes(
[int(bounding_boxes[key]) for key in ("x_1", "y_1", "width", "height")],
format="xywh",
spatial_size=image.spatial_size,
),
landmarks={
landmark: torch.tensor((int(landmarks[f"{landmark}_x"]), int(landmarks[f"{landmark}_y"])))
for landmark in {key[:-2] for key in landmarks.keys()}
},
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
splits_dp, images_dp, identities_dp, attributes_dp, bounding_boxes_dp, landmarks_dp = resource_dps
splits_dp = CelebACSVParser(splits_dp, fieldnames=("image_id", "split_id"))
splits_dp = Filter(splits_dp, self._filter_split)
splits_dp = hint_shuffling(splits_dp)
splits_dp = hint_sharding(splits_dp)
anns_dp = Zipper(
*[
CelebACSVParser(dp, fieldnames=fieldnames)
for dp, fieldnames in (
(identities_dp, ("image_id", "identity")),
(attributes_dp, None),
(bounding_boxes_dp, None),
(landmarks_dp, None),
)
]
)
dp = IterKeyZipper(
splits_dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=path_accessor("name"),
buffer_size=INFINITE_BUFFER_SIZE,
keep_key=True,
)
dp = IterKeyZipper(
dp,
anns_dp,
key_fn=getitem(0),
ref_key_fn=getitem(0, 0),
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 162_770,
"val": 19_867,
"test": 19_962,
}[self._split]
```
|
========================================================================================================================================
SOURCE CODE FILE: cifar.py
LINES: 1
SIZE: 4.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\cifar.py
ENCODING: utf-8
```py
import abc
import io
import pathlib
import pickle
from typing import Any, BinaryIO, cast, Dict, Iterator, List, Optional, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
class CifarFileReader(IterDataPipe[Tuple[np.ndarray, int]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]], *, labels_key: str) -> None:
self.datapipe = datapipe
self.labels_key = labels_key
def __iter__(self) -> Iterator[Tuple[np.ndarray, int]]:
for mapping in self.datapipe:
image_arrays = mapping["data"].reshape((-1, 3, 32, 32))
category_idcs = mapping[self.labels_key]
yield from iter(zip(image_arrays, category_idcs))
class _CifarBase(Dataset):
_FILE_NAME: str
_SHA256: str
_LABELS_KEY: str
_META_FILE_NAME: str
_CATEGORIES_KEY: str
_categories: List[str]
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
@abc.abstractmethod
def _is_data_file(self, data: Tuple[str, BinaryIO]) -> Optional[int]:
pass
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
f"https://www.cs.toronto.edu/~kriz/{self._FILE_NAME}",
sha256=self._SHA256,
)
]
def _unpickle(self, data: Tuple[str, io.BytesIO]) -> Dict[str, Any]:
_, file = data
content = cast(Dict[str, Any], pickle.load(file, encoding="latin1"))
file.close()
return content
def _prepare_sample(self, data: Tuple[np.ndarray, int]) -> Dict[str, Any]:
image_array, category_idx = data
return dict(
image=Image(image_array),
label=Label(category_idx, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, self._is_data_file)
dp = Mapper(dp, self._unpickle)
dp = CifarFileReader(dp, labels_key=self._LABELS_KEY)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 50_000 if self._split == "train" else 10_000
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", self._META_FILE_NAME))
dp = Mapper(dp, self._unpickle)
return cast(List[str], next(iter(dp))[self._CATEGORIES_KEY])
@register_info("cifar10")
def _cifar10_info() -> Dict[str, Any]:
return dict(categories=read_categories_file("cifar10"))
@register_dataset("cifar10")
class Cifar10(_CifarBase):
"""
- **homepage**: https://www.cs.toronto.edu/~kriz/cifar.html
"""
_FILE_NAME = "cifar-10-python.tar.gz"
_SHA256 = "6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce"
_LABELS_KEY = "labels"
_META_FILE_NAME = "batches.meta"
_CATEGORIES_KEY = "label_names"
_categories = _cifar10_info()["categories"]
def _is_data_file(self, data: Tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return path.name.startswith("data" if self._split == "train" else "test")
@register_info("cifar100")
def _cifar100_info() -> Dict[str, Any]:
return dict(categories=read_categories_file("cifar100"))
@register_dataset("cifar100")
class Cifar100(_CifarBase):
"""
- **homepage**: https://www.cs.toronto.edu/~kriz/cifar.html
"""
_FILE_NAME = "cifar-100-python.tar.gz"
_SHA256 = "85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"
_LABELS_KEY = "fine_labels"
_META_FILE_NAME = "meta"
_CATEGORIES_KEY = "fine_label_names"
_categories = _cifar100_info()["categories"]
def _is_data_file(self, data: Tuple[str, Any]) -> bool:
path = pathlib.Path(data[0])
return path.name == self._split
```
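A toy illustration of the record layout `CifarFileReader` consumes: each unpickled batch maps `"data"` to a flat `(N, 3072)` uint8 array that reshapes to `(N, 3, 32, 32)`, plus per-image labels under `"labels"` (CIFAR-10) or `"fine_labels"` (CIFAR-100). The values below are made up.

```py
import numpy as np

batch = {"data": np.zeros((4, 3 * 32 * 32), dtype=np.uint8), "labels": [0, 1, 2, 3]}
images = batch["data"].reshape((-1, 3, 32, 32))  # what CifarFileReader does per mapping
samples = list(zip(images, batch["labels"]))     # -> 4 (image_array, category_idx) pairs
print(samples[0][0].shape, samples[0][1])        # (3, 32, 32) 0
```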
|
========================================================================================================================================
SOURCE CODE FILE: clevr.py
LINES: 1
SIZE: 3.62 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\clevr.py
ENCODING: utf-8
```py
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, JsonParser, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "clevr"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict()
@register_dataset(NAME)
class CLEVR(Dataset):
"""
- **homepage**: https://cs.stanford.edu/people/jcjohns/clevr/
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip",
sha256="5cd61cf1096ed20944df93c9adb31e74d189b8459a94f54ba00090e5c59936d1",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parent.name == "scenes":
return 1
else:
return None
def _filter_scene_anns(self, data: Tuple[str, Any]) -> bool:
key, _ = data
return key == "scenes"
def _add_empty_anns(self, data: Tuple[str, BinaryIO]) -> Tuple[Tuple[str, BinaryIO], None]:
return data, None
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Optional[Dict[str, Any]]]) -> Dict[str, Any]:
image_data, scenes_data = data
path, buffer = image_data
return dict(
path=path,
image=EncodedImage.from_file(buffer),
label=Label(len(scenes_data["objects"])) if scenes_data else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, scenes_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
images_dp = Filter(images_dp, path_comparator("parent.name", self._split))
images_dp = hint_shuffling(images_dp)
images_dp = hint_sharding(images_dp)
if self._split != "test":
scenes_dp = Filter(scenes_dp, path_comparator("name", f"CLEVR_{self._split}_scenes.json"))
scenes_dp = JsonParser(scenes_dp)
scenes_dp = Mapper(scenes_dp, getitem(1, "scenes"))
scenes_dp = UnBatcher(scenes_dp)
dp = IterKeyZipper(
images_dp,
scenes_dp,
key_fn=path_accessor("name"),
ref_key_fn=getitem("image_filename"),
buffer_size=INFINITE_BUFFER_SIZE,
)
else:
for _, file in scenes_dp:
file.close()
dp = Mapper(images_dp, self._add_empty_anns)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 70_000 if self._split == "train" else 15_000
```
|
=======================================================================================================================================
SOURCE CODE FILE: coco.py
LINES: 1
SIZE: 10.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\coco.py
ENCODING: utf-8
```py
import pathlib
import re
from collections import defaultdict, OrderedDict
from typing import Any, BinaryIO, cast, Dict, List, Optional, Tuple, Union
import torch
from torchdata.datapipes.iter import (
Demultiplexer,
Filter,
Grouper,
IterDataPipe,
IterKeyZipper,
JsonParser,
Mapper,
UnBatcher,
)
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
MappingIterator,
path_accessor,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes, Mask
from .._api import register_dataset, register_info
NAME = "coco"
@register_info(NAME)
def _info() -> Dict[str, Any]:
categories, super_categories = zip(*read_categories_file(NAME))
return dict(categories=categories, super_categories=super_categories)
@register_dataset(NAME)
class Coco(Dataset):
"""
- **homepage**: https://cocodataset.org/
- **dependencies**:
        - `pycocotools <https://github.com/cocodataset/cocoapi>`_
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
year: str = "2017",
annotations: Optional[str] = "instances",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val"})
self._year = self._verify_str_arg(year, "year", {"2017", "2014"})
self._annotations = (
self._verify_str_arg(annotations, "annotations", self._ANN_DECODERS.keys())
if annotations is not None
else None
)
info = _info()
categories, super_categories = info["categories"], info["super_categories"]
self._categories = categories
self._category_to_super_category = dict(zip(categories, super_categories))
super().__init__(root, dependencies=("pycocotools",), skip_integrity_check=skip_integrity_check)
_IMAGE_URL_BASE = "http://images.cocodataset.org/zips"
_IMAGES_CHECKSUMS = {
("2014", "train"): "ede4087e640bddba550e090eae701092534b554b42b05ac33f0300b984b31775",
("2014", "val"): "fe9be816052049c34717e077d9e34aa60814a55679f804cd043e3cbee3b9fde0",
("2017", "train"): "69a8bb58ea5f8f99d24875f21416de2e9ded3178e903f1f7603e283b9e06d929",
("2017", "val"): "4f7e2ccb2866ec5041993c9cf2a952bbed69647b115d0f74da7ce8f4bef82f05",
}
_META_URL_BASE = "http://images.cocodataset.org/annotations"
_META_CHECKSUMS = {
"2014": "031296bbc80c45a1d1f76bf9a90ead27e94e99ec629208449507a4917a3bf009",
"2017": "113a836d90195ee1f884e704da6304dfaaecff1f023f49b6ca93c4aaae470268",
}
def _resources(self) -> List[OnlineResource]:
images = HttpResource(
f"{self._IMAGE_URL_BASE}/{self._split}{self._year}.zip",
sha256=self._IMAGES_CHECKSUMS[(self._year, self._split)],
)
meta = HttpResource(
f"{self._META_URL_BASE}/annotations_trainval{self._year}.zip",
sha256=self._META_CHECKSUMS[self._year],
)
return [images, meta]
def _segmentation_to_mask(
self, segmentation: Any, *, is_crowd: bool, spatial_size: Tuple[int, int]
) -> torch.Tensor:
from pycocotools import mask
if is_crowd:
segmentation = mask.frPyObjects(segmentation, *spatial_size)
else:
segmentation = mask.merge(mask.frPyObjects(segmentation, *spatial_size))
return torch.from_numpy(mask.decode(segmentation)).to(torch.bool)
def _decode_instances_anns(self, anns: List[Dict[str, Any]], image_meta: Dict[str, Any]) -> Dict[str, Any]:
spatial_size = (image_meta["height"], image_meta["width"])
labels = [ann["category_id"] for ann in anns]
return dict(
segmentations=Mask(
torch.stack(
[
self._segmentation_to_mask(
ann["segmentation"], is_crowd=ann["iscrowd"], spatial_size=spatial_size
)
for ann in anns
]
)
),
areas=torch.as_tensor([ann["area"] for ann in anns]),
crowds=torch.as_tensor([ann["iscrowd"] for ann in anns], dtype=torch.bool),
bounding_boxes=BoundingBoxes(
[ann["bbox"] for ann in anns],
format="xywh",
spatial_size=spatial_size,
),
labels=Label(labels, categories=self._categories),
super_categories=[self._category_to_super_category[self._categories[label]] for label in labels],
ann_ids=[ann["id"] for ann in anns],
)
def _decode_captions_ann(self, anns: List[Dict[str, Any]], image_meta: Dict[str, Any]) -> Dict[str, Any]:
return dict(
captions=[ann["caption"] for ann in anns],
ann_ids=[ann["id"] for ann in anns],
)
_ANN_DECODERS = OrderedDict(
[
("instances", _decode_instances_anns),
("captions", _decode_captions_ann),
]
)
_META_FILE_PATTERN = re.compile(
rf"(?P<annotations>({'|'.join(_ANN_DECODERS.keys())}))_(?P<split>[a-zA-Z]+)(?P<year>\d+)[.]json"
)
def _filter_meta_files(self, data: Tuple[str, Any]) -> bool:
match = self._META_FILE_PATTERN.match(pathlib.Path(data[0]).name)
return bool(
match
and match["split"] == self._split
and match["year"] == self._year
and match["annotations"] == self._annotations
)
def _classify_meta(self, data: Tuple[str, Any]) -> Optional[int]:
key, _ = data
if key == "images":
return 0
elif key == "annotations":
return 1
else:
return None
def _prepare_image(self, data: Tuple[str, BinaryIO]) -> Dict[str, Any]:
path, buffer = data
return dict(
path=path,
image=EncodedImage.from_file(buffer),
)
def _prepare_sample(
self,
data: Tuple[Tuple[List[Dict[str, Any]], Dict[str, Any]], Tuple[str, BinaryIO]],
) -> Dict[str, Any]:
ann_data, image_data = data
anns, image_meta = ann_data
sample = self._prepare_image(image_data)
# this method is only called if we have annotations
annotations = cast(str, self._annotations)
sample.update(self._ANN_DECODERS[annotations](self, anns, image_meta))
return sample
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, meta_dp = resource_dps
if self._annotations is None:
dp = hint_shuffling(images_dp)
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
return Mapper(dp, self._prepare_image)
meta_dp = Filter(meta_dp, self._filter_meta_files)
meta_dp = JsonParser(meta_dp)
meta_dp = Mapper(meta_dp, getitem(1))
meta_dp: IterDataPipe[Dict[str, Dict[str, Any]]] = MappingIterator(meta_dp)
images_meta_dp, anns_meta_dp = Demultiplexer(
meta_dp,
2,
self._classify_meta,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
images_meta_dp = Mapper(images_meta_dp, getitem(1))
images_meta_dp = UnBatcher(images_meta_dp)
anns_meta_dp = Mapper(anns_meta_dp, getitem(1))
anns_meta_dp = UnBatcher(anns_meta_dp)
anns_meta_dp = Grouper(anns_meta_dp, group_key_fn=getitem("image_id"), buffer_size=INFINITE_BUFFER_SIZE)
anns_meta_dp = hint_shuffling(anns_meta_dp)
anns_meta_dp = hint_sharding(anns_meta_dp)
anns_dp = IterKeyZipper(
anns_meta_dp,
images_meta_dp,
key_fn=getitem(0, "image_id"),
ref_key_fn=getitem("id"),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
anns_dp,
images_dp,
key_fn=getitem(1, "file_name"),
ref_key_fn=path_accessor("name"),
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
("train", "2017"): defaultdict(lambda: 118_287, instances=117_266),
("train", "2014"): defaultdict(lambda: 82_783, instances=82_081),
("val", "2017"): defaultdict(lambda: 5_000, instances=4_952),
("val", "2014"): defaultdict(lambda: 40_504, instances=40_137),
}[(self._split, self._year)][
self._annotations # type: ignore[index]
]
def _generate_categories(self) -> Tuple[Tuple[str, str]]:
self._annotations = "instances"
resources = self._resources()
dp = resources[1].load(self._root)
dp = Filter(dp, self._filter_meta_files)
dp = JsonParser(dp)
_, meta = next(iter(dp))
# List[Tuple[super_category, id, category]]
label_data = [cast(Tuple[str, int, str], tuple(info.values())) for info in meta["categories"]]
# COCO actually defines 91 categories, but only 80 of them have instances. Still, the category_id refers to the
# full set. To keep the labels dense, we fill the gaps with N/A. Note that there are only 10 gaps, so the total
# number of categories is 90 rather than 91.
_, ids, _ = zip(*label_data)
missing_ids = set(range(1, max(ids) + 1)) - set(ids)
label_data.extend([("N/A", id, "N/A") for id in missing_ids])
# We also add a background category to be used during segmentation.
label_data.append(("N/A", 0, "__background__"))
super_categories, _, categories = zip(*sorted(label_data, key=lambda info: info[1]))
return cast(Tuple[Tuple[str, str]], tuple(zip(categories, super_categories)))
```
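The gap-filling at the end of `_generate_categories` above is easier to follow on toy data; the ids below are made up rather than real COCO ids.

```py
label_data = [("vehicle", 2, "car"), ("animal", 5, "dog")]  # (super_category, id, category)
_, ids, _ = zip(*label_data)
missing_ids = set(range(1, max(ids) + 1)) - set(ids)        # {1, 3, 4}
label_data.extend([("N/A", id, "N/A") for id in missing_ids])
label_data.append(("N/A", 0, "__background__"))             # dense ids 0..5 after sorting
super_categories, _, categories = zip(*sorted(label_data, key=lambda info: info[1]))
print(categories)  # ('__background__', 'N/A', 'car', 'N/A', 'N/A', 'dog')
```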
|
=============================================================================================================================================
SOURCE CODE FILE: country211.py
LINES: 1
SIZE: 2.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\country211.py
ENCODING: utf-8
```py
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "country211"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Country211(Dataset):
"""
- **homepage**: https://github.com/openai/CLIP/blob/main/data/country211.md
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
self._split_folder_name = "valid" if split == "val" else split
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://openaipublic.azureedge.net/clip/data/country211.tgz",
sha256="c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _filter_split(self, data: Tuple[str, Any], *, split: str) -> bool:
return pathlib.Path(data[0]).parent.parent.name == split
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, path_comparator("parent.parent.name", self._split_folder_name))
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 31_650,
"val": 10_550,
"test": 21_100,
}[self._split]
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
```
|
=========================================================================================================================================
SOURCE CODE FILE: cub200.py
LINES: 1
SIZE: 9.37 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\cub200.py
ENCODING: utf-8
```py
import csv
import functools
import pathlib
from typing import Any, BinaryIO, Callable, Dict, List, Optional, Tuple, Union
import torch
from torchdata.datapipes.iter import (
CSVDictParser,
CSVParser,
Demultiplexer,
Filter,
IterDataPipe,
IterKeyZipper,
LineReader,
Mapper,
)
from torchdata.datapipes.map import IterToMapConverter
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
read_categories_file,
read_mat,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
csv.register_dialect("cub200", delimiter=" ")
NAME = "cub200"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class CUB200(Dataset):
"""
- **homepage**: http://www.vision.caltech.edu/visipedia/CUB-200.html
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
year: str = "2011",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "test"))
self._year = self._verify_str_arg(year, "year", ("2010", "2011"))
self._categories = _info()["categories"]
super().__init__(
root,
# TODO: this will only be available after https://github.com/pytorch/vision/pull/5473
# dependencies=("scipy",),
skip_integrity_check=skip_integrity_check,
)
def _resources(self) -> List[OnlineResource]:
if self._year == "2011":
archive = GDriveResource(
"1hbzc_P1FuxMkcabkgn9ZKinBwW683j45",
file_name="CUB_200_2011.tgz",
sha256="0c685df5597a8b24909f6a7c9db6d11e008733779a671760afef78feb49bf081",
preprocess="decompress",
)
segmentations = GDriveResource(
"1EamOKGLoTuZdtcVYbHMWNpkn3iAVj8TP",
file_name="segmentations.tgz",
sha256="dc77f6cffea0cbe2e41d4201115c8f29a6320ecb04fffd2444f51b8066e4b84f",
preprocess="decompress",
)
return [archive, segmentations]
else: # self._year == "2010"
split = GDriveResource(
"1vZuZPqha0JjmwkdaS_XtYryE3Jf5Q1AC",
file_name="lists.tgz",
sha256="aeacbd5e3539ae84ea726e8a266a9a119c18f055cd80f3836d5eb4500b005428",
preprocess="decompress",
)
images = GDriveResource(
"1GDr1OkoXdhaXWGA8S3MAq3a522Tak-nx",
file_name="images.tgz",
sha256="2a6d2246bbb9778ca03aa94e2e683ccb4f8821a36b7f235c0822e659d60a803e",
preprocess="decompress",
)
anns = GDriveResource(
"16NsbTpMs5L6hT4hUJAmpW2u7wH326WTR",
file_name="annotations.tgz",
sha256="c17b7841c21a66aa44ba8fe92369cc95dfc998946081828b1d7b8a4b716805c1",
preprocess="decompress",
)
return [split, images, anns]
def _2011_classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.name == "train_test_split.txt":
return 1
elif path.name == "images.txt":
return 2
elif path.name == "bounding_boxes.txt":
return 3
else:
return None
def _2011_extract_file_name(self, rel_posix_path: str) -> str:
return rel_posix_path.rsplit("/", maxsplit=1)[1]
def _2011_filter_split(self, row: List[str]) -> bool:
_, split_id = row
return {
"0": "test",
"1": "train",
}[split_id] == self._split
def _2011_segmentation_key(self, data: Tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
return path.with_suffix(".jpg").name
def _2011_prepare_ann(
self, data: Tuple[str, Tuple[List[str], Tuple[str, BinaryIO]]], spatial_size: Tuple[int, int]
) -> Dict[str, Any]:
_, (bounding_boxes_data, segmentation_data) = data
segmentation_path, segmentation_buffer = segmentation_data
return dict(
bounding_boxes=BoundingBoxes(
[float(part) for part in bounding_boxes_data[1:]], format="xywh", spatial_size=spatial_size
),
segmentation_path=segmentation_path,
segmentation=EncodedImage.from_file(segmentation_buffer),
)
def _2010_split_key(self, data: str) -> str:
return data.rsplit("/", maxsplit=1)[1]
def _2010_anns_key(self, data: Tuple[str, BinaryIO]) -> Tuple[str, Tuple[str, BinaryIO]]:
path = pathlib.Path(data[0])
return path.with_suffix(".jpg").name, data
def _2010_prepare_ann(
self, data: Tuple[str, Tuple[str, BinaryIO]], spatial_size: Tuple[int, int]
) -> Dict[str, Any]:
_, (path, buffer) = data
content = read_mat(buffer)
return dict(
ann_path=path,
bounding_boxes=BoundingBoxes(
[int(content["bbox"][coord]) for coord in ("left", "bottom", "right", "top")],
format="xyxy",
spatial_size=spatial_size,
),
segmentation=torch.as_tensor(content["seg"]),
)
def _prepare_sample(
self,
data: Tuple[Tuple[str, Tuple[str, BinaryIO]], Any],
*,
prepare_ann_fn: Callable[[Any, Tuple[int, int]], Dict[str, Any]],
) -> Dict[str, Any]:
data, anns_data = data
_, image_data = data
path, buffer = image_data
image = EncodedImage.from_file(buffer)
return dict(
prepare_ann_fn(anns_data, image.spatial_size),
image=image,
label=Label(
int(pathlib.Path(path).parent.name.rsplit(".", 1)[0]) - 1,
categories=self._categories,
),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
prepare_ann_fn: Callable
if self._year == "2011":
archive_dp, segmentations_dp = resource_dps
images_dp, split_dp, image_files_dp, bounding_boxes_dp = Demultiplexer(
archive_dp, 4, self._2011_classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
image_files_dp = CSVParser(image_files_dp, dialect="cub200")
image_files_dp = Mapper(image_files_dp, self._2011_extract_file_name, input_col=1)
image_files_map = IterToMapConverter(image_files_dp)
split_dp = CSVParser(split_dp, dialect="cub200")
split_dp = Filter(split_dp, self._2011_filter_split)
split_dp = Mapper(split_dp, getitem(0))
split_dp = Mapper(split_dp, image_files_map.__getitem__)
bounding_boxes_dp = CSVParser(bounding_boxes_dp, dialect="cub200")
bounding_boxes_dp = Mapper(bounding_boxes_dp, image_files_map.__getitem__, input_col=0)
anns_dp = IterKeyZipper(
bounding_boxes_dp,
segmentations_dp,
key_fn=getitem(0),
ref_key_fn=self._2011_segmentation_key,
keep_key=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
prepare_ann_fn = self._2011_prepare_ann
else: # self._year == "2010"
split_dp, images_dp, anns_dp = resource_dps
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True, return_path=False)
split_dp = Mapper(split_dp, self._2010_split_key)
anns_dp = Mapper(anns_dp, self._2010_anns_key)
prepare_ann_fn = self._2010_prepare_ann
split_dp = hint_shuffling(split_dp)
split_dp = hint_sharding(split_dp)
dp = IterKeyZipper(
split_dp,
images_dp,
getitem(),
path_accessor("name"),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
anns_dp,
getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, functools.partial(self._prepare_sample, prepare_ann_fn=prepare_ann_fn))
def __len__(self) -> int:
return {
("train", "2010"): 3_000,
("test", "2010"): 3_033,
("train", "2011"): 5_994,
("test", "2011"): 5_794,
}[(self._split, self._year)]
def _generate_categories(self) -> List[str]:
self._year = "2011"
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", "classes.txt"))
dp = CSVDictParser(dp, fieldnames=("label", "category"), dialect="cub200")
return [row["category"].split(".")[1] for row in dp]
```
|
======================================================================================================================================
SOURCE CODE FILE: dtd.py
LINES: 1
SIZE: 4.67 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\dtd.py
ENCODING: utf-8
```py
import enum
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "dtd"
class DTDDemux(enum.IntEnum):
SPLIT = 0
JOINT_CATEGORIES = 1
IMAGES = 2
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class DTD(Dataset):
"""DTD Dataset.
homepage="https://www.robots.ox.ac.uk/~vgg/data/dtd/",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
fold: int = 1,
skip_validation_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
if not (1 <= fold <= 10):
raise ValueError(f"The fold parameter should be an integer in [1, 10]. Got {fold}")
self._fold = fold
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_validation_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz",
sha256="e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205",
preprocess="decompress",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parent.name == "labels":
if path.name == "labels_joint_anno.txt":
return DTDDemux.JOINT_CATEGORIES
return DTDDemux.SPLIT
elif path.parents[1].name == "images":
return DTDDemux.IMAGES
else:
return None
def _image_key_fn(self, data: Tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
# The split files contain hardcoded posix paths for the images, e.g. banded/banded_0001.jpg
return str(path.relative_to(path.parents[1]).as_posix())
def _prepare_sample(self, data: Tuple[Tuple[str, List[str]], Tuple[str, BinaryIO]]) -> Dict[str, Any]:
(_, joint_categories_data), image_data = data
_, *joint_categories = joint_categories_data
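        # joint_categories_data is a row from labels_joint_anno.txt: the image path followed by a variable number of
        # category names; any empty fields are filtered out below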
path, buffer = image_data
category = pathlib.Path(path).parent.name
return dict(
joint_categories={category for category in joint_categories if category},
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp = Demultiplexer(
archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
splits_dp = Filter(splits_dp, path_comparator("name", f"{self._split}{self._fold}.txt"))
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = hint_shuffling(splits_dp)
splits_dp = hint_sharding(splits_dp)
joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")
dp = IterKeyZipper(
splits_dp,
joint_categories_dp,
key_fn=getitem(),
ref_key_fn=getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._image_key_fn,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _filter_images(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == DTDDemux.IMAGES
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, self._filter_images)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
def __len__(self) -> int:
return 1_880 # All splits have the same length
```
|
==========================================================================================================================================
SOURCE CODE FILE: eurosat.py
LINES: 1
SIZE: 2.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\eurosat.py
ENCODING: utf-8
```py
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "eurosat"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=(
"AnnualCrop",
"Forest",
"HerbaceousVegetation",
"Highway",
"Industrial",
"Pasture",
"PermanentCrop",
"Residential",
"River",
"SeaLake",
)
)
@register_dataset(NAME)
class EuroSAT(Dataset):
"""EuroSAT Dataset.
homepage="https://github.com/phelber/eurosat",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://madm.dfki.de/files/sentinel/EuroSAT.zip",
sha256="8ebea626349354c5328b142b96d0430e647051f26efc2dc974c843f25ecf70bd",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 27_000
```
|
==========================================================================================================================================
SOURCE CODE FILE: fer2013.py
LINES: 1
SIZE: 2.45 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\fer2013.py
ENCODING: utf-8
```py
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import CSVDictParser, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, KaggleDownloadResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "fer2013"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=("angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"))
@register_dataset(NAME)
class FER2013(Dataset):
"""FER 2013 Dataset
homepage="https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_CHECKSUMS = {
"train": "a2b7c9360cc0b38d21187e5eece01c2799fce5426cdeecf746889cc96cda2d10",
"test": "dec8dfe8021e30cd6704b85ec813042b4a5d99d81cb55e023291a94104f575c3",
}
def _resources(self) -> List[OnlineResource]:
archive = KaggleDownloadResource(
"https://www.kaggle.com/c/challenges-in-representation-learning-facial-expression-recognition-challenge",
file_name=f"{self._split}.csv.zip",
sha256=self._CHECKSUMS[self._split],
)
return [archive]
def _prepare_sample(self, data: Dict[str, Any]) -> Dict[str, Any]:
label_id = data.get("emotion")
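        # the test CSV may not carry an "emotion" column, in which case label_id is None and no Label is attached;
        # "pixels" is a space-separated string of 48 * 48 = 2304 grayscale values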
return dict(
image=Image(torch.tensor([int(idx) for idx in data["pixels"].split()], dtype=torch.uint8).reshape(48, 48)),
label=Label(int(label_id), categories=self._categories) if label_id is not None else None,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVDictParser(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 28_709 if self._split == "train" else 3_589
```
|
==========================================================================================================================================
SOURCE CODE FILE: food101.py
LINES: 1
SIZE: 3.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\food101.py
ENCODING: utf-8
```py
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "food101"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Food101(Dataset):
"""Food 101 dataset
homepage="https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101",
"""
def __init__(self, root: Union[str, Path], *, split: str = "train", skip_integrity_check: bool = False) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
url="http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz",
sha256="d97d15e438b7f4498f96086a4f7e2fa42a32f2712e87d3295441b2b6314053a4",
preprocess="decompress",
)
]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parents[0].name == "meta":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[str, Tuple[str, BinaryIO]]) -> Dict[str, Any]:
id, (path, buffer) = data
return dict(
label=Label.from_category(id.split("/", 1)[0], categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _image_key(self, data: Tuple[str, Any]) -> str:
path = Path(data[0])
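        # maps e.g. ".../images/apple_pie/1005649.jpg" to "apple_pie/1005649" (id for illustration only), which is
        # the key format used by the entries in the train.txt / test.txt split files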
return path.relative_to(path.parents[1]).with_suffix("").as_posix()
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, split_dp = Demultiplexer(
archive_dp, 2, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True, return_path=False)
split_dp = hint_sharding(split_dp)
split_dp = hint_shuffling(split_dp)
dp = IterKeyZipper(
split_dp,
images_dp,
key_fn=getitem(),
ref_key_fn=self._image_key,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", "classes.txt"))
dp = LineReader(dp, decode=True, return_path=False)
return list(dp)
def __len__(self) -> int:
return 75_750 if self._split == "train" else 25_250
```
|
========================================================================================================================================
SOURCE CODE FILE: gtsrb.py
LINES: 1
SIZE: 4.07 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\gtsrb.py
ENCODING: utf-8
```py
import pathlib
from typing import Any, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
NAME = "gtsrb"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=[f"{label:05d}" for label in range(43)],
)
@register_dataset(NAME)
class GTSRB(Dataset):
"""GTSRB Dataset
homepage="https://benchmark.ini.rub.de"
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "train", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_ROOT = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
_URLS = {
"train": f"{_URL_ROOT}GTSRB-Training_fixed.zip",
"test": f"{_URL_ROOT}GTSRB_Final_Test_Images.zip",
"test_ground_truth": f"{_URL_ROOT}GTSRB_Final_Test_GT.zip",
}
_CHECKSUMS = {
"train": "df4144942083645bd60b594de348aa6930126c3e0e5de09e39611630abf8455a",
"test": "48ba6fab7e877eb64eaf8de99035b0aaecfbc279bee23e35deca4ac1d0a837fa",
"test_ground_truth": "f94e5a7614d75845c74c04ddb26b8796b9e483f43541dd95dd5b726504e16d6d",
}
def _resources(self) -> List[OnlineResource]:
rsrcs: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUMS[self._split])]
if self._split == "test":
rsrcs.append(
HttpResource(
self._URLS["test_ground_truth"],
sha256=self._CHECKSUMS["test_ground_truth"],
)
)
return rsrcs
def _classify_train_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.suffix == ".ppm":
return 0
elif path.suffix == ".csv":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[Tuple[str, Any], Dict[str, Any]]) -> Dict[str, Any]:
(path, buffer), csv_info = data
label = int(csv_info["ClassId"])
bounding_boxes = BoundingBoxes(
[int(csv_info[k]) for k in ("Roi.X1", "Roi.Y1", "Roi.X2", "Roi.Y2")],
format="xyxy",
spatial_size=(int(csv_info["Height"]), int(csv_info["Width"])),
)
return {
"path": path,
"image": EncodedImage.from_file(buffer),
"label": Label(label, categories=self._categories),
"bounding_boxes": bounding_boxes,
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train":
images_dp, ann_dp = Demultiplexer(
resource_dps[0], 2, self._classify_train_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
else:
images_dp, ann_dp = resource_dps
images_dp = Filter(images_dp, path_comparator("suffix", ".ppm"))
        # The order of the image files in the .zip archives perfectly matches the order of the entries in the
        # (possibly concatenated) .csv files, so we can use a Zipper here instead of an IterKeyZipper.
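        # For illustration only (hypothetical file name): an image entry like ("<archive>/00000_00000.ppm", <buffer>)
        # is paired with the CSV row, e.g. {"Filename": "00000_00000.ppm", ..., "ClassId": "0"}, describing that file.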
ann_dp = CSVDictParser(ann_dp, delimiter=";")
dp = Zipper(images_dp, ann_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 26_640 if self._split == "train" else 12_630
```
|
===========================================================================================================================================
SOURCE CODE FILE: imagenet.py
LINES: 1
SIZE: 8.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\imagenet.py
ENCODING: utf-8
```py
import enum
import pathlib
import re
from typing import Any, BinaryIO, cast, Dict, Iterator, List, Match, Optional, Tuple, Union
from torchdata.datapipes.iter import (
Demultiplexer,
Enumerator,
Filter,
IterDataPipe,
IterKeyZipper,
LineReader,
Mapper,
TarArchiveLoader,
)
from torchdata.datapipes.map import IterToMapConverter
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, ManualDownloadResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
read_categories_file,
read_mat,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "imagenet"
@register_info(NAME)
def _info() -> Dict[str, Any]:
categories, wnids = zip(*read_categories_file(NAME))
return dict(categories=categories, wnids=wnids)
class ImageNetResource(ManualDownloadResource):
def __init__(self, **kwargs: Any) -> None:
super().__init__("Register on https://image-net.org/ and follow the instructions there.", **kwargs)
class ImageNetDemux(enum.IntEnum):
META = 0
LABEL = 1
class CategoryAndWordNetIDExtractor(IterDataPipe):
    # Although the WordNet IDs (wnids) are unique, the corresponding categories are not. For example, both n02012849
    # and n03126707 are labeled 'crane', where the former means the bird and the latter means the construction equipment.
_WNID_MAP = {
"n03126707": "construction crane",
"n03710721": "tank suit",
}
def __init__(self, datapipe: IterDataPipe[Tuple[str, BinaryIO]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[Tuple[str, str]]:
for _, stream in self.datapipe:
synsets = read_mat(stream, squeeze_me=True)["synsets"]
for _, wnid, category, _, num_children, *_ in synsets:
if num_children > 0:
# we are looking at a superclass that has no direct instance
continue
yield self._WNID_MAP.get(wnid, category.split(",", 1)[0]), wnid
@register_dataset(NAME)
class ImageNet(Dataset):
"""
- **homepage**: https://www.image-net.org/
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
info = _info()
categories, wnids = info["categories"], info["wnids"]
self._categories = categories
self._wnids = wnids
self._wnid_to_category = dict(zip(wnids, categories))
super().__init__(root, skip_integrity_check=skip_integrity_check)
_IMAGES_CHECKSUMS = {
"train": "b08200a27a8e34218a0e58fde36b0fe8f73bc377f4acea2d91602057c3ca45bb",
"val": "c7e06a6c0baccf06d8dbeb6577d71efff84673a5dbdd50633ab44f8ea0456ae0",
"test_v10102019": "9cf7f8249639510f17d3d8a0deb47cd22a435886ba8e29e2b3223e65a4079eb4",
}
def _resources(self) -> List[OnlineResource]:
name = "test_v10102019" if self._split == "test" else self._split
images = ImageNetResource(
file_name=f"ILSVRC2012_img_{name}.tar",
sha256=self._IMAGES_CHECKSUMS[name],
)
resources: List[OnlineResource] = [images]
if self._split == "val":
devkit = ImageNetResource(
file_name="ILSVRC2012_devkit_t12.tar.gz",
sha256="b59243268c0d266621fd587d2018f69e906fb22875aca0e295b48cafaa927953",
)
resources.append(devkit)
return resources
_TRAIN_IMAGE_NAME_PATTERN = re.compile(r"(?P<wnid>n\d{8})_\d+[.]JPEG")
def _prepare_train_data(self, data: Tuple[str, BinaryIO]) -> Tuple[Tuple[Label, str], Tuple[str, BinaryIO]]:
path = pathlib.Path(data[0])
wnid = cast(Match[str], self._TRAIN_IMAGE_NAME_PATTERN.match(path.name))["wnid"]
label = Label.from_category(self._wnid_to_category[wnid], categories=self._categories)
return (label, wnid), data
def _prepare_test_data(self, data: Tuple[str, BinaryIO]) -> Tuple[None, Tuple[str, BinaryIO]]:
return None, data
    def _classify_devkit(self, data: Tuple[str, BinaryIO]) -> Optional[int]:
return {
"meta.mat": ImageNetDemux.META,
"ILSVRC2012_validation_ground_truth.txt": ImageNetDemux.LABEL,
}.get(pathlib.Path(data[0]).name)
_VAL_TEST_IMAGE_NAME_PATTERN = re.compile(r"ILSVRC2012_(val|test)_(?P<id>\d{8})[.]JPEG")
def _val_test_image_key(self, path: pathlib.Path) -> int:
return int(self._VAL_TEST_IMAGE_NAME_PATTERN.match(path.name)["id"]) # type: ignore[index]
def _prepare_val_data(
self, data: Tuple[Tuple[int, str], Tuple[str, BinaryIO]]
) -> Tuple[Tuple[Label, str], Tuple[str, BinaryIO]]:
label_data, image_data = data
_, wnid = label_data
label = Label.from_category(self._wnid_to_category[wnid], categories=self._categories)
return (label, wnid), image_data
def _prepare_sample(
self,
data: Tuple[Optional[Tuple[Label, str]], Tuple[str, BinaryIO]],
) -> Dict[str, Any]:
label_data, (path, buffer) = data
return dict(
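            # test samples carry no label information, so both "label" and "wnid" fall back to None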
dict(zip(("label", "wnid"), label_data if label_data else (None, None))),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split in {"train", "test"}:
dp = resource_dps[0]
# the train archive is a tar of tars
if self._split == "train":
dp = TarArchiveLoader(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
dp = Mapper(dp, self._prepare_train_data if self._split == "train" else self._prepare_test_data)
else: # config.split == "val":
images_dp, devkit_dp = resource_dps
meta_dp, label_dp = Demultiplexer(
                devkit_dp, 2, self._classify_devkit, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
            # We cannot use self._wnids here, since the devkit lists the synsets in a different order
meta_dp = CategoryAndWordNetIDExtractor(meta_dp)
wnid_dp = Mapper(meta_dp, getitem(1))
wnid_dp = Enumerator(wnid_dp, 1)
wnid_map = IterToMapConverter(wnid_dp)
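            # wnid_map maps the 1-based ILSVRC2012 ID, i.e. the position of the leaf synset in meta.mat, to its wnid.
            # This is the same ID scheme used by ILSVRC2012_validation_ground_truth.txt below.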
label_dp = LineReader(label_dp, decode=True, return_path=False)
label_dp = Mapper(label_dp, int)
label_dp = Mapper(label_dp, wnid_map.__getitem__)
label_dp: IterDataPipe[Tuple[int, str]] = Enumerator(label_dp, 1)
label_dp = hint_shuffling(label_dp)
label_dp = hint_sharding(label_dp)
dp = IterKeyZipper(
label_dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=path_accessor(self._val_test_image_key),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = Mapper(dp, self._prepare_val_data)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 1_281_167,
"val": 50_000,
"test": 100_000,
}[self._split]
def _filter_meta(self, data: Tuple[str, Any]) -> bool:
        return self._classify_devkit(data) == ImageNetDemux.META
def _generate_categories(self) -> List[Tuple[str, ...]]:
self._split = "val"
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, self._filter_meta)
meta_dp = CategoryAndWordNetIDExtractor(meta_dp)
categories_and_wnids = cast(List[Tuple[str, ...]], list(meta_dp))
categories_and_wnids.sort(key=lambda category_and_wnid: category_and_wnid[1])
return categories_and_wnids
```
|
========================================================================================================================================
SOURCE CODE FILE: mnist.py
LINES: 1
SIZE: 15.22 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\mnist.py
ENCODING: utf-8
```py
import abc
import functools
import operator
import pathlib
import string
from typing import Any, BinaryIO, cast, Dict, Iterator, List, Optional, Sequence, Tuple, Union
import torch
from torchdata.datapipes.iter import Decompressor, Demultiplexer, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling, INFINITE_BUFFER_SIZE
from torchvision.prototype.tv_tensors import Label
from torchvision.prototype.utils._internal import fromfile
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
prod = functools.partial(functools.reduce, operator.mul)
class MNISTFileReader(IterDataPipe[torch.Tensor]):
_DTYPE_MAP = {
8: torch.uint8,
9: torch.int8,
11: torch.int16,
12: torch.int32,
13: torch.float32,
14: torch.float64,
}
def __init__(
self, datapipe: IterDataPipe[Tuple[Any, BinaryIO]], *, start: Optional[int], stop: Optional[int]
) -> None:
self.datapipe = datapipe
self.start = start
self.stop = stop
def __iter__(self) -> Iterator[torch.Tensor]:
for _, file in self.datapipe:
try:
read = functools.partial(fromfile, file, byte_order="big")
magic = int(read(dtype=torch.int32, count=1))
dtype = self._DTYPE_MAP[magic // 256]
ndim = magic % 256 - 1
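                # Worked example: for train-images-idx3-ubyte the magic number is 0x00000803 = 2051, so
                # magic // 256 == 8 selects torch.uint8 and magic % 256 == 3 dimensions in total; the first
                # dimension (the sample count) is read separately below, leaving a per-sample shape of (28, 28).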
num_samples = int(read(dtype=torch.int32, count=1))
shape = cast(List[int], read(dtype=torch.int32, count=ndim).tolist()) if ndim else []
count = prod(shape) if shape else 1
start = self.start or 0
stop = min(self.stop, num_samples) if self.stop else num_samples
if start:
num_bytes_per_value = (torch.finfo if dtype.is_floating_point else torch.iinfo)(dtype).bits // 8
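                    # relative seek (whence=1) past the first `start` samples; QMNIST's "test50k" split uses this
                    # to skip the first 10,000 test samples without reading them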
file.seek(num_bytes_per_value * count * start, 1)
for _ in range(stop - start):
yield read(dtype=dtype, count=count).reshape(shape)
finally:
file.close()
class _MNISTBase(Dataset):
_URL_BASE: Union[str, Sequence[str]]
@abc.abstractmethod
def _files_and_checksums(self) -> Tuple[Tuple[str, str], Tuple[str, str]]:
pass
def _resources(self) -> List[OnlineResource]:
(images_file, images_sha256), (
labels_file,
labels_sha256,
) = self._files_and_checksums()
url_bases = self._URL_BASE
if isinstance(url_bases, str):
url_bases = (url_bases,)
images_urls = [f"{url_base}/{images_file}" for url_base in url_bases]
images = HttpResource(images_urls[0], sha256=images_sha256, mirrors=images_urls[1:])
labels_urls = [f"{url_base}/{labels_file}" for url_base in url_bases]
labels = HttpResource(labels_urls[0], sha256=labels_sha256, mirrors=labels_urls[1:])
return [images, labels]
def start_and_stop(self) -> Tuple[Optional[int], Optional[int]]:
return None, None
_categories: List[str]
def _prepare_sample(self, data: Tuple[torch.Tensor, torch.Tensor]) -> Dict[str, Any]:
image, label = data
return dict(
image=Image(image),
label=Label(label, dtype=torch.int64, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, labels_dp = resource_dps
start, stop = self.start_and_stop()
images_dp = Decompressor(images_dp)
images_dp = MNISTFileReader(images_dp, start=start, stop=stop)
labels_dp = Decompressor(labels_dp)
labels_dp = MNISTFileReader(labels_dp, start=start, stop=stop)
dp = Zipper(images_dp, labels_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
@register_info("mnist")
def _mnist_info() -> Dict[str, Any]:
return dict(
categories=[str(label) for label in range(10)],
)
@register_dataset("mnist")
class MNIST(_MNISTBase):
"""
- **homepage**: http://yann.lecun.com/exdb/mnist
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "test"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_BASE: Union[str, Sequence[str]] = (
"http://yann.lecun.com/exdb/mnist",
"https://ossci-datasets.s3.amazonaws.com/mnist",
)
_CHECKSUMS = {
"train-images-idx3-ubyte.gz": "440fcabf73cc546fa21475e81ea370265605f56be210a4024d2ca8f203523609",
"train-labels-idx1-ubyte.gz": "3552534a0a558bbed6aed32b30c495cca23d567ec52cac8be1a0730e8010255c",
"t10k-images-idx3-ubyte.gz": "8d422c7b0a1c1c79245a5bcf07fe86e33eeafee792b84584aec276f5a2dbc4e6",
"t10k-labels-idx1-ubyte.gz": "f7ae60f92e00ec6debd23a6088c31dbd2371eca3ffa0defaefb259924204aec6",
}
def _files_and_checksums(self) -> Tuple[Tuple[str, str], Tuple[str, str]]:
prefix = "train" if self._split == "train" else "t10k"
images_file = f"{prefix}-images-idx3-ubyte.gz"
labels_file = f"{prefix}-labels-idx1-ubyte.gz"
return (images_file, self._CHECKSUMS[images_file]), (
labels_file,
self._CHECKSUMS[labels_file],
)
_categories = _mnist_info()["categories"]
def __len__(self) -> int:
return 60_000 if self._split == "train" else 10_000
@register_info("fashionmnist")
def _fashionmnist_info() -> Dict[str, Any]:
return dict(
categories=[
"T-shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot",
],
)
@register_dataset("fashionmnist")
class FashionMNIST(MNIST):
"""
- **homepage**: https://github.com/zalandoresearch/fashion-mnist
"""
_URL_BASE = "http://fashion-mnist.s3-website.eu-central-1.amazonaws.com"
_CHECKSUMS = {
"train-images-idx3-ubyte.gz": "3aede38d61863908ad78613f6a32ed271626dd12800ba2636569512369268a84",
"train-labels-idx1-ubyte.gz": "a04f17134ac03560a47e3764e11b92fc97de4d1bfaf8ba1a3aa29af54cc90845",
"t10k-images-idx3-ubyte.gz": "346e55b948d973a97e58d2351dde16a484bd415d4595297633bb08f03db6a073",
"t10k-labels-idx1-ubyte.gz": "67da17c76eaffca5446c3361aaab5c3cd6d1c2608764d35dfb1850b086bf8dd5",
}
_categories = _fashionmnist_info()["categories"]
@register_info("kmnist")
def _kmnist_info() -> Dict[str, Any]:
return dict(
categories=["o", "ki", "su", "tsu", "na", "ha", "ma", "ya", "re", "wo"],
)
@register_dataset("kmnist")
class KMNIST(MNIST):
"""
- **homepage**: http://codh.rois.ac.jp/kmnist/index.html.en
"""
_URL_BASE = "http://codh.rois.ac.jp/kmnist/dataset/kmnist"
_CHECKSUMS = {
"train-images-idx3-ubyte.gz": "51467d22d8cc72929e2a028a0428f2086b092bb31cfb79c69cc0a90ce135fde4",
"train-labels-idx1-ubyte.gz": "e38f9ebcd0f3ebcdec7fc8eabdcdaef93bb0df8ea12bee65224341c8183d8e17",
"t10k-images-idx3-ubyte.gz": "edd7a857845ad6bb1d0ba43fe7e794d164fe2dce499a1694695a792adfac43c5",
"t10k-labels-idx1-ubyte.gz": "20bb9a0ef54c7db3efc55a92eef5582c109615df22683c380526788f98e42a1c",
}
_categories = _kmnist_info()["categories"]
@register_info("emnist")
def _emnist_info() -> Dict[str, Any]:
return dict(
categories=list(string.digits + string.ascii_uppercase + string.ascii_lowercase),
)
@register_dataset("emnist")
class EMNIST(_MNISTBase):
"""
- **homepage**: https://www.westernsydney.edu.au/icns/reproducible_research/publication_support_materials/emnist
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
image_set: str = "Balanced",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "test"))
self._image_set = self._verify_str_arg(
image_set, "image_set", ("Balanced", "By_Merge", "By_Class", "Letters", "Digits", "MNIST")
)
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_BASE = "https://rds.westernsydney.edu.au/Institutes/MARCS/BENS/EMNIST"
def _files_and_checksums(self) -> Tuple[Tuple[str, str], Tuple[str, str]]:
prefix = f"emnist-{self._image_set.replace('_', '').lower()}-{self._split}"
images_file = f"{prefix}-images-idx3-ubyte.gz"
labels_file = f"{prefix}-labels-idx1-ubyte.gz"
# Since EMNIST provides the data files inside an archive, we don't need to provide checksums for them
return (images_file, ""), (labels_file, "")
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
f"{self._URL_BASE}/emnist-gzip.zip",
sha256="909a2a39c5e86bdd7662425e9b9c4a49bb582bf8d0edad427f3c3a9d0c6f7259",
)
]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
(images_file, _), (labels_file, _) = self._files_and_checksums()
if path.name == images_file:
return 0
elif path.name == labels_file:
return 1
else:
return None
_categories = _emnist_info()["categories"]
_LABEL_OFFSETS = {
38: 1,
39: 1,
40: 1,
41: 1,
42: 1,
43: 6,
44: 8,
45: 8,
46: 9,
}
def _prepare_sample(self, data: Tuple[torch.Tensor, torch.Tensor]) -> Dict[str, Any]:
        # For the "Balanced" and "By_Merge" image sets, some lowercase letters are merged into their uppercase
        # counterparts (see Fig. 2 in the paper). That means, for example, that there is 'D', 'd', and 'C', but no 'c'.
        # Since the labels are nevertheless dense, i.e. there are no gaps between 0 and 46 for 47 total classes, we
        # need to add an offset to create these gaps. For example, since there is no 'c', 'd' corresponds to
        # label 38 (10 digits + 26 uppercase letters + 3rd unmerged lowercase letter - 1 for zero indexing),
        # but at the same time corresponds to
        # index 39 (10 digits + 26 uppercase letters + 4th lowercase letter - 1 for zero indexing)
        # in self._categories. Thus, we need to add 1 to the label to correct this.
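        # Concretely: a raw label of 38 ('d' in the merged label space) is shifted by _LABEL_OFFSETS[38] == 1 to 39,
        # the index of 'd' in the full digits + uppercase + lowercase category list.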
if self._image_set in ("Balanced", "By_Merge"):
image, label = data
label += self._LABEL_OFFSETS.get(int(label), 0)
data = (image, label)
return super()._prepare_sample(data)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, labels_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
return super()._datapipe([images_dp, labels_dp])
def __len__(self) -> int:
return {
("train", "Balanced"): 112_800,
("train", "By_Merge"): 697_932,
("train", "By_Class"): 697_932,
("train", "Letters"): 124_800,
("train", "Digits"): 240_000,
("train", "MNIST"): 60_000,
("test", "Balanced"): 18_800,
("test", "By_Merge"): 116_323,
("test", "By_Class"): 116_323,
("test", "Letters"): 20_800,
("test", "Digits"): 40_000,
("test", "MNIST"): 10_000,
}[(self._split, self._image_set)]
@register_info("qmnist")
def _qmnist_info() -> Dict[str, Any]:
return dict(
categories=[str(label) for label in range(10)],
)
@register_dataset("qmnist")
class QMNIST(_MNISTBase):
"""
- **homepage**: https://github.com/facebookresearch/qmnist
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "test", "test10k", "test50k", "nist"))
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL_BASE = "https://raw.githubusercontent.com/facebookresearch/qmnist/master"
_CHECKSUMS = {
"qmnist-train-images-idx3-ubyte.gz": "9e26a7bf1683614e065d7b76460ccd52807165b3f22561fb782bd9f38c52b51d",
"qmnist-train-labels-idx2-int.gz": "2c05dc77f6b916b38e455e97ab129a42a444f3dbef09b278a366f82904e0dd9f",
"qmnist-test-images-idx3-ubyte.gz": "43fc22bf7498b8fc98de98369d72f752d0deabc280a43a7bcc364ab19e57b375",
"qmnist-test-labels-idx2-int.gz": "9fbcbe594c3766fdf4f0b15c5165dc0d1e57ac604e01422608bb72c906030d06",
"xnist-images-idx3-ubyte.xz": "f075553993026d4359ded42208eff77a1941d3963c1eff49d6015814f15f0984",
"xnist-labels-idx2-int.xz": "db042968723ec2b7aed5f1beac25d2b6e983b9286d4f4bf725f1086e5ae55c4f",
}
def _files_and_checksums(self) -> Tuple[Tuple[str, str], Tuple[str, str]]:
prefix = "xnist" if self._split == "nist" else f"qmnist-{'train' if self._split == 'train' else 'test'}"
suffix = "xz" if self._split == "nist" else "gz"
images_file = f"{prefix}-images-idx3-ubyte.{suffix}"
labels_file = f"{prefix}-labels-idx2-int.{suffix}"
return (images_file, self._CHECKSUMS[images_file]), (
labels_file,
self._CHECKSUMS[labels_file],
)
def start_and_stop(self) -> Tuple[Optional[int], Optional[int]]:
start: Optional[int]
stop: Optional[int]
if self._split == "test10k":
start = 0
stop = 10000
elif self._split == "test50k":
start = 10000
stop = None
else:
start = stop = None
return start, stop
    _categories = _qmnist_info()["categories"]
def _prepare_sample(self, data: Tuple[torch.Tensor, torch.Tensor]) -> Dict[str, Any]:
image, ann = data
label, *extra_anns = ann
sample = super()._prepare_sample((image, label))
sample.update(
dict(
zip(
("nist_hsf_series", "nist_writer_id", "digit_index", "nist_label", "global_digit_index"),
[int(value) for value in extra_anns[:5]],
)
)
)
sample.update(dict(zip(("duplicate", "unused"), [bool(value) for value in extra_anns[-2:]])))
return sample
def __len__(self) -> int:
return {
"train": 60_000,
"test": 60_000,
"test10k": 10_000,
"test50k": 50_000,
"nist": 402_953,
}[self._split]
```
|
==================================================================================================================================================
SOURCE CODE FILE: oxford_iiit_pet.py
LINES: 1
SIZE: 5.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\oxford_iiit_pet.py
ENCODING: utf-8
```py
import enum
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVDictParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "oxford-iiit-pet"
class OxfordIIITPetDemux(enum.IntEnum):
SPLIT_AND_CLASSIFICATION = 0
SEGMENTATIONS = 1
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class OxfordIIITPet(Dataset):
"""Oxford IIIT Pet Dataset
homepage="https://www.robots.ox.ac.uk/~vgg/data/pets/",
"""
def __init__(
self, root: Union[str, pathlib.Path], *, split: str = "trainval", skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"trainval", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
images = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz",
sha256="67195c5e1c01f1ab5f9b6a5d22b8c27a580d896ece458917e61d459337fa318d",
preprocess="decompress",
)
anns = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz",
sha256="52425fb6de5c424942b7626b428656fcbd798db970a937df61750c0f1d358e91",
preprocess="decompress",
)
return [images, anns]
def _classify_anns(self, data: Tuple[str, Any]) -> Optional[int]:
return {
"annotations": OxfordIIITPetDemux.SPLIT_AND_CLASSIFICATION,
"trimaps": OxfordIIITPetDemux.SEGMENTATIONS,
}.get(pathlib.Path(data[0]).parent.name)
def _filter_images(self, data: Tuple[str, Any]) -> bool:
return pathlib.Path(data[0]).suffix == ".jpg"
def _filter_segmentations(self, data: Tuple[str, Any]) -> bool:
return not pathlib.Path(data[0]).name.startswith(".")
def _prepare_sample(
self, data: Tuple[Tuple[Dict[str, str], Tuple[str, BinaryIO]], Tuple[str, BinaryIO]]
) -> Dict[str, Any]:
ann_data, image_data = data
classification_data, segmentation_data = ann_data
segmentation_path, segmentation_buffer = segmentation_data
image_path, image_buffer = image_data
return dict(
label=Label(int(classification_data["label"]) - 1, categories=self._categories),
species="cat" if classification_data["species"] == "1" else "dog",
segmentation_path=segmentation_path,
segmentation=EncodedImage.from_file(segmentation_buffer),
image_path=image_path,
image=EncodedImage.from_file(image_buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, anns_dp = resource_dps
images_dp = Filter(images_dp, self._filter_images)
split_and_classification_dp, segmentations_dp = Demultiplexer(
anns_dp,
2,
self._classify_anns,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
split_and_classification_dp = Filter(split_and_classification_dp, path_comparator("name", f"{self._split}.txt"))
split_and_classification_dp = CSVDictParser(
split_and_classification_dp, fieldnames=("image_id", "label", "species"), delimiter=" "
)
split_and_classification_dp = hint_shuffling(split_and_classification_dp)
split_and_classification_dp = hint_sharding(split_and_classification_dp)
segmentations_dp = Filter(segmentations_dp, self._filter_segmentations)
anns_dp = IterKeyZipper(
split_and_classification_dp,
segmentations_dp,
key_fn=getitem("image_id"),
ref_key_fn=path_accessor("stem"),
buffer_size=INFINITE_BUFFER_SIZE,
)
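        # after the zip above each element is (classification_row, (segmentation_path, buffer)), hence the nested
        # getitem(0, "image_id") key below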
dp = IterKeyZipper(
anns_dp,
images_dp,
key_fn=getitem(0, "image_id"),
ref_key_fn=path_accessor("stem"),
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _filter_split_and_classification_anns(self, data: Tuple[str, Any]) -> bool:
return self._classify_anns(data) == OxfordIIITPetDemux.SPLIT_AND_CLASSIFICATION
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[1].load(self._root)
dp = Filter(dp, self._filter_split_and_classification_anns)
dp = Filter(dp, path_comparator("name", "trainval.txt"))
dp = CSVDictParser(dp, fieldnames=("image_id", "label"), delimiter=" ")
raw_categories_and_labels = {(data["image_id"].rsplit("_", 1)[0], data["label"]) for data in dp}
raw_categories, _ = zip(
*sorted(raw_categories_and_labels, key=lambda raw_category_and_label: int(raw_category_and_label[1]))
)
return [" ".join(part.title() for part in raw_category.split("_")) for raw_category in raw_categories]
def __len__(self) -> int:
return 3_680 if self._split == "trainval" else 3_669
```
|
=======================================================================================================================================
SOURCE CODE FILE: pcam.py
LINES: 1
SIZE: 4.78 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\pcam.py
ENCODING: utf-8
```py
import io
import pathlib
from collections import namedtuple
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "pcam"
class PCAMH5Reader(IterDataPipe[Tuple[str, io.IOBase]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[str, io.IOBase]],
        key: Optional[str] = None,  # Note: selecting a single HDF5 key like this might be very specific to the PCAM dataset
) -> None:
self.datapipe = datapipe
self.key = key
def __iter__(self) -> Iterator[Tuple[str, io.IOBase]]:
import h5py
for _, handle in self.datapipe:
try:
with h5py.File(handle) as data:
if self.key is not None:
data = data[self.key]
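                    # iterating an h5py Dataset yields one numpy array per entry along its first axis,
                    # i.e. one image or one target per sample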
yield from data
finally:
handle.close()
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=["0", "1"])
@register_dataset(NAME)
class PCAM(Dataset):
# TODO write proper docstring
"""PCAM Dataset
homepage="https://github.com/basveeling/pcam"
"""
def __init__(
self, root: Union[str, pathlib.Path], split: str = "train", *, skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("h5py",))
_RESOURCES = {
"train": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_train_x.h5.gz",
gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_train_y.h5.gz",
gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
),
),
"test": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_test_x.h5.gz",
gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_test_y.h5.gz",
gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
),
),
"val": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
),
),
}
def _resources(self) -> List[OnlineResource]:
return [ # = [images resource, targets resource]
GDriveResource(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess="decompress")
for file_name, gdrive_id, sha256 in self._RESOURCES[self._split]
]
def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
image, target = data # They're both numpy arrays at this point
return {
"image": Image(image.transpose(2, 0, 1)),
"label": Label(target.item(), categories=self._categories),
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
images_dp = PCAMH5Reader(images_dp, key="x")
targets_dp = PCAMH5Reader(targets_dp, key="y")
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 262_144 if self._split == "train" else 32_768
```
|
======================================================================================================================================
SOURCE CODE FILE: sbd.py
LINES: 1
SIZE: 5.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\sbd.py
ENCODING: utf-8
```py
import pathlib
import re
from typing import Any, BinaryIO, cast, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
read_categories_file,
read_mat,
)
from .._api import register_dataset, register_info
NAME = "sbd"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class SBD(Dataset):
"""
- **homepage**: http://home.bharathh.info/pubs/codes/SBD/download.html
- **dependencies**:
        - `scipy <https://scipy.org>`_
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "train_noval"))
self._categories = _info()["categories"]
super().__init__(root, dependencies=("scipy",), skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
resources = [
HttpResource(
"https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz",
sha256="6a5a2918d5c73ce032fdeba876574d150d9d04113ab87540a1304cbcc715be53",
)
]
if self._split == "train_noval":
resources.append(
HttpResource(
"http://home.bharathh.info/pubs/codes/SBD/train_noval.txt",
sha256="0b2068f7a359d2907431803e1cd63bf6162da37d7d503b589d3b08c6fd0c2432",
)
)
return resources # type: ignore[return-value]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
parent, grandparent, *_ = path.parents
if grandparent.name == "dataset":
if parent.name == "img":
return 0
elif parent.name == "cls":
return 1
if parent.name == "dataset" and self._split != "train_noval":
return 2
return None
def _prepare_sample(self, data: Tuple[Tuple[Any, Tuple[str, BinaryIO]], Tuple[str, BinaryIO]]) -> Dict[str, Any]:
split_and_image_data, ann_data = data
_, image_data = split_and_image_data
image_path, image_buffer = image_data
ann_path, ann_buffer = ann_data
anns = read_mat(ann_buffer, squeeze_me=True)["GTcls"]
return dict(
image_path=image_path,
image=EncodedImage.from_file(image_buffer),
ann_path=ann_path,
# the boundaries are stored in sparse CSC format, which is not supported by PyTorch
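            # so each per-category scipy sparse matrix is densified with .toarray() before being stacked into one tensor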
boundaries=torch.as_tensor(
np.stack([raw_boundary.toarray() for raw_boundary in anns["Boundaries"].item()])
),
segmentation=torch.as_tensor(anns["Segmentation"].item()),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
if self._split == "train_noval":
archive_dp, split_dp = resource_dps
images_dp, anns_dp = Demultiplexer(
archive_dp,
2,
self._classify_archive,
buffer_size=INFINITE_BUFFER_SIZE,
drop_none=True,
)
else:
archive_dp = resource_dps[0]
images_dp, anns_dp, split_dp = Demultiplexer(
archive_dp,
3,
self._classify_archive,
buffer_size=INFINITE_BUFFER_SIZE,
drop_none=True,
)
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True)
split_dp = hint_shuffling(split_dp)
split_dp = hint_sharding(split_dp)
dp = split_dp
for level, data_dp in enumerate((images_dp, anns_dp)):
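            # key_fn digs out the image id: at level 0 it is element[1] of the (path, id) pairs produced by
            # LineReader, at level 1 the same pair sits one tuple deeper, so the key becomes element[0][1]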
dp = IterKeyZipper(
dp,
data_dp,
key_fn=getitem(*[0] * level, 1),
ref_key_fn=path_accessor("stem"),
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 8_498,
"val": 2_857,
"train_noval": 5_623,
}[self._split]
def _generate_categories(self) -> Tuple[str, ...]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", "category_names.m"))
dp = LineReader(dp)
dp = Mapper(dp, bytes.decode, input_col=1)
lines = tuple(zip(*iter(dp)))[1]
pattern = re.compile(r"\s*'(?P<category>\w+)';\s*%(?P<label>\d+)")
categories_and_labels = cast(
List[Tuple[str, ...]],
[
pattern.match(line).groups() # type: ignore[union-attr]
                # the first and last lines contain no information
for line in lines[1:-1]
],
)
categories_and_labels.sort(key=lambda category_and_label: int(category_and_label[1]))
categories, _ = zip(*categories_and_labels)
return categories
```
|
==========================================================================================================================================
SOURCE CODE FILE: semeion.py
LINES: 1
SIZE: 1.99 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\semeion.py
ENCODING: utf-8
```py
import pathlib
from typing import Any, Dict, List, Tuple, Union
import torch
from torchdata.datapipes.iter import CSVParser, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import OneHotLabel
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "semeion"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(i) for i in range(10)])
@register_dataset(NAME)
class SEMEION(Dataset):
"""Semeion dataset
homepage="https://archive.ics.uci.edu/ml/datasets/Semeion+Handwritten+Digit",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
"http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data",
sha256="f43228ae3da5ea6a3c95069d53450b86166770e3b719dcc333182128fe08d4b1",
)
return [data]
def _prepare_sample(self, data: Tuple[str, ...]) -> Dict[str, Any]:
image_data, label_data = data[:256], data[256:-1]
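        # each row holds 16 * 16 = 256 pixel values followed by a 10-element one-hot label; the final slice presumably
        # drops a trailing empty field produced by the space-delimited parser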
return dict(
image=Image(torch.tensor([float(pixel) for pixel in image_data], dtype=torch.float).reshape(16, 16)),
label=OneHotLabel([int(label) for label in label_data], categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = CSVParser(dp, delimiter=" ")
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 1_593
```
|
================================================================================================================================================
SOURCE CODE FILE: stanford_cars.py
LINES: 1
SIZE: 4.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\stanford_cars.py
ENCODING: utf-8
```py
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
read_mat,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
class StanfordCarsLabelReader(IterDataPipe[Tuple[int, int, int, int, int, str]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[Tuple[int, int, int, int, int, str]]:
for _, file in self.datapipe:
data = read_mat(file, squeeze_me=True)
for ann in data["annotations"]:
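                # each annotation is (bbox_x1, bbox_y1, bbox_x2, bbox_y2, class, fname), which is how
                # _prepare_sample below slices it into a bounding box and a 1-based label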
yield tuple(ann) # type: ignore[misc]
NAME = "stanford-cars"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class StanfordCars(Dataset):
"""Stanford Cars dataset.
homepage="https://ai.stanford.edu/~jkrause/cars/car_dataset.html",
dependencies=scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_URL_ROOT = "https://ai.stanford.edu/~jkrause/"
_URLS = {
"train": f"{_URL_ROOT}car196/cars_train.tgz",
"test": f"{_URL_ROOT}car196/cars_test.tgz",
"cars_test_annos_withlabels": f"{_URL_ROOT}car196/cars_test_annos_withlabels.mat",
"car_devkit": f"{_URL_ROOT}cars/car_devkit.tgz",
}
_CHECKSUM = {
"train": "b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd",
"test": "bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243",
"cars_test_annos_withlabels": "790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05",
"car_devkit": "512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288",
}
def _resources(self) -> List[OnlineResource]:
resources: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUM[self._split])]
if self._split == "train":
resources.append(HttpResource(url=self._URLS["car_devkit"], sha256=self._CHECKSUM["car_devkit"]))
else:
resources.append(
HttpResource(
self._URLS["cars_test_annos_withlabels"], sha256=self._CHECKSUM["cars_test_annos_withlabels"]
)
)
return resources
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int, int, int, str]]) -> Dict[str, Any]:
image, target = data
path, buffer = image
image = EncodedImage.from_file(buffer)
return dict(
path=path,
image=image,
label=Label(target[4] - 1, categories=self._categories),
bounding_boxes=BoundingBoxes(target[:4], format="xyxy", spatial_size=image.spatial_size),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
if self._split == "train":
targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
targets_dp = StanfordCarsLabelReader(targets_dp)
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, path_comparator("name", "cars_meta.mat"))
_, meta_file = next(iter(meta_dp))
return list(read_mat(meta_file, squeeze_me=True)["class_names"])
def __len__(self) -> int:
return 8_144 if self._split == "train" else 8_041
```
|
=======================================================================================================================================
SOURCE CODE FILE: svhn.py
LINES: 1
SIZE: 2.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\svhn.py
ENCODING: utf-8
```py
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling, read_mat
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "svhn"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class SVHN(Dataset):
"""SVHN Dataset.
homepage="http://ufldl.stanford.edu/housenumbers/",
dependencies = scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test", "extra"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_CHECKSUMS = {
"train": "435e94d69a87fde4fd4d7f3dd208dfc32cb6ae8af2240d066de1df7508d083b8",
"test": "cdce80dfb2a2c4c6160906d0bd7c68ec5a99d7ca4831afa54f09182025b6a75b",
"extra": "a133a4beb38a00fcdda90c9489e0c04f900b660ce8a316a5e854838379a71eb3",
}
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
f"http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat",
sha256=self._CHECKSUMS[self._split],
)
return [data]
def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[np.ndarray, np.ndarray]]:
_, buffer = data
content = read_mat(buffer)
return list(
zip(
content["X"].transpose((3, 0, 1, 2)),
content["y"].squeeze(),
)
)
def _prepare_sample(self, data: Tuple[np.ndarray, np.ndarray]) -> Dict[str, Any]:
image_array, label_array = data
return dict(
image=Image(image_array.transpose((2, 0, 1))),
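            # SVHN stores the digit "0" as class 10, so the modulo maps 10 to 0 and leaves 1-9 unchanged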
label=Label(int(label_array) % 10, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 73_257,
"test": 26_032,
"extra": 531_131,
}[self._split]
```
|
=======================================================================================================================================
SOURCE CODE FILE: usps.py
LINES: 1
SIZE: 2.43 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\usps.py
ENCODING: utf-8
```py
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import Decompressor, IterDataPipe, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "usps"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class USPS(Dataset):
"""USPS Dataset
homepage="https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass"
_RESOURCES = {
"train": HttpResource(
f"{_URL}/usps.bz2", sha256="3771e9dd6ba685185f89867b6e249233dd74652389f263963b3b741e994b034f"
),
"test": HttpResource(
f"{_URL}/usps.t.bz2", sha256="a9c0164e797d60142a50604917f0baa604f326e9a689698763793fa5d12ffc4e"
),
}
def _resources(self) -> List[OnlineResource]:
return [USPS._RESOURCES[self._split]]
def _prepare_sample(self, line: str) -> Dict[str, Any]:
label, *values = line.strip().split(" ")
values = [float(value.split(":")[1]) for value in values]
pixels = torch.tensor(values).add_(1).div_(2)
return dict(
image=Image(pixels.reshape(16, 16)),
label=Label(int(label) - 1, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = Decompressor(resource_dps[0])
dp = LineReader(dp, decode=True, return_path=False)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 7_291 if self._split == "train" else 2_007
```
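Each line of the USPS files is in LIBSVM format: a 1-based label followed by `index:value` pairs with pixel values in [-1, 1]. `_prepare_sample` rescales the pixels to [0, 1] and shifts the label to 0-9. A sketch with a synthetic, truncated line (a real line carries 256 pairs, one per pixel of the 16x16 image):
```py
import torch

line = "3 1:-1.0 2:0.0 3:1.0"  # hypothetical, truncated example line
label, *values = line.strip().split(" ")
values = [float(value.split(":")[1]) for value in values]
pixels = torch.tensor(values).add_(1).div_(2)  # [-1, 1] -> [0, 1]
print(int(label) - 1)  # 2
print(pixels)          # tensor([0.0000, 0.5000, 1.0000])
```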
|
======================================================================================================================================
SOURCE CODE FILE: voc.py
LINES: 1
SIZE: 9.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_builtin\voc.py
ENCODING: utf-8
```py
import enum
import functools
import pathlib
from typing import Any, BinaryIO, cast, Dict, List, Optional, Tuple, Union
from xml.etree import ElementTree
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.datasets import VOCDetection
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_accessor,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import BoundingBoxes
from .._api import register_dataset, register_info
NAME = "voc"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class VOC(Dataset):
"""
- **homepage**: http://host.robots.ox.ac.uk/pascal/VOC/
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
year: str = "2012",
task: str = "detection",
skip_integrity_check: bool = False,
) -> None:
self._year = self._verify_str_arg(year, "year", ("2007", "2008", "2009", "2010", "2011", "2012"))
if split == "test" and year != "2007":
raise ValueError("`split='test'` is only available for `year='2007'`")
else:
self._split = self._verify_str_arg(split, "split", ("train", "val", "trainval", "test"))
self._task = self._verify_str_arg(task, "task", ("detection", "segmentation"))
self._anns_folder = "Annotations" if task == "detection" else "SegmentationClass"
self._split_folder = "Main" if task == "detection" else "Segmentation"
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_TRAIN_VAL_ARCHIVES = {
"2007": ("VOCtrainval_06-Nov-2007.tar", "7d8cd951101b0957ddfd7a530bdc8a94f06121cfc1e511bb5937e973020c7508"),
"2008": ("VOCtrainval_14-Jul-2008.tar", "7f0ca53c1b5a838fbe946965fc106c6e86832183240af5c88e3f6c306318d42e"),
"2009": ("VOCtrainval_11-May-2009.tar", "11cbe1741fb5bdadbbca3c08e9ec62cd95c14884845527d50847bc2cf57e7fd6"),
"2010": ("VOCtrainval_03-May-2010.tar", "1af4189cbe44323ab212bff7afbc7d0f55a267cc191eb3aac911037887e5c7d4"),
"2011": ("VOCtrainval_25-May-2011.tar", "0a7f5f5d154f7290ec65ec3f78b72ef72c6d93ff6d79acd40dc222a9ee5248ba"),
"2012": ("VOCtrainval_11-May-2012.tar", "e14f763270cf193d0b5f74b169f44157a4b0c6efa708f4dd0ff78ee691763bcb"),
}
_TEST_ARCHIVES = {
"2007": ("VOCtest_06-Nov-2007.tar", "6836888e2e01dca84577a849d339fa4f73e1e4f135d312430c4856b5609b4892")
}
def _resources(self) -> List[OnlineResource]:
file_name, sha256 = (self._TEST_ARCHIVES if self._split == "test" else self._TRAIN_VAL_ARCHIVES)[self._year]
archive = HttpResource(f"http://host.robots.ox.ac.uk/pascal/VOC/voc{self._year}/{file_name}", sha256=sha256)
return [archive]
def _is_in_folder(self, data: Tuple[str, Any], *, name: str, depth: int = 1) -> bool:
path = pathlib.Path(data[0])
return name in path.parent.parts[-depth:]
class _Demux(enum.IntEnum):
SPLIT = 0
IMAGES = 1
ANNS = 2
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
if self._is_in_folder(data, name="ImageSets", depth=2):
return self._Demux.SPLIT
elif self._is_in_folder(data, name="JPEGImages"):
return self._Demux.IMAGES
elif self._is_in_folder(data, name=self._anns_folder):
return self._Demux.ANNS
else:
return None
def _parse_detection_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
ann = cast(Dict[str, Any], VOCDetection.parse_voc_xml(ElementTree.parse(buffer).getroot())["annotation"])
buffer.close()
return ann
def _prepare_detection_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
anns = self._parse_detection_ann(buffer)
instances = anns["object"]
return dict(
bounding_boxes=BoundingBoxes(
[
[int(instance["bndbox"][part]) for part in ("xmin", "ymin", "xmax", "ymax")]
for instance in instances
],
format="xyxy",
spatial_size=cast(Tuple[int, int], tuple(int(anns["size"][dim]) for dim in ("height", "width"))),
),
labels=Label(
[self._categories.index(instance["name"]) for instance in instances], categories=self._categories
),
)
def _prepare_segmentation_ann(self, buffer: BinaryIO) -> Dict[str, Any]:
return dict(segmentation=EncodedImage.from_file(buffer))
def _prepare_sample(
self,
data: Tuple[Tuple[Tuple[str, str], Tuple[str, BinaryIO]], Tuple[str, BinaryIO]],
) -> Dict[str, Any]:
split_and_image_data, ann_data = data
_, image_data = split_and_image_data
image_path, image_buffer = image_data
ann_path, ann_buffer = ann_data
return dict(
(self._prepare_detection_ann if self._task == "detection" else self._prepare_segmentation_ann)(ann_buffer),
image_path=image_path,
image=EncodedImage.from_file(image_buffer),
ann_path=ann_path,
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
split_dp, images_dp, anns_dp = Demultiplexer(
archive_dp,
3,
self._classify_archive,
drop_none=True,
buffer_size=INFINITE_BUFFER_SIZE,
)
split_dp = Filter(split_dp, functools.partial(self._is_in_folder, name=self._split_folder))
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True)
split_dp = hint_shuffling(split_dp)
split_dp = hint_sharding(split_dp)
dp = split_dp
for level, data_dp in enumerate((images_dp, anns_dp)):
dp = IterKeyZipper(
dp,
data_dp,
key_fn=getitem(*[0] * level, 1),
ref_key_fn=path_accessor("stem"),
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
("train", "2007", "detection"): 2_501,
("train", "2007", "segmentation"): 209,
("train", "2008", "detection"): 2_111,
("train", "2008", "segmentation"): 511,
("train", "2009", "detection"): 3_473,
("train", "2009", "segmentation"): 749,
("train", "2010", "detection"): 4_998,
("train", "2010", "segmentation"): 964,
("train", "2011", "detection"): 5_717,
("train", "2011", "segmentation"): 1_112,
("train", "2012", "detection"): 5_717,
("train", "2012", "segmentation"): 1_464,
("val", "2007", "detection"): 2_510,
("val", "2007", "segmentation"): 213,
("val", "2008", "detection"): 2_221,
("val", "2008", "segmentation"): 512,
("val", "2009", "detection"): 3_581,
("val", "2009", "segmentation"): 750,
("val", "2010", "detection"): 5_105,
("val", "2010", "segmentation"): 964,
("val", "2011", "detection"): 5_823,
("val", "2011", "segmentation"): 1_111,
("val", "2012", "detection"): 5_823,
("val", "2012", "segmentation"): 1_449,
("trainval", "2007", "detection"): 5_011,
("trainval", "2007", "segmentation"): 422,
("trainval", "2008", "detection"): 4_332,
("trainval", "2008", "segmentation"): 1_023,
("trainval", "2009", "detection"): 7_054,
("trainval", "2009", "segmentation"): 1_499,
("trainval", "2010", "detection"): 10_103,
("trainval", "2010", "segmentation"): 1_928,
("trainval", "2011", "detection"): 11_540,
("trainval", "2011", "segmentation"): 2_223,
("trainval", "2012", "detection"): 11_540,
("trainval", "2012", "segmentation"): 2_913,
("test", "2007", "detection"): 4_952,
("test", "2007", "segmentation"): 210,
}[(self._split, self._year, self._task)]
def _filter_anns(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == self._Demux.ANNS
def _generate_categories(self) -> List[str]:
self._task = "detection"
resources = self._resources()
archive_dp = resources[0].load(self._root)
dp = Filter(archive_dp, self._filter_anns)
dp = Mapper(dp, self._parse_detection_ann, input_col=1)
categories = sorted({instance["name"] for _, anns in dp for instance in anns["object"]})
# We add a background category to be used during segmentation
categories.insert(0, "__background__")
return categories
```
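The key functions in the `IterKeyZipper` loop deserve a closer look: at level 0 the pipeline element is a `(split_file_path, image_id)` pair, at level 1 it is the already-zipped `((split_file_path, image_id), (image_path, image_buffer))` tuple, and `getitem(*[0] * level, 1)` always digs out the image id to match against the stem of the reference path. A plain-Python sketch with made-up paths and ids, using a local stand-in for the `getitem` helper:
```py
def getitem(*items):
    # stand-in for torchvision.prototype.datasets.utils._internal.getitem
    def accessor(obj):
        for item in items:
            obj = obj[item]
        return obj
    return accessor

# hypothetical pipeline elements
level0 = ("VOC2012/ImageSets/Main/train.txt", "2008_000008")
level1 = (level0, ("VOC2012/JPEGImages/2008_000008.jpg", b"<image bytes>"))

assert getitem(*[0] * 0, 1)(level0) == "2008_000008"  # level 0: element[1]
assert getitem(*[0] * 1, 1)(level1) == "2008_000008"  # level 1: element[0][1]
```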
|
=================================================================================================================================
SOURCE CODE FILE: _folder.py
LINES: 1
SIZE: 2.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_folder.py
ENCODING: utf-8
```py
import functools
import os
import os.path
import pathlib
from typing import Any, BinaryIO, Collection, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import FileLister, FileOpener, Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import EncodedData, EncodedImage
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
__all__ = ["from_data_folder", "from_image_folder"]
def _is_not_top_level_file(path: str, *, root: pathlib.Path) -> bool:
rel_path = pathlib.Path(path).relative_to(root)
return rel_path.is_dir() or rel_path.parent != pathlib.Path(".")
def _prepare_sample(
data: Tuple[str, BinaryIO],
*,
root: pathlib.Path,
categories: List[str],
) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).relative_to(root).parts[0]
return dict(
path=path,
data=EncodedData.from_file(buffer),
label=Label.from_category(category, categories=categories),
)
def from_data_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Optional[Collection[str]] = None,
recursive: bool = True,
) -> Tuple[IterDataPipe, List[str]]:
root = pathlib.Path(root).expanduser().resolve()
categories = sorted(entry.name for entry in os.scandir(root) if entry.is_dir())
masks: Union[List[str], str] = [f"*.{ext}" for ext in valid_extensions] if valid_extensions is not None else ""
dp = FileLister(str(root), recursive=recursive, masks=masks)
dp: IterDataPipe = Filter(dp, functools.partial(_is_not_top_level_file, root=root))
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
dp = FileOpener(dp, mode="rb")
return Mapper(dp, functools.partial(_prepare_sample, root=root, categories=categories)), categories
def _data_to_image_key(sample: Dict[str, Any]) -> Dict[str, Any]:
sample["image"] = EncodedImage(sample.pop("data").data)
return sample
def from_image_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Collection[str] = ("jpg", "jpeg", "png", "ppm", "bmp", "pgm", "tif", "tiff", "webp"),
**kwargs: Any,
) -> Tuple[IterDataPipe, List[str]]:
valid_extensions = [valid_extension for ext in valid_extensions for valid_extension in (ext.lower(), ext.upper())]
dp, categories = from_data_folder(root, valid_extensions=valid_extensions, **kwargs)
return Mapper(dp, _data_to_image_key), categories
```
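A minimal usage sketch for `from_image_folder` (the root path is hypothetical and is expected to contain one sub-folder per category). Each sample is a dict with `path`, `image` and `label` keys, and the returned categories mirror the sorted sub-folder names:
```py
from torchvision.prototype.datasets._folder import from_image_folder

dp, categories = from_image_folder("/data/pets")  # e.g. /data/pets/{cat,dog}/*.jpg
print(categories)
for sample in dp:
    print(sample["path"], int(sample["label"]))
    break
```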
|
===============================================================================================================================
SOURCE CODE FILE: _home.py
LINES: 1
SIZE: 0.66 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\_home.py
ENCODING: utf-8
```py
import os
from typing import Optional
import torchvision._internally_replaced_utils as _iru
def home(root: Optional[str] = None) -> str:
if root is not None:
_iru._HOME = root
return _iru._HOME
root = os.getenv("TORCHVISION_DATASETS_HOME")
if root is not None:
return root
return _iru._HOME
def use_sharded_dataset(use: Optional[bool] = None) -> bool:
if use is not None:
_iru._USE_SHARDED_DATASETS = use
return _iru._USE_SHARDED_DATASETS
use = os.getenv("TORCHVISION_SHARDED_DATASETS")
if use is not None:
return use == "1"
return _iru._USE_SHARDED_DATASETS
```
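The resolution order of `home()` is: explicit argument (which is also stored), then the `TORCHVISION_DATASETS_HOME` environment variable, then torchvision's internal default. A short sketch with hypothetical paths:
```py
import os

from torchvision.prototype import datasets

os.environ["TORCHVISION_DATASETS_HOME"] = "/data/torchvision"
print(datasets.home())                 # "/data/torchvision": the environment variable wins
print(datasets.home("/mnt/datasets"))  # explicit argument is stored and returned
```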
|
===================================================================================================================================
SOURCE CODE FILE: benchmark.py
LINES: 1
SIZE: 21.70 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\benchmark.py
ENCODING: utf-8
```py
# type: ignore
import argparse
import collections.abc
import contextlib
import inspect
import itertools
import os
import os.path
import pathlib
import shutil
import sys
import tempfile
import time
import unittest.mock
import warnings
import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataloader_experimental import DataLoader2
from torchvision import datasets as legacy_datasets
from torchvision.datasets.utils import extract_archive
from torchvision.prototype import datasets as new_datasets
from torchvision.transforms import PILToTensor
def main(
name,
*,
variant=None,
legacy=True,
new=True,
start=True,
iteration=True,
num_starts=3,
num_samples=10_000,
temp_root=None,
num_workers=0,
):
benchmarks = [
benchmark
for benchmark in DATASET_BENCHMARKS
if benchmark.name == name and (variant is None or benchmark.variant == variant)
]
if not benchmarks:
msg = f"No DatasetBenchmark available for dataset '{name}'"
if variant is not None:
msg += f" and variant '{variant}'"
raise ValueError(msg)
for benchmark in benchmarks:
print("#" * 80)
print(f"{benchmark.name}" + (f" ({benchmark.variant})" if benchmark.variant is not None else ""))
if legacy and start:
print(
"legacy",
"cold_start",
Measurement.time(benchmark.legacy_cold_start(temp_root, num_workers=num_workers), number=num_starts),
)
print(
"legacy",
"warm_start",
Measurement.time(benchmark.legacy_warm_start(temp_root, num_workers=num_workers), number=num_starts),
)
if legacy and iteration:
print(
"legacy",
"iteration",
Measurement.iterations_per_time(
benchmark.legacy_iteration(temp_root, num_workers=num_workers, num_samples=num_samples)
),
)
if new and start:
print(
"new",
"cold_start",
Measurement.time(benchmark.new_cold_start(num_workers=num_workers), number=num_starts),
)
if new and iteration:
print(
"new",
"iteration",
Measurement.iterations_per_time(
benchmark.new_iteration(num_workers=num_workers, num_samples=num_samples)
),
)
class DatasetBenchmark:
def __init__(
self,
name: str,
*,
variant=None,
legacy_cls=None,
new_config=None,
legacy_config_map=None,
legacy_special_options_map=None,
prepare_legacy_root=None,
):
self.name = name
self.variant = variant
self.new_raw_dataset = new_datasets._api.find(name)
self.legacy_cls = legacy_cls or self._find_legacy_cls()
if new_config is None:
new_config = self.new_raw_dataset.default_config
elif isinstance(new_config, dict):
new_config = self.new_raw_dataset.info.make_config(**new_config)
self.new_config = new_config
self.legacy_config_map = legacy_config_map
self.legacy_special_options_map = legacy_special_options_map or self._legacy_special_options_map
self.prepare_legacy_root = prepare_legacy_root
def new_dataset(self, *, num_workers=0):
return DataLoader2(new_datasets.load(self.name, **self.new_config), num_workers=num_workers)
def new_cold_start(self, *, num_workers):
def fn(timer):
with timer:
dataset = self.new_dataset(num_workers=num_workers)
next(iter(dataset))
return fn
def new_iteration(self, *, num_samples, num_workers):
def fn(timer):
dataset = self.new_dataset(num_workers=num_workers)
num_sample = 0
with timer:
for _ in dataset:
num_sample += 1
if num_sample == num_samples:
break
return num_sample
return fn
def suppress_output(self):
@contextlib.contextmanager
def context_manager():
with open(os.devnull, "w") as devnull:
with contextlib.redirect_stdout(devnull), contextlib.redirect_stderr(devnull):
yield
return context_manager()
def legacy_dataset(self, root, *, num_workers=0, download=None):
legacy_config = self.legacy_config_map(self, root) if self.legacy_config_map else dict()
special_options = self.legacy_special_options_map(self)
if "download" in special_options and download is not None:
special_options["download"] = download
with self.suppress_output():
return DataLoader(
self.legacy_cls(legacy_config.pop("root", str(root)), **legacy_config, **special_options),
shuffle=True,
num_workers=num_workers,
)
@contextlib.contextmanager
def patch_download_and_integrity_checks(self):
patches = [
("download_url", dict()),
("download_file_from_google_drive", dict()),
("check_integrity", dict(new=lambda path, md5=None: os.path.isfile(path))),
]
dataset_module = sys.modules[self.legacy_cls.__module__]
utils_module = legacy_datasets.utils
with contextlib.ExitStack() as stack:
for name, patch_kwargs in patches:
patch_module = dataset_module if name in dir(dataset_module) else utils_module
stack.enter_context(unittest.mock.patch(f"{patch_module.__name__}.{name}", **patch_kwargs))
yield stack
def _find_resource_file_names(self):
info = self.new_raw_dataset.info
valid_options = info._valid_options
file_names = set()
for options in (
dict(zip(valid_options.keys(), values)) for values in itertools.product(*valid_options.values())
):
resources = self.new_raw_dataset.resources(info.make_config(**options))
file_names.update([resource.file_name for resource in resources])
return file_names
@contextlib.contextmanager
def legacy_root(self, temp_root):
new_root = pathlib.Path(new_datasets.home()) / self.name
legacy_root = pathlib.Path(tempfile.mkdtemp(dir=temp_root))
if os.stat(new_root).st_dev != os.stat(legacy_root).st_dev:
warnings.warn(
"The temporary root directory for the legacy dataset was created on a different storage device than "
"the raw data that is used by the new dataset. If the devices have different I/O stats, this will "
"distort the benchmark. You can use the '--temp-root' flag to relocate the root directory of the "
"temporary directories.",
RuntimeWarning,
)
try:
for file_name in self._find_resource_file_names():
(legacy_root / file_name).symlink_to(new_root / file_name)
if self.prepare_legacy_root:
self.prepare_legacy_root(self, legacy_root)
with self.patch_download_and_integrity_checks():
yield legacy_root
finally:
shutil.rmtree(legacy_root)
def legacy_cold_start(self, temp_root, *, num_workers):
def fn(timer):
with self.legacy_root(temp_root) as root:
with timer:
dataset = self.legacy_dataset(root, num_workers=num_workers)
next(iter(dataset))
return fn
def legacy_warm_start(self, temp_root, *, num_workers):
def fn(timer):
with self.legacy_root(temp_root) as root:
self.legacy_dataset(root, num_workers=num_workers)
with timer:
dataset = self.legacy_dataset(root, num_workers=num_workers, download=False)
next(iter(dataset))
return fn
def legacy_iteration(self, temp_root, *, num_samples, num_workers):
def fn(timer):
with self.legacy_root(temp_root) as root:
dataset = self.legacy_dataset(root, num_workers=num_workers)
with timer:
for num_sample, _ in enumerate(dataset, 1):
if num_sample == num_samples:
break
return num_sample
return fn
def _find_legacy_cls(self):
legacy_clss = {
name.lower(): dataset_class
for name, dataset_class in legacy_datasets.__dict__.items()
if isinstance(dataset_class, type) and issubclass(dataset_class, legacy_datasets.VisionDataset)
}
try:
return legacy_clss[self.name]
except KeyError as error:
raise RuntimeError(
f"Can't determine the legacy dataset class for '{self.name}' automatically. "
f"Please set the 'legacy_cls' keyword argument manually."
) from error
_SPECIAL_KWARGS = {
"transform",
"target_transform",
"transforms",
"download",
}
@staticmethod
def _legacy_special_options_map(benchmark):
available_parameters = set()
for cls in benchmark.legacy_cls.__mro__:
if cls is legacy_datasets.VisionDataset:
break
available_parameters.update(inspect.signature(cls.__init__).parameters)
available_special_kwargs = benchmark._SPECIAL_KWARGS.intersection(available_parameters)
special_options = dict()
if "download" in available_special_kwargs:
special_options["download"] = True
if "transform" in available_special_kwargs:
special_options["transform"] = PILToTensor()
if "target_transform" in available_special_kwargs:
special_options["target_transform"] = torch.tensor
elif "transforms" in available_special_kwargs:
special_options["transforms"] = JointTransform(PILToTensor(), PILToTensor())
return special_options
class Measurement:
@classmethod
def time(cls, fn, *, number):
results = Measurement._timeit(fn, number=number)
times = torch.tensor(tuple(zip(*results))[1])
return cls._format(times, unit="s")
@classmethod
def iterations_per_time(cls, fn):
num_samples, time = Measurement._timeit(fn, number=1)[0]
iterations_per_second = torch.tensor(num_samples) / torch.tensor(time)
return cls._format(iterations_per_second, unit="it/s")
class Timer:
def __init__(self):
self._start = None
self._stop = None
def __enter__(self):
self._start = time.perf_counter()
def __exit__(self, exc_type, exc_val, exc_tb):
self._stop = time.perf_counter()
@property
def delta(self):
if self._start is None:
raise RuntimeError()
elif self._stop is None:
raise RuntimeError()
return self._stop - self._start
@classmethod
def _timeit(cls, fn, number):
results = []
for _ in range(number):
timer = cls.Timer()
output = fn(timer)
results.append((output, timer.delta))
return results
@classmethod
def _format(cls, measurements, *, unit):
measurements = torch.as_tensor(measurements).to(torch.float64).flatten()
if measurements.numel() == 1:
# TODO format that into engineering format
return f"{float(measurements):.3f} {unit}"
mean, std = Measurement._compute_mean_and_std(measurements)
# TODO format that into engineering format
return f"{mean:.3f} ± {std:.3f} {unit}"
@classmethod
def _compute_mean_and_std(cls, t):
mean = float(t.mean())
std = float(t.std(0, unbiased=t.numel() > 1))
return mean, std
def no_split(benchmark, root):
legacy_config = dict(benchmark.new_config)
del legacy_config["split"]
return legacy_config
def bool_split(name="train"):
def legacy_config_map(benchmark, root):
legacy_config = dict(benchmark.new_config)
legacy_config[name] = legacy_config.pop("split") == "train"
return legacy_config
return legacy_config_map
def base_folder(rel_folder=None):
if rel_folder is None:
def rel_folder(benchmark):
return benchmark.name
elif not callable(rel_folder):
name = rel_folder
def rel_folder(_):
return name
def prepare_legacy_root(benchmark, root):
files = list(root.glob("*"))
folder = root / rel_folder(benchmark)
folder.mkdir(parents=True)
for file in files:
shutil.move(str(file), str(folder))
return folder
return prepare_legacy_root
class JointTransform:
def __init__(self, *transforms):
self.transforms = transforms
def __call__(self, *inputs):
        if len(inputs) == 1 and isinstance(inputs[0], collections.abc.Sequence):
inputs = inputs[0]
if len(inputs) != len(self.transforms):
raise RuntimeError(
f"The number of inputs and transforms mismatches: {len(inputs)} != {len(self.transforms)}."
)
return tuple(transform(input) for transform, input in zip(self.transforms, inputs))
def caltech101_legacy_config_map(benchmark, root):
legacy_config = no_split(benchmark, root)
# The new dataset always returns the category and annotation
legacy_config["target_type"] = ("category", "annotation")
return legacy_config
mnist_base_folder = base_folder(lambda benchmark: pathlib.Path(benchmark.legacy_cls.__name__) / "raw")
def mnist_legacy_config_map(benchmark, root):
return dict(train=benchmark.new_config.split == "train")
def emnist_prepare_legacy_root(benchmark, root):
folder = mnist_base_folder(benchmark, root)
shutil.move(str(folder / "emnist-gzip.zip"), str(folder / "gzip.zip"))
return folder
def emnist_legacy_config_map(benchmark, root):
legacy_config = mnist_legacy_config_map(benchmark, root)
legacy_config["split"] = benchmark.new_config.image_set.replace("_", "").lower()
return legacy_config
def qmnist_legacy_config_map(benchmark, root):
legacy_config = mnist_legacy_config_map(benchmark, root)
legacy_config["what"] = benchmark.new_config.split
# The new dataset always returns the full label
legacy_config["compat"] = False
return legacy_config
def coco_legacy_config_map(benchmark, root):
images, _ = benchmark.new_raw_dataset.resources(benchmark.new_config)
return dict(
root=str(root / pathlib.Path(images.file_name).stem),
annFile=str(
root / "annotations" / f"{benchmark.variant}_{benchmark.new_config.split}{benchmark.new_config.year}.json"
),
)
def coco_prepare_legacy_root(benchmark, root):
images, annotations = benchmark.new_raw_dataset.resources(benchmark.new_config)
extract_archive(str(root / images.file_name))
extract_archive(str(root / annotations.file_name))
DATASET_BENCHMARKS = [
DatasetBenchmark(
"caltech101",
legacy_config_map=caltech101_legacy_config_map,
prepare_legacy_root=base_folder(),
legacy_special_options_map=lambda config: dict(
download=True,
transform=PILToTensor(),
target_transform=JointTransform(torch.tensor, torch.tensor),
),
),
DatasetBenchmark(
"caltech256",
legacy_config_map=no_split,
prepare_legacy_root=base_folder(),
),
DatasetBenchmark(
"celeba",
prepare_legacy_root=base_folder(),
        legacy_config_map=lambda benchmark, root: dict(
split="valid" if benchmark.new_config.split == "val" else benchmark.new_config.split,
# The new dataset always returns all annotations
target_type=("attr", "identity", "bbox", "landmarks"),
),
),
DatasetBenchmark(
"cifar10",
legacy_config_map=bool_split(),
),
DatasetBenchmark(
"cifar100",
legacy_config_map=bool_split(),
),
DatasetBenchmark(
"emnist",
prepare_legacy_root=emnist_prepare_legacy_root,
legacy_config_map=emnist_legacy_config_map,
),
DatasetBenchmark(
"fashionmnist",
prepare_legacy_root=mnist_base_folder,
legacy_config_map=mnist_legacy_config_map,
),
DatasetBenchmark(
"kmnist",
prepare_legacy_root=mnist_base_folder,
legacy_config_map=mnist_legacy_config_map,
),
DatasetBenchmark(
"mnist",
prepare_legacy_root=mnist_base_folder,
legacy_config_map=mnist_legacy_config_map,
),
DatasetBenchmark(
"qmnist",
prepare_legacy_root=mnist_base_folder,
legacy_config_map=mnist_legacy_config_map,
),
DatasetBenchmark(
"sbd",
legacy_cls=legacy_datasets.SBDataset,
        legacy_config_map=lambda benchmark, root: dict(
image_set=benchmark.new_config.split,
mode="boundaries" if benchmark.new_config.boundaries else "segmentation",
),
legacy_special_options_map=lambda benchmark: dict(
download=True,
transforms=JointTransform(
PILToTensor(), torch.tensor if benchmark.new_config.boundaries else PILToTensor()
),
),
),
DatasetBenchmark("voc", legacy_cls=legacy_datasets.VOCDetection),
DatasetBenchmark("imagenet", legacy_cls=legacy_datasets.ImageNet),
DatasetBenchmark(
"coco",
variant="instances",
legacy_cls=legacy_datasets.CocoDetection,
new_config=dict(split="train", annotations="instances"),
legacy_config_map=coco_legacy_config_map,
prepare_legacy_root=coco_prepare_legacy_root,
legacy_special_options_map=lambda benchmark: dict(transform=PILToTensor(), target_transform=None),
),
DatasetBenchmark(
"coco",
variant="captions",
legacy_cls=legacy_datasets.CocoCaptions,
new_config=dict(split="train", annotations="captions"),
legacy_config_map=coco_legacy_config_map,
prepare_legacy_root=coco_prepare_legacy_root,
legacy_special_options_map=lambda benchmark: dict(transform=PILToTensor(), target_transform=None),
),
]
def parse_args(argv=None):
parser = argparse.ArgumentParser(
prog="torchvision.prototype.datasets.benchmark.py",
description="Utility to benchmark new datasets against their legacy variants.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("name", help="Name of the dataset to benchmark.")
parser.add_argument(
"--variant", help="Variant of the dataset. If omitted all available variants will be benchmarked."
)
parser.add_argument(
"-n",
"--num-starts",
type=int,
default=3,
help="Number of warm and cold starts of each benchmark. Default to 3.",
)
parser.add_argument(
"-N",
"--num-samples",
type=int,
default=10_000,
help="Maximum number of samples to draw during iteration benchmarks. Defaults to 10_000.",
)
parser.add_argument(
"--nl",
"--no-legacy",
dest="legacy",
action="store_false",
help="Skip legacy benchmarks.",
)
parser.add_argument(
"--nn",
"--no-new",
dest="new",
action="store_false",
help="Skip new benchmarks.",
)
parser.add_argument(
"--ns",
"--no-start",
dest="start",
action="store_false",
help="Skip start benchmarks.",
)
parser.add_argument(
"--ni",
"--no-iteration",
dest="iteration",
action="store_false",
help="Skip iteration benchmarks.",
)
parser.add_argument(
"-t",
"--temp-root",
type=pathlib.Path,
help=(
"Root of the temporary legacy root directories. Use this if your system default temporary directory is on "
"another storage device as the raw data to avoid distortions due to differing I/O stats."
),
)
parser.add_argument(
"-j",
"--num-workers",
type=int,
default=0,
help=(
"Number of subprocesses used to load the data. Setting this to 0 (default) will load all data in the main "
"process and thus disable multi-processing."
),
)
return parser.parse_args(argv or sys.argv[1:])
if __name__ == "__main__":
args = parse_args()
try:
main(
args.name,
variant=args.variant,
legacy=args.legacy,
new=args.new,
start=args.start,
iteration=args.iteration,
num_starts=args.num_starts,
num_samples=args.num_samples,
temp_root=args.temp_root,
num_workers=args.num_workers,
)
except Exception as error:
msg = str(error)
print(msg or f"Unspecified {type(error)} was raised during execution.", file=sys.stderr)
sys.exit(1)
```
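The script is normally run from the command line, but since everything is routed through `main`, the same benchmark can be driven programmatically. A hedged sketch (dataset name and temp root are examples; the raw data must already live under the datasets home):
```py
from torchvision.prototype.datasets.benchmark import main

main(
    "cifar10",
    num_starts=3,
    num_samples=1_000,
    temp_root="/mnt/fast/tmp",  # keep temporary legacy roots on the same device as the raw data
    num_workers=0,
)
```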
|
=================================================================================================================================================
SOURCE CODE FILE: generate_category_files.py
LINES: 2
SIZE: 1.64 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\generate_category_files.py
ENCODING: utf-8
```py
# type: ignore
import argparse
import csv
import sys
from torchvision.prototype import datasets
from torchvision.prototype.datasets.utils._internal import BUILTIN_DIR
def main(*names, force=False):
for name in names:
path = BUILTIN_DIR / f"{name}.categories"
if path.exists() and not force:
continue
dataset = datasets.load(name)
try:
categories = dataset._generate_categories()
except NotImplementedError:
continue
with open(path, "w") as file:
writer = csv.writer(file, lineterminator="\n")
for category in categories:
writer.writerow((category,) if isinstance(category, str) else category)
def parse_args(argv=None):
parser = argparse.ArgumentParser(prog="torchvision.prototype.datasets.generate_category_files.py")
parser.add_argument(
"names",
nargs="*",
type=str,
help="Names of datasets to generate category files for. If omitted, all datasets will be used.",
)
parser.add_argument(
"-f",
"--force",
action="store_true",
help="Force regeneration of category files.",
)
args = parser.parse_args(argv or sys.argv[1:])
if not args.names:
args.names = datasets.list_datasets()
return args
if __name__ == "__main__":
args = parse_args()
try:
main(*args.names, force=args.force)
except Exception as error:
msg = str(error)
print(msg or f"Unspecified {type(error)} was raised during execution.", file=sys.stderr)
sys.exit(1)
```
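Equivalent to invoking the script with `-f voc`, the category file regeneration can also be triggered from Python; this sketch assumes the raw VOC data is already available under the datasets home so that `_generate_categories` can iterate over the annotations:
```py
from torchvision.prototype.datasets.generate_category_files import main

main("voc", force=True)  # rewrites _builtin/voc.categories
```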
|
========================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\utils\__init__.py
ENCODING: utf-8
```py
from . import _internal # usort: skip
from ._dataset import Dataset
from ._encoded import EncodedData, EncodedImage
from ._resource import GDriveResource, HttpResource, KaggleDownloadResource, ManualDownloadResource, OnlineResource
```
|
========================================================================================================================================
SOURCE CODE FILE: _dataset.py
LINES: 1
SIZE: 1.93 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\utils\_dataset.py
ENCODING: utf-8
```py
import abc
import importlib
import pathlib
from typing import Any, Collection, Dict, Iterator, List, Optional, Sequence, Union
from torchdata.datapipes.iter import IterDataPipe
from torchvision.datasets.utils import verify_str_arg
from ._resource import OnlineResource
class Dataset(IterDataPipe[Dict[str, Any]], abc.ABC):
@staticmethod
def _verify_str_arg(
value: str,
arg: Optional[str] = None,
valid_values: Optional[Collection[str]] = None,
*,
custom_msg: Optional[str] = None,
) -> str:
return verify_str_arg(value, arg, valid_values, custom_msg=custom_msg)
def __init__(
self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False, dependencies: Collection[str] = ()
) -> None:
for dependency in dependencies:
try:
importlib.import_module(dependency)
except ModuleNotFoundError:
raise ModuleNotFoundError(
f"{type(self).__name__}() depends on the third-party package '{dependency}'. "
f"Please install it, for example with `pip install {dependency}`."
) from None
self._root = pathlib.Path(root).expanduser().resolve()
resources = [
resource.load(self._root, skip_integrity_check=skip_integrity_check) for resource in self._resources()
]
self._dp = self._datapipe(resources)
def __iter__(self) -> Iterator[Dict[str, Any]]:
yield from self._dp
@abc.abstractmethod
def _resources(self) -> List[OnlineResource]:
pass
@abc.abstractmethod
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
pass
@abc.abstractmethod
def __len__(self) -> int:
pass
def _generate_categories(self) -> Sequence[Union[str, Sequence[str]]]:
raise NotImplementedError
```
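A concrete dataset only has to provide `_resources`, `_datapipe` and `__len__`; resource loading, integrity checks and dependency checks are handled by the base class. A minimal, hypothetical subclass sketch (URL, sample layout and length are made up; real builtin datasets additionally use `register_dataset`):
```py
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union

from torchdata.datapipes.iter import IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedData, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling


class TinyDataset(Dataset):
    def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
        super().__init__(root, skip_integrity_check=skip_integrity_check)

    def _resources(self) -> List[OnlineResource]:
        # hypothetical archive; without a sha256 the integrity check is a no-op
        return [HttpResource("https://example.com/tiny-dataset.tar")]

    def _prepare_sample(self, data: Tuple[str, BinaryIO]) -> Dict[str, Any]:
        path, buffer = data
        return dict(path=path, data=EncodedData.from_file(buffer))

    def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
        dp = resource_dps[0]
        dp = hint_shuffling(dp)
        dp = hint_sharding(dp)
        return Mapper(dp, self._prepare_sample)

    def __len__(self) -> int:
        return 100  # hypothetical number of samples in the archive
```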
|
========================================================================================================================================
SOURCE CODE FILE: _encoded.py
LINES: 1
SIZE: 1.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\utils\_encoded.py
ENCODING: utf-8
```py
from __future__ import annotations
import os
import sys
from typing import Any, BinaryIO, Optional, Tuple, Type, TypeVar, Union
import PIL.Image
import torch
from torchvision.prototype.utils._internal import fromfile, ReadOnlyTensorBuffer
from torchvision.tv_tensors._tv_tensor import TVTensor
D = TypeVar("D", bound="EncodedData")
class EncodedData(TVTensor):
@classmethod
def _wrap(cls: Type[D], tensor: torch.Tensor) -> D:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> EncodedData:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
# TODO: warn / bail out if we encounter a tensor with shape other than (N,) or with dtype other than uint8?
return cls._wrap(tensor)
@classmethod
def wrap_like(cls: Type[D], other: D, tensor: torch.Tensor) -> D:
return cls._wrap(tensor)
@classmethod
def from_file(cls: Type[D], file: BinaryIO, **kwargs: Any) -> D:
encoded_data = cls(fromfile(file, dtype=torch.uint8, byte_order=sys.byteorder), **kwargs)
file.close()
return encoded_data
@classmethod
def from_path(cls: Type[D], path: Union[str, os.PathLike], **kwargs: Any) -> D:
with open(path, "rb") as file:
return cls.from_file(file, **kwargs)
class EncodedImage(EncodedData):
# TODO: Use @functools.cached_property if we can depend on Python 3.8
@property
def spatial_size(self) -> Tuple[int, int]:
if not hasattr(self, "_spatial_size"):
with PIL.Image.open(ReadOnlyTensorBuffer(self)) as image:
self._spatial_size = image.height, image.width
return self._spatial_size
```
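A short sketch of the `EncodedImage` helpers above (the file path is hypothetical): `from_path` reads the raw bytes into a flat `uint8` tensor without decoding, and `spatial_size` lazily opens the buffer with PIL only to query height and width:
```py
from torchvision.prototype.datasets.utils import EncodedImage

image = EncodedImage.from_path("/data/images/cat.jpg")  # hypothetical path
print(image.dtype, image.shape)  # torch.uint8, torch.Size([<num encoded bytes>])
print(image.spatial_size)        # (height, width), decoded lazily via PIL
```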
|
=========================================================================================================================================
SOURCE CODE FILE: _internal.py
LINES: 1
SIZE: 6.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\utils\_internal.py
ENCODING: utf-8
```py
import csv
import functools
import pathlib
import pickle
from typing import Any, BinaryIO, Callable, Dict, IO, Iterator, List, Sequence, Sized, Tuple, TypeVar, Union
import torch
import torch.distributed as dist
import torch.utils.data
from torchdata.datapipes.iter import IoPathFileLister, IoPathFileOpener, IterDataPipe, ShardingFilter, Shuffler
from torchvision.prototype.utils._internal import fromfile
__all__ = [
"INFINITE_BUFFER_SIZE",
"BUILTIN_DIR",
"read_mat",
"MappingIterator",
"getitem",
"path_accessor",
"path_comparator",
"read_flo",
"hint_sharding",
"hint_shuffling",
]
K = TypeVar("K")
D = TypeVar("D")
# pseudo-infinite until a true infinite buffer is supported by all datapipes
INFINITE_BUFFER_SIZE = 1_000_000_000
BUILTIN_DIR = pathlib.Path(__file__).parent.parent / "_builtin"
def read_mat(buffer: BinaryIO, **kwargs: Any) -> Any:
try:
import scipy.io as sio
except ImportError as error:
raise ModuleNotFoundError("Package `scipy` is required to be installed to read .mat files.") from error
data = sio.loadmat(buffer, **kwargs)
buffer.close()
return data
class MappingIterator(IterDataPipe[Union[Tuple[K, D], D]]):
def __init__(self, datapipe: IterDataPipe[Dict[K, D]], *, drop_key: bool = False) -> None:
self.datapipe = datapipe
self.drop_key = drop_key
def __iter__(self) -> Iterator[Union[Tuple[K, D], D]]:
for mapping in self.datapipe:
yield from iter(mapping.values() if self.drop_key else mapping.items())
def _getitem_closure(obj: Any, *, items: Sequence[Any]) -> Any:
for item in items:
obj = obj[item]
return obj
def getitem(*items: Any) -> Callable[[Any], Any]:
return functools.partial(_getitem_closure, items=items)
def _getattr_closure(obj: Any, *, attrs: Sequence[str]) -> Any:
for attr in attrs:
obj = getattr(obj, attr)
return obj
def _path_attribute_accessor(path: pathlib.Path, *, name: str) -> Any:
return _getattr_closure(path, attrs=name.split("."))
def _path_accessor_closure(data: Tuple[str, Any], *, getter: Callable[[pathlib.Path], D]) -> D:
return getter(pathlib.Path(data[0]))
def path_accessor(getter: Union[str, Callable[[pathlib.Path], D]]) -> Callable[[Tuple[str, Any]], D]:
if isinstance(getter, str):
getter = functools.partial(_path_attribute_accessor, name=getter)
return functools.partial(_path_accessor_closure, getter=getter)
def _path_comparator_closure(data: Tuple[str, Any], *, accessor: Callable[[Tuple[str, Any]], D], value: D) -> bool:
return accessor(data) == value
def path_comparator(getter: Union[str, Callable[[pathlib.Path], D]], value: D) -> Callable[[Tuple[str, Any]], bool]:
return functools.partial(_path_comparator_closure, accessor=path_accessor(getter), value=value)
class PicklerDataPipe(IterDataPipe):
def __init__(self, source_datapipe: IterDataPipe[Tuple[str, IO[bytes]]]) -> None:
self.source_datapipe = source_datapipe
def __iter__(self) -> Iterator[Any]:
for _, fobj in self.source_datapipe:
data = pickle.load(fobj)
for _, d in enumerate(data):
yield d
class SharderDataPipe(ShardingFilter):
def __init__(self, source_datapipe: IterDataPipe) -> None:
super().__init__(source_datapipe)
self.rank = 0
self.world_size = 1
if dist.is_available() and dist.is_initialized():
self.rank = dist.get_rank()
self.world_size = dist.get_world_size()
self.apply_sharding(self.world_size, self.rank)
def __iter__(self) -> Iterator[Any]:
num_workers = self.world_size
worker_id = self.rank
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
worker_id = worker_id + worker_info.id * num_workers
num_workers *= worker_info.num_workers
self.apply_sharding(num_workers, worker_id)
yield from super().__iter__()
class TakerDataPipe(IterDataPipe):
def __init__(self, source_datapipe: IterDataPipe, num_take: int) -> None:
super().__init__()
self.source_datapipe = source_datapipe
self.num_take = num_take
self.world_size = 1
if dist.is_available() and dist.is_initialized():
self.world_size = dist.get_world_size()
def __iter__(self) -> Iterator[Any]:
num_workers = self.world_size
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
num_workers *= worker_info.num_workers
# TODO: this is weird as it drops more elements than it should
num_take = self.num_take // num_workers
for i, data in enumerate(self.source_datapipe):
if i < num_take:
yield data
else:
break
def __len__(self) -> int:
num_take = self.num_take // self.world_size
if isinstance(self.source_datapipe, Sized):
if len(self.source_datapipe) < num_take:
num_take = len(self.source_datapipe)
# TODO: might be weird to not take `num_workers` into account
return num_take
def _make_sharded_datapipe(root: str, dataset_size: int) -> IterDataPipe[Dict[str, Any]]:
dp = IoPathFileLister(root=root)
dp = SharderDataPipe(dp)
dp = dp.shuffle(buffer_size=INFINITE_BUFFER_SIZE)
dp = IoPathFileOpener(dp, mode="rb")
dp = PicklerDataPipe(dp)
# dp = dp.cycle(2)
dp = TakerDataPipe(dp, dataset_size)
return dp
def read_flo(file: BinaryIO) -> torch.Tensor:
if file.read(4) != b"PIEH":
raise ValueError("Magic number incorrect. Invalid .flo file")
width, height = fromfile(file, dtype=torch.int32, byte_order="little", count=2)
flow = fromfile(file, dtype=torch.float32, byte_order="little", count=height * width * 2)
return flow.reshape((height, width, 2)).permute((2, 0, 1))
def hint_sharding(datapipe: IterDataPipe) -> ShardingFilter:
return ShardingFilter(datapipe)
def hint_shuffling(datapipe: IterDataPipe[D]) -> Shuffler[D]:
return Shuffler(datapipe, buffer_size=INFINITE_BUFFER_SIZE).set_shuffle(False)
def read_categories_file(name: str) -> List[Union[str, Sequence[str]]]:
path = BUILTIN_DIR / f"{name}.categories"
with open(path, newline="") as file:
rows = list(csv.reader(file))
rows = [row[0] if len(row) == 1 else row for row in rows]
return rows
```
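The path helpers are easiest to understand on a concrete `(path, data)` tuple of the kind archive datapipes yield; the path below is made up:
```py
from torchvision.prototype.datasets.utils._internal import getitem, path_accessor, path_comparator

sample = ("/root/archive/train/images/0001.png", b"<file handle placeholder>")
print(getitem(0)(sample))                                # the path itself
print(path_accessor("stem")(sample))                     # "0001"
print(path_comparator("suffix", ".png")(sample))         # True
print(path_comparator("parent.name", "images")(sample))  # True
```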
|
=========================================================================================================================================
SOURCE CODE FILE: _resource.py
LINES: 4
SIZE: 8.39 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\datasets\utils\_resource.py
ENCODING: utf-8
```py
import abc
import hashlib
import itertools
import pathlib
from typing import Any, Callable, IO, Literal, NoReturn, Optional, Sequence, Set, Tuple, Union
from urllib.parse import urlparse
from torchdata.datapipes.iter import (
FileLister,
FileOpener,
IterableWrapper,
IterDataPipe,
RarArchiveLoader,
TarArchiveLoader,
ZipArchiveLoader,
)
from torchvision.datasets.utils import (
_decompress,
_detect_file_type,
_get_google_drive_file_id,
_get_redirect_url,
download_file_from_google_drive,
download_url,
extract_archive,
)
class OnlineResource(abc.ABC):
def __init__(
self,
*,
file_name: str,
sha256: Optional[str] = None,
preprocess: Optional[Union[Literal["decompress", "extract"], Callable[[pathlib.Path], None]]] = None,
) -> None:
self.file_name = file_name
self.sha256 = sha256
if isinstance(preprocess, str):
if preprocess == "decompress":
preprocess = self._decompress
elif preprocess == "extract":
preprocess = self._extract
else:
raise ValueError(
f"Only `'decompress'` or `'extract'` are valid if `preprocess` is passed as string,"
f"but got {preprocess} instead."
)
self._preprocess = preprocess
@staticmethod
def _extract(file: pathlib.Path) -> None:
extract_archive(str(file), to_path=str(file).replace("".join(file.suffixes), ""), remove_finished=False)
@staticmethod
def _decompress(file: pathlib.Path) -> None:
_decompress(str(file), remove_finished=True)
def _loader(self, path: pathlib.Path) -> IterDataPipe[Tuple[str, IO]]:
if path.is_dir():
return FileOpener(FileLister(str(path), recursive=True), mode="rb")
dp = FileOpener(IterableWrapper((str(path),)), mode="rb")
archive_loader = self._guess_archive_loader(path)
if archive_loader:
dp = archive_loader(dp)
return dp
_ARCHIVE_LOADERS = {
".tar": TarArchiveLoader,
".zip": ZipArchiveLoader,
".rar": RarArchiveLoader,
}
def _guess_archive_loader(
self, path: pathlib.Path
) -> Optional[Callable[[IterDataPipe[Tuple[str, IO]]], IterDataPipe[Tuple[str, IO]]]]:
try:
_, archive_type, _ = _detect_file_type(path.name)
except RuntimeError:
return None
return self._ARCHIVE_LOADERS.get(archive_type) # type: ignore[arg-type]
def load(
self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False
) -> IterDataPipe[Tuple[str, IO]]:
root = pathlib.Path(root)
path = root / self.file_name
# Instead of the raw file, there might also be files with fewer suffixes after decompression or directories
# with no suffixes at all. `pathlib.Path().stem` will only give us the name with the last suffix removed, which
# is not sufficient for files with multiple suffixes, e.g. foo.tar.gz.
stem = path.name.replace("".join(path.suffixes), "")
def find_candidates() -> Set[pathlib.Path]:
# Although it looks like we could glob for f"{stem}*" to find the file candidates as well as the folder
# candidate simultaneously, that would also pick up other files that share the same prefix. For example, the
# test split of the stanford-cars dataset uses the files
# - cars_test.tgz
# - cars_test_annos_withlabels.mat
# Globbing for `"cars_test*"` picks up both.
candidates = {file for file in path.parent.glob(f"{stem}.*")}
folder_candidate = path.parent / stem
if folder_candidate.exists():
candidates.add(folder_candidate)
return candidates
candidates = find_candidates()
if not candidates:
self.download(root, skip_integrity_check=skip_integrity_check)
if self._preprocess is not None:
self._preprocess(path)
candidates = find_candidates()
# We use the path with the fewest suffixes. This gives us the
# extracted > decompressed > raw
# priority that we want for the best I/O performance.
return self._loader(min(candidates, key=lambda candidate: len(candidate.suffixes)))
@abc.abstractmethod
def _download(self, root: pathlib.Path) -> None:
pass
def download(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> pathlib.Path:
root = pathlib.Path(root)
self._download(root)
path = root / self.file_name
if self.sha256 and not skip_integrity_check:
self._check_sha256(path)
return path
def _check_sha256(self, path: pathlib.Path, *, chunk_size: int = 1024 * 1024) -> None:
hash = hashlib.sha256()
with open(path, "rb") as file:
while chunk := file.read(chunk_size):
hash.update(chunk)
sha256 = hash.hexdigest()
if sha256 != self.sha256:
raise RuntimeError(
f"After the download, the SHA256 checksum of {path} didn't match the expected one: "
f"{sha256} != {self.sha256}"
)
class HttpResource(OnlineResource):
def __init__(
self, url: str, *, file_name: Optional[str] = None, mirrors: Sequence[str] = (), **kwargs: Any
) -> None:
super().__init__(file_name=file_name or pathlib.Path(urlparse(url).path).name, **kwargs)
self.url = url
self.mirrors = mirrors
self._resolved = False
def resolve(self) -> OnlineResource:
if self._resolved:
return self
redirect_url = _get_redirect_url(self.url)
if redirect_url == self.url:
self._resolved = True
return self
meta = {
attr.lstrip("_"): getattr(self, attr)
for attr in (
"file_name",
"sha256",
"_preprocess",
)
}
gdrive_id = _get_google_drive_file_id(redirect_url)
if gdrive_id:
return GDriveResource(gdrive_id, **meta)
http_resource = HttpResource(redirect_url, **meta)
http_resource._resolved = True
return http_resource
def _download(self, root: pathlib.Path) -> None:
if not self._resolved:
return self.resolve()._download(root)
for url in itertools.chain((self.url,), self.mirrors):
try:
download_url(url, str(root), filename=self.file_name, md5=None)
# TODO: make this more precise
except Exception:
continue
return
else:
# TODO: make this more informative
raise RuntimeError("Download failed!")
class GDriveResource(OnlineResource):
def __init__(self, id: str, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.id = id
def _download(self, root: pathlib.Path) -> None:
download_file_from_google_drive(self.id, root=str(root), filename=self.file_name, md5=None)
class ManualDownloadResource(OnlineResource):
def __init__(self, instructions: str, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.instructions = instructions
def _download(self, root: pathlib.Path) -> NoReturn:
raise RuntimeError(
f"The file {self.file_name} cannot be downloaded automatically. "
f"Please follow the instructions below and place it in {root}\n\n"
f"{self.instructions}"
)
class KaggleDownloadResource(ManualDownloadResource):
def __init__(self, challenge_url: str, *, file_name: str, **kwargs: Any) -> None:
instructions = "\n".join(
(
"1. Register and login at https://www.kaggle.com",
f"2. Navigate to {challenge_url}",
"3. Click 'Join Competition' and follow the instructions there",
"4. Navigate to the 'Data' tab",
f"5. Select {file_name} in the 'Data Explorer' and click the download button",
)
)
super().__init__(instructions, file_name=file_name, **kwargs)
```
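A hedged sketch of how a dataset typically declares and consumes an `OnlineResource` (URL, checksum and root are hypothetical): `load` looks for an extracted, decompressed or raw candidate under `root`, downloads and preprocesses if nothing is found, and returns a datapipe of `(path, file handle)` tuples:
```py
from torchvision.prototype.datasets.utils import HttpResource

resource = HttpResource(
    "https://example.com/data/train_images.tar",
    sha256="0" * 64,  # placeholder checksum
    preprocess="extract",
)
dp = resource.load("/data/my_dataset", skip_integrity_check=True)
for path, handle in dp:
    print(path)
    break
```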
|
================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.02 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\models\__init__.py
ENCODING: utf-8
```py
from . import depth
```
|
======================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.02 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\models\depth\__init__.py
ENCODING: utf-8
```py
from . import stereo
```
|
=============================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.05 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\models\depth\stereo\__init__.py
ENCODING: utf-8
```py
from .raft_stereo import *
from .crestereo import *
```
|
==============================================================================================================================================
SOURCE CODE FILE: crestereo.py
LINES: 1
SIZE: 64.55 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\models\depth\stereo\crestereo.py
ENCODING: utf-8
```py
import math
from functools import partial
from typing import Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models.optical_flow.raft as raft
from torch import Tensor
from torchvision.models._api import register_model, Weights, WeightsEnum
from torchvision.models._utils import handle_legacy_interface
from torchvision.models.optical_flow._utils import grid_sample, make_coords_grid, upsample_flow
from torchvision.ops import Conv2dNormActivation
from torchvision.prototype.transforms._presets import StereoMatching
__all__ = (
"CREStereo",
"CREStereo_Base_Weights",
"crestereo_base",
)
class ConvexMaskPredictor(nn.Module):
def __init__(
self,
*,
in_channels: int,
hidden_size: int,
upsample_factor: int,
multiplier: float = 0.25,
) -> None:
super().__init__()
self.mask_head = nn.Sequential(
Conv2dNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3),
# https://arxiv.org/pdf/2003.12039.pdf (Annex section B) for the
# following convolution output size
nn.Conv2d(hidden_size, upsample_factor**2 * 9, 1, padding=0),
)
self.multiplier = multiplier
def forward(self, x: Tensor) -> Tensor:
x = self.mask_head(x) * self.multiplier
return x
def get_correlation(
left_feature: Tensor,
right_feature: Tensor,
window_size: Tuple[int, int] = (3, 3),
dilate: Tuple[int, int] = (1, 1),
) -> Tensor:
"""Function that computes a correlation product between the left and right features.
The correlation is computed in a sliding window fashion, namely the left features are fixed
and for each ``(i, j)`` location we compute the correlation with a sliding window anchored in
``(i, j)`` from the right feature map. The sliding window selects pixels obtained in the range of the sliding
window; i.e ``(i - window_size // 2, i + window_size // 2)`` respectively ``(j - window_size // 2, j + window_size // 2)``.
"""
B, C, H, W = left_feature.shape
di_y, di_x = dilate[0], dilate[1]
pad_y, pad_x = window_size[0] // 2 * di_y, window_size[1] // 2 * di_x
right_padded = F.pad(right_feature, (pad_x, pad_x, pad_y, pad_y), mode="replicate")
# in order to vectorize the correlation computation over all pixel candidates
# we create multiple shifted right images which we stack on an extra dimension
right_padded = F.unfold(right_padded, kernel_size=(H, W), dilation=dilate)
# torch unfold returns a tensor of shape [B, flattened_values, n_selections]
right_padded = right_padded.permute(0, 2, 1)
    # we reshape back into [B, n_views, C, H, W]
right_padded = right_padded.reshape(B, (window_size[0] * window_size[1]), C, H, W)
# we expand the left features for broadcasting
left_feature = left_feature.unsqueeze(1)
    # this computes an element-wise product between [B, 1, C, H, W] and [B, n_views, C, H, W]
# to obtain correlations over the pixel candidates we perform a mean on the C dimension
correlation = torch.mean(left_feature * right_padded, dim=2, keepdim=False)
# the final correlation tensor shape will be [B, n_views, H, W]
# where on the i-th position of the n_views dimension we will have
# the correlation value between the left pixel
# and the i-th candidate on the right feature map
return correlation
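# Worked shape example for get_correlation, assuming the defaults window_size=(3, 3) and
# dilate=(1, 1) and features of shape [B=2, C=16, H=32, W=64]:
#   - the right feature map is padded to [2, 16, 34, 66],
#   - F.unfold with kernel_size=(32, 64) yields [2, 16 * 32 * 64, 9], i.e. 9 shifted views,
#   - permute + reshape turn the views into [2, 9, 16, 32, 64],
#   - the mean over the channel dimension gives a correlation volume of shape [2, 9, 32, 64],
#     one value per pixel and per search candidate.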
def _check_window_specs(
search_window_1d: Tuple[int, int] = (1, 9),
search_dilate_1d: Tuple[int, int] = (1, 1),
search_window_2d: Tuple[int, int] = (3, 3),
search_dilate_2d: Tuple[int, int] = (1, 1),
) -> None:
if not np.prod(search_window_1d) == np.prod(search_window_2d):
raise ValueError(
f"The 1D and 2D windows should contain the same number of elements. "
f"1D shape: {search_window_1d} 2D shape: {search_window_2d}"
)
if not np.prod(search_window_1d) % 2 == 1:
raise ValueError(
f"Search windows should contain an odd number of elements in them."
f"Window of shape {search_window_1d} has {np.prod(search_window_1d)} elements."
)
if not any(size == 1 for size in search_window_1d):
raise ValueError(f"The 1D search window should have at least one size equal to 1. 1D shape: {search_window_1d}")
if any(size == 1 for size in search_window_2d):
raise ValueError(
f"The 2D search window should have all dimensions greater than 1. 2D shape: {search_window_2d}"
)
if any(dilate < 1 for dilate in search_dilate_1d):
raise ValueError(
f"The 1D search dilation should have all elements equal or greater than 1. 1D shape: {search_dilate_1d}"
)
if any(dilate < 1 for dilate in search_dilate_2d):
raise ValueError(
f"The 2D search dilation should have all elements equal greater than 1. 2D shape: {search_dilate_2d}"
)
class IterativeCorrelationLayer(nn.Module):
def __init__(
self,
groups: int = 4,
search_window_1d: Tuple[int, int] = (1, 9),
search_dilate_1d: Tuple[int, int] = (1, 1),
search_window_2d: Tuple[int, int] = (3, 3),
search_dilate_2d: Tuple[int, int] = (1, 1),
) -> None:
super().__init__()
_check_window_specs(
search_window_1d=search_window_1d,
search_dilate_1d=search_dilate_1d,
search_window_2d=search_window_2d,
search_dilate_2d=search_dilate_2d,
)
self.search_pixels = np.prod(search_window_1d)
self.groups = groups
        # two selection tables keyed by the ``window_type`` argument of the forward function
self.patch_sizes = {
"2d": [search_window_2d for _ in range(self.groups)],
"1d": [search_window_1d for _ in range(self.groups)],
}
self.dilate_sizes = {
"2d": [search_dilate_2d for _ in range(self.groups)],
"1d": [search_dilate_1d for _ in range(self.groups)],
}
def forward(self, left_feature: Tensor, right_feature: Tensor, flow: Tensor, window_type: str = "1d") -> Tensor:
"""Function that computes 1 pass of non-offsetted Group-Wise correlation"""
coords = make_coords_grid(
left_feature.shape[0], left_feature.shape[2], left_feature.shape[3], device=str(left_feature.device)
)
# we offset the coordinate grid in the flow direction
coords = coords + flow
coords = coords.permute(0, 2, 3, 1)
        # resample the right features according to the offset grid
right_feature = grid_sample(right_feature, coords, mode="bilinear", align_corners=True)
        # ``window_type`` controls along how many axes the candidate search is performed.
        # See section 3.1 ``Deformable search window`` & Figure 4 in the paper.
patch_size_list = self.patch_sizes[window_type]
dilate_size_list = self.dilate_sizes[window_type]
# chunking the left and right feature to perform group-wise correlation
# mechanism similar to GroupNorm. See section 3.1 ``Group-wise correlation``.
left_groups = torch.chunk(left_feature, self.groups, dim=1)
right_groups = torch.chunk(right_feature, self.groups, dim=1)
correlations = []
# this boils down to: rather than performing the correlation product
# over the entire C dimension, we use subsets of C to get multiple correlation sets
for i in range(len(patch_size_list)):
correlation = get_correlation(left_groups[i], right_groups[i], patch_size_list[i], dilate_size_list[i])
correlations.append(correlation)
final_correlations = torch.cat(correlations, dim=1)
return final_correlations
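# Hedged usage sketch (editor's addition, illustrative names): with groups=4 and the default
# (1, 9) window, each group contributes 9 correlation channels, so
#   layer = IterativeCorrelationLayer(groups=4)
#   corr = layer(left, right, flow, window_type="1d")  # left/right: [B, C, H, W], flow: [B, 2, H, W]
# yields corr of shape [B, 4 * 9, H, W] = [B, 36, H, W].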
class AttentionOffsetCorrelationLayer(nn.Module):
def __init__(
self,
groups: int = 4,
attention_module: Optional[nn.Module] = None,
search_window_1d: Tuple[int, int] = (1, 9),
search_dilate_1d: Tuple[int, int] = (1, 1),
search_window_2d: Tuple[int, int] = (3, 3),
search_dilate_2d: Tuple[int, int] = (1, 1),
) -> None:
super().__init__()
_check_window_specs(
search_window_1d=search_window_1d,
search_dilate_1d=search_dilate_1d,
search_window_2d=search_window_2d,
search_dilate_2d=search_dilate_2d,
)
# convert to python scalar
self.search_pixels = int(np.prod(search_window_1d))
self.groups = groups
# two selection tables for dealing with the small_patch argument in the forward function
self.patch_sizes = {
"2d": [search_window_2d for _ in range(self.groups)],
"1d": [search_window_1d for _ in range(self.groups)],
}
self.dilate_sizes = {
"2d": [search_dilate_2d for _ in range(self.groups)],
"1d": [search_dilate_1d for _ in range(self.groups)],
}
self.attention_module = attention_module
def forward(
self,
left_feature: Tensor,
right_feature: Tensor,
flow: Tensor,
extra_offset: Tensor,
window_type: str = "1d",
) -> Tensor:
"""Function that computes 1 pass of offsetted Group-Wise correlation
If the class was provided with an attention layer, the left and right feature maps
will be passed through a transformer first
"""
B, C, H, W = left_feature.shape
if self.attention_module is not None:
# prepare for transformer required input shapes
left_feature = left_feature.permute(0, 2, 3, 1).reshape(B, H * W, C)
right_feature = right_feature.permute(0, 2, 3, 1).reshape(B, H * W, C)
# this can be either self attention or cross attention, hence the tuple return
left_feature, right_feature = self.attention_module(left_feature, right_feature)
left_feature = left_feature.reshape(B, H, W, C).permute(0, 3, 1, 2)
right_feature = right_feature.reshape(B, H, W, C).permute(0, 3, 1, 2)
left_groups = torch.chunk(left_feature, self.groups, dim=1)
right_groups = torch.chunk(right_feature, self.groups, dim=1)
num_search_candidates = self.search_pixels
# for each pixel (i, j) we have a number of search candidates
# thus, for each candidate we should have an X-axis and Y-axis offset value
extra_offset = extra_offset.reshape(B, num_search_candidates, 2, H, W).permute(0, 1, 3, 4, 2)
patch_size_list = self.patch_sizes[window_type]
dilate_size_list = self.dilate_sizes[window_type]
group_channels = C // self.groups
correlations = []
for i in range(len(patch_size_list)):
left_group, right_group = left_groups[i], right_groups[i]
patch_size, dilate = patch_size_list[i], dilate_size_list[i]
di_y, di_x = dilate
ps_y, ps_x = patch_size
# define the search based on the window patch shape
ry, rx = ps_y // 2 * di_y, ps_x // 2 * di_x
# base offsets for search (i.e. where to look on the search index)
x_grid, y_grid = torch.meshgrid(
torch.arange(-rx, rx + 1, di_x), torch.arange(-ry, ry + 1, di_y), indexing="xy"
)
x_grid, y_grid = x_grid.to(flow.device), y_grid.to(flow.device)
offsets = torch.stack((x_grid, y_grid))
offsets = offsets.reshape(2, -1).permute(1, 0)
for d in (0, 2, 3):
offsets = offsets.unsqueeze(d)
# extra offsets for search (i.e. deformed search indexes. Similar concept to deformable convolutions)
offsets = offsets + extra_offset
coords = (
make_coords_grid(
left_feature.shape[0], left_feature.shape[2], left_feature.shape[3], device=str(left_feature.device)
)
+ flow
)
coords = coords.permute(0, 2, 3, 1).unsqueeze(1)
coords = coords + offsets
coords = coords.reshape(B, -1, W, 2)
right_group = grid_sample(right_group, coords, mode="bilinear", align_corners=True)
# we do not need to perform any window shifting because the grid sample op
# will return a multi-view right based on the num_search_candidates dimension in the offsets
right_group = right_group.reshape(B, group_channels, -1, H, W)
left_group = left_group.reshape(B, group_channels, -1, H, W)
correlation = torch.mean(left_group * right_group, dim=1)
correlations.append(correlation)
final_correlation = torch.cat(correlations, dim=1)
return final_correlation
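# Hedged shape note (editor's addition): with a (1, 9) search window there are 9 candidates
# per pixel, so ``extra_offset`` is expected as [B, 9 * 2, H, W] (an (x, y) offset per
# candidate) and the returned correlation again has groups * 9 channels.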
class AdaptiveGroupCorrelationLayer(nn.Module):
"""
Container for computing various correlation types between a left and right feature map.
This module does not contain any optimisable parameters; it is solely a collection of ops.
We wrap it in an nn.Module for torch.jit.script compatibility.
Adaptive Group Correlation operations from: https://openaccess.thecvf.com/content/CVPR2022/papers/Li_Practical_Stereo_Matching_via_Cascaded_Recurrent_Network_With_Adaptive_Correlation_CVPR_2022_paper.pdf
Canonical reference implementation: https://github.com/megvii-research/CREStereo/blob/master/nets/corr.py
"""
def __init__(
self,
iterative_correlation_layer: IterativeCorrelationLayer,
attention_offset_correlation_layer: AttentionOffsetCorrelationLayer,
) -> None:
super().__init__()
self.iterative_correlation_layer = iterative_correlation_layer
self.attention_offset_correlation_layer = attention_offset_correlation_layer
def forward(
self,
left_features: Tensor,
right_features: Tensor,
flow: torch.Tensor,
extra_offset: Optional[Tensor],
window_type: str = "1d",
iter_mode: bool = False,
) -> Tensor:
if iter_mode or extra_offset is None:
corr = self.iterative_correlation_layer(left_features, right_features, flow, window_type)
else:
corr = self.attention_offset_correlation_layer(
left_features, right_features, flow, extra_offset, window_type
) # type: ignore
return corr
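# Hedged dispatch sketch (editor's addition, illustrative variable names):
#   agcl = AdaptiveGroupCorrelationLayer(iterative_layer, attention_offset_layer)
#   corr = agcl(left, right, flow, extra_offset=None, window_type="1d", iter_mode=True)
# iter_mode=True (or extra_offset=None) routes through the plain iterative correlation,
# otherwise the attention/offset variant is used.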
def elu_feature_map(x: Tensor) -> Tensor:
"""Elu feature map operation from: https://arxiv.org/pdf/2006.16236.pdf"""
return F.elu(x) + 1
class LinearAttention(nn.Module):
"""
Linear attention operation from: https://arxiv.org/pdf/2006.16236.pdf
Canonical implementation reference: https://github.com/idiap/fast-transformers/blob/master/fast_transformers/attention/linear_attention.py
LoFTR implementation reference: https://github.com/zju3dv/LoFTR/blob/2122156015b61fbb650e28b58a958e4d632b1058/src/loftr/loftr_module/linear_attention.py
"""
def __init__(self, eps: float = 1e-6, feature_map_fn: Callable[[Tensor], Tensor] = elu_feature_map) -> None:
super().__init__()
self.eps = eps
self.feature_map_fn = feature_map_fn
def forward(
self,
queries: Tensor,
keys: Tensor,
values: Tensor,
q_mask: Optional[Tensor] = None,
kv_mask: Optional[Tensor] = None,
) -> Tensor:
"""
Args:
queries (torch.Tensor): [N, S1, H, D]
keys (torch.Tensor): [N, S2, H, D]
values (torch.Tensor): [N, S2, H, D]
q_mask (torch.Tensor): [N, S1] (optional)
kv_mask (torch.Tensor): [N, S2] (optional)
Returns:
queried_values (torch.Tensor): [N, S1, H, D]
"""
queries = self.feature_map_fn(queries)
keys = self.feature_map_fn(keys)
if q_mask is not None:
queries = queries * q_mask[:, :, None, None]
if kv_mask is not None:
keys = keys * kv_mask[:, :, None, None]
values = values * kv_mask[:, :, None, None]
# mitigates fp16 overflows
values_length = values.shape[1]
values = values / values_length
kv = torch.einsum("NSHD, NSHV -> NHDV", keys, values)
z = 1 / (torch.einsum("NLHD, NHD -> NLH", queries, keys.sum(dim=1)) + self.eps)
# rescale at the end to account for fp16 mitigation
queried_values = torch.einsum("NLHD, NHDV, NLH -> NLHV", queries, kv, z) * values_length
return queried_values
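# Hedged usage sketch (editor's addition, illustrative shapes):
#   attn = LinearAttention()
#   q = torch.rand(2, 100, 8, 32)  # [N, S1, H, D]
#   k = torch.rand(2, 120, 8, 32)  # [N, S2, H, D]
#   v = torch.rand(2, 120, 8, 32)  # [N, S2, H, D]
#   out = attn(q, k, v)            # [N, S1, H, D] -> (2, 100, 8, 32)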
class SoftmaxAttention(nn.Module):
"""
A simple softmax attention operation
LoFTR implementation reference: https://github.com/zju3dv/LoFTR/blob/2122156015b61fbb650e28b58a958e4d632b1058/src/loftr/loftr_module/linear_attention.py
"""
def __init__(self, dropout: float = 0.0) -> None:
super().__init__()
self.dropout = nn.Dropout(dropout) if dropout else nn.Identity()
def forward(
self,
queries: Tensor,
keys: Tensor,
values: Tensor,
q_mask: Optional[Tensor] = None,
kv_mask: Optional[Tensor] = None,
) -> Tensor:
"""
Computes classical softmax full-attention between all queries and keys.
Args:
queries (torch.Tensor): [N, S1, H, D]
keys (torch.Tensor): [N, S2, H, D]
values (torch.Tensor): [N, S2, H, D]
q_mask (torch.Tensor): [N, S1] (optional)
kv_mask (torch.Tensor): [N, S2] (optional)
Returns:
queried_values: [N, S1, H, D]
"""
scale_factor = 1.0 / queries.shape[3] ** 0.5  # 1 / sqrt(D) scaling
queries = queries * scale_factor
qk = torch.einsum("NLHD, NSHD -> NLSH", queries, keys)
if kv_mask is not None and q_mask is not None:
qk.masked_fill_(~(q_mask[:, :, None, None] * kv_mask[:, None, :, None]), float("-inf"))
attention = torch.softmax(qk, dim=2)
attention = self.dropout(attention)
queried_values = torch.einsum("NLSH, NSHD -> NLHD", attention, values)
return queried_values
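# Hedged note (editor's addition): the call signature mirrors LinearAttention, e.g.
#   out = SoftmaxAttention(dropout=0.1)(q, k, v)  # [N, S1, H, D]
# but the cost is quadratic in sequence length since the full [N, S1, S2, H] score
# tensor is materialised.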
class PositionalEncodingSine(nn.Module):
"""
Sinusoidal positional encodings
Using the scaling term from https://github.com/megvii-research/CREStereo/blob/master/nets/attention/position_encoding.py
Reference implementation from https://github.com/facebookresearch/detr/blob/8a144f83a287f4d3fece4acdf073f387c5af387d/models/position_encoding.py#L28-L48
"""
def __init__(self, dim_model: int, max_size: int = 256) -> None:
super().__init__()
self.dim_model = dim_model
self.max_size = max_size
# pre-registered for memory efficiency during forward pass
pe = self._make_pe_of_size(self.max_size)
self.register_buffer("pe", pe)
def _make_pe_of_size(self, size: int) -> Tensor:
pe = torch.zeros((self.dim_model, *(size, size)), dtype=torch.float32)
y_positions = torch.ones((size, size)).cumsum(0).float().unsqueeze(0)
x_positions = torch.ones((size, size)).cumsum(1).float().unsqueeze(0)
div_term = torch.exp(torch.arange(0.0, self.dim_model // 2, 2) * (-math.log(10000.0) / self.dim_model // 2))
div_term = div_term[:, None, None]
pe[0::4, :, :] = torch.sin(x_positions * div_term)
pe[1::4, :, :] = torch.cos(x_positions * div_term)
pe[2::4, :, :] = torch.sin(y_positions * div_term)
pe[3::4, :, :] = torch.cos(y_positions * div_term)
pe = pe.unsqueeze(0)
return pe
def forward(self, x: Tensor) -> Tensor:
"""
Args:
x: [B, C, H, W]
Returns:
x: [B, C, H, W]
"""
torch._assert(
len(x.shape) == 4,
f"PositionalEncodingSine requires a 4-D dimensional input. Provided tensor is of shape {x.shape}",
)
B, C, H, W = x.shape
return x + self.pe[:, :, :H, :W] # type: ignore
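# Hedged usage sketch (editor's addition): the pre-computed buffer covers inputs up to
# ``max_size`` per spatial dimension; larger inputs would index past the buffer.
#   pos_enc = PositionalEncodingSine(dim_model=256, max_size=256)
#   y = pos_enc(torch.rand(1, 256, 32, 64))  # y.shape == (1, 256, 32, 64)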
class LocalFeatureEncoderLayer(nn.Module):
"""
LoFTR transformer module from: https://arxiv.org/pdf/2104.00680.pdf
Canonical implementations at: https://github.com/zju3dv/LoFTR/blob/master/src/loftr/loftr_module/transformer.py
"""
def __init__(
self,
*,
dim_model: int,
num_heads: int,
attention_module: Callable[..., nn.Module] = LinearAttention,
) -> None:
super().__init__()
self.attention_op = attention_module()
if not isinstance(self.attention_op, (LinearAttention, SoftmaxAttention)):
raise ValueError(
f"attention_module must be an instance of LinearAttention or SoftmaxAttention. Got {type(self.attention_op)}"
)
self.dim_head = dim_model // num_heads
self.num_heads = num_heads
# multi-head attention
self.query_proj = nn.Linear(dim_model, dim_model, bias=False)
self.key_proj = nn.Linear(dim_model, dim_model, bias=False)
self.value_proj = nn.Linear(dim_model, dim_model, bias=False)
self.merge = nn.Linear(dim_model, dim_model, bias=False)
# feed forward network
self.ffn = nn.Sequential(
nn.Linear(dim_model * 2, dim_model * 2, bias=False),
nn.ReLU(),
nn.Linear(dim_model * 2, dim_model, bias=False),
)
# norm layers
self.attention_norm = nn.LayerNorm(dim_model)
self.ffn_norm = nn.LayerNorm(dim_model)
def forward(
self, x: Tensor, source: Tensor, x_mask: Optional[Tensor] = None, source_mask: Optional[Tensor] = None
) -> Tensor:
"""
Args:
x (torch.Tensor): [B, S1, D]
source (torch.Tensor): [B, S2, D]
x_mask (torch.Tensor): [B, S1] (optional)
source_mask (torch.Tensor): [B, S2] (optional)
"""
B, S, D = x.shape
queries, keys, values = x, source, source
queries = self.query_proj(queries).reshape(B, S, self.num_heads, self.dim_head)
keys = self.key_proj(keys).reshape(B, S, self.num_heads, self.dim_head)
values = self.value_proj(values).reshape(B, S, self.num_heads, self.dim_head)
# attention operation
message = self.attention_op(queries, keys, values, x_mask, source_mask)
# concatenating attention heads together before passing through projection layer
message = self.merge(message.reshape(B, S, D))
message = self.attention_norm(message)
# ffn operation
message = self.ffn(torch.cat([x, message], dim=2))
message = self.ffn_norm(message)
return x + message
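# Hedged usage sketch (editor's addition): self attention corresponds to layer(x, x),
# cross attention to layer(x, source); note that x and source are reshaped with the same
# sequence length, so matching S is assumed.
#   enc = LocalFeatureEncoderLayer(dim_model=256, num_heads=8)
#   out = enc(torch.rand(2, 100, 256), torch.rand(2, 100, 256))  # [B, S, D] -> (2, 100, 256)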
class LocalFeatureTransformer(nn.Module):
"""
LoFTR transformer module from: https://arxiv.org/pdf/2104.00680.pdf
Canonical implementations at: https://github.com/zju3dv/LoFTR/blob/master/src/loftr/loftr_module/transformer.py
"""
def __init__(
self,
*,
dim_model: int,
num_heads: int,
attention_directions: List[str],
attention_module: Callable[..., nn.Module] = LinearAttention,
) -> None:
super(LocalFeatureTransformer, self).__init__()
self.attention_module = attention_module
self.attention_directions = attention_directions
for direction in attention_directions:
if direction not in ["self", "cross"]:
raise ValueError(
f"Attention direction {direction} unsupported. LocalFeatureTransformer accepts only ``attention_type`` in ``[self, cross]``."
)
self.layers = nn.ModuleList(
[
LocalFeatureEncoderLayer(dim_model=dim_model, num_heads=num_heads, attention_module=attention_module)
for _ in attention_directions
]
)
def forward(
self,
left_features: Tensor,
right_features: Tensor,
left_mask: Optional[Tensor] = None,
right_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor]:
"""
Args:
left_features (torch.Tensor): [N, S1, D]
right_features (torch.Tensor): [N, S2, D]
left_mask (torch.Tensor): [N, S1] (optional)
right_mask (torch.Tensor): [N, S2] (optional)
Returns:
left_features (torch.Tensor): [N, S1, D]
right_features (torch.Tensor): [N, S2, D]
"""
torch._assert(
left_features.shape[2] == right_features.shape[2],
f"left_features and right_features should have the same embedding dimensions. left_features: {left_features.shape[2]} right_features: {right_features.shape[2]}",
)
for idx, layer in enumerate(self.layers):
attention_direction = self.attention_directions[idx]
if attention_direction == "self":
left_features = layer(left_features, left_features, left_mask, left_mask)
right_features = layer(right_features, right_features, right_mask, right_mask)
elif attention_direction == "cross":
left_features = layer(left_features, right_features, left_mask, right_mask)
right_features = layer(right_features, left_features, right_mask, left_mask)
return left_features, right_features
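# Hedged usage sketch (editor's addition; left_tokens / right_tokens are illustrative names),
# mirroring how CREStereo builds its self-attention block further below:
#   transformer = LocalFeatureTransformer(dim_model=256, num_heads=8, attention_directions=["self"])
#   l_tokens, r_tokens = transformer(left_tokens, right_tokens)  # both [N, S, D]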
class PyramidDownsample(nn.Module):
"""
A simple wrapper that returns an average-pool feature pyramid based on the provided scales.
It implicitly returns the input as well.
"""
def __init__(self, factors: Iterable[int]) -> None:
super().__init__()
self.factors = factors
def forward(self, x: torch.Tensor) -> List[Tensor]:
results = [x]
for factor in self.factors:
results.append(F.avg_pool2d(x, kernel_size=factor, stride=factor))
return results
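# Hedged sketch (editor's addition): factors (2, 4) on a [1, 8, 64, 128] input yield three
# maps of spatial size 64x128, 32x64 and 16x32 (the input itself plus two pooled levels):
#   feats = PyramidDownsample((2, 4))(torch.rand(1, 8, 64, 128))  # len(feats) == 3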
class CREStereo(nn.Module):
"""
Implements CREStereo from the `"Practical Stereo Matching via Cascaded Recurrent Network
With Adaptive Correlation" <https://openaccess.thecvf.com/content/CVPR2022/papers/Li_Practical_Stereo_Matching_via_Cascaded_Recurrent_Network_With_Adaptive_Correlation_CVPR_2022_paper.pdf>`_ paper.
Args:
feature_encoder (raft.FeatureEncoder): Raft-like Feature Encoder module that extracts low-level features from the inputs.
update_block (raft.UpdateBlock): Raft-like Update Block which recursively refines a flow-map.
flow_head (raft.FlowHead): Raft-like Flow Head which predicts a flow-map from some inputs.
self_attn_block (LocalFeatureTransformer): A Local Feature Transformer that performs self attention on the two feature maps.
cross_attn_block (LocalFeatureTransformer): A Local Feature Transformer that performs cross attention between the two feature maps
used in the Adaptive Group Correlation module.
feature_downsample_rates (List[int]): The downsample rates used to build a feature pyramid from the outputs of the `feature_encoder`. Default: [2, 4]
correlation_groups (int): The number of groups the features are split into when computing per-pixel correlation. Default: 4.
search_window_1d (Tuple[int, int]): The alternate search window size in the x and y directions for the 1D case. Defaults to (1, 9).
search_dilate_1d (Tuple[int, int]): The dilation used in the `search_window_1d` when selecting pixels. Similar to `nn.Conv2d` dilate. Defaults to (1, 1).
search_window_2d (Tuple[int, int]): The alternate search window size in the x and y directions for the 2D case. Defaults to (3, 3).
search_dilate_2d (Tuple[int, int]): The dilation used in the `search_window_2d` when selecting pixels. Similar to `nn.Conv2d` dilate. Defaults to (1, 1).
"""
def __init__(
self,
*,
feature_encoder: raft.FeatureEncoder,
update_block: raft.UpdateBlock,
flow_head: raft.FlowHead,
self_attn_block: LocalFeatureTransformer,
cross_attn_block: LocalFeatureTransformer,
feature_downsample_rates: Tuple[int, ...] = (2, 4),
correlation_groups: int = 4,
search_window_1d: Tuple[int, int] = (1, 9),
search_dilate_1d: Tuple[int, int] = (1, 1),
search_window_2d: Tuple[int, int] = (3, 3),
search_dilate_2d: Tuple[int, int] = (1, 1),
) -> None:
super().__init__()
self.output_channels = 2
self.feature_encoder = feature_encoder
self.update_block = update_block
self.flow_head = flow_head
self.self_attn_block = self_attn_block
# average pooling for the feature encoder outputs
self.downsampling_pyramid = PyramidDownsample(feature_downsample_rates)
self.downsampling_factors: List[int] = [feature_encoder.downsample_factor]
base_downsample_factor: int = self.downsampling_factors[0]
for rate in feature_downsample_rates:
self.downsampling_factors.append(base_downsample_factor * rate)
# output resolution tracking
self.resolutions: List[str] = [f"1 / {factor}" for factor in self.downsampling_factors]
self.search_pixels = int(np.prod(search_window_1d))
# flow convex upsampling mask predictor
self.mask_predictor = ConvexMaskPredictor(
in_channels=feature_encoder.output_dim // 2,
hidden_size=feature_encoder.output_dim,
upsample_factor=feature_encoder.downsample_factor,
multiplier=0.25,
)
# offsets modules for offsetted feature selection
self.offset_convs = nn.ModuleDict()
self.correlation_layers = nn.ModuleDict()
offset_conv_layer = partial(
Conv2dNormActivation,
in_channels=feature_encoder.output_dim,
out_channels=self.search_pixels * 2,
norm_layer=None,
activation_layer=None,
)
# populate the dicts in top to bottom order
# useful for iterating through the ModuleDicts in the same order as the network forward pass under torch.jit.script
#
# Ignore the largest resolution. We handle that separately because torch.jit.script
# is not able to access runtime-generated keys in ModuleDicts.
# This way, we can keep a generic way of processing all pyramid levels except
# the final one
iterative_correlation_layer = partial(
IterativeCorrelationLayer,
groups=correlation_groups,
search_window_1d=search_window_1d,
search_dilate_1d=search_dilate_1d,
search_window_2d=search_window_2d,
search_dilate_2d=search_dilate_2d,
)
attention_offset_correlation_layer = partial(
AttentionOffsetCorrelationLayer,
groups=correlation_groups,
search_window_1d=search_window_1d,
search_dilate_1d=search_dilate_1d,
search_window_2d=search_window_2d,
search_dilate_2d=search_dilate_2d,
)
for idx, resolution in enumerate(reversed(self.resolutions[1:])):
# the largest resolution does not use offset convolutions for sampling grid coords
offset_conv = None if idx == len(self.resolutions) - 1 else offset_conv_layer()
if offset_conv:
self.offset_convs[resolution] = offset_conv
# only the lowest resolution uses the cross attention module when computing correlation scores
attention_module = cross_attn_block if idx == 0 else None
self.correlation_layers[resolution] = AdaptiveGroupCorrelationLayer(
iterative_correlation_layer=iterative_correlation_layer(),
attention_offset_correlation_layer=attention_offset_correlation_layer(
attention_module=attention_module
),
)
# correlation layer for the largest resolution
self.max_res_correlation_layer = AdaptiveGroupCorrelationLayer(
iterative_correlation_layer=iterative_correlation_layer(),
attention_offset_correlation_layer=attention_offset_correlation_layer(),
)
# simple 2D Positional Encodings
self.positional_encodings = PositionalEncodingSine(feature_encoder.output_dim)
def _get_window_type(self, iteration: int) -> str:
return "1d" if iteration % 2 == 0 else "2d"
def forward(
self, left_image: Tensor, right_image: Tensor, flow_init: Optional[Tensor] = None, num_iters: int = 10
) -> List[Tensor]:
features = torch.cat([left_image, right_image], dim=0)
features = self.feature_encoder(features)
left_features, right_features = features.chunk(2, dim=0)
# update block network state and input context are derived from the left feature map
net, ctx = left_features.chunk(2, dim=1)
net = torch.tanh(net)
ctx = torch.relu(ctx)
# these will output lists of tensors
l_pyramid = self.downsampling_pyramid(left_features)
r_pyramid = self.downsampling_pyramid(right_features)
net_pyramid = self.downsampling_pyramid(net)
ctx_pyramid = self.downsampling_pyramid(ctx)
# we store each level keyed by its resolution because we process the pyramid from coarse (top) to fine (bottom)
l_pyramid = {res: l_pyramid[idx] for idx, res in enumerate(self.resolutions)}
r_pyramid = {res: r_pyramid[idx] for idx, res in enumerate(self.resolutions)}
net_pyramid = {res: net_pyramid[idx] for idx, res in enumerate(self.resolutions)}
ctx_pyramid = {res: ctx_pyramid[idx] for idx, res in enumerate(self.resolutions)}
# offsets for sampling pixel candidates in the correlation ops
offsets: Dict[str, Tensor] = {}
for resolution, offset_conv in self.offset_convs.items():
feature_map = l_pyramid[resolution]
offset = offset_conv(feature_map)
offsets[resolution] = (torch.sigmoid(offset) - 0.5) * 2.0
# the smallest resolution is prepared for passing through self attention
min_res = self.resolutions[-1]
max_res = self.resolutions[0]
B, C, MIN_H, MIN_W = l_pyramid[min_res].shape
# add positional encodings
l_pyramid[min_res] = self.positional_encodings(l_pyramid[min_res])
r_pyramid[min_res] = self.positional_encodings(r_pyramid[min_res])
# reshaping for transformer
l_pyramid[min_res] = l_pyramid[min_res].permute(0, 2, 3, 1).reshape(B, MIN_H * MIN_W, C)
r_pyramid[min_res] = r_pyramid[min_res].permute(0, 2, 3, 1).reshape(B, MIN_H * MIN_W, C)
# perform self attention
l_pyramid[min_res], r_pyramid[min_res] = self.self_attn_block(l_pyramid[min_res], r_pyramid[min_res])
# now we need to reshape back into [B, C, H, W] format
l_pyramid[min_res] = l_pyramid[min_res].reshape(B, MIN_H, MIN_W, C).permute(0, 3, 1, 2)
r_pyramid[min_res] = r_pyramid[min_res].reshape(B, MIN_H, MIN_W, C).permute(0, 3, 1, 2)
predictions: List[Tensor] = []
flow_estimates: Dict[str, Tensor] = {}
# we added this because of torch.jit.script
# also, the prediction prior is always going to have the
# spatial size of the features output by the feature encoder
flow_pred_prior: Tensor = torch.empty(
size=(B, 2, left_features.shape[2], left_features.shape[3]),
dtype=l_pyramid[max_res].dtype,
device=l_pyramid[max_res].device,
)
if flow_init is not None:
scale = l_pyramid[max_res].shape[2] / flow_init.shape[2]
# in CREStereo implementation they multiply with -scale instead of scale
# this can be either a downsample or an upsample based on the cascaded inference
# configuration
# we use a -scale because the flow used inside the network is a negative flow
# from the right to the left, so we flip the flow direction
flow_estimates[max_res] = -scale * F.interpolate(
input=flow_init,
size=l_pyramid[max_res].shape[2:],
mode="bilinear",
align_corners=True,
)
# when not provided with a flow prior, we construct one using the lower resolution maps
else:
# initialize a zero flow with the smallest resolution
flow = torch.zeros(size=(B, 2, MIN_H, MIN_W), device=left_features.device, dtype=left_features.dtype)
# flows from coarse resolutions are refined similarly
# we always need to fetch the next pyramid feature map as well
# when updating coarse resolutions, therefore we create a reversed
# view which has its order synced with the ModuleDict keys iterator
coarse_resolutions: List[str] = self.resolutions[::-1] # using slicing because of torch.jit.script
fine_grained_resolution = max_res
# set the coarsest flow to the zero flow
flow_estimates[coarse_resolutions[0]] = flow
# the correlation layer ModuleDict contains layers ordered from coarse to fine resolution,
# but only for the resolutions except the finest one,
# i.e. {"1 / 16": Module, "1 / 8": Module}
# for these resolutions we perform only half of the number of refinement iterations
for idx, (resolution, correlation_layer) in enumerate(self.correlation_layers.items()):
# compute the scale difference between the first pyramid scale and the current pyramid scale
scale_to_base = l_pyramid[fine_grained_resolution].shape[2] // l_pyramid[resolution].shape[2]
for it in range(num_iters // 2):
# set whether we want to search on (X, Y) axes for correlation or just on X axis
window_type = self._get_window_type(it)
# we consider this a prior, therefore we do not want to back-propagate through it
flow_estimates[resolution] = flow_estimates[resolution].detach()
correlations = correlation_layer(
l_pyramid[resolution], # left
r_pyramid[resolution], # right
flow_estimates[resolution],
offsets[resolution],
window_type,
)
# update the recurrent network state and the flow deltas
net_pyramid[resolution], delta_flow = self.update_block(
net_pyramid[resolution], ctx_pyramid[resolution], correlations, flow_estimates[resolution]
)
# the convex upsampling weights are computed w.r.t.
# the recurrent update state
up_mask = self.mask_predictor(net_pyramid[resolution])
flow_estimates[resolution] = flow_estimates[resolution] + delta_flow
# convex upsampling with the initial feature encoder downsampling rate
flow_pred_prior = upsample_flow(
flow_estimates[resolution], up_mask, factor=self.downsampling_factors[0]
)
# we then bilinear upsample to the final resolution
# we use a factor that's equivalent to the difference between
# the current downsample resolution and the base downsample resolution
#
# i.e. if a 1 / 16 flow is upsampled by 4 (base downsampling) we get a 1 / 4 flow.
# therefore we have to further upscale it by the difference between
# the current level 1 / 16 and the base level 1 / 4.
#
# we use a -scale because the flow used inside the network is a negative flow
# from the right to the left, so we flip the flow direction in order to get the
# left to right flow
flow_pred = -upsample_flow(flow_pred_prior, None, factor=scale_to_base)
predictions.append(flow_pred)
# when constructing the next resolution prior, we resample w.r.t.
# the scale of the next level in the pyramid
next_resolution = coarse_resolutions[idx + 1]
scale_to_next = l_pyramid[next_resolution].shape[2] / flow_pred_prior.shape[2]
# we use flow_pred_prior because this is a more accurate estimation of the true flow
# due to the convex upsample, which resembles a learned super-resolution module.
# this is not necessarily an upsample, it can be a downsample, based on the provided configuration
flow_estimates[next_resolution] = -scale_to_next * F.interpolate(
input=flow_pred_prior,
size=l_pyramid[next_resolution].shape[2:],
mode="bilinear",
align_corners=True,
)
# finally we will be doing a full pass through the fine-grained resolution
# this coincides with the maximum resolution
# we keep a separate loop here in order to avoid python control flow
# when deciding how many iterations we should do based on the current resolution
# furthermore, if provided with an initial flow, there is no need to generate
# a prior estimate when moving into the final refinement stage
for it in range(num_iters):
search_window_type = self._get_window_type(it)
flow_estimates[max_res] = flow_estimates[max_res].detach()
# we run the fine-grained resolution correlations in iterative mode
# this means that we are using the fixed window pixel selections
# instead of the deformed ones as with the previous steps
correlations = self.max_res_correlation_layer(
l_pyramid[max_res],
r_pyramid[max_res],
flow_estimates[max_res],
extra_offset=None,
window_type=search_window_type,
iter_mode=True,
)
net_pyramid[max_res], delta_flow = self.update_block(
net_pyramid[max_res], ctx_pyramid[max_res], correlations, flow_estimates[max_res]
)
up_mask = self.mask_predictor(net_pyramid[max_res])
flow_estimates[max_res] = flow_estimates[max_res] + delta_flow
# at the final resolution we simply do a convex upsample using the base downsample rate
flow_pred = -upsample_flow(flow_estimates[max_res], up_mask, factor=self.downsampling_factors[0])
predictions.append(flow_pred)
return predictions
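# Hedged note on the returned list (editor's addition): with the default pyramid (two coarse
# levels) and flow_init=None, the forward pass appends num_iters // 2 predictions per coarse
# level plus num_iters at full resolution, e.g. 2 * 5 + 10 = 20 tensors for num_iters=10;
# with a provided flow_init only the final num_iters predictions are produced.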
def _crestereo(
*,
weights: Optional[WeightsEnum],
progress: bool,
# Feature Encoder
feature_encoder_layers: Tuple[int, int, int, int, int],
feature_encoder_strides: Tuple[int, int, int, int],
feature_encoder_block: Callable[..., nn.Module],
feature_encoder_norm_layer: Callable[..., nn.Module],
# Average Pooling Pyramid
feature_downsample_rates: Tuple[int, ...],
# Adaptive Correlation Layer
corr_groups: int,
corr_search_window_2d: Tuple[int, int],
corr_search_dilate_2d: Tuple[int, int],
corr_search_window_1d: Tuple[int, int],
corr_search_dilate_1d: Tuple[int, int],
# Flow head
flow_head_hidden_size: int,
# Recurrent block
recurrent_block_hidden_state_size: int,
recurrent_block_kernel_size: Tuple[Tuple[int, int], Tuple[int, int]],
recurrent_block_padding: Tuple[Tuple[int, int], Tuple[int, int]],
# Motion Encoder
motion_encoder_corr_layers: Tuple[int, int],
motion_encoder_flow_layers: Tuple[int, int],
motion_encoder_out_channels: int,
# Transformer Blocks
num_attention_heads: int,
num_self_attention_layers: int,
num_cross_attention_layers: int,
self_attention_module: Callable[..., nn.Module],
cross_attention_module: Callable[..., nn.Module],
**kwargs,
) -> CREStereo:
feature_encoder = kwargs.pop("feature_encoder", None) or raft.FeatureEncoder(
block=feature_encoder_block,
layers=feature_encoder_layers,
strides=feature_encoder_strides,
norm_layer=feature_encoder_norm_layer,
)
if feature_encoder.output_dim % corr_groups != 0:
raise ValueError(
f"Final ``feature_encoder_layers`` size should be divisible by ``corr_groups`` argument."
f"Feature encoder output size : {feature_encoder.output_dim}, Correlation groups: {corr_groups}."
)
motion_encoder = kwargs.pop("motion_encoder", None) or raft.MotionEncoder(
in_channels_corr=corr_groups * int(np.prod(corr_search_window_1d)),
corr_layers=motion_encoder_corr_layers,
flow_layers=motion_encoder_flow_layers,
out_channels=motion_encoder_out_channels,
)
out_channels_context = feature_encoder_layers[-1] - recurrent_block_hidden_state_size
recurrent_block = kwargs.pop("recurrent_block", None) or raft.RecurrentBlock(
input_size=motion_encoder.out_channels + out_channels_context,
hidden_size=recurrent_block_hidden_state_size,
kernel_size=recurrent_block_kernel_size,
padding=recurrent_block_padding,
)
flow_head = kwargs.pop("flow_head", None) or raft.FlowHead(
in_channels=out_channels_context, hidden_size=flow_head_hidden_size
)
update_block = raft.UpdateBlock(motion_encoder=motion_encoder, recurrent_block=recurrent_block, flow_head=flow_head)
self_attention_module = kwargs.pop("self_attention_module", None) or LinearAttention
self_attn_block = LocalFeatureTransformer(
dim_model=feature_encoder.output_dim,
num_heads=num_attention_heads,
attention_directions=["self"] * num_self_attention_layers,
attention_module=self_attention_module,
)
cross_attention_module = kwargs.pop("cross_attention_module", None) or LinearAttention
cross_attn_block = LocalFeatureTransformer(
dim_model=feature_encoder.output_dim,
num_heads=num_attention_heads,
attention_directions=["cross"] * num_cross_attention_layers,
attention_module=cross_attention_module,
)
model = CREStereo(
feature_encoder=feature_encoder,
update_block=update_block,
flow_head=flow_head,
self_attn_block=self_attn_block,
cross_attn_block=cross_attn_block,
feature_downsample_rates=feature_downsample_rates,
correlation_groups=corr_groups,
search_window_1d=corr_search_window_1d,
search_window_2d=corr_search_window_2d,
search_dilate_1d=corr_search_dilate_1d,
search_dilate_2d=corr_search_dilate_2d,
)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
_COMMON_META = {
"resize_size": (384, 512),
}
class CREStereo_Base_Weights(WeightsEnum):
"""The metrics reported here are as follows.
``mae`` is the "mean-average-error" and indicates how far (in pixels) the
predicted disparity is from its true value (equivalent to ``epe``). This is averaged over all pixels
of all images. ``1px``, ``3px`` and ``5px`` indicate the percentage of pixels whose error
w.r.t. the ground truth is lower than the given threshold. ``relepe`` is the "relative-end-point-error" and is the
average ``epe`` divided by the average ground truth disparity. ``fl-all`` corresponds to the percentage of pixels whose epe
is either <3px, or whose ``relepe`` is lower than 0.05 (therefore higher is better).
"""
MEGVII_V1 = Weights(
# Weights ported from https://github.com/megvii-research/CREStereo
url="https://download.pytorch.org/models/crestereo-756c8b0f.pth",
transforms=StereoMatching,
meta={
**_COMMON_META,
"num_params": 5432948,
"recipe": "https://github.com/megvii-research/CREStereo",
"_metrics": {
"Middlebury2014-train": {
# metrics for 10 refinement iterations and 1 cascade
"mae": 0.792,
"rmse": 2.765,
"1px": 0.905,
"3px": 0.958,
"5px": 0.97,
"relepe": 0.114,
"fl-all": 90.429,
"_detailed": {
# 1 is the number of cascades
1: {
# 2 is the number of refinement iterations
2: {
"mae": 1.704,
"rmse": 3.738,
"1px": 0.738,
"3px": 0.896,
"5px": 0.933,
"relepe": 0.157,
"fl-all": 76.464,
},
5: {
"mae": 0.956,
"rmse": 2.963,
"1px": 0.88,
"3px": 0.948,
"5px": 0.965,
"relepe": 0.124,
"fl-all": 88.186,
},
10: {
"mae": 0.792,
"rmse": 2.765,
"1px": 0.905,
"3px": 0.958,
"5px": 0.97,
"relepe": 0.114,
"fl-all": 90.429,
},
20: {
"mae": 0.749,
"rmse": 2.706,
"1px": 0.907,
"3px": 0.961,
"5px": 0.972,
"relepe": 0.113,
"fl-all": 90.807,
},
},
2: {
2: {
"mae": 1.702,
"rmse": 3.784,
"1px": 0.784,
"3px": 0.894,
"5px": 0.924,
"relepe": 0.172,
"fl-all": 80.313,
},
5: {
"mae": 0.932,
"rmse": 2.907,
"1px": 0.877,
"3px": 0.944,
"5px": 0.963,
"relepe": 0.125,
"fl-all": 87.979,
},
10: {
"mae": 0.773,
"rmse": 2.768,
"1px": 0.901,
"3px": 0.958,
"5px": 0.972,
"relepe": 0.117,
"fl-all": 90.43,
},
20: {
"mae": 0.854,
"rmse": 2.971,
"1px": 0.9,
"3px": 0.957,
"5px": 0.97,
"relepe": 0.122,
"fl-all": 90.269,
},
},
},
}
},
"_docs": """These weights were ported from the original paper. They
are trained on a dataset mixture of the author's choice.""",
},
)
CRESTEREO_ETH_MBL_V1 = Weights(
# Weights ported from https://github.com/megvii-research/CREStereo
url="https://download.pytorch.org/models/crestereo-8f0e0e9a.pth",
transforms=StereoMatching,
meta={
**_COMMON_META,
"num_params": 5432948,
"recipe": "https://github.com/pytorch/vision/tree/main/references/depth/stereo",
"_metrics": {
"Middlebury2014-train": {
# metrics for 10 refinement iterations and 1 cascade
"mae": 1.416,
"rmse": 3.53,
"1px": 0.777,
"3px": 0.896,
"5px": 0.933,
"relepe": 0.148,
"fl-all": 78.388,
"_detailed": {
# 1 is the number of cascades
1: {
# 2 is the number of refinement iterations
2: {
"mae": 2.363,
"rmse": 4.352,
"1px": 0.611,
"3px": 0.828,
"5px": 0.891,
"relepe": 0.176,
"fl-all": 64.511,
},
5: {
"mae": 1.618,
"rmse": 3.71,
"1px": 0.761,
"3px": 0.879,
"5px": 0.918,
"relepe": 0.154,
"fl-all": 77.128,
},
10: {
"mae": 1.416,
"rmse": 3.53,
"1px": 0.777,
"3px": 0.896,
"5px": 0.933,
"relepe": 0.148,
"fl-all": 78.388,
},
20: {
"mae": 1.448,
"rmse": 3.583,
"1px": 0.771,
"3px": 0.893,
"5px": 0.931,
"relepe": 0.145,
"fl-all": 77.7,
},
},
2: {
2: {
"mae": 1.972,
"rmse": 4.125,
"1px": 0.73,
"3px": 0.865,
"5px": 0.908,
"relepe": 0.169,
"fl-all": 74.396,
},
5: {
"mae": 1.403,
"rmse": 3.448,
"1px": 0.793,
"3px": 0.905,
"5px": 0.937,
"relepe": 0.151,
"fl-all": 80.186,
},
10: {
"mae": 1.312,
"rmse": 3.368,
"1px": 0.799,
"3px": 0.912,
"5px": 0.943,
"relepe": 0.148,
"fl-all": 80.379,
},
20: {
"mae": 1.376,
"rmse": 3.542,
"1px": 0.796,
"3px": 0.91,
"5px": 0.942,
"relepe": 0.149,
"fl-all": 80.054,
},
},
},
}
},
"_docs": """These weights were trained from scratch on
:class:`~torchvision.datasets._stereo_matching.CREStereo` +
:class:`~torchvision.datasets._stereo_matching.Middlebury2014Stereo` +
:class:`~torchvision.datasets._stereo_matching.ETH3DStereo`.""",
},
)
CRESTEREO_FINETUNE_MULTI_V1 = Weights(
# Weights ported from https://github.com/megvii-research/CREStereo
url="https://download.pytorch.org/models/crestereo-697c38f4.pth ",
transforms=StereoMatching,
meta={
**_COMMON_META,
"num_params": 5432948,
"recipe": "https://github.com/pytorch/vision/tree/main/references/depth/stereo",
"_metrics": {
"Middlebury2014-train": {
# metrics for 10 refinement iterations and 1 cascade
"mae": 1.038,
"rmse": 3.108,
"1px": 0.852,
"3px": 0.942,
"5px": 0.963,
"relepe": 0.129,
"fl-all": 85.522,
"_detailed": {
# 1 is the number of cascades
1: {
# 2 is the number of refinement iterations
2: {
"mae": 1.85,
"rmse": 3.797,
"1px": 0.673,
"3px": 0.862,
"5px": 0.917,
"relepe": 0.171,
"fl-all": 69.736,
},
5: {
"mae": 1.111,
"rmse": 3.166,
"1px": 0.838,
"3px": 0.93,
"5px": 0.957,
"relepe": 0.134,
"fl-all": 84.596,
},
10: {
"mae": 1.02,
"rmse": 3.073,
"1px": 0.854,
"3px": 0.938,
"5px": 0.96,
"relepe": 0.129,
"fl-all": 86.042,
},
20: {
"mae": 0.993,
"rmse": 3.059,
"1px": 0.855,
"3px": 0.942,
"5px": 0.967,
"relepe": 0.126,
"fl-all": 85.784,
},
},
2: {
2: {
"mae": 1.667,
"rmse": 3.867,
"1px": 0.78,
"3px": 0.891,
"5px": 0.922,
"relepe": 0.165,
"fl-all": 78.89,
},
5: {
"mae": 1.158,
"rmse": 3.278,
"1px": 0.843,
"3px": 0.926,
"5px": 0.955,
"relepe": 0.135,
"fl-all": 84.556,
},
10: {
"mae": 1.046,
"rmse": 3.13,
"1px": 0.85,
"3px": 0.934,
"5px": 0.96,
"relepe": 0.13,
"fl-all": 85.464,
},
20: {
"mae": 1.021,
"rmse": 3.102,
"1px": 0.85,
"3px": 0.935,
"5px": 0.963,
"relepe": 0.129,
"fl-all": 85.417,
},
},
},
},
},
"_docs": """These weights were finetuned on a mixture of
:class:`~torchvision.datasets._stereo_matching.CREStereo` +
:class:`~torchvision.datasets._stereo_matching.Middlebury2014Stereo` +
:class:`~torchvision.datasets._stereo_matching.ETH3DStereo` +
:class:`~torchvision.datasets._stereo_matching.InStereo2k` +
:class:`~torchvision.datasets._stereo_matching.CarlaStereo` +
:class:`~torchvision.datasets._stereo_matching.SintelStereo` +
:class:`~torchvision.datasets._stereo_matching.FallingThingsStereo` +
.""",
},
)
DEFAULT = MEGVII_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", CREStereo_Base_Weights.MEGVII_V1))
def crestereo_base(*, weights: Optional[CREStereo_Base_Weights] = None, progress=True, **kwargs) -> CREStereo:
"""CREStereo model from
`Practical Stereo Matching via Cascaded Recurrent Network
With Adaptive Correlation <https://openaccess.thecvf.com/content/CVPR2022/papers/Li_Practical_Stereo_Matching_via_Cascaded_Recurrent_Network_With_Adaptive_Correlation_CVPR_2022_paper.pdf>`_.
Please see the example below for a tutorial on how to use this model.
Args:
weights(:class:`~torchvision.prototype.models.depth.stereo.CREStereo_Base_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.prototype.models.depth.stereo.CREStereo_Base_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.prototype.models.depth.stereo.raft_stereo.RaftStereo``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/optical_flow/crestereo.py>`_
for more details about this class.
.. autoclass:: torchvision.prototype.models.depth.stereo.CREStereo_Base_Weights
:members:
"""
weights = CREStereo_Base_Weights.verify(weights)
return _crestereo(
weights=weights,
progress=progress,
# Feature encoder
feature_encoder_layers=(64, 64, 96, 128, 256),
feature_encoder_strides=(2, 1, 2, 1),
feature_encoder_block=partial(raft.ResidualBlock, always_project=True),
feature_encoder_norm_layer=nn.InstanceNorm2d,
# Average pooling pyramid
feature_downsample_rates=(2, 4),
# Motion encoder
motion_encoder_corr_layers=(256, 192),
motion_encoder_flow_layers=(128, 64),
motion_encoder_out_channels=128,
# Recurrent block
recurrent_block_hidden_state_size=128,
recurrent_block_kernel_size=((1, 5), (5, 1)),
recurrent_block_padding=((0, 2), (2, 0)),
# Flow head
flow_head_hidden_size=256,
# Transformer blocks
num_attention_heads=8,
num_self_attention_layers=1,
num_cross_attention_layers=1,
self_attention_module=LinearAttention,
cross_attention_module=LinearAttention,
# Adaptive Correlation layer
corr_groups=4,
corr_search_window_2d=(3, 3),
corr_search_dilate_2d=(1, 1),
corr_search_window_1d=(1, 9),
corr_search_dilate_1d=(1, 1),
)
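# Hedged usage sketch (editor's addition; input size follows the weights' resize_size):
#   model = crestereo_base(weights=CREStereo_Base_Weights.MEGVII_V1).eval()
#   left, right = torch.rand(1, 3, 384, 512), torch.rand(1, 3, 384, 512)
#   with torch.no_grad():
#       disparity = model(left, right, num_iters=10)[-1]  # [1, 2, 384, 512]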
```
|
================================================================================================================================================
SOURCE CODE FILE: raft_stereo.py
LINES: 1
SIZE: 37.12 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\models\depth\stereo\raft_stereo.py
ENCODING: utf-8
```py
from functools import partial
from typing import Callable, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models.optical_flow.raft as raft
from torch import Tensor
from torchvision.models._api import register_model, Weights, WeightsEnum
from torchvision.models._utils import handle_legacy_interface
from torchvision.models.optical_flow._utils import grid_sample, make_coords_grid, upsample_flow
from torchvision.models.optical_flow.raft import FlowHead, MotionEncoder, ResidualBlock
from torchvision.ops import Conv2dNormActivation
from torchvision.prototype.transforms._presets import StereoMatching
from torchvision.utils import _log_api_usage_once
__all__ = (
"RaftStereo",
"raft_stereo_base",
"raft_stereo_realtime",
"Raft_Stereo_Base_Weights",
"Raft_Stereo_Realtime_Weights",
)
class BaseEncoder(raft.FeatureEncoder):
"""Base encoder for FeatureEncoder and ContextEncoder in which weight may be shared.
See the Raft-Stereo paper section 4.6 on backbone part.
"""
def __init__(
self,
*,
block: Callable[..., nn.Module] = ResidualBlock,
layers: Tuple[int, int, int, int] = (64, 64, 96, 128),
strides: Tuple[int, int, int, int] = (2, 1, 2, 2),
norm_layer: Callable[..., nn.Module] = nn.BatchNorm2d,
):
# We use layers + (256,) because raft.FeatureEncoder requires 5 layers
# but here we will set the last conv layer to identity
super().__init__(block=block, layers=layers + (256,), strides=strides, norm_layer=norm_layer)
# The base encoder doesn't have the last conv layer of the feature encoder
self.conv = nn.Identity()
self.output_dim = layers[3]
num_downsampling = sum([x - 1 for x in strides])
self.downsampling_ratio = 2 ** (num_downsampling)
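# Worked example of the ratio above (editor's note): the default strides (2, 1, 2, 2)
# give num_downsampling = 1 + 0 + 1 + 1 = 3, so the base encoder downsamples H and W
# by 2 ** 3 = 8.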
class FeatureEncoder(nn.Module):
"""Feature Encoder for Raft-Stereo (see paper section 3.1) that may have shared weight with the Context Encoder.
The FeatureEncoder takes concatenation of left and right image as input. It produces feature embedding that later
will be used to construct correlation volume.
"""
def __init__(
self,
base_encoder: BaseEncoder,
output_dim: int = 256,
shared_base: bool = False,
block: Callable[..., nn.Module] = ResidualBlock,
):
super().__init__()
self.base_encoder = base_encoder
self.base_downsampling_ratio = base_encoder.downsampling_ratio
base_dim = base_encoder.output_dim
if not shared_base:
self.residual_block: nn.Module = nn.Identity()
self.conv = nn.Conv2d(base_dim, output_dim, kernel_size=1)
else:
# If we share the base encoder weights between the Feature and Context Encoder
# we need to add a residual block with InstanceNorm2d and change the kernel size of the conv layer
# see: https://github.com/princeton-vl/RAFT-Stereo/blob/main/core/raft_stereo.py#L35-L37
self.residual_block = block(base_dim, base_dim, norm_layer=nn.InstanceNorm2d, stride=1)
self.conv = nn.Conv2d(base_dim, output_dim, kernel_size=3, padding=1)
def forward(self, x: Tensor) -> Tensor:
x = self.base_encoder(x)
x = self.residual_block(x)
x = self.conv(x)
return x
class MultiLevelContextEncoder(nn.Module):
"""Context Encoder for Raft-Stereo (see paper section 3.1) that may have shared weight with the Feature Encoder.
The ContextEncoder takes the left image as input, and it outputs concatenated hidden_states and contexts.
In Raft-Stereo we have multi-level GRUs, and this context encoder also has multiple outputs (a list of Tensors)
that correspond to each GRU.
Note that the length of the "out_with_blocks" parameter represents the number of GRU levels.
args:
base_encoder (nn.Module): The base encoder part that can have shared weights with feature_encoder's
base_encoder because they have the same architecture.
out_with_blocks (List[bool]): The length represents the number of GRU levels (the length of the output), and
if an element is True then the output layer at that position will have an additional block
output_dim (int): The dimension of the output at each level (default: 256)
block (Callable[..., nn.Module]): The type of basic block used for downsampling and output layer
(default: ResidualBlock)
"""
def __init__(
self,
base_encoder: nn.Module,
out_with_blocks: List[bool],
output_dim: int = 256,
block: Callable[..., nn.Module] = ResidualBlock,
):
super().__init__()
self.num_level = len(out_with_blocks)
self.base_encoder = base_encoder
self.base_downsampling_ratio = base_encoder.downsampling_ratio
base_dim = base_encoder.output_dim
self.downsample_and_out_layers = nn.ModuleList(
[
nn.ModuleDict(
{
"downsampler": self._make_downsampler(block, base_dim, base_dim) if i > 0 else nn.Identity(),
"out_hidden_state": self._make_out_layer(
base_dim, output_dim // 2, with_block=out_with_blocks[i], block=block
),
"out_context": self._make_out_layer(
base_dim, output_dim // 2, with_block=out_with_blocks[i], block=block
),
}
)
for i in range(self.num_level)
]
)
def _make_out_layer(self, in_channels, out_channels, with_block=True, block=ResidualBlock):
layers = []
if with_block:
layers.append(block(in_channels, in_channels, norm_layer=nn.BatchNorm2d, stride=1))
layers.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))
return nn.Sequential(*layers)
def _make_downsampler(self, block, in_channels, out_channels):
block1 = block(in_channels, out_channels, norm_layer=nn.BatchNorm2d, stride=2)
block2 = block(out_channels, out_channels, norm_layer=nn.BatchNorm2d, stride=1)
return nn.Sequential(block1, block2)
def forward(self, x: Tensor) -> List[Tensor]:
x = self.base_encoder(x)
outs = []
for layer_dict in self.downsample_and_out_layers:
x = layer_dict["downsampler"](x)
outs.append(torch.cat([layer_dict["out_hidden_state"](x), layer_dict["out_context"](x)], dim=1))
return outs
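# Hedged shape note (editor's addition): for out_with_blocks=[True, True, False] the encoder
# returns 3 tensors; level i is 2**i times smaller spatially than level 0 and each has
# output_dim channels (hidden-state half concatenated with context half).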
class ConvGRU(raft.ConvGRU):
"""Convolutional Gru unit."""
# Modified from raft.ConvGRU to accept pre-convolved contexts,
# see: https://github.com/princeton-vl/RAFT-Stereo/blob/main/core/update.py#L23
def forward(self, h: Tensor, x: Tensor, context: List[Tensor]) -> Tensor: # type: ignore[override]
hx = torch.cat([h, x], dim=1)
z = torch.sigmoid(self.convz(hx) + context[0])
r = torch.sigmoid(self.convr(hx) + context[1])
q = torch.tanh(self.convq(torch.cat([r * h, x], dim=1)) + context[2])
h = (1 - z) * h + z * q
return h
class MultiLevelUpdateBlock(nn.Module):
"""The update block which contains the motion encoder and grus
It must expose a ``hidden_dims`` attribute which is the hidden dimension size of its gru blocks
"""
def __init__(self, *, motion_encoder: MotionEncoder, hidden_dims: List[int]):
super().__init__()
self.motion_encoder = motion_encoder
# The GRU input size is the previous level's hidden_dim plus the next level's hidden_dim
# if this is the first GRU, then we replace the previous level with the motion_encoder output channels
# for the last GRU, we don't add the next level's hidden_dim
gru_input_dims = []
for i in range(len(hidden_dims)):
input_dim = hidden_dims[i - 1] if i > 0 else motion_encoder.out_channels
if i < len(hidden_dims) - 1:
input_dim += hidden_dims[i + 1]
gru_input_dims.append(input_dim)
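# e.g. for hidden_dims = [h0, h1, h2] the loop above yields (editor's note)
# [motion_encoder.out_channels + h1, h0 + h2, h1]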
self.grus = nn.ModuleList(
[
ConvGRU(input_size=gru_input_dims[i], hidden_size=hidden_dims[i], kernel_size=3, padding=1)
# Ideally we should reverse the direction during forward to use the gru with the smallest resolution
# first however currently there is no way to reverse a ModuleList that is jit script compatible
# hence we reverse the ordering of self.grus on the constructor instead
# see: https://github.com/pytorch/pytorch/issues/31772
for i in reversed(list(range(len(hidden_dims))))
]
)
self.hidden_dims = hidden_dims
def forward(
self,
hidden_states: List[Tensor],
contexts: List[List[Tensor]],
corr_features: Tensor,
disparity: Tensor,
level_processed: List[bool],
) -> List[Tensor]:
# We call it reverse_i because it has a reversed ordering compared to hidden_states
# see self.grus on the constructor for more detail
for reverse_i, gru in enumerate(self.grus):
i = len(self.grus) - 1 - reverse_i
if level_processed[i]:
# The GRU input is the concatenation of the 2x downsampled larger hidden state (or the motion features if there is no larger level) with
# the 2x upsampled smaller hidden state (or nothing if it does not exist).
if i == 0:
features = self.motion_encoder(disparity, corr_features)
else:
# 2x downsampled features from larger hidden states
features = F.avg_pool2d(hidden_states[i - 1], kernel_size=3, stride=2, padding=1)
if i < len(self.grus) - 1:
# Concat with 2x upsampled features from smaller hidden states
_, _, h, w = hidden_states[i + 1].shape
features = torch.cat(
[
features,
F.interpolate(
hidden_states[i + 1], size=(2 * h, 2 * w), mode="bilinear", align_corners=True
),
],
dim=1,
)
hidden_states[i] = gru(hidden_states[i], features, contexts[i])
# NOTE: For slow-fast gru, we don't always want to calculate delta disparity for every call on UpdateBlock
# Hence we move the delta disparity calculation to the RAFT-Stereo main forward
return hidden_states
class MaskPredictor(raft.MaskPredictor):
"""Mask predictor to be used when upsampling the predicted disparity."""
# We add out_channels compared to raft.MaskPredictor
def __init__(self, *, in_channels: int, hidden_size: int, out_channels: int, multiplier: float = 0.25):
super(raft.MaskPredictor, self).__init__()
self.convrelu = Conv2dNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3)
self.conv = nn.Conv2d(hidden_size, out_channels, kernel_size=1, padding=0)
self.multiplier = multiplier
class CorrPyramid1d(nn.Module):
"""Row-wise correlation pyramid.
Create a row-wise correlation pyramid with ``num_levels`` level from the outputs of the feature encoder,
this correlation pyramid will later be used as index to create correlation features using CorrBlock1d.
"""
def __init__(self, num_levels: int = 4):
super().__init__()
self.num_levels = num_levels
def forward(self, fmap1: Tensor, fmap2: Tensor) -> List[Tensor]:
"""Build the correlation pyramid from two feature maps.
The correlation volume is first computed as the dot product of each pair (pixel_in_fmap1, pixel_in_fmap2) on the same row.
The last 2 dimensions of the correlation volume are then pooled num_levels times at different resolutions
to build the correlation pyramid.
"""
torch._assert(
fmap1.shape == fmap2.shape,
f"Input feature maps should have the same shape, instead got {fmap1.shape} (fmap1.shape) != {fmap2.shape} (fmap2.shape)",
)
batch_size, num_channels, h, w = fmap1.shape
fmap1 = fmap1.view(batch_size, num_channels, h, w)
fmap2 = fmap2.view(batch_size, num_channels, h, w)
corr = torch.einsum("aijk,aijh->ajkh", fmap1, fmap2)
corr = corr.view(batch_size, h, w, 1, w)
corr_volume = corr / torch.sqrt(torch.tensor(num_channels, device=corr.device))
corr_volume = corr_volume.reshape(batch_size * h * w, 1, 1, w)
corr_pyramid = [corr_volume]
for _ in range(self.num_levels - 1):
corr_volume = F.avg_pool2d(corr_volume, kernel_size=(1, 2), stride=(1, 2))
corr_pyramid.append(corr_volume)
return corr_pyramid
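# Hedged shape sketch (editor's addition): for fmap1/fmap2 of shape [B, C, H, W] the first
# pyramid level is [B * H * W, 1, 1, W]; each further level halves the last dimension, so
# num_levels=4 gives last dims W, W/2, W/4, W/8.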
class CorrBlock1d(nn.Module):
"""The row-wise correlation block.
Use indexes from correlation pyramid to create correlation features.
The "indexing" of a given centroid pixel x' is done by concatenating its surrounding row neighbours
within radius
"""
def __init__(self, *, num_levels: int = 4, radius: int = 4):
super().__init__()
self.radius = radius
self.out_channels = num_levels * (2 * radius + 1)
def forward(self, centroids_coords: Tensor, corr_pyramid: List[Tensor]) -> Tensor:
"""Return correlation features by indexing from the pyramid."""
neighborhood_side_len = 2 * self.radius + 1 # see note in __init__ about out_channels
di = torch.linspace(-self.radius, self.radius, neighborhood_side_len, device=centroids_coords.device)
di = di.view(1, 1, neighborhood_side_len, 1).to(centroids_coords.device)
batch_size, _, h, w = centroids_coords.shape # _ = 2 but we only use the first one
# We only consider 1d and take the first dim only
centroids_coords = centroids_coords[:, :1].permute(0, 2, 3, 1).reshape(batch_size * h * w, 1, 1, 1)
indexed_pyramid = []
for corr_volume in corr_pyramid:
x0 = centroids_coords + di # end shape is (batch_size * h * w, 1, side_len, 1)
y0 = torch.zeros_like(x0)
sampling_coords = torch.cat([x0, y0], dim=-1)
indexed_corr_volume = grid_sample(corr_volume, sampling_coords, align_corners=True, mode="bilinear").view(
batch_size, h, w, -1
)
indexed_pyramid.append(indexed_corr_volume)
centroids_coords = centroids_coords / 2
corr_features = torch.cat(indexed_pyramid, dim=-1).permute(0, 3, 1, 2).contiguous()
expected_output_shape = (batch_size, self.out_channels, h, w)
torch._assert(
corr_features.shape == expected_output_shape,
f"Output shape of index pyramid is incorrect. Should be {expected_output_shape}, got {corr_features.shape}",
)
return corr_features
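# Worked example (editor's note): with num_levels=4 and radius=4 the indexed features have
# 4 * (2 * 4 + 1) = 36 channels, i.e. corr_features is [B, 36, H, W].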
class RaftStereo(nn.Module):
def __init__(
self,
*,
feature_encoder: FeatureEncoder,
context_encoder: MultiLevelContextEncoder,
corr_pyramid: CorrPyramid1d,
corr_block: CorrBlock1d,
update_block: MultiLevelUpdateBlock,
disparity_head: nn.Module,
mask_predictor: Optional[nn.Module] = None,
slow_fast: bool = False,
):
"""RAFT-Stereo model from
`RAFT-Stereo: Multilevel Recurrent Field Transforms for Stereo Matching <https://arxiv.org/abs/2109.07547>`_.
args:
feature_encoder (FeatureEncoder): The feature encoder. Its input is the concatenation of ``left_image`` and ``right_image``.
context_encoder (MultiLevelContextEncoder): The context encoder. Its input is ``left_image``.
It has multi-level output and each level will have 2 parts:
- one part will be used as the actual "context", passed to the recurrent unit of the ``update_block``
- one part will be used to initialize the hidden state of the recurrent unit of
the ``update_block``
corr_pyramid (CorrPyramid1d): Module to build the correlation pyramid from feature encoder output
corr_block (CorrBlock1d): The correlation block, which uses the correlation pyramid indexes
to create correlation features. It takes the coordinate of the centroid pixel and correlation pyramid
as input and returns the correlation features.
It must expose an ``out_channels`` attribute.
update_block (MultiLevelUpdateBlock): The update block, which contains the motion encoder, and the recurrent unit.
It takes as input the hidden state of its recurrent unit, the context, the correlation
features, and the current predicted disparity. It outputs an updated hidden state
disparity_head (nn.Module): The disparity head block will convert from the hidden state into changes in disparity.
mask_predictor (nn.Module, optional): Predicts the mask that will be used to upsample the predicted flow.
If ``None`` (default), the flow is upsampled using interpolation.
slow_fast (bool): A boolean that specify whether we should use slow-fast GRU or not. See RAFT-Stereo paper
on section 3.4 for more detail.
"""
super().__init__()
_log_api_usage_once(self)
# This indicates that the disparity output will only have 1 channel (representing the horizontal axis).
# We need this because some stereo matching models like CREStereo might have 2 channels in the output
self.output_channels = 1
self.feature_encoder = feature_encoder
self.context_encoder = context_encoder
self.base_downsampling_ratio = feature_encoder.base_downsampling_ratio
self.num_level = self.context_encoder.num_level
self.corr_pyramid = corr_pyramid
self.corr_block = corr_block
self.update_block = update_block
self.disparity_head = disparity_head
self.mask_predictor = mask_predictor
hidden_dims = self.update_block.hidden_dims
# Follow the original implementation to do pre convolution on the context
# See: https://github.com/princeton-vl/RAFT-Stereo/blob/main/core/raft_stereo.py#L32
self.context_convs = nn.ModuleList(
[nn.Conv2d(hidden_dims[i], hidden_dims[i] * 3, kernel_size=3, padding=1) for i in range(self.num_level)]
)
self.slow_fast = slow_fast
def forward(
self, left_image: Tensor, right_image: Tensor, flow_init: Optional[Tensor] = None, num_iters: int = 12
) -> List[Tensor]:
"""
Return disparity predictions on every iteration as a list of Tensor.
Args:
left_image (Tensor): The input left image with layout B, C, H, W
right_image (Tensor): The input right image with layout B, C, H, W
flow_init (Optional[Tensor]): Initial estimate for the disparity. Default: None
num_iters (int): Number of update block iterations on the largest resolution. Default: 12
"""
batch_size, _, h, w = left_image.shape
torch._assert(
(h, w) == right_image.shape[-2:],
f"input images should have the same shape, instead got ({h}, {w}) != {right_image.shape[-2:]}",
)
torch._assert(
(h % self.base_downsampling_ratio == 0 and w % self.base_downsampling_ratio == 0),
f"input image H and W should be divisible by {self.base_downsampling_ratio}, instead got H={h} and W={w}",
)
fmaps = self.feature_encoder(torch.cat([left_image, right_image], dim=0))
fmap1, fmap2 = torch.chunk(fmaps, chunks=2, dim=0)
torch._assert(
fmap1.shape[-2:] == (h // self.base_downsampling_ratio, w // self.base_downsampling_ratio),
f"The feature encoder should downsample H and W by {self.base_downsampling_ratio}",
)
corr_pyramid = self.corr_pyramid(fmap1, fmap2)
# Multi level contexts
context_outs = self.context_encoder(left_image)
hidden_dims = self.update_block.hidden_dims
context_out_channels = [context_outs[i].shape[1] - hidden_dims[i] for i in range(len(context_outs))]
hidden_states: List[Tensor] = []
contexts: List[List[Tensor]] = []
for i, context_conv in enumerate(self.context_convs):
# As in the original paper, the actual output of the context encoder is split in 2 parts:
# - one part is used to initialize the hidden state of the recurrent units of the update block
# - the rest is the "actual" context.
hidden_state, context = torch.split(context_outs[i], [hidden_dims[i], context_out_channels[i]], dim=1)
hidden_states.append(torch.tanh(hidden_state))
contexts.append(
# mypy is technically correct here. The return type of `torch.split` was incorrectly annotated with
# `List[int]` although it should have been `Tuple[Tensor, ...]`. However, the latter is not supported by
# JIT and thus we have to keep the wrong annotation here and silence mypy.
torch.split( # type: ignore[arg-type]
context_conv(F.relu(context)), [hidden_dims[i], hidden_dims[i], hidden_dims[i]], dim=1
)
)
_, Cf, Hf, Wf = fmap1.shape
coords0 = make_coords_grid(batch_size, Hf, Wf).to(fmap1.device)
coords1 = make_coords_grid(batch_size, Hf, Wf).to(fmap1.device)
# We use flow_init for cascade inference
if flow_init is not None:
coords1 = coords1 + flow_init
disparity_predictions = []
for _ in range(num_iters):
coords1 = coords1.detach() # Don't backpropagate gradients through this branch, see paper
corr_features = self.corr_block(centroids_coords=coords1, corr_pyramid=corr_pyramid)
disparity = coords1 - coords0
if self.slow_fast:
# Using the slow-fast GRU (see paper section 3.4). The lower resolutions are processed more often
for i in range(1, self.num_level):
# We only process the smallest i levels
level_processed = [False] * (self.num_level - i) + [True] * i
hidden_states = self.update_block(
hidden_states, contexts, corr_features, disparity, level_processed=level_processed
)
hidden_states = self.update_block(
hidden_states, contexts, corr_features, disparity, level_processed=[True] * self.num_level
)
# Take the largest hidden_state to get the disparity
hidden_state = hidden_states[0]
delta_disparity = self.disparity_head(hidden_state)
# in stereo mode, project the disparity update onto the epipolar line by zeroing its vertical component
delta_disparity[:, 1] = 0.0
coords1 = coords1 + delta_disparity
up_mask = None if self.mask_predictor is None else self.mask_predictor(hidden_state)
upsampled_disparity = upsample_flow(
(coords1 - coords0), up_mask=up_mask, factor=self.base_downsampling_ratio
)
disparity_predictions.append(upsampled_disparity[:, :1])
return disparity_predictions
def _raft_stereo(
*,
weights: Optional[WeightsEnum],
progress: bool,
shared_encoder_weight: bool,
# Feature encoder
feature_encoder_layers: Tuple[int, int, int, int, int],
feature_encoder_strides: Tuple[int, int, int, int],
feature_encoder_block: Callable[..., nn.Module],
# Context encoder
context_encoder_layers: Tuple[int, int, int, int, int],
context_encoder_strides: Tuple[int, int, int, int],
# if the `out_with_blocks` param of the context_encoder is True, then
# the output at that level will have an additional `context_encoder_block` layer
context_encoder_out_with_blocks: List[bool],
context_encoder_block: Callable[..., nn.Module],
# Correlation block
corr_num_levels: int,
corr_radius: int,
# Motion encoder
motion_encoder_corr_layers: Tuple[int, int],
motion_encoder_flow_layers: Tuple[int, int],
motion_encoder_out_channels: int,
# Update block
update_block_hidden_dims: List[int],
# Flow Head
flow_head_hidden_size: int,
# Mask predictor
mask_predictor_hidden_size: int,
use_mask_predictor: bool,
slow_fast: bool,
**kwargs,
):
if len(context_encoder_out_with_blocks) != len(update_block_hidden_dims):
raise ValueError(
"Length of context_encoder_out_with_blocks and update_block_hidden_dims must be the same"
+ "because both of them represent the number of GRUs level"
)
if shared_encoder_weight:
if (
feature_encoder_layers[:-1] != context_encoder_layers[:-1]
or feature_encoder_strides != context_encoder_strides
):
raise ValueError(
"If shared_encoder_weight is True, then the feature_encoder_layers[:-1]"
+ " and feature_encoder_strides must be the same with context_encoder_layers[:-1] and context_encoder_strides!"
)
base_encoder = kwargs.pop("base_encoder", None) or BaseEncoder(
block=context_encoder_block,
layers=context_encoder_layers[:-1],
strides=context_encoder_strides,
norm_layer=nn.BatchNorm2d,
)
feature_base_encoder = base_encoder
context_base_encoder = base_encoder
else:
feature_base_encoder = BaseEncoder(
block=feature_encoder_block,
layers=feature_encoder_layers[:-1],
strides=feature_encoder_strides,
norm_layer=nn.InstanceNorm2d,
)
context_base_encoder = BaseEncoder(
block=context_encoder_block,
layers=context_encoder_layers[:-1],
strides=context_encoder_strides,
norm_layer=nn.BatchNorm2d,
)
feature_encoder = kwargs.pop("feature_encoder", None) or FeatureEncoder(
feature_base_encoder,
output_dim=feature_encoder_layers[-1],
shared_base=shared_encoder_weight,
block=feature_encoder_block,
)
context_encoder = kwargs.pop("context_encoder", None) or MultiLevelContextEncoder(
context_base_encoder,
out_with_blocks=context_encoder_out_with_blocks,
output_dim=context_encoder_layers[-1],
block=context_encoder_block,
)
feature_downsampling_ratio = feature_encoder.base_downsampling_ratio
corr_pyramid = kwargs.pop("corr_pyramid", None) or CorrPyramid1d(num_levels=corr_num_levels)
corr_block = kwargs.pop("corr_block", None) or CorrBlock1d(num_levels=corr_num_levels, radius=corr_radius)
motion_encoder = kwargs.pop("motion_encoder", None) or MotionEncoder(
in_channels_corr=corr_block.out_channels,
corr_layers=motion_encoder_corr_layers,
flow_layers=motion_encoder_flow_layers,
out_channels=motion_encoder_out_channels,
)
update_block = kwargs.pop("update_block", None) or MultiLevelUpdateBlock(
motion_encoder=motion_encoder, hidden_dims=update_block_hidden_dims
)
# We use the largest scale hidden_dims of update_block to get the predicted disparity
disparity_head = kwargs.pop("disparity_head", None) or FlowHead(
in_channels=update_block_hidden_dims[0],
hidden_size=flow_head_hidden_size,
)
mask_predictor = kwargs.pop("mask_predictor", None)
if use_mask_predictor:
mask_predictor = MaskPredictor(
in_channels=update_block.hidden_dims[0],
hidden_size=mask_predictor_hidden_size,
out_channels=9 * feature_downsampling_ratio * feature_downsampling_ratio,
)
else:
mask_predictor = None
model = RaftStereo(
feature_encoder=feature_encoder,
context_encoder=context_encoder,
corr_pyramid=corr_pyramid,
corr_block=corr_block,
update_block=update_block,
disparity_head=disparity_head,
mask_predictor=mask_predictor,
slow_fast=slow_fast,
**kwargs, # not really needed, all params should be consumed by now
)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
class Raft_Stereo_Realtime_Weights(WeightsEnum):
SCENEFLOW_V1 = Weights(
# Weights ported from https://github.com/princeton-vl/RAFT-Stereo
url="https://download.pytorch.org/models/raft_stereo_realtime-cf345ccb.pth",
transforms=partial(StereoMatching, resize_size=(224, 224)),
meta={
"num_params": 8077152,
"recipe": "https://github.com/princeton-vl/RAFT-Stereo",
"_metrics": {
# Following metrics from paper: https://arxiv.org/abs/2109.07547
"Kitty2015": {
"3px": 0.9409,
}
},
},
)
DEFAULT = SCENEFLOW_V1
class Raft_Stereo_Base_Weights(WeightsEnum):
SCENEFLOW_V1 = Weights(
# Weights ported from https://github.com/princeton-vl/RAFT-Stereo
url="https://download.pytorch.org/models/raft_stereo_base_sceneflow-eff3f2e6.pth",
transforms=partial(StereoMatching, resize_size=(224, 224)),
meta={
"num_params": 11116176,
"recipe": "https://github.com/princeton-vl/RAFT-Stereo",
"_metrics": {
# Following metrics from paper: https://arxiv.org/abs/2109.07547
# Using standard metrics for each dataset
"Kitty2015": {
# Ratio of pixels with difference less than 3px from ground truth
"3px": 0.9426,
},
# For Middlebury, ratio of pixels with difference less than 2px from ground truth
# on full, half, and quarter image resolution
"Middlebury2014-val-full": {
"2px": 0.8167,
},
"Middlebury2014-val-half": {
"2px": 0.8741,
},
"Middlebury2014-val-quarter": {
"2px": 0.9064,
},
"ETH3D-val": {
# Ratio of pixels with difference less than 1px from ground truth
"1px": 0.9672,
},
},
},
)
MIDDLEBURY_V1 = Weights(
# Weights ported from https://github.com/princeton-vl/RAFT-Stereo
url="https://download.pytorch.org/models/raft_stereo_base_middlebury-afa9d252.pth",
transforms=partial(StereoMatching, resize_size=(224, 224)),
meta={
"num_params": 11116176,
"recipe": "https://github.com/princeton-vl/RAFT-Stereo",
"_metrics": {
# Following metrics from paper: https://arxiv.org/abs/2109.07547
"Middlebury-test": {
"mae": 1.27,
"1px": 0.9063,
"2px": 0.9526,
"5px": 0.9725,
}
},
},
)
ETH3D_V1 = Weights(
# Weights ported from https://github.com/princeton-vl/RAFT-Stereo
url="https://download.pytorch.org/models/raft_stereo_base_eth3d-d4830f22.pth",
transforms=partial(StereoMatching, resize_size=(224, 224)),
meta={
"num_params": 11116176,
"recipe": "https://github.com/princeton-vl/RAFT-Stereo",
"_metrics": {
# Following metrics from paper: https://arxiv.org/abs/2109.07547
"ETH3D-test": {
"mae": 0.18,
"1px": 0.9756,
"2px": 0.9956,
}
},
},
)
DEFAULT = MIDDLEBURY_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", None))
def raft_stereo_realtime(
*, weights: Optional[Raft_Stereo_Realtime_Weights] = None, progress=True, **kwargs
) -> RaftStereo:
"""RAFT-Stereo model from
`RAFT-Stereo: Multilevel Recurrent Field Transforms for Stereo Matching <https://arxiv.org/abs/2109.07547>`_.
This is the realtime variant of the RAFT-Stereo model, described in section 4.7 of the paper.
Please see the example below for a tutorial on how to use this model.
Args:
weights(:class:`~torchvision.prototype.models.depth.stereo.Raft_Stereo_Realtime_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.prototype.models.depth.stereo.Raft_Stereo_Realtime_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.prototype.models.depth.stereo.raft_stereo.RaftStereo``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/optical_flow/raft.py>`_
for more details about this class.
.. autoclass:: torchvision.prototype.models.depth.stereo.Raft_Stereo_Realtime_Weights
:members:
"""
weights = Raft_Stereo_Realtime_Weights.verify(weights)
return _raft_stereo(
weights=weights,
progress=progress,
shared_encoder_weight=True,
# Feature encoder
feature_encoder_layers=(64, 64, 96, 128, 256),
feature_encoder_strides=(2, 1, 2, 2),
feature_encoder_block=ResidualBlock,
# Context encoder
context_encoder_layers=(64, 64, 96, 128, 256),
context_encoder_strides=(2, 1, 2, 2),
context_encoder_out_with_blocks=[True, True],
context_encoder_block=ResidualBlock,
# Correlation block
corr_num_levels=4,
corr_radius=4,
# Motion encoder
motion_encoder_corr_layers=(64, 64),
motion_encoder_flow_layers=(64, 64),
motion_encoder_out_channels=128,
# Update block
update_block_hidden_dims=[128, 128],
# Flow head
flow_head_hidden_size=256,
# Mask predictor
mask_predictor_hidden_size=256,
use_mask_predictor=True,
slow_fast=True,
**kwargs,
)
@register_model()
@handle_legacy_interface(weights=("pretrained", None))
def raft_stereo_base(*, weights: Optional[Raft_Stereo_Base_Weights] = None, progress=True, **kwargs) -> RaftStereo:
"""RAFT-Stereo model from
`RAFT-Stereo: Multilevel Recurrent Field Transforms for Stereo Matching <https://arxiv.org/abs/2109.07547>`_.
Please see the example below for a tutorial on how to use this model.
Args:
weights(:class:`~torchvision.prototype.models.depth.stereo.Raft_Stereo_Base_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.prototype.models.depth.stereo.Raft_Stereo_Base_Weights`
below for more details, and possible values. By default, no
pre-trained weights are used.
progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
**kwargs: parameters passed to the ``torchvision.prototype.models.depth.stereo.raft_stereo.RaftStereo``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/optical_flow/raft.py>`_
for more details about this class.
.. autoclass:: torchvision.prototype.models.depth.stereo.Raft_Stereo_Base_Weights
:members:
"""
weights = Raft_Stereo_Base_Weights.verify(weights)
return _raft_stereo(
weights=weights,
progress=progress,
shared_encoder_weight=False,
# Feature encoder
feature_encoder_layers=(64, 64, 96, 128, 256),
feature_encoder_strides=(1, 1, 2, 2),
feature_encoder_block=ResidualBlock,
# Context encoder
context_encoder_layers=(64, 64, 96, 128, 256),
context_encoder_strides=(1, 1, 2, 2),
context_encoder_out_with_blocks=[True, True, False],
context_encoder_block=ResidualBlock,
# Correlation block
corr_num_levels=4,
corr_radius=4,
# Motion encoder
motion_encoder_corr_layers=(64, 64),
motion_encoder_flow_layers=(64, 64),
motion_encoder_out_channels=128,
# Update block
update_block_hidden_dims=[128, 128, 128],
# Flow head
flow_head_hidden_size=256,
# Mask predictor
mask_predictor_hidden_size=256,
use_mask_predictor=True,
slow_fast=False,
**kwargs,
)
```
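A minimal inference sketch for the builders above, assuming the prototype package layout this file ships in (``torchvision.prototype.models.depth.stereo``); the prototype API may change without warning, and the shapes below are only illustrative.

```py
import torch
from torchvision.prototype.models.depth.stereo import raft_stereo_base

model = raft_stereo_base(weights=None)  # pass a Raft_Stereo_Base_Weights entry for pretrained weights
model.eval()

# H and W must be divisible by the feature encoder's downsampling ratio (4 for the base config).
left = torch.rand(1, 3, 256, 256)
right = torch.rand(1, 3, 256, 256)

with torch.no_grad():
    # forward() returns one upsampled disparity map per update iteration
    disparity_predictions = model(left, right, num_iters=12)

final_disparity = disparity_predictions[-1]
print(final_disparity.shape)  # torch.Size([1, 1, 256, 256]): a single horizontal-disparity channel
```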
|
====================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\transforms\__init__.py
ENCODING: utf-8
```py
from ._presets import StereoMatching # usort: skip
from ._augment import SimpleCopyPaste
from ._geometry import FixedSizeCrop
from ._misc import PermuteDimensions, TransposeDimensions
from ._type_conversion import LabelToOneHot
```
|
====================================================================================================================================
SOURCE CODE FILE: _augment.py
LINES: 1
SIZE: 8.95 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\transforms\_augment.py
ENCODING: utf-8
```py
from typing import Any, cast, Dict, List, Optional, Tuple, Union
import PIL.Image
import torch
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision import tv_tensors
from torchvision.ops import masks_to_boxes
from torchvision.prototype import tv_tensors as proto_tv_tensors
from torchvision.transforms.v2 import functional as F, InterpolationMode, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
from torchvision.transforms.v2.functional._geometry import _check_interpolation
class SimpleCopyPaste(Transform):
def __init__(
self,
blending: bool = True,
resize_interpolation: Union[int, InterpolationMode] = F.InterpolationMode.BILINEAR,
antialias: Optional[bool] = None,
) -> None:
super().__init__()
self.resize_interpolation = _check_interpolation(resize_interpolation)
self.blending = blending
self.antialias = antialias
def _copy_paste(
self,
image: Union[torch.Tensor, tv_tensors.Image],
target: Dict[str, Any],
paste_image: Union[torch.Tensor, tv_tensors.Image],
paste_target: Dict[str, Any],
random_selection: torch.Tensor,
blending: bool,
resize_interpolation: F.InterpolationMode,
antialias: Optional[bool],
) -> Tuple[torch.Tensor, Dict[str, Any]]:
paste_masks = tv_tensors.wrap(paste_target["masks"][random_selection], like=paste_target["masks"])
paste_boxes = tv_tensors.wrap(paste_target["boxes"][random_selection], like=paste_target["boxes"])
paste_labels = tv_tensors.wrap(paste_target["labels"][random_selection], like=paste_target["labels"])
masks = target["masks"]
# We resize source and paste data if they have different sizes
# This differs from the TF implementation; we introduce it here because
# the original algorithm works on equal-sized data
# (for example, coming from LSJ data augmentations)
size1 = cast(List[int], image.shape[-2:])
size2 = paste_image.shape[-2:]
if size1 != size2:
paste_image = F.resize(paste_image, size=size1, interpolation=resize_interpolation, antialias=antialias)
paste_masks = F.resize(paste_masks, size=size1)
paste_boxes = F.resize(paste_boxes, size=size1)
paste_alpha_mask = paste_masks.sum(dim=0) > 0
if blending:
paste_alpha_mask = F.gaussian_blur(paste_alpha_mask.unsqueeze(0), kernel_size=[5, 5], sigma=[2.0])
inverse_paste_alpha_mask = paste_alpha_mask.logical_not()
# Copy-paste images:
image = image.mul(inverse_paste_alpha_mask).add_(paste_image.mul(paste_alpha_mask))
# Copy-paste masks:
masks = masks * inverse_paste_alpha_mask
non_all_zero_masks = masks.sum((-1, -2)) > 0
masks = masks[non_all_zero_masks]
# Do a shallow copy of the target dict
out_target = {k: v for k, v in target.items()}
out_target["masks"] = torch.cat([masks, paste_masks])
# Copy-paste boxes and labels
bbox_format = target["boxes"].format
xyxy_boxes = masks_to_boxes(masks)
# masks_to_boxes produces bboxes with x2y2 inclusive but x2y2 should be exclusive
# we need to add +1 to x2y2.
# There is a similar +1 in other reference implementations:
# https://github.com/pytorch/vision/blob/b6feccbc4387766b76a3e22b13815dbbbfa87c0f/torchvision/models/detection/roi_heads.py#L418-L422
xyxy_boxes[:, 2:] += 1
boxes = F.convert_bounding_box_format(
xyxy_boxes, old_format=tv_tensors.BoundingBoxFormat.XYXY, new_format=bbox_format, inplace=True
)
out_target["boxes"] = torch.cat([boxes, paste_boxes])
labels = target["labels"][non_all_zero_masks]
out_target["labels"] = torch.cat([labels, paste_labels])
# Check for degenerated boxes and remove them
boxes = F.convert_bounding_box_format(
out_target["boxes"], old_format=bbox_format, new_format=tv_tensors.BoundingBoxFormat.XYXY
)
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
if degenerate_boxes.any():
valid_targets = ~degenerate_boxes.any(dim=1)
out_target["boxes"] = boxes[valid_targets]
out_target["masks"] = out_target["masks"][valid_targets]
out_target["labels"] = out_target["labels"][valid_targets]
return image, out_target
def _extract_image_targets(
self, flat_sample: List[Any]
) -> Tuple[List[Union[torch.Tensor, tv_tensors.Image]], List[Dict[str, Any]]]:
# fetch all images, bboxes, masks and labels from unstructured input
# with List[image], List[BoundingBoxes], List[Mask], List[Label]
images, bboxes, masks, labels = [], [], [], []
for obj in flat_sample:
if isinstance(obj, tv_tensors.Image) or is_pure_tensor(obj):
images.append(obj)
elif isinstance(obj, PIL.Image.Image):
images.append(F.to_image(obj))
elif isinstance(obj, tv_tensors.BoundingBoxes):
bboxes.append(obj)
elif isinstance(obj, tv_tensors.Mask):
masks.append(obj)
elif isinstance(obj, (proto_tv_tensors.Label, proto_tv_tensors.OneHotLabel)):
labels.append(obj)
if not (len(images) == len(bboxes) == len(masks) == len(labels)):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain equal sized list of Images, "
"BoundingBoxes, Masks and Labels or OneHotLabels."
)
targets = []
for bbox, mask, label in zip(bboxes, masks, labels):
targets.append({"boxes": bbox, "masks": mask, "labels": label})
return images, targets
def _insert_outputs(
self,
flat_sample: List[Any],
output_images: List[torch.Tensor],
output_targets: List[Dict[str, Any]],
) -> None:
c0, c1, c2, c3 = 0, 0, 0, 0
for i, obj in enumerate(flat_sample):
if isinstance(obj, tv_tensors.Image):
flat_sample[i] = tv_tensors.wrap(output_images[c0], like=obj)
c0 += 1
elif isinstance(obj, PIL.Image.Image):
flat_sample[i] = F.to_pil_image(output_images[c0])
c0 += 1
elif is_pure_tensor(obj):
flat_sample[i] = output_images[c0]
c0 += 1
elif isinstance(obj, tv_tensors.BoundingBoxes):
flat_sample[i] = tv_tensors.wrap(output_targets[c1]["boxes"], like=obj)
c1 += 1
elif isinstance(obj, tv_tensors.Mask):
flat_sample[i] = tv_tensors.wrap(output_targets[c2]["masks"], like=obj)
c2 += 1
elif isinstance(obj, (proto_tv_tensors.Label, proto_tv_tensors.OneHotLabel)):
flat_sample[i] = tv_tensors.wrap(output_targets[c3]["labels"], like=obj)
c3 += 1
def forward(self, *inputs: Any) -> Any:
flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])
images, targets = self._extract_image_targets(flat_inputs)
# images = [t1, t2, ..., tN]
# Let's define paste_images as shifted list of input images
# paste_images = [t2, t3, ..., tN, t1]
# FYI: in TF they mix data on the dataset level
images_rolled = images[-1:] + images[:-1]
targets_rolled = targets[-1:] + targets[:-1]
output_images, output_targets = [], []
for image, target, paste_image, paste_target in zip(images, targets, images_rolled, targets_rolled):
# Random paste targets selection:
num_masks = len(paste_target["masks"])
if num_masks < 1:
# Such a degenerate case with num_masks=0 can happen with LSJ
# Let's just return (image, target)
output_image, output_target = image, target
else:
random_selection = torch.randint(0, num_masks, (num_masks,), device=paste_image.device)
random_selection = torch.unique(random_selection)
output_image, output_target = self._copy_paste(
image,
target,
paste_image,
paste_target,
random_selection=random_selection,
blending=self.blending,
resize_interpolation=self.resize_interpolation,
antialias=self.antialias,
)
output_images.append(output_image)
output_targets.append(output_target)
# Insert updated images and targets into input flat_sample
self._insert_outputs(flat_inputs, output_images, output_targets)
return tree_unflatten(flat_inputs, spec)
```
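A rough usage sketch for ``SimpleCopyPaste`` above, assuming the import paths shown in this dump (``torchvision.prototype.transforms`` and the prototype ``Label`` tv_tensor). The image-plus-detection-target layout is only one of the structures the flattening logic accepts, and ``blending`` is disabled here to keep the sketch minimal.

```py
import torch
from torchvision import tv_tensors
from torchvision.prototype import tv_tensors as proto_tv_tensors
from torchvision.prototype.transforms import SimpleCopyPaste

def make_sample(shift: int):
    # One image with two square objects; boxes are XYXY and match the masks.
    size = (128, 128)
    image = tv_tensors.Image(torch.randint(0, 256, (3, *size), dtype=torch.uint8))
    masks = torch.zeros(2, *size, dtype=torch.uint8)
    masks[0, shift : shift + 30, shift : shift + 30] = 1
    masks[1, 90:120, 90:120] = 1
    boxes = tv_tensors.BoundingBoxes(
        torch.tensor([[shift, shift, shift + 30, shift + 30], [90, 90, 120, 120]], dtype=torch.float32),
        format=tv_tensors.BoundingBoxFormat.XYXY,
        canvas_size=size,
    )
    labels = proto_tv_tensors.Label(torch.tensor([1, 2]))
    return image, {"boxes": boxes, "masks": tv_tensors.Mask(masks), "labels": labels}

# Objects from each sample are pasted into the next sample in the batch (rolled by one).
transform = SimpleCopyPaste(blending=False)
augmented = transform([make_sample(10), make_sample(40)])
```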
|
=====================================================================================================================================
SOURCE CODE FILE: _geometry.py
LINES: 1
SIZE: 4.68 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\transforms\_geometry.py
ENCODING: utf-8
```py
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.prototype.tv_tensors import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import (
_FillType,
_get_fill,
_setup_fill_arg,
_setup_size,
get_bounding_boxes,
has_any,
is_pure_tensor,
query_size,
)
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
tv_tensors.Image,
is_pure_tensor,
tv_tensors.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, tv_tensors.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = get_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_bounding_box_format(
bounding_boxes, old_format=format, new_format=tv_tensors.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop,
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, tv_tensors.Mask)):
inpt = tv_tensors.wrap(inpt[params["is_valid"]], like=inpt)
elif isinstance(inpt, tv_tensors.BoundingBoxes):
inpt = tv_tensors.wrap(
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
like=inpt,
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
```
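A small sketch of ``FixedSizeCrop`` above, which crops and/or pads every input to a fixed ``(height, width)``; the import path is assumed from this package's ``__init__``.

```py
import torch
from torchvision import tv_tensors
from torchvision.prototype.transforms import FixedSizeCrop

crop = FixedSizeCrop(size=(224, 224), fill=0, padding_mode="constant")

# 200x300 input: the width is randomly cropped to 224, the height is padded at the bottom to 224.
image = tv_tensors.Image(torch.randint(0, 256, (3, 200, 300), dtype=torch.uint8))
out = crop(image)
print(out.shape)  # torch.Size([3, 224, 224])
```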
|
=================================================================================================================================
SOURCE CODE FILE: _misc.py
LINES: 1
SIZE: 2.75 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\transforms\_misc.py
ENCODING: utf-8
```py
import functools
import warnings
from collections import defaultdict
from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2._utils import is_pure_tensor
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
# This weird-looking construct only exists because `lambda`s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
class PermuteDimensions(Transform):
_transformed_types = (is_pure_tensor, tv_tensors.Image, tv_tensors.Video)
def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [tv_tensors.Image, tv_tensors.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `tv_tensors.Image` or `tv_tensors.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `tv_tensors.Image` or `tv_tensors.Video` is present in the input."
)
self.dims = dims
def transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_pure_tensor, tv_tensors.Image, tv_tensors.Video)
def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [tv_tensors.Image, tv_tensors.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `tv_tensors.Image` or `tv_tensors.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `tv_tensors.Image` or `tv_tensors.Video` is present in the input."
)
self.dims = dims
def transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
```
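A brief sketch of the two dimension-reordering transforms above; both accept either a single ``dims`` spec (applied to every transformed type via the ``defaultdict`` helper) or a per-type dict.

```py
import torch
from torchvision import tv_tensors
from torchvision.prototype.transforms import PermuteDimensions, TransposeDimensions

video = tv_tensors.Video(torch.rand(4, 3, 8, 8))  # (T, C, H, W)

to_channels_last = PermuteDimensions(dims=(0, 2, 3, 1))
print(to_channels_last(video).shape)  # torch.Size([4, 8, 8, 3])

swap_time_and_channels = TransposeDimensions(dims=(0, 1))
print(swap_time_and_channels(video).shape)  # torch.Size([3, 4, 8, 8])
```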
|
====================================================================================================================================
SOURCE CODE FILE: _presets.py
LINES: 6
SIZE: 3.20 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\transforms\_presets.py
ENCODING: utf-8
```py
"""
This file is part of the private API. Please do not use these classes directly as they will be modified in
future versions without warning. The classes should be accessed only via the transforms argument of Weights.
"""
from typing import List, Optional, Tuple, Union
import PIL.Image
import torch
from torch import Tensor
from torchvision.transforms.v2 import functional as F, InterpolationMode
from torchvision.transforms.v2.functional._geometry import _check_interpolation
__all__ = ["StereoMatching"]
class StereoMatching(torch.nn.Module):
def __init__(
self,
*,
use_gray_scale: bool = False,
resize_size: Optional[Tuple[int, ...]],
mean: Tuple[float, ...] = (0.5, 0.5, 0.5),
std: Tuple[float, ...] = (0.5, 0.5, 0.5),
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
) -> None:
super().__init__()
# pacify mypy
self.resize_size: Union[None, List]
if resize_size is not None:
self.resize_size = list(resize_size)
else:
self.resize_size = None
self.mean = list(mean)
self.std = list(std)
self.interpolation = _check_interpolation(interpolation)
self.use_gray_scale = use_gray_scale
def forward(self, left_image: Tensor, right_image: Tensor) -> Tuple[Tensor, Tensor]:
def _process_image(img: PIL.Image.Image) -> Tensor:
if not isinstance(img, Tensor):
img = F.pil_to_tensor(img)
if self.resize_size is not None:
# We hard-code antialias=False to preserve results after we changed
# its default from None to True (see
# https://github.com/pytorch/vision/pull/7160)
# TODO: we could re-train the stereo models with antialias=True?
img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=False)
if self.use_gray_scale is True:
img = F.rgb_to_grayscale(img)
img = F.convert_image_dtype(img, torch.float)
img = F.normalize(img, mean=self.mean, std=self.std)
img = img.contiguous()
return img
left_image = _process_image(left_image)
right_image = _process_image(right_image)
return left_image, right_image
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
format_string += f"\n resize_size={self.resize_size}"
format_string += f"\n mean={self.mean}"
format_string += f"\n std={self.std}"
format_string += f"\n interpolation={self.interpolation}"
format_string += "\n)"
return format_string
def describe(self) -> str:
return (
"Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. "
f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and "
f"``std={self.std}``."
)
```
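A short sketch of the ``StereoMatching`` preset above, called the same way the ``Weights`` entries do (e.g. ``partial(StereoMatching, resize_size=(224, 224))``).

```py
import torch
from torchvision.prototype.transforms import StereoMatching

preset = StereoMatching(resize_size=(224, 224))

left = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)
right = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)

left_t, right_t = preset(left, right)
print(left_t.shape, left_t.dtype)  # torch.Size([3, 224, 224]) torch.float32
```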
|
============================================================================================================================================
SOURCE CODE FILE: _type_conversion.py
LINES: 1
SIZE: 1.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\transforms\_type_conversion.py
ENCODING: utf-8
```py
from typing import Any, Dict
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import tv_tensors as proto_tv_tensors
from torchvision.transforms.v2 import Transform
class LabelToOneHot(Transform):
_transformed_types = (proto_tv_tensors.Label,)
def __init__(self, num_categories: int = -1):
super().__init__()
self.num_categories = num_categories
def transform(self, inpt: proto_tv_tensors.Label, params: Dict[str, Any]) -> proto_tv_tensors.OneHotLabel:
num_categories = self.num_categories
if num_categories == -1 and inpt.categories is not None:
num_categories = len(inpt.categories)
output = one_hot(inpt.as_subclass(torch.Tensor), num_classes=num_categories)
return proto_tv_tensors.OneHotLabel(output, categories=inpt.categories)
def extra_repr(self) -> str:
if self.num_categories == -1:
return ""
return f"num_categories={self.num_categories}"
```
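A minimal sketch of ``LabelToOneHot`` above; with ``num_categories`` left at ``-1``, the number of classes is inferred from the ``Label``'s categories.

```py
import torch
from torchvision.prototype import tv_tensors as proto_tv_tensors
from torchvision.prototype.transforms import LabelToOneHot

label = proto_tv_tensors.Label(torch.tensor([0, 2, 1]), categories=["cat", "dog", "bird"])
one_hot = LabelToOneHot()(label)
print(one_hot.shape)  # torch.Size([3, 3])
```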
|
====================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\tv_tensors\__init__.py
ENCODING: utf-8
```py
from ._label import Label, OneHotLabel
```
|
==================================================================================================================================
SOURCE CODE FILE: _label.py
LINES: 1
SIZE: 2.13 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\tv_tensors\_label.py
ENCODING: utf-8
```py
from __future__ import annotations
from typing import Any, Optional, Sequence, Type, TypeVar, Union
import torch
from torch.utils._pytree import tree_map
from torchvision.tv_tensors._tv_tensor import TVTensor
L = TypeVar("L", bound="_LabelBase")
class _LabelBase(TVTensor):
categories: Optional[Sequence[str]]
@classmethod
def _wrap(cls: Type[L], tensor: torch.Tensor, *, categories: Optional[Sequence[str]]) -> L:
label_base = tensor.as_subclass(cls)
label_base.categories = categories
return label_base
def __new__(
cls: Type[L],
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> L:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, categories=categories)
@classmethod
def from_category(
cls: Type[L],
category: str,
*,
categories: Sequence[str],
**kwargs: Any,
) -> L:
return cls(categories.index(category), categories=categories, **kwargs)
class Label(_LabelBase):
def to_categories(self) -> Any:
if self.categories is None:
raise RuntimeError("Label does not have categories")
return tree_map(lambda idx: self.categories[idx], self.tolist()) # type: ignore[index]
class OneHotLabel(_LabelBase):
def __new__(
cls,
data: Any,
*,
categories: Optional[Sequence[str]] = None,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: bool = False,
) -> OneHotLabel:
one_hot_label = super().__new__(
cls, data, categories=categories, dtype=dtype, device=device, requires_grad=requires_grad
)
if categories is not None and len(categories) != one_hot_label.shape[-1]:
raise ValueError()
return one_hot_label
```
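A quick sketch of the ``Label`` and ``OneHotLabel`` tv_tensors defined above.

```py
import torch
from torchvision.prototype.tv_tensors import Label, OneHotLabel

categories = ["cat", "dog", "bird"]

label = Label.from_category("dog", categories=categories)
print(int(label), label.categories)  # 1 ['cat', 'dog', 'bird']
print(Label([0, 2], categories=categories).to_categories())  # ['cat', 'bird']

# OneHotLabel checks that the last dimension matches the number of categories.
one_hot = OneHotLabel(torch.tensor([0, 1, 0]), categories=categories)
```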
|
===============================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.02 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\utils\__init__.py
ENCODING: utf-8
```py
from . import _internal
```
|
================================================================================================================================
SOURCE CODE FILE: _internal.py
LINES: 1
SIZE: 5.28 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\prototype\utils\_internal.py
ENCODING: utf-8
```py
import collections.abc
import difflib
import io
import mmap
import platform
from typing import BinaryIO, Callable, Collection, Sequence, TypeVar, Union
import numpy as np
import torch
from torchvision._utils import sequence_to_str
__all__ = [
"add_suggestion",
"fromfile",
"ReadOnlyTensorBuffer",
]
def add_suggestion(
msg: str,
*,
word: str,
possibilities: Collection[str],
close_match_hint: Callable[[str], str] = lambda close_match: f"Did you mean '{close_match}'?",
alternative_hint: Callable[
[Sequence[str]], str
] = lambda possibilities: f"Can be {sequence_to_str(possibilities, separate_last='or ')}.",
) -> str:
if not isinstance(possibilities, collections.abc.Sequence):
possibilities = sorted(possibilities)
suggestions = difflib.get_close_matches(word, possibilities, 1)
hint = close_match_hint(suggestions[0]) if suggestions else alternative_hint(possibilities)
if not hint:
return msg
return f"{msg.strip()} {hint}"
D = TypeVar("D")
def _read_mutable_buffer_fallback(file: BinaryIO, count: int, item_size: int) -> bytearray:
# A plain file.read() will give read-only bytes, so we convert it to a bytearray to make it mutable
return bytearray(file.read(-1 if count == -1 else count * item_size))
def fromfile(
file: BinaryIO,
*,
dtype: torch.dtype,
byte_order: str,
count: int = -1,
) -> torch.Tensor:
"""Construct a tensor from a binary file.
.. note::
This function is similar to :func:`numpy.fromfile` with two notable differences:
1. This function only accepts an open binary file, but not a path to it.
2. This function has an additional ``byte_order`` parameter, since PyTorch ``dtype``s do not support that
concept.
.. note::
If the ``file`` was opened in update mode, i.e. "r+b" or "w+b", reading data is much faster. Be aware that as
long as the file is still open, inplace operations on the returned tensor will reflect back to the file.
Args:
file (IO): Open binary file.
dtype (torch.dtype): Data type of the underlying data as well as of the returned tensor.
byte_order (str): Byte order of the data. Can be "little" or "big" endian.
count (int): Number of values of the returned tensor. If ``-1`` (default), will read the complete file.
"""
byte_order = "<" if byte_order == "little" else ">"
char = "f" if dtype.is_floating_point else ("i" if dtype.is_signed else "u")
item_size = (torch.finfo if dtype.is_floating_point else torch.iinfo)(dtype).bits // 8
np_dtype = byte_order + char + str(item_size)
buffer: Union[memoryview, bytearray]
if platform.system() != "Windows":
# PyTorch does not support tensors with underlying read-only memory. In case
# - the file has a .fileno(),
# - the file was opened for updating, i.e. 'r+b' or 'w+b',
# - the file is seekable
# we can avoid copying the data for performance. Otherwise we fall back to simply .read() the data and copy it
# to a mutable location afterwards.
try:
buffer = memoryview(mmap.mmap(file.fileno(), 0))[file.tell() :]
# Reading from the memoryview does not advance the file cursor, so we have to do it manually.
file.seek(*(0, io.SEEK_END) if count == -1 else (count * item_size, io.SEEK_CUR))
except (AttributeError, PermissionError, io.UnsupportedOperation):
buffer = _read_mutable_buffer_fallback(file, count, item_size)
else:
# On Windows, just trying to call mmap.mmap() on a file that does not support it may corrupt the internal state
# so no data can be read afterwards. Thus, we simply ignore the possible speed-up.
buffer = _read_mutable_buffer_fallback(file, count, item_size)
# We cannot use torch.frombuffer() directly, since it only supports the native byte order of the system. Thus, we
# read the data with np.frombuffer() with the correct byte order and convert it to the native one with the
# successive .astype() call.
return torch.from_numpy(np.frombuffer(buffer, dtype=np_dtype, count=count).astype(np_dtype[1:], copy=False))
class ReadOnlyTensorBuffer:
def __init__(self, tensor: torch.Tensor) -> None:
self._memory = memoryview(tensor.numpy()) # type: ignore[arg-type]
self._cursor: int = 0
def tell(self) -> int:
return self._cursor
def seek(self, offset: int, whence: int = io.SEEK_SET) -> int:
if whence == io.SEEK_SET:
self._cursor = offset
elif whence == io.SEEK_CUR:
self._cursor += offset
elif whence == io.SEEK_END:
self._cursor = len(self._memory) + offset
else:
raise ValueError(
f"'whence' should be ``{io.SEEK_SET}``, ``{io.SEEK_CUR}``, or ``{io.SEEK_END}``, "
f"but got {repr(whence)} instead"
)
return self.tell()
def read(self, size: int = -1) -> bytes:
cursor = self.tell()
offset, whence = (0, io.SEEK_END) if size == -1 else (size, io.SEEK_CUR)
return self._memory[slice(cursor, self.seek(offset, whence))].tobytes()
```
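A small sketch of ``fromfile`` above, reading big-endian ``float32`` values from an in-memory buffer; ``io.BytesIO`` has no usable ``fileno()``, so this exercises the plain ``read()`` fallback path (the module is private, shown only to illustrate the signature).

```py
import io
import struct
import torch
from torchvision.prototype.utils._internal import fromfile

payload = struct.pack(">3f", 1.0, 2.0, 3.0)  # three big-endian float32 values
tensor = fromfile(io.BytesIO(payload), dtype=torch.float32, byte_order="big")
print(tensor)  # tensor([1., 2., 3.])
```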
|
==========================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.05 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\__init__.py
ENCODING: utf-8
```py
from .transforms import *
from .autoaugment import *
```
|
=================================================================================================================================
SOURCE CODE FILE: _functional_pil.py
LINES: 1
SIZE: 12.17 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\_functional_pil.py
ENCODING: utf-8
```py
import numbers
from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from PIL import Image, ImageEnhance, ImageOps
try:
import accimage
except ImportError:
accimage = None
@torch.jit.unused
def _is_pil_image(img: Any) -> bool:
if accimage is not None:
return isinstance(img, (Image.Image, accimage.Image))
else:
return isinstance(img, Image.Image)
@torch.jit.unused
def get_dimensions(img: Any) -> List[int]:
if _is_pil_image(img):
if hasattr(img, "getbands"):
channels = len(img.getbands())
else:
channels = img.channels
width, height = img.size
return [channels, height, width]
raise TypeError(f"Unexpected type {type(img)}")
@torch.jit.unused
def get_image_size(img: Any) -> List[int]:
if _is_pil_image(img):
return list(img.size)
raise TypeError(f"Unexpected type {type(img)}")
@torch.jit.unused
def get_image_num_channels(img: Any) -> int:
if _is_pil_image(img):
if hasattr(img, "getbands"):
return len(img.getbands())
else:
return img.channels
raise TypeError(f"Unexpected type {type(img)}")
@torch.jit.unused
def hflip(img: Image.Image) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return img.transpose(Image.FLIP_LEFT_RIGHT)
@torch.jit.unused
def vflip(img: Image.Image) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return img.transpose(Image.FLIP_TOP_BOTTOM)
@torch.jit.unused
def adjust_brightness(img: Image.Image, brightness_factor: float) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
enhancer = ImageEnhance.Brightness(img)
img = enhancer.enhance(brightness_factor)
return img
@torch.jit.unused
def adjust_contrast(img: Image.Image, contrast_factor: float) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
enhancer = ImageEnhance.Contrast(img)
img = enhancer.enhance(contrast_factor)
return img
@torch.jit.unused
def adjust_saturation(img: Image.Image, saturation_factor: float) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
enhancer = ImageEnhance.Color(img)
img = enhancer.enhance(saturation_factor)
return img
@torch.jit.unused
def adjust_hue(img: Image.Image, hue_factor: float) -> Image.Image:
if not (-0.5 <= hue_factor <= 0.5):
raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].")
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
input_mode = img.mode
if input_mode in {"L", "1", "I", "F"}:
return img
h, s, v = img.convert("HSV").split()
np_h = np.array(h, dtype=np.uint8)
# This will over/underflow, as desired
np_h += np.int32(hue_factor * 255).astype(np.uint8)
h = Image.fromarray(np_h, "L")
img = Image.merge("HSV", (h, s, v)).convert(input_mode)
return img
@torch.jit.unused
def adjust_gamma(
img: Image.Image,
gamma: float,
gain: float = 1.0,
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
if gamma < 0:
raise ValueError("Gamma should be a non-negative real number")
input_mode = img.mode
img = img.convert("RGB")
gamma_map = [int((255 + 1 - 1e-3) * gain * pow(ele / 255.0, gamma)) for ele in range(256)] * 3
img = img.point(gamma_map) # use PIL's point-function to accelerate this part
img = img.convert(input_mode)
return img
@torch.jit.unused
def pad(
img: Image.Image,
padding: Union[int, List[int], Tuple[int, ...]],
fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant",
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate fill arg")
if not isinstance(padding_mode, str):
raise TypeError("Got inappropriate padding_mode arg")
if isinstance(padding, list):
padding = tuple(padding)
if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]:
raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")
if isinstance(padding, tuple) and len(padding) == 1:
# Compatibility with `functional_tensor.pad`
padding = padding[0]
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
if padding_mode == "constant":
opts = _parse_fill(fill, img, name="fill")
if img.mode == "P":
palette = img.getpalette()
image = ImageOps.expand(img, border=padding, **opts)
image.putpalette(palette)
return image
return ImageOps.expand(img, border=padding, **opts)
else:
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
if isinstance(padding, tuple) and len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
if isinstance(padding, tuple) and len(padding) == 4:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
p = [pad_left, pad_top, pad_right, pad_bottom]
cropping = -np.minimum(p, 0)
if cropping.any():
crop_left, crop_top, crop_right, crop_bottom = cropping
img = img.crop((crop_left, crop_top, img.width - crop_right, img.height - crop_bottom))
pad_left, pad_top, pad_right, pad_bottom = np.maximum(p, 0)
if img.mode == "P":
palette = img.getpalette()
img = np.asarray(img)
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), mode=padding_mode)
img = Image.fromarray(img)
img.putpalette(palette)
return img
img = np.asarray(img)
# RGB image
if len(img.shape) == 3:
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
# Grayscale image
if len(img.shape) == 2:
img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
return Image.fromarray(img)
@torch.jit.unused
def crop(
img: Image.Image,
top: int,
left: int,
height: int,
width: int,
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return img.crop((left, top, left + width, top + height))
@torch.jit.unused
def resize(
img: Image.Image,
size: Union[List[int], int],
interpolation: int = Image.BILINEAR,
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
if not (isinstance(size, list) and len(size) == 2):
raise TypeError(f"Got inappropriate size arg: {size}")
return img.resize(tuple(size[::-1]), interpolation)
@torch.jit.unused
def _parse_fill(
fill: Optional[Union[float, List[float], Tuple[float, ...]]],
img: Image.Image,
name: str = "fillcolor",
) -> Dict[str, Optional[Union[float, List[float], Tuple[float, ...]]]]:
# Process fill color for affine transforms
num_channels = get_image_num_channels(img)
if fill is None:
fill = 0
if isinstance(fill, (int, float)) and num_channels > 1:
fill = tuple([fill] * num_channels)
if isinstance(fill, (list, tuple)):
if len(fill) == 1:
fill = fill * num_channels
elif len(fill) != num_channels:
msg = "The number of elements in 'fill' does not match the number of channels of the image ({} != {})"
raise ValueError(msg.format(len(fill), num_channels))
fill = tuple(fill) # type: ignore[arg-type]
if img.mode != "F":
if isinstance(fill, (list, tuple)):
fill = tuple(int(x) for x in fill)
else:
fill = int(fill)
return {name: fill}
@torch.jit.unused
def affine(
img: Image.Image,
matrix: List[float],
interpolation: int = Image.NEAREST,
fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
output_size = img.size
opts = _parse_fill(fill, img)
return img.transform(output_size, Image.AFFINE, matrix, interpolation, **opts)
@torch.jit.unused
def rotate(
img: Image.Image,
angle: float,
interpolation: int = Image.NEAREST,
expand: bool = False,
center: Optional[Tuple[int, int]] = None,
fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
opts = _parse_fill(fill, img)
return img.rotate(angle, interpolation, expand, center, **opts)
@torch.jit.unused
def perspective(
img: Image.Image,
perspective_coeffs: List[float],
interpolation: int = Image.BICUBIC,
fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
opts = _parse_fill(fill, img)
return img.transform(img.size, Image.PERSPECTIVE, perspective_coeffs, interpolation, **opts)
@torch.jit.unused
def to_grayscale(img: Image.Image, num_output_channels: int) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
if num_output_channels == 1:
img = img.convert("L")
elif num_output_channels == 3:
img = img.convert("L")
np_img = np.array(img, dtype=np.uint8)
np_img = np.dstack([np_img, np_img, np_img])
img = Image.fromarray(np_img, "RGB")
else:
raise ValueError("num_output_channels should be either 1 or 3")
return img
@torch.jit.unused
def invert(img: Image.Image) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return ImageOps.invert(img)
@torch.jit.unused
def posterize(img: Image.Image, bits: int) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return ImageOps.posterize(img, bits)
@torch.jit.unused
def solarize(img: Image.Image, threshold: int) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return ImageOps.solarize(img, threshold)
@torch.jit.unused
def adjust_sharpness(img: Image.Image, sharpness_factor: float) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
enhancer = ImageEnhance.Sharpness(img)
img = enhancer.enhance(sharpness_factor)
return img
@torch.jit.unused
def autocontrast(img: Image.Image) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return ImageOps.autocontrast(img)
@torch.jit.unused
def equalize(img: Image.Image) -> Image.Image:
if not _is_pil_image(img):
raise TypeError(f"img should be PIL Image. Got {type(img)}")
return ImageOps.equalize(img)
```
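A sketch exercising a few of the private PIL helpers above (internal API, shown only to illustrate the call signatures; prefer the public ``torchvision.transforms.functional`` entry points in real code).

```py
from PIL import Image
from torchvision.transforms import _functional_pil as F_pil

img = Image.new("RGB", (64, 48), color=(128, 64, 32))
print(F_pil.get_dimensions(img))  # [3, 48, 64] -> (channels, height, width)

# A 2-element padding pads left/right by 4 px and top/bottom by 8 px.
padded = F_pil.pad(img, padding=[4, 8], fill=0, padding_mode="constant")
print(padded.size)  # (72, 64)

rotated = F_pil.rotate(img, angle=15.0, expand=True, fill=(0, 0, 0))
```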
|
====================================================================================================================================
SOURCE CODE FILE: _functional_tensor.py
LINES: 1
SIZE: 34.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\_functional_tensor.py
ENCODING: utf-8
```py
import warnings
from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor
from torch.nn.functional import conv2d, grid_sample, interpolate, pad as torch_pad
def _is_tensor_a_torch_image(x: Tensor) -> bool:
return x.ndim >= 2
def _assert_image_tensor(img: Tensor) -> None:
if not _is_tensor_a_torch_image(img):
raise TypeError("Tensor is not a torch image.")
def get_dimensions(img: Tensor) -> List[int]:
_assert_image_tensor(img)
channels = 1 if img.ndim == 2 else img.shape[-3]
height, width = img.shape[-2:]
return [channels, height, width]
def get_image_size(img: Tensor) -> List[int]:
# Returns (w, h) of tensor image
_assert_image_tensor(img)
return [img.shape[-1], img.shape[-2]]
def get_image_num_channels(img: Tensor) -> int:
_assert_image_tensor(img)
if img.ndim == 2:
return 1
elif img.ndim > 2:
return img.shape[-3]
raise TypeError(f"Input ndim should be 2 or more. Got {img.ndim}")
def _max_value(dtype: torch.dtype) -> int:
if dtype == torch.uint8:
return 255
elif dtype == torch.int8:
return 127
elif dtype == torch.int16:
return 32767
elif dtype == torch.uint16:
return 65535
elif dtype == torch.int32:
return 2147483647
elif dtype == torch.int64:
return 9223372036854775807
else:
# This is only here for completeness. This value is implicitly assumed in a lot of places so changing it is not
# easy.
return 1
def _assert_channels(img: Tensor, permitted: List[int]) -> None:
c = get_dimensions(img)[0]
if c not in permitted:
raise TypeError(f"Input image tensor permitted channel values are {permitted}, but found {c}")
def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
if image.dtype == dtype:
return image
if image.is_floating_point():
# TODO: replace with dtype.is_floating_point when torchscript supports it
if torch.tensor(0, dtype=dtype).is_floating_point():
return image.to(dtype)
# float to int
if (image.dtype == torch.float32 and dtype in (torch.int32, torch.int64)) or (
image.dtype == torch.float64 and dtype == torch.int64
):
msg = f"The cast from {image.dtype} to {dtype} cannot be performed safely."
raise RuntimeError(msg)
# https://github.com/pytorch/vision/pull/2078#issuecomment-612045321
# For data in the range 0-1, (float * 255).to(uint) is only 255
# when float is exactly 1.0.
# `max + 1 - epsilon` provides more evenly distributed mapping of
# ranges of floats to ints.
eps = 1e-3
max_val = float(_max_value(dtype))
result = image.mul(max_val + 1.0 - eps)
return result.to(dtype)
else:
input_max = float(_max_value(image.dtype))
# int to float
# TODO: replace with dtype.is_floating_point when torchscript supports it
if torch.tensor(0, dtype=dtype).is_floating_point():
image = image.to(dtype)
return image / input_max
output_max = float(_max_value(dtype))
# int to int
if input_max > output_max:
# factor should be forced to int for torch jit script
# otherwise factor is a float and image // factor can produce different results
factor = int((input_max + 1) // (output_max + 1))
image = torch.div(image, factor, rounding_mode="floor")
return image.to(dtype)
else:
# factor should be forced to int for torch jit script
# otherwise factor is a float and image * factor can produce different results
factor = int((output_max + 1) // (input_max + 1))
image = image.to(dtype)
return image * factor
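# Worked example of the float -> int mapping above: for dtype=torch.uint8, max_val is 255 and
# eps is 1e-3, so a float x in [0, 1] is multiplied by 255.999 and truncated. x = 1.0 maps to 255
# (instead of overflowing past it) while the 256 buckets stay evenly spread, e.g.
# convert_image_dtype(torch.tensor([0.0, 0.5, 1.0]), torch.uint8) -> tensor([0, 127, 255]).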
def vflip(img: Tensor) -> Tensor:
_assert_image_tensor(img)
return img.flip(-2)
def hflip(img: Tensor) -> Tensor:
_assert_image_tensor(img)
return img.flip(-1)
def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
_assert_image_tensor(img)
_, h, w = get_dimensions(img)
right = left + width
bottom = top + height
if left < 0 or top < 0 or right > w or bottom > h:
padding_ltrb = [
max(-left + min(0, right), 0),
max(-top + min(0, bottom), 0),
max(right - max(w, left), 0),
max(bottom - max(h, top), 0),
]
return pad(img[..., max(top, 0) : bottom, max(left, 0) : right], padding_ltrb, fill=0)
return img[..., top:bottom, left:right]
def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
if img.ndim < 3:
raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}")
_assert_channels(img, [1, 3])
if num_output_channels not in (1, 3):
raise ValueError("num_output_channels should be either 1 or 3")
if img.shape[-3] == 3:
r, g, b = img.unbind(dim=-3)
# This implementation closely follows the TF one:
# https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/ops/image_ops_impl.py#L2105-L2138
l_img = (0.2989 * r + 0.587 * g + 0.114 * b).to(img.dtype)
l_img = l_img.unsqueeze(dim=-3)
else:
l_img = img.clone()
if num_output_channels == 3:
return l_img.expand(img.shape)
return l_img
def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
if brightness_factor < 0:
raise ValueError(f"brightness_factor ({brightness_factor}) is not non-negative.")
_assert_image_tensor(img)
_assert_channels(img, [1, 3])
return _blend(img, torch.zeros_like(img), brightness_factor)
def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
if contrast_factor < 0:
raise ValueError(f"contrast_factor ({contrast_factor}) is not non-negative.")
_assert_image_tensor(img)
_assert_channels(img, [3, 1])
c = get_dimensions(img)[0]
dtype = img.dtype if torch.is_floating_point(img) else torch.float32
if c == 3:
mean = torch.mean(rgb_to_grayscale(img).to(dtype), dim=(-3, -2, -1), keepdim=True)
else:
mean = torch.mean(img.to(dtype), dim=(-3, -2, -1), keepdim=True)
return _blend(img, mean, contrast_factor)
def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
if not (-0.5 <= hue_factor <= 0.5):
raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].")
if not (isinstance(img, torch.Tensor)):
raise TypeError("Input img should be Tensor image")
_assert_image_tensor(img)
_assert_channels(img, [1, 3])
if get_dimensions(img)[0] == 1: # Match PIL behaviour
return img
orig_dtype = img.dtype
img = convert_image_dtype(img, torch.float32)
img = _rgb2hsv(img)
h, s, v = img.unbind(dim=-3)
h = (h + hue_factor) % 1.0
img = torch.stack((h, s, v), dim=-3)
img_hue_adj = _hsv2rgb(img)
return convert_image_dtype(img_hue_adj, orig_dtype)
def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
if saturation_factor < 0:
raise ValueError(f"saturation_factor ({saturation_factor}) is not non-negative.")
_assert_image_tensor(img)
_assert_channels(img, [1, 3])
if get_dimensions(img)[0] == 1: # Match PIL behaviour
return img
return _blend(img, rgb_to_grayscale(img), saturation_factor)
def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
if not isinstance(img, torch.Tensor):
raise TypeError("Input img should be a Tensor.")
_assert_channels(img, [1, 3])
if gamma < 0:
raise ValueError("Gamma should be a non-negative real number")
result = img
dtype = img.dtype
if not torch.is_floating_point(img):
result = convert_image_dtype(result, torch.float32)
result = (gain * result**gamma).clamp(0, 1)
result = convert_image_dtype(result, dtype)
return result
def _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor:
ratio = float(ratio)
bound = _max_value(img1.dtype)
return (ratio * img1 + (1.0 - ratio) * img2).clamp(0, bound).to(img1.dtype)
def _rgb2hsv(img: Tensor) -> Tensor:
r, g, b = img.unbind(dim=-3)
# Implementation is based on https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/
# src/libImaging/Convert.c#L330
maxc = torch.max(img, dim=-3).values
minc = torch.min(img, dim=-3).values
# The algorithm erases S and H channel where `maxc = minc`. This avoids NaN
# from happening in the results, because
# + S channel has division by `maxc`, which is zero only if `maxc = minc`
# + H channel has division by `(maxc - minc)`.
#
# Instead of overwriting NaN afterwards, we just prevent it from occurring, so
# we don't need to deal with it in case we save the NaN in a buffer in
# backprop, if it is ever supported, but it doesn't hurt to do so.
eqc = maxc == minc
cr = maxc - minc
# Since `eqc => cr = 0`, replacing the denominator with 1 when `eqc` holds is fine.
ones = torch.ones_like(maxc)
s = cr / torch.where(eqc, ones, maxc)
# Note that `eqc => maxc = minc = r = g = b`. So the following calculation
# of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it
# would not matter what values `rc`, `gc`, and `bc` have here, and thus
# replacing denominator with 1 when `eqc` is fine.
cr_divisor = torch.where(eqc, ones, cr)
rc = (maxc - r) / cr_divisor
gc = (maxc - g) / cr_divisor
bc = (maxc - b) / cr_divisor
hr = (maxc == r) * (bc - gc)
hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)
hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)
h = hr + hg + hb
h = torch.fmod((h / 6.0 + 1.0), 1.0)
return torch.stack((h, s, maxc), dim=-3)
def _hsv2rgb(img: Tensor) -> Tensor:
h, s, v = img.unbind(dim=-3)
i = torch.floor(h * 6.0)
f = (h * 6.0) - i
i = i.to(dtype=torch.int32)
p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)
q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)
t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)
i = i % 6
mask = i.unsqueeze(dim=-3) == torch.arange(6, device=i.device).view(-1, 1, 1)
a1 = torch.stack((v, q, p, p, t, v), dim=-3)
a2 = torch.stack((t, v, v, q, p, p), dim=-3)
a3 = torch.stack((p, p, t, v, v, q), dim=-3)
a4 = torch.stack((a1, a2, a3), dim=-4)
return torch.einsum("...ijk, ...xijk -> ...xjk", mask.to(dtype=img.dtype), a4)
def _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor:
# padding is left, right, top, bottom
# crop if needed
if padding[0] < 0 or padding[1] < 0 or padding[2] < 0 or padding[3] < 0:
neg_min_padding = [-min(x, 0) for x in padding]
crop_left, crop_right, crop_top, crop_bottom = neg_min_padding
img = img[..., crop_top : img.shape[-2] - crop_bottom, crop_left : img.shape[-1] - crop_right]
padding = [max(x, 0) for x in padding]
in_sizes = img.size()
_x_indices = [i for i in range(in_sizes[-1])] # [0, 1, 2, 3, ...]
left_indices = [i for i in range(padding[0] - 1, -1, -1)] # e.g. [3, 2, 1, 0]
right_indices = [-(i + 1) for i in range(padding[1])] # e.g. [-1, -2, -3]
x_indices = torch.tensor(left_indices + _x_indices + right_indices, device=img.device)
_y_indices = [i for i in range(in_sizes[-2])]
top_indices = [i for i in range(padding[2] - 1, -1, -1)]
bottom_indices = [-(i + 1) for i in range(padding[3])]
y_indices = torch.tensor(top_indices + _y_indices + bottom_indices, device=img.device)
ndim = img.ndim
if ndim == 3:
return img[:, y_indices[:, None], x_indices[None, :]]
elif ndim == 4:
return img[:, :, y_indices[:, None], x_indices[None, :]]
else:
raise RuntimeError("Symmetric padding of N-D tensors are not supported yet")
def _parse_pad_padding(padding: Union[int, List[int]]) -> List[int]:
if isinstance(padding, int):
if torch.jit.is_scripting():
# This may be unreachable
raise ValueError("padding can't be an int while torchscripting, set it as a list [value, ]")
pad_left = pad_right = pad_top = pad_bottom = padding
elif len(padding) == 1:
pad_left = pad_right = pad_top = pad_bottom = padding[0]
elif len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
else:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
return [pad_left, pad_right, pad_top, pad_bottom]
def pad(
img: Tensor, padding: Union[int, List[int]], fill: Optional[Union[int, float]] = 0, padding_mode: str = "constant"
) -> Tensor:
_assert_image_tensor(img)
if fill is None:
fill = 0
if not isinstance(padding, (int, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if not isinstance(fill, (int, float)):
raise TypeError("Got inappropriate fill arg")
if not isinstance(padding_mode, str):
raise TypeError("Got inappropriate padding_mode arg")
if isinstance(padding, tuple):
padding = list(padding)
if isinstance(padding, list):
# TODO: Jit is failing on loading this op when scripted and saved
# https://github.com/pytorch/pytorch/issues/81100
if len(padding) not in [1, 2, 4]:
raise ValueError(
f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple"
)
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
p = _parse_pad_padding(padding)
if padding_mode == "edge":
# remap padding_mode str
padding_mode = "replicate"
elif padding_mode == "symmetric":
# route to another implementation
return _pad_symmetric(img, p)
need_squeeze = False
if img.ndim < 4:
img = img.unsqueeze(dim=0)
need_squeeze = True
out_dtype = img.dtype
need_cast = False
if (padding_mode != "constant") and img.dtype not in (torch.float32, torch.float64):
# Here we temporarily cast input tensor to float
# until pytorch issue is resolved :
# https://github.com/pytorch/pytorch/issues/40763
need_cast = True
img = img.to(torch.float32)
if padding_mode in ("reflect", "replicate"):
img = torch_pad(img, p, mode=padding_mode)
else:
img = torch_pad(img, p, mode=padding_mode, value=float(fill))
if need_squeeze:
img = img.squeeze(dim=0)
if need_cast:
img = img.to(out_dtype)
return img
def resize(
img: Tensor,
size: List[int],
interpolation: str = "bilinear",
antialias: Optional[bool] = True,
) -> Tensor:
_assert_image_tensor(img)
if isinstance(size, tuple):
size = list(size)
if antialias is None:
antialias = False
if antialias and interpolation not in ["bilinear", "bicubic"]:
# We manually set it to False to avoid an error downstream in interpolate()
# This behaviour is documented: the parameter is irrelevant for modes
# that are not bilinear or bicubic. We used to raise an error here, but
# now we don't as True is the default.
antialias = False
img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [torch.float32, torch.float64])
# Define align_corners to avoid warnings
align_corners = False if interpolation in ["bilinear", "bicubic"] else None
img = interpolate(img, size=size, mode=interpolation, align_corners=align_corners, antialias=antialias)
if interpolation == "bicubic" and out_dtype == torch.uint8:
img = img.clamp(min=0, max=255)
img = _cast_squeeze_out(img, need_cast=need_cast, need_squeeze=need_squeeze, out_dtype=out_dtype)
return img
def _assert_grid_transform_inputs(
img: Tensor,
matrix: Optional[List[float]],
interpolation: str,
fill: Optional[Union[int, float, List[float]]],
supported_interpolation_modes: List[str],
coeffs: Optional[List[float]] = None,
) -> None:
if not (isinstance(img, torch.Tensor)):
raise TypeError("Input img should be Tensor")
_assert_image_tensor(img)
if matrix is not None and not isinstance(matrix, list):
raise TypeError("Argument matrix should be a list")
if matrix is not None and len(matrix) != 6:
raise ValueError("Argument matrix should have 6 float values")
if coeffs is not None and len(coeffs) != 8:
raise ValueError("Argument coeffs should have 8 float values")
if fill is not None and not isinstance(fill, (int, float, tuple, list)):
warnings.warn("Argument fill should be either int, float, tuple or list")
# Check fill
num_channels = get_dimensions(img)[0]
if fill is not None and isinstance(fill, (tuple, list)) and len(fill) > 1 and len(fill) != num_channels:
msg = (
"The number of elements in 'fill' cannot broadcast to match the number of "
"channels of the image ({} != {})"
)
raise ValueError(msg.format(len(fill), num_channels))
if interpolation not in supported_interpolation_modes:
raise ValueError(f"Interpolation mode '{interpolation}' is unsupported with Tensor input")
def _cast_squeeze_in(img: Tensor, req_dtypes: List[torch.dtype]) -> Tuple[Tensor, bool, bool, torch.dtype]:
need_squeeze = False
# make image NCHW
if img.ndim < 4:
img = img.unsqueeze(dim=0)
need_squeeze = True
out_dtype = img.dtype
need_cast = False
if out_dtype not in req_dtypes:
need_cast = True
req_dtype = req_dtypes[0]
img = img.to(req_dtype)
return img, need_cast, need_squeeze, out_dtype
def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype) -> Tensor:
if need_squeeze:
img = img.squeeze(dim=0)
if need_cast:
if out_dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
# it is better to round before cast
img = torch.round(img)
img = img.to(out_dtype)
return img
def _apply_grid_transform(
img: Tensor, grid: Tensor, mode: str, fill: Optional[Union[int, float, List[float]]]
) -> Tensor:
img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [grid.dtype])
if img.shape[0] > 1:
# Apply same grid to a batch of images
grid = grid.expand(img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3])
# Append a dummy mask for customized fill colors, should be faster than grid_sample() twice
if fill is not None:
mask = torch.ones((img.shape[0], 1, img.shape[2], img.shape[3]), dtype=img.dtype, device=img.device)
img = torch.cat((img, mask), dim=1)
img = grid_sample(img, grid, mode=mode, padding_mode="zeros", align_corners=False)
# Fill with required color
if fill is not None:
mask = img[:, -1:, :, :] # N * 1 * H * W
img = img[:, :-1, :, :] # N * C * H * W
mask = mask.expand_as(img)
fill_list, len_fill = (fill, len(fill)) if isinstance(fill, (tuple, list)) else ([float(fill)], 1)
fill_img = torch.tensor(fill_list, dtype=img.dtype, device=img.device).view(1, len_fill, 1, 1).expand_as(img)
if mode == "nearest":
mask = mask < 0.5
img[mask] = fill_img[mask]
else: # 'bilinear'
img = img * mask + (1.0 - mask) * fill_img
img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
return img
def _gen_affine_grid(
theta: Tensor,
w: int,
h: int,
ow: int,
oh: int,
) -> Tensor:
# https://github.com/pytorch/pytorch/blob/74b65c32be68b15dc7c9e8bb62459efbfbde33d8/aten/src/ATen/native/
# AffineGridGenerator.cpp#L18
# Difference with AffineGridGenerator is that:
# 1) we normalize grid values after applying theta
# 2) we can normalize by other image size, such that it covers "extend" option like in PIL.Image.rotate
d = 0.5
base_grid = torch.empty(1, oh, ow, 3, dtype=theta.dtype, device=theta.device)
x_grid = torch.linspace(-ow * 0.5 + d, ow * 0.5 + d - 1, steps=ow, device=theta.device)
base_grid[..., 0].copy_(x_grid)
y_grid = torch.linspace(-oh * 0.5 + d, oh * 0.5 + d - 1, steps=oh, device=theta.device).unsqueeze_(-1)
base_grid[..., 1].copy_(y_grid)
base_grid[..., 2].fill_(1)
rescaled_theta = theta.transpose(1, 2) / torch.tensor([0.5 * w, 0.5 * h], dtype=theta.dtype, device=theta.device)
output_grid = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta)
return output_grid.view(1, oh, ow, 2)
def affine(
img: Tensor,
matrix: List[float],
interpolation: str = "nearest",
fill: Optional[Union[int, float, List[float]]] = None,
) -> Tensor:
_assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"])
dtype = img.dtype if torch.is_floating_point(img) else torch.float32
theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3)
shape = img.shape
# grid will be generated on the same device as theta and img
grid = _gen_affine_grid(theta, w=shape[-1], h=shape[-2], ow=shape[-1], oh=shape[-2])
return _apply_grid_transform(img, grid, interpolation, fill=fill)
def _compute_affine_output_size(matrix: List[float], w: int, h: int) -> Tuple[int, int]:
# Inspired of PIL implementation:
# https://github.com/python-pillow/Pillow/blob/11de3318867e4398057373ee9f12dcb33db7335c/src/PIL/Image.py#L2054
# pts are Top-Left, Top-Right, Bottom-Left, Bottom-Right points.
# Points are shifted due to affine matrix torch convention about
# the center point. Center is (0, 0) for image center pivot point (w * 0.5, h * 0.5)
pts = torch.tensor(
[
[-0.5 * w, -0.5 * h, 1.0],
[-0.5 * w, 0.5 * h, 1.0],
[0.5 * w, 0.5 * h, 1.0],
[0.5 * w, -0.5 * h, 1.0],
]
)
theta = torch.tensor(matrix, dtype=torch.float).view(2, 3)
new_pts = torch.matmul(pts, theta.T)
min_vals, _ = new_pts.min(dim=0)
max_vals, _ = new_pts.max(dim=0)
# shift points to [0, w] and [0, h] interval to match PIL results
min_vals += torch.tensor((w * 0.5, h * 0.5))
max_vals += torch.tensor((w * 0.5, h * 0.5))
# Truncate precision to 1e-4 to avoid ceil of Xe-15 to 1.0
tol = 1e-4
cmax = torch.ceil((max_vals / tol).trunc_() * tol)
cmin = torch.floor((min_vals / tol).trunc_() * tol)
size = cmax - cmin
return int(size[0]), int(size[1]) # w, h
def rotate(
img: Tensor,
matrix: List[float],
interpolation: str = "nearest",
expand: bool = False,
fill: Optional[Union[int, float, List[float]]] = None,
) -> Tensor:
_assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"])
w, h = img.shape[-1], img.shape[-2]
ow, oh = _compute_affine_output_size(matrix, w, h) if expand else (w, h)
dtype = img.dtype if torch.is_floating_point(img) else torch.float32
theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3)
# grid will be generated on the same device as theta and img
grid = _gen_affine_grid(theta, w=w, h=h, ow=ow, oh=oh)
return _apply_grid_transform(img, grid, interpolation, fill=fill)
def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device) -> Tensor:
# https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/
# src/libImaging/Geometry.c#L394
#
# x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
# y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1)
#
theta1 = torch.tensor(
[[[coeffs[0], coeffs[1], coeffs[2]], [coeffs[3], coeffs[4], coeffs[5]]]], dtype=dtype, device=device
)
theta2 = torch.tensor([[[coeffs[6], coeffs[7], 1.0], [coeffs[6], coeffs[7], 1.0]]], dtype=dtype, device=device)
d = 0.5
base_grid = torch.empty(1, oh, ow, 3, dtype=dtype, device=device)
x_grid = torch.linspace(d, ow * 1.0 + d - 1.0, steps=ow, device=device)
base_grid[..., 0].copy_(x_grid)
y_grid = torch.linspace(d, oh * 1.0 + d - 1.0, steps=oh, device=device).unsqueeze_(-1)
base_grid[..., 1].copy_(y_grid)
base_grid[..., 2].fill_(1)
rescaled_theta1 = theta1.transpose(1, 2) / torch.tensor([0.5 * ow, 0.5 * oh], dtype=dtype, device=device)
output_grid1 = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta1)
output_grid2 = base_grid.view(1, oh * ow, 3).bmm(theta2.transpose(1, 2))
output_grid = output_grid1 / output_grid2 - 1.0
return output_grid.view(1, oh, ow, 2)
def perspective(
img: Tensor,
perspective_coeffs: List[float],
interpolation: str = "bilinear",
fill: Optional[Union[int, float, List[float]]] = None,
) -> Tensor:
if not (isinstance(img, torch.Tensor)):
raise TypeError("Input img should be Tensor.")
_assert_image_tensor(img)
_assert_grid_transform_inputs(
img,
matrix=None,
interpolation=interpolation,
fill=fill,
supported_interpolation_modes=["nearest", "bilinear"],
coeffs=perspective_coeffs,
)
ow, oh = img.shape[-1], img.shape[-2]
dtype = img.dtype if torch.is_floating_point(img) else torch.float32
grid = _perspective_grid(perspective_coeffs, ow=ow, oh=oh, dtype=dtype, device=img.device)
return _apply_grid_transform(img, grid, interpolation, fill=fill)
def _get_gaussian_kernel1d(kernel_size: int, sigma: float, dtype: torch.dtype, device: torch.device) -> Tensor:
ksize_half = (kernel_size - 1) * 0.5
x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, dtype=dtype, device=device)
pdf = torch.exp(-0.5 * (x / sigma).pow(2))
kernel1d = pdf / pdf.sum()
return kernel1d
def _get_gaussian_kernel2d(
kernel_size: List[int], sigma: List[float], dtype: torch.dtype, device: torch.device
) -> Tensor:
kernel1d_x = _get_gaussian_kernel1d(kernel_size[0], sigma[0], dtype, device)
kernel1d_y = _get_gaussian_kernel1d(kernel_size[1], sigma[1], dtype, device)
kernel2d = torch.mm(kernel1d_y[:, None], kernel1d_x[None, :])
return kernel2d
def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Tensor:
if not (isinstance(img, torch.Tensor)):
raise TypeError(f"img should be Tensor. Got {type(img)}")
_assert_image_tensor(img)
dtype = img.dtype if torch.is_floating_point(img) else torch.float32
kernel = _get_gaussian_kernel2d(kernel_size, sigma, dtype=dtype, device=img.device)
kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1])
img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype])
# padding = (left, right, top, bottom)
padding = [kernel_size[0] // 2, kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[1] // 2]
img = torch_pad(img, padding, mode="reflect")
img = conv2d(img, kernel, groups=img.shape[-3])
img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
return img
def invert(img: Tensor) -> Tensor:
_assert_image_tensor(img)
if img.ndim < 3:
raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}")
_assert_channels(img, [1, 3])
return _max_value(img.dtype) - img
def posterize(img: Tensor, bits: int) -> Tensor:
_assert_image_tensor(img)
if img.ndim < 3:
raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}")
if img.dtype != torch.uint8:
raise TypeError(f"Only torch.uint8 image tensors are supported, but found {img.dtype}")
_assert_channels(img, [1, 3])
mask = -int(2 ** (8 - bits)) # JIT-friendly for: ~(2 ** (8 - bits) - 1)
return img & mask
def solarize(img: Tensor, threshold: float) -> Tensor:
_assert_image_tensor(img)
if img.ndim < 3:
raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}")
_assert_channels(img, [1, 3])
if threshold > _max_value(img.dtype):
raise TypeError("Threshold should be less than bound of img.")
inverted_img = invert(img)
return torch.where(img >= threshold, inverted_img, img)
def _blurred_degenerate_image(img: Tensor) -> Tensor:
dtype = img.dtype if torch.is_floating_point(img) else torch.float32
kernel = torch.ones((3, 3), dtype=dtype, device=img.device)
kernel[1, 1] = 5.0
kernel /= kernel.sum()
kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1])
result_tmp, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype])
result_tmp = conv2d(result_tmp, kernel, groups=result_tmp.shape[-3])
result_tmp = _cast_squeeze_out(result_tmp, need_cast, need_squeeze, out_dtype)
result = img.clone()
result[..., 1:-1, 1:-1] = result_tmp
return result
def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
if sharpness_factor < 0:
raise ValueError(f"sharpness_factor ({sharpness_factor}) is not non-negative.")
_assert_image_tensor(img)
_assert_channels(img, [1, 3])
if img.size(-1) <= 2 or img.size(-2) <= 2:
return img
return _blend(img, _blurred_degenerate_image(img), sharpness_factor)
def autocontrast(img: Tensor) -> Tensor:
_assert_image_tensor(img)
if img.ndim < 3:
raise TypeError(f"Input image tensor should have at least 3 dimensions, but found {img.ndim}")
_assert_channels(img, [1, 3])
bound = _max_value(img.dtype)
dtype = img.dtype if torch.is_floating_point(img) else torch.float32
minimum = img.amin(dim=(-2, -1), keepdim=True).to(dtype)
maximum = img.amax(dim=(-2, -1), keepdim=True).to(dtype)
scale = bound / (maximum - minimum)
eq_idxs = torch.isfinite(scale).logical_not()
minimum[eq_idxs] = 0
scale[eq_idxs] = 1
return ((img - minimum) * scale).clamp(0, bound).to(img.dtype)
def _scale_channel(img_chan: Tensor) -> Tensor:
# TODO: we should expect bincount to always be faster than histc, but this
# isn't always the case. Once
# https://github.com/pytorch/pytorch/issues/53194 is fixed, remove the if
# block and only use bincount.
if img_chan.is_cuda:
hist = torch.histc(img_chan.to(torch.float32), bins=256, min=0, max=255)
else:
hist = torch.bincount(img_chan.reshape(-1), minlength=256)
nonzero_hist = hist[hist != 0]
step = torch.div(nonzero_hist[:-1].sum(), 255, rounding_mode="floor")
if step == 0:
return img_chan
lut = torch.div(torch.cumsum(hist, 0) + torch.div(step, 2, rounding_mode="floor"), step, rounding_mode="floor")
lut = torch.nn.functional.pad(lut, [1, 0])[:-1].clamp(0, 255)
return lut[img_chan.to(torch.int64)].to(torch.uint8)
def _equalize_single_image(img: Tensor) -> Tensor:
return torch.stack([_scale_channel(img[c]) for c in range(img.size(0))])
def equalize(img: Tensor) -> Tensor:
_assert_image_tensor(img)
if not (3 <= img.ndim <= 4):
raise TypeError(f"Input image tensor should have 3 or 4 dimensions, but found {img.ndim}")
if img.dtype != torch.uint8:
raise TypeError(f"Only torch.uint8 image tensors are supported, but found {img.dtype}")
_assert_channels(img, [1, 3])
if img.ndim == 3:
return _equalize_single_image(img)
return torch.stack([_equalize_single_image(x) for x in img])
def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
_assert_image_tensor(tensor)
if not tensor.is_floating_point():
raise TypeError(f"Input tensor should be a float tensor. Got {tensor.dtype}.")
if tensor.ndim < 3:
raise ValueError(
f"Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = {tensor.size()}"
)
if not inplace:
tensor = tensor.clone()
dtype = tensor.dtype
mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
if (std == 0).any():
raise ValueError(f"std evaluated to zero after conversion to {dtype}, leading to division by zero.")
if mean.ndim == 1:
mean = mean.view(-1, 1, 1)
if std.ndim == 1:
std = std.view(-1, 1, 1)
return tensor.sub_(mean).div_(std)
def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
_assert_image_tensor(img)
if not inplace:
img = img.clone()
img[..., i : i + h, j : j + w] = v
return img
def _create_identity_grid(size: List[int]) -> Tensor:
hw_space = [torch.linspace((-s + 1) / s, (s - 1) / s, s) for s in size]
grid_y, grid_x = torch.meshgrid(hw_space, indexing="ij")
return torch.stack([grid_x, grid_y], -1).unsqueeze(0) # 1 x H x W x 2
def elastic_transform(
img: Tensor,
displacement: Tensor,
interpolation: str = "bilinear",
fill: Optional[Union[int, float, List[float]]] = None,
) -> Tensor:
if not (isinstance(img, torch.Tensor)):
raise TypeError(f"img should be Tensor. Got {type(img)}")
size = list(img.shape[-2:])
displacement = displacement.to(img.device)
identity_grid = _create_identity_grid(size)
grid = identity_grid.to(img.device) + displacement
return _apply_grid_transform(img, grid, interpolation, fill)
```
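A brief usage sketch for the private helpers above (not part of the file; the supported entry point is torchvision.transforms.functional, and _functional_tensor may change without notice):
```py
# Minimal sketch, assuming a recent torchvision: exercising a few of the private helpers.
import torch
from torchvision.transforms import _functional_tensor as FT

img = torch.randint(0, 256, (3, 32, 48), dtype=torch.uint8)  # (C, H, W) uint8 image

print(FT.get_dimensions(img))                         # [3, 32, 48]
flt = FT.convert_image_dtype(img, torch.float32)      # uint8 [0, 255] -> float [0.0, 1.0]
padded = FT.pad(flt, [4, 4], padding_mode="reflect")  # [left/right, top/bottom] padding
resized = FT.resize(padded, [16, 16], interpolation="bilinear", antialias=True)
gray = FT.rgb_to_grayscale(resized, num_output_channels=1)
print(resized.shape, gray.shape)                      # torch.Size([3, 16, 16]) torch.Size([1, 16, 16])
```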
|
===================================================================================================================================
SOURCE CODE FILE: _functional_video.py
LINES: 1
SIZE: 3.88 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\_functional_video.py
ENCODING: utf-8
```py
import warnings
import torch
warnings.warn(
"The 'torchvision.transforms._functional_video' module is deprecated since 0.12 and will be removed in the future. "
"Please use the 'torchvision.transforms.functional' module instead."
)
def _is_tensor_video_clip(clip):
if not torch.is_tensor(clip):
raise TypeError("clip should be Tensor. Got %s" % type(clip))
if not clip.ndimension() == 4:
raise ValueError("clip should be 4D. Got %dD" % clip.dim())
return True
def crop(clip, i, j, h, w):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
"""
if len(clip.size()) != 4:
raise ValueError("clip should be a 4D tensor")
return clip[..., i : i + h, j : j + w]
def resize(clip, target_size, interpolation_mode):
if len(target_size) != 2:
raise ValueError(f"target size should be tuple (height, width), instead got {target_size}")
return torch.nn.functional.interpolate(clip, size=target_size, mode=interpolation_mode, align_corners=False)
def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"):
"""
Do spatial cropping and resizing to the video clip
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
i (int): i in (i, j), i.e. the vertical coordinate of the upper left corner.
j (int): j in (i, j), i.e. the horizontal coordinate of the upper left corner.
h (int): Height of the cropped region.
w (int): Width of the cropped region.
size (tuple(int, int)): height and width of resized clip
Returns:
clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)
"""
if not _is_tensor_video_clip(clip):
raise ValueError("clip should be a 4D torch.tensor")
clip = crop(clip, i, j, h, w)
clip = resize(clip, size, interpolation_mode)
return clip
def center_crop(clip, crop_size):
if not _is_tensor_video_clip(clip):
raise ValueError("clip should be a 4D torch.tensor")
h, w = clip.size(-2), clip.size(-1)
th, tw = crop_size
if h < th or w < tw:
raise ValueError("height and width must be no smaller than crop_size")
i = int(round((h - th) / 2.0))
j = int(round((w - tw) / 2.0))
return crop(clip, i, j, th, tw)
def to_tensor(clip):
"""
Convert tensor data type from uint8 to float, divide values by 255.0 and
permute the dimensions of the clip tensor
Args:
clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
Return:
clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
"""
_is_tensor_video_clip(clip)
if not clip.dtype == torch.uint8:
raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype))
return clip.float().permute(3, 0, 1, 2) / 255.0
def normalize(clip, mean, std, inplace=False):
"""
Args:
clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
mean (tuple): pixel RGB mean. Size is (3)
std (tuple): pixel standard deviation. Size is (3)
Returns:
normalized clip (torch.tensor): Size is (C, T, H, W)
"""
if not _is_tensor_video_clip(clip):
raise ValueError("clip should be a 4D torch.tensor")
if not inplace:
clip = clip.clone()
mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device)
std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device)
clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
return clip
def hflip(clip):
"""
Args:
clip (torch.tensor): Video clip to be flipped. Size is (C, T, H, W)
Returns:
flipped clip (torch.tensor): Size is (C, T, H, W)
"""
if not _is_tensor_video_clip(clip):
raise ValueError("clip should be a 4D torch.tensor")
return clip.flip(-1)
```
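A short usage sketch for the deprecated helpers above (not part of the file; importing the module emits the deprecation warning, and torchvision.transforms.functional is the supported replacement; the mean/std values are illustrative):
```py
# Minimal sketch: a (T, H, W, C) uint8 clip through the deprecated video helpers.
import torch
from torchvision.transforms import _functional_video as FV

raw = torch.randint(0, 256, (8, 64, 64, 3), dtype=torch.uint8)  # (T, H, W, C)
clip = FV.to_tensor(raw)                              # float in [0, 1], shape (C, T, H, W)
clip = FV.resized_crop(clip, 0, 0, 48, 48, (32, 32))  # crop 48x48 at (0, 0), resize to 32x32
clip = FV.normalize(clip, mean=(0.43, 0.39, 0.37), std=(0.23, 0.22, 0.22))
clip = FV.hflip(clip)
print(clip.shape)                                     # torch.Size([3, 8, 32, 32])
```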
|
==========================================================================================================================
SOURCE CODE FILE: _presets.py
LINES: 18
SIZE: 8.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\_presets.py
ENCODING: utf-8
```py
"""
This file is part of the private API. Please do not use these classes directly, as they will be modified in
future versions without warning. The classes should be accessed only via the transforms argument of Weights.
"""
from typing import Optional, Tuple, Union
import torch
from torch import nn, Tensor
from . import functional as F, InterpolationMode
__all__ = [
"ObjectDetection",
"ImageClassification",
"VideoClassification",
"SemanticSegmentation",
"OpticalFlow",
]
class ObjectDetection(nn.Module):
def forward(self, img: Tensor) -> Tensor:
if not isinstance(img, Tensor):
img = F.pil_to_tensor(img)
return F.convert_image_dtype(img, torch.float)
def __repr__(self) -> str:
return self.__class__.__name__ + "()"
def describe(self) -> str:
return (
"Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
"The images are rescaled to ``[0.0, 1.0]``."
)
class ImageClassification(nn.Module):
def __init__(
self,
*,
crop_size: int,
resize_size: int = 256,
mean: Tuple[float, ...] = (0.485, 0.456, 0.406),
std: Tuple[float, ...] = (0.229, 0.224, 0.225),
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
antialias: Optional[bool] = True,
) -> None:
super().__init__()
self.crop_size = [crop_size]
self.resize_size = [resize_size]
self.mean = list(mean)
self.std = list(std)
self.interpolation = interpolation
self.antialias = antialias
def forward(self, img: Tensor) -> Tensor:
img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=self.antialias)
img = F.center_crop(img, self.crop_size)
if not isinstance(img, Tensor):
img = F.pil_to_tensor(img)
img = F.convert_image_dtype(img, torch.float)
img = F.normalize(img, mean=self.mean, std=self.std)
return img
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
format_string += f"\n crop_size={self.crop_size}"
format_string += f"\n resize_size={self.resize_size}"
format_string += f"\n mean={self.mean}"
format_string += f"\n std={self.std}"
format_string += f"\n interpolation={self.interpolation}"
format_string += "\n)"
return format_string
def describe(self) -> str:
return (
"Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, "
f"followed by a central crop of ``crop_size={self.crop_size}``. Finally the values are first rescaled to "
f"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``."
)
class VideoClassification(nn.Module):
def __init__(
self,
*,
crop_size: Tuple[int, int],
resize_size: Union[Tuple[int], Tuple[int, int]],
mean: Tuple[float, ...] = (0.43216, 0.394666, 0.37645),
std: Tuple[float, ...] = (0.22803, 0.22145, 0.216989),
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
) -> None:
super().__init__()
self.crop_size = list(crop_size)
self.resize_size = list(resize_size)
self.mean = list(mean)
self.std = list(std)
self.interpolation = interpolation
def forward(self, vid: Tensor) -> Tensor:
need_squeeze = False
if vid.ndim < 5:
vid = vid.unsqueeze(dim=0)
need_squeeze = True
N, T, C, H, W = vid.shape
vid = vid.view(-1, C, H, W)
# We hard-code antialias=False to preserve results after we changed
# its default from None to True (see
# https://github.com/pytorch/vision/pull/7160)
# TODO: we could re-train the video models with antialias=True?
vid = F.resize(vid, self.resize_size, interpolation=self.interpolation, antialias=False)
vid = F.center_crop(vid, self.crop_size)
vid = F.convert_image_dtype(vid, torch.float)
vid = F.normalize(vid, mean=self.mean, std=self.std)
H, W = self.crop_size
vid = vid.view(N, T, C, H, W)
vid = vid.permute(0, 2, 1, 3, 4) # (N, T, C, H, W) => (N, C, T, H, W)
if need_squeeze:
vid = vid.squeeze(dim=0)
return vid
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
format_string += f"\n crop_size={self.crop_size}"
format_string += f"\n resize_size={self.resize_size}"
format_string += f"\n mean={self.mean}"
format_string += f"\n std={self.std}"
format_string += f"\n interpolation={self.interpolation}"
format_string += "\n)"
return format_string
def describe(self) -> str:
return (
"Accepts batched ``(B, T, C, H, W)`` and single ``(T, C, H, W)`` video frame ``torch.Tensor`` objects. "
f"The frames are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, "
f"followed by a central crop of ``crop_size={self.crop_size}``. Finally the values are first rescaled to "
f"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``. Finally the output "
"dimensions are permuted to ``(..., C, T, H, W)`` tensors."
)
class SemanticSegmentation(nn.Module):
def __init__(
self,
*,
resize_size: Optional[int],
mean: Tuple[float, ...] = (0.485, 0.456, 0.406),
std: Tuple[float, ...] = (0.229, 0.224, 0.225),
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
antialias: Optional[bool] = True,
) -> None:
super().__init__()
self.resize_size = [resize_size] if resize_size is not None else None
self.mean = list(mean)
self.std = list(std)
self.interpolation = interpolation
self.antialias = antialias
def forward(self, img: Tensor) -> Tensor:
if isinstance(self.resize_size, list):
img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=self.antialias)
if not isinstance(img, Tensor):
img = F.pil_to_tensor(img)
img = F.convert_image_dtype(img, torch.float)
img = F.normalize(img, mean=self.mean, std=self.std)
return img
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
format_string += f"\n resize_size={self.resize_size}"
format_string += f"\n mean={self.mean}"
format_string += f"\n std={self.std}"
format_string += f"\n interpolation={self.interpolation}"
format_string += "\n)"
return format_string
def describe(self) -> str:
return (
"Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. "
f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and "
f"``std={self.std}``."
)
class OpticalFlow(nn.Module):
def forward(self, img1: Tensor, img2: Tensor) -> Tuple[Tensor, Tensor]:
if not isinstance(img1, Tensor):
img1 = F.pil_to_tensor(img1)
if not isinstance(img2, Tensor):
img2 = F.pil_to_tensor(img2)
img1 = F.convert_image_dtype(img1, torch.float)
img2 = F.convert_image_dtype(img2, torch.float)
# map [0, 1] into [-1, 1]
img1 = F.normalize(img1, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
img2 = F.normalize(img2, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
img1 = img1.contiguous()
img2 = img2.contiguous()
return img1, img2
def __repr__(self) -> str:
return self.__class__.__name__ + "()"
def describe(self) -> str:
return (
"Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
"The images are rescaled to ``[-1.0, 1.0]``."
)
```
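A hedged usage sketch for the presets above (not part of the file; these classes are normally obtained via weights.transforms() on a Weights enum, and direct instantiation is shown only for illustration):
```py
# Minimal sketch: applying the ImageClassification preset to a random uint8 batch.
import torch
from torchvision.transforms._presets import ImageClassification

preprocess = ImageClassification(crop_size=224)  # resize_size defaults to 256
batch = torch.randint(0, 256, (2, 3, 300, 400), dtype=torch.uint8)
out = preprocess(batch)                          # resize -> center crop -> float [0, 1] -> normalize
print(out.shape, out.dtype)                      # torch.Size([2, 3, 224, 224]) torch.float32
print(preprocess.describe())
```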
|
===================================================================================================================================
SOURCE CODE FILE: _transforms_video.py
LINES: 1
SIZE: 5.00 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\_transforms_video.py
ENCODING: utf-8
```py
#!/usr/bin/env python3
import numbers
import random
import warnings
from torchvision.transforms import RandomCrop, RandomResizedCrop
from . import _functional_video as F
__all__ = [
"RandomCropVideo",
"RandomResizedCropVideo",
"CenterCropVideo",
"NormalizeVideo",
"ToTensorVideo",
"RandomHorizontalFlipVideo",
]
warnings.warn(
"The 'torchvision.transforms._transforms_video' module is deprecated since 0.12 and will be removed in the future. "
"Please use the 'torchvision.transforms' module instead."
)
class RandomCropVideo(RandomCrop):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped video clip.
Size is (C, T, OH, OW)
"""
i, j, h, w = self.get_params(clip, self.size)
return F.crop(clip, i, j, h, w)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size})"
class RandomResizedCropVideo(RandomResizedCrop):
def __init__(
self,
size,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation_mode="bilinear",
):
if isinstance(size, tuple):
if len(size) != 2:
raise ValueError(f"size should be tuple (height, width), instead got {size}")
self.size = size
else:
self.size = (size, size)
self.interpolation_mode = interpolation_mode
self.scale = scale
self.ratio = ratio
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped and resized video clip.
Size is (C, T, OH, OW)
"""
i, j, h, w = self.get_params(clip, self.scale, self.ratio)
return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})"
class CenterCropVideo:
def __init__(self, crop_size):
if isinstance(crop_size, numbers.Number):
self.crop_size = (int(crop_size), int(crop_size))
else:
self.crop_size = crop_size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: central cropping of video clip. Size is
(C, T, crop_size, crop_size)
"""
return F.center_crop(clip, self.crop_size)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(crop_size={self.crop_size})"
class NormalizeVideo:
"""
Normalize the video clip by mean subtraction and division by standard deviation
Args:
mean (3-tuple): pixel RGB mean
std (3-tuple): pixel RGB standard deviation
inplace (boolean): whether to do in-place normalization
"""
def __init__(self, mean, std, inplace=False):
self.mean = mean
self.std = std
self.inplace = inplace
def __call__(self, clip):
"""
Args:
clip (torch.tensor): video clip to be normalized. Size is (C, T, H, W)
"""
return F.normalize(clip, self.mean, self.std, self.inplace)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})"
class ToTensorVideo:
"""
Convert tensor data type from uint8 to float, divide values by 255.0 and
permute the dimensions of the clip tensor
"""
def __init__(self):
pass
def __call__(self, clip):
"""
Args:
clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
Return:
clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
"""
return F.to_tensor(clip)
def __repr__(self) -> str:
return self.__class__.__name__
class RandomHorizontalFlipVideo:
"""
Flip the video clip along the horizontal direction with a given probability
Args:
p (float): probability of the clip being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Size is (C, T, H, W)
Return:
clip (torch.tensor): Size is (C, T, H, W)
"""
if random.random() < self.p:
clip = F.hflip(clip)
return clip
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
```
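A short usage sketch composing the deprecated video transforms above (not part of the file; torchvision.transforms is the supported replacement, and the mean/std values here are illustrative):
```py
# Minimal sketch: a Compose pipeline over a (T, H, W, C) uint8 clip.
import torch
from torchvision.transforms import Compose
from torchvision.transforms._transforms_video import (
    CenterCropVideo,
    NormalizeVideo,
    RandomHorizontalFlipVideo,
    ToTensorVideo,
)

transform = Compose([
    ToTensorVideo(),  # (T, H, W, C) uint8 -> (C, T, H, W) float in [0, 1]
    CenterCropVideo(112),
    NormalizeVideo(mean=(0.43, 0.39, 0.37), std=(0.23, 0.22, 0.22)),
    RandomHorizontalFlipVideo(p=0.5),
])

raw = torch.randint(0, 256, (16, 128, 171, 3), dtype=torch.uint8)  # (T, H, W, C)
clip = transform(raw)
print(clip.shape)  # torch.Size([3, 16, 112, 112])
```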
|
=============================================================================================================================
SOURCE CODE FILE: autoaugment.py
LINES: 1
SIZE: 28.18 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\autoaugment.py
ENCODING: utf-8
```py
import math
from enum import Enum
from typing import Dict, List, Optional, Tuple
import torch
from torch import Tensor
from . import functional as F, InterpolationMode
__all__ = ["AutoAugmentPolicy", "AutoAugment", "RandAugment", "TrivialAugmentWide", "AugMix"]
def _apply_op(
img: Tensor, op_name: str, magnitude: float, interpolation: InterpolationMode, fill: Optional[List[float]]
):
if op_name == "ShearX":
# magnitude should be arctan(magnitude)
# official autoaug: (1, level, 0, 0, 1, 0)
# https://github.com/tensorflow/models/blob/dd02069717128186b88afa8d857ce57d17957f03/research/autoaugment/augmentation_transforms.py#L290
# compared to
# torchvision: (1, tan(level), 0, 0, 1, 0)
# https://github.com/pytorch/vision/blob/0c2373d0bba3499e95776e7936e207d8a1676e65/torchvision/transforms/functional.py#L976
img = F.affine(
img,
angle=0.0,
translate=[0, 0],
scale=1.0,
shear=[math.degrees(math.atan(magnitude)), 0.0],
interpolation=interpolation,
fill=fill,
center=[0, 0],
)
elif op_name == "ShearY":
# magnitude should be arctan(magnitude)
# See above
img = F.affine(
img,
angle=0.0,
translate=[0, 0],
scale=1.0,
shear=[0.0, math.degrees(math.atan(magnitude))],
interpolation=interpolation,
fill=fill,
center=[0, 0],
)
elif op_name == "TranslateX":
img = F.affine(
img,
angle=0.0,
translate=[int(magnitude), 0],
scale=1.0,
interpolation=interpolation,
shear=[0.0, 0.0],
fill=fill,
)
elif op_name == "TranslateY":
img = F.affine(
img,
angle=0.0,
translate=[0, int(magnitude)],
scale=1.0,
interpolation=interpolation,
shear=[0.0, 0.0],
fill=fill,
)
elif op_name == "Rotate":
img = F.rotate(img, magnitude, interpolation=interpolation, fill=fill)
elif op_name == "Brightness":
img = F.adjust_brightness(img, 1.0 + magnitude)
elif op_name == "Color":
img = F.adjust_saturation(img, 1.0 + magnitude)
elif op_name == "Contrast":
img = F.adjust_contrast(img, 1.0 + magnitude)
elif op_name == "Sharpness":
img = F.adjust_sharpness(img, 1.0 + magnitude)
elif op_name == "Posterize":
img = F.posterize(img, int(magnitude))
elif op_name == "Solarize":
img = F.solarize(img, magnitude)
elif op_name == "AutoContrast":
img = F.autocontrast(img)
elif op_name == "Equalize":
img = F.equalize(img)
elif op_name == "Invert":
img = F.invert(img)
elif op_name == "Identity":
pass
else:
raise ValueError(f"The provided operator {op_name} is not recognized.")
return img
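# Illustrative usage of _apply_op (not part of the original module; kept as a
# comment so nothing runs at import time). Assuming `img` is a uint8 RGB tensor:
#
#     from torchvision.transforms import InterpolationMode
#     img = _apply_op(img, "Rotate", 15.0, InterpolationMode.NEAREST, fill=None)
#     img = _apply_op(img, "Posterize", 4.0, InterpolationMode.NEAREST, fill=None)
#
# "Rotate" treats the magnitude as degrees and "Posterize" as the number of bits
# to keep; the geometric ops route through F.affine with the conventions noted above.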
class AutoAugmentPolicy(Enum):
"""AutoAugment policies learned on different datasets.
Available policies are IMAGENET, CIFAR10 and SVHN.
"""
IMAGENET = "imagenet"
CIFAR10 = "cifar10"
SVHN = "svhn"
# FIXME: Eliminate copy-pasted code for fill standardization and _augmentation_space() by moving stuff on a base class
class AutoAugment(torch.nn.Module):
r"""AutoAugment data augmentation method based on
`"AutoAugment: Learning Augmentation Strategies from Data" <https://arxiv.org/pdf/1805.09501.pdf>`_.
If the image is torch Tensor, it should be of type torch.uint8, and it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
policy (AutoAugmentPolicy): Desired policy enum defined by
:class:`torchvision.transforms.autoaugment.AutoAugmentPolicy`. Default is ``AutoAugmentPolicy.IMAGENET``.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
def __init__(
self,
policy: AutoAugmentPolicy = AutoAugmentPolicy.IMAGENET,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
) -> None:
super().__init__()
self.policy = policy
self.interpolation = interpolation
self.fill = fill
self.policies = self._get_policies(policy)
def _get_policies(
self, policy: AutoAugmentPolicy
) -> List[Tuple[Tuple[str, float, Optional[int]], Tuple[str, float, Optional[int]]]]:
if policy == AutoAugmentPolicy.IMAGENET:
return [
(("Posterize", 0.4, 8), ("Rotate", 0.6, 9)),
(("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)),
(("Equalize", 0.8, None), ("Equalize", 0.6, None)),
(("Posterize", 0.6, 7), ("Posterize", 0.6, 6)),
(("Equalize", 0.4, None), ("Solarize", 0.2, 4)),
(("Equalize", 0.4, None), ("Rotate", 0.8, 8)),
(("Solarize", 0.6, 3), ("Equalize", 0.6, None)),
(("Posterize", 0.8, 5), ("Equalize", 1.0, None)),
(("Rotate", 0.2, 3), ("Solarize", 0.6, 8)),
(("Equalize", 0.6, None), ("Posterize", 0.4, 6)),
(("Rotate", 0.8, 8), ("Color", 0.4, 0)),
(("Rotate", 0.4, 9), ("Equalize", 0.6, None)),
(("Equalize", 0.0, None), ("Equalize", 0.8, None)),
(("Invert", 0.6, None), ("Equalize", 1.0, None)),
(("Color", 0.6, 4), ("Contrast", 1.0, 8)),
(("Rotate", 0.8, 8), ("Color", 1.0, 2)),
(("Color", 0.8, 8), ("Solarize", 0.8, 7)),
(("Sharpness", 0.4, 7), ("Invert", 0.6, None)),
(("ShearX", 0.6, 5), ("Equalize", 1.0, None)),
(("Color", 0.4, 0), ("Equalize", 0.6, None)),
(("Equalize", 0.4, None), ("Solarize", 0.2, 4)),
(("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)),
(("Invert", 0.6, None), ("Equalize", 1.0, None)),
(("Color", 0.6, 4), ("Contrast", 1.0, 8)),
(("Equalize", 0.8, None), ("Equalize", 0.6, None)),
]
elif policy == AutoAugmentPolicy.CIFAR10:
return [
(("Invert", 0.1, None), ("Contrast", 0.2, 6)),
(("Rotate", 0.7, 2), ("TranslateX", 0.3, 9)),
(("Sharpness", 0.8, 1), ("Sharpness", 0.9, 3)),
(("ShearY", 0.5, 8), ("TranslateY", 0.7, 9)),
(("AutoContrast", 0.5, None), ("Equalize", 0.9, None)),
(("ShearY", 0.2, 7), ("Posterize", 0.3, 7)),
(("Color", 0.4, 3), ("Brightness", 0.6, 7)),
(("Sharpness", 0.3, 9), ("Brightness", 0.7, 9)),
(("Equalize", 0.6, None), ("Equalize", 0.5, None)),
(("Contrast", 0.6, 7), ("Sharpness", 0.6, 5)),
(("Color", 0.7, 7), ("TranslateX", 0.5, 8)),
(("Equalize", 0.3, None), ("AutoContrast", 0.4, None)),
(("TranslateY", 0.4, 3), ("Sharpness", 0.2, 6)),
(("Brightness", 0.9, 6), ("Color", 0.2, 8)),
(("Solarize", 0.5, 2), ("Invert", 0.0, None)),
(("Equalize", 0.2, None), ("AutoContrast", 0.6, None)),
(("Equalize", 0.2, None), ("Equalize", 0.6, None)),
(("Color", 0.9, 9), ("Equalize", 0.6, None)),
(("AutoContrast", 0.8, None), ("Solarize", 0.2, 8)),
(("Brightness", 0.1, 3), ("Color", 0.7, 0)),
(("Solarize", 0.4, 5), ("AutoContrast", 0.9, None)),
(("TranslateY", 0.9, 9), ("TranslateY", 0.7, 9)),
(("AutoContrast", 0.9, None), ("Solarize", 0.8, 3)),
(("Equalize", 0.8, None), ("Invert", 0.1, None)),
(("TranslateY", 0.7, 9), ("AutoContrast", 0.9, None)),
]
elif policy == AutoAugmentPolicy.SVHN:
return [
(("ShearX", 0.9, 4), ("Invert", 0.2, None)),
(("ShearY", 0.9, 8), ("Invert", 0.7, None)),
(("Equalize", 0.6, None), ("Solarize", 0.6, 6)),
(("Invert", 0.9, None), ("Equalize", 0.6, None)),
(("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
(("ShearX", 0.9, 4), ("AutoContrast", 0.8, None)),
(("ShearY", 0.9, 8), ("Invert", 0.4, None)),
(("ShearY", 0.9, 5), ("Solarize", 0.2, 6)),
(("Invert", 0.9, None), ("AutoContrast", 0.8, None)),
(("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
(("ShearX", 0.9, 4), ("Solarize", 0.3, 3)),
(("ShearY", 0.8, 8), ("Invert", 0.7, None)),
(("Equalize", 0.9, None), ("TranslateY", 0.6, 6)),
(("Invert", 0.9, None), ("Equalize", 0.6, None)),
(("Contrast", 0.3, 3), ("Rotate", 0.8, 4)),
(("Invert", 0.8, None), ("TranslateY", 0.0, 2)),
(("ShearY", 0.7, 6), ("Solarize", 0.4, 8)),
(("Invert", 0.6, None), ("Rotate", 0.8, 4)),
(("ShearY", 0.3, 7), ("TranslateX", 0.9, 3)),
(("ShearX", 0.1, 6), ("Invert", 0.6, None)),
(("Solarize", 0.7, 2), ("TranslateY", 0.6, 7)),
(("ShearY", 0.8, 4), ("Invert", 0.8, None)),
(("ShearX", 0.7, 9), ("TranslateY", 0.8, 3)),
(("ShearY", 0.8, 5), ("AutoContrast", 0.7, None)),
(("ShearX", 0.7, 2), ("Invert", 0.1, None)),
]
else:
raise ValueError(f"The provided policy {policy} is not recognized.")
def _augmentation_space(self, num_bins: int, image_size: Tuple[int, int]) -> Dict[str, Tuple[Tensor, bool]]:
return {
# op_name: (magnitudes, signed)
"ShearX": (torch.linspace(0.0, 0.3, num_bins), True),
"ShearY": (torch.linspace(0.0, 0.3, num_bins), True),
"TranslateX": (torch.linspace(0.0, 150.0 / 331.0 * image_size[1], num_bins), True),
"TranslateY": (torch.linspace(0.0, 150.0 / 331.0 * image_size[0], num_bins), True),
"Rotate": (torch.linspace(0.0, 30.0, num_bins), True),
"Brightness": (torch.linspace(0.0, 0.9, num_bins), True),
"Color": (torch.linspace(0.0, 0.9, num_bins), True),
"Contrast": (torch.linspace(0.0, 0.9, num_bins), True),
"Sharpness": (torch.linspace(0.0, 0.9, num_bins), True),
"Posterize": (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int(), False),
"Solarize": (torch.linspace(255.0, 0.0, num_bins), False),
"AutoContrast": (torch.tensor(0.0), False),
"Equalize": (torch.tensor(0.0), False),
"Invert": (torch.tensor(0.0), False),
}
@staticmethod
def get_params(transform_num: int) -> Tuple[int, Tensor, Tensor]:
"""Get parameters for autoaugment transformation
Returns:
params required by the autoaugment transformation
"""
policy_id = int(torch.randint(transform_num, (1,)).item())
probs = torch.rand((2,))
signs = torch.randint(2, (2,))
return policy_id, probs, signs
def forward(self, img: Tensor) -> Tensor:
"""
img (PIL Image or Tensor): Image to be transformed.
Returns:
PIL Image or Tensor: AutoAugmented image.
"""
fill = self.fill
channels, height, width = F.get_dimensions(img)
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * channels
elif fill is not None:
fill = [float(f) for f in fill]
transform_id, probs, signs = self.get_params(len(self.policies))
op_meta = self._augmentation_space(10, (height, width))
for i, (op_name, p, magnitude_id) in enumerate(self.policies[transform_id]):
if probs[i] <= p:
magnitudes, signed = op_meta[op_name]
magnitude = float(magnitudes[magnitude_id].item()) if magnitude_id is not None else 0.0
if signed and signs[i] == 0:
magnitude *= -1.0
img = _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)
return img
def __repr__(self) -> str:
return f"{self.__class__.__name__}(policy={self.policy}, fill={self.fill})"
class RandAugment(torch.nn.Module):
r"""RandAugment data augmentation method based on
`"RandAugment: Practical automated data augmentation with a reduced search space"
<https://arxiv.org/abs/1909.13719>`_.
If the image is torch Tensor, it should be of type torch.uint8, and it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
num_ops (int): Number of augmentation transformations to apply sequentially.
magnitude (int): Magnitude for all the transformations.
num_magnitude_bins (int): The number of different magnitude values.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
def __init__(
self,
num_ops: int = 2,
magnitude: int = 9,
num_magnitude_bins: int = 31,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
) -> None:
super().__init__()
self.num_ops = num_ops
self.magnitude = magnitude
self.num_magnitude_bins = num_magnitude_bins
self.interpolation = interpolation
self.fill = fill
def _augmentation_space(self, num_bins: int, image_size: Tuple[int, int]) -> Dict[str, Tuple[Tensor, bool]]:
return {
# op_name: (magnitudes, signed)
"Identity": (torch.tensor(0.0), False),
"ShearX": (torch.linspace(0.0, 0.3, num_bins), True),
"ShearY": (torch.linspace(0.0, 0.3, num_bins), True),
"TranslateX": (torch.linspace(0.0, 150.0 / 331.0 * image_size[1], num_bins), True),
"TranslateY": (torch.linspace(0.0, 150.0 / 331.0 * image_size[0], num_bins), True),
"Rotate": (torch.linspace(0.0, 30.0, num_bins), True),
"Brightness": (torch.linspace(0.0, 0.9, num_bins), True),
"Color": (torch.linspace(0.0, 0.9, num_bins), True),
"Contrast": (torch.linspace(0.0, 0.9, num_bins), True),
"Sharpness": (torch.linspace(0.0, 0.9, num_bins), True),
"Posterize": (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int(), False),
"Solarize": (torch.linspace(255.0, 0.0, num_bins), False),
"AutoContrast": (torch.tensor(0.0), False),
"Equalize": (torch.tensor(0.0), False),
}
def forward(self, img: Tensor) -> Tensor:
"""
img (PIL Image or Tensor): Image to be transformed.
Returns:
PIL Image or Tensor: Transformed image.
"""
fill = self.fill
channels, height, width = F.get_dimensions(img)
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * channels
elif fill is not None:
fill = [float(f) for f in fill]
op_meta = self._augmentation_space(self.num_magnitude_bins, (height, width))
for _ in range(self.num_ops):
op_index = int(torch.randint(len(op_meta), (1,)).item())
op_name = list(op_meta.keys())[op_index]
magnitudes, signed = op_meta[op_name]
magnitude = float(magnitudes[self.magnitude].item()) if magnitudes.ndim > 0 else 0.0
if signed and torch.randint(2, (1,)):
magnitude *= -1.0
img = _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)
return img
def __repr__(self) -> str:
s = (
f"{self.__class__.__name__}("
f"num_ops={self.num_ops}"
f", magnitude={self.magnitude}"
f", num_magnitude_bins={self.num_magnitude_bins}"
f", interpolation={self.interpolation}"
f", fill={self.fill}"
f")"
)
return s
class TrivialAugmentWide(torch.nn.Module):
r"""Dataset-independent data-augmentation with TrivialAugment Wide, as described in
`"TrivialAugment: Tuning-free Yet State-of-the-Art Data Augmentation" <https://arxiv.org/abs/2103.10158>`_.
If the image is torch Tensor, it should be of type torch.uint8, and it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
num_magnitude_bins (int): The number of different magnitude values.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
def __init__(
self,
num_magnitude_bins: int = 31,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
) -> None:
super().__init__()
self.num_magnitude_bins = num_magnitude_bins
self.interpolation = interpolation
self.fill = fill
def _augmentation_space(self, num_bins: int) -> Dict[str, Tuple[Tensor, bool]]:
return {
# op_name: (magnitudes, signed)
"Identity": (torch.tensor(0.0), False),
"ShearX": (torch.linspace(0.0, 0.99, num_bins), True),
"ShearY": (torch.linspace(0.0, 0.99, num_bins), True),
"TranslateX": (torch.linspace(0.0, 32.0, num_bins), True),
"TranslateY": (torch.linspace(0.0, 32.0, num_bins), True),
"Rotate": (torch.linspace(0.0, 135.0, num_bins), True),
"Brightness": (torch.linspace(0.0, 0.99, num_bins), True),
"Color": (torch.linspace(0.0, 0.99, num_bins), True),
"Contrast": (torch.linspace(0.0, 0.99, num_bins), True),
"Sharpness": (torch.linspace(0.0, 0.99, num_bins), True),
"Posterize": (8 - (torch.arange(num_bins) / ((num_bins - 1) / 6)).round().int(), False),
"Solarize": (torch.linspace(255.0, 0.0, num_bins), False),
"AutoContrast": (torch.tensor(0.0), False),
"Equalize": (torch.tensor(0.0), False),
}
def forward(self, img: Tensor) -> Tensor:
"""
img (PIL Image or Tensor): Image to be transformed.
Returns:
PIL Image or Tensor: Transformed image.
"""
fill = self.fill
channels, height, width = F.get_dimensions(img)
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * channels
elif fill is not None:
fill = [float(f) for f in fill]
op_meta = self._augmentation_space(self.num_magnitude_bins)
op_index = int(torch.randint(len(op_meta), (1,)).item())
op_name = list(op_meta.keys())[op_index]
magnitudes, signed = op_meta[op_name]
magnitude = (
float(magnitudes[torch.randint(len(magnitudes), (1,), dtype=torch.long)].item())
if magnitudes.ndim > 0
else 0.0
)
if signed and torch.randint(2, (1,)):
magnitude *= -1.0
return _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)
def __repr__(self) -> str:
s = (
f"{self.__class__.__name__}("
f"num_magnitude_bins={self.num_magnitude_bins}"
f", interpolation={self.interpolation}"
f", fill={self.fill}"
f")"
)
return s
class AugMix(torch.nn.Module):
r"""AugMix data augmentation method based on
`"AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty" <https://arxiv.org/abs/1912.02781>`_.
If the image is torch Tensor, it should be of type torch.uint8, and it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
severity (int): The severity of base augmentation operators. Default is ``3``.
mixture_width (int): The number of augmentation chains. Default is ``3``.
chain_depth (int): The depth of augmentation chains. A negative value denotes stochastic depth sampled from the interval [1, 3].
Default is ``-1``.
alpha (float): The hyperparameter for the probability distributions. Default is ``1.0``.
all_ops (bool): Use all operations (including brightness, contrast, color and sharpness). Default is ``True``.
interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
def __init__(
self,
severity: int = 3,
mixture_width: int = 3,
chain_depth: int = -1,
alpha: float = 1.0,
all_ops: bool = True,
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
fill: Optional[List[float]] = None,
) -> None:
super().__init__()
self._PARAMETER_MAX = 10
if not (1 <= severity <= self._PARAMETER_MAX):
raise ValueError(f"The severity must be between [1, {self._PARAMETER_MAX}]. Got {severity} instead.")
self.severity = severity
self.mixture_width = mixture_width
self.chain_depth = chain_depth
self.alpha = alpha
self.all_ops = all_ops
self.interpolation = interpolation
self.fill = fill
def _augmentation_space(self, num_bins: int, image_size: Tuple[int, int]) -> Dict[str, Tuple[Tensor, bool]]:
s = {
# op_name: (magnitudes, signed)
"ShearX": (torch.linspace(0.0, 0.3, num_bins), True),
"ShearY": (torch.linspace(0.0, 0.3, num_bins), True),
"TranslateX": (torch.linspace(0.0, image_size[1] / 3.0, num_bins), True),
"TranslateY": (torch.linspace(0.0, image_size[0] / 3.0, num_bins), True),
"Rotate": (torch.linspace(0.0, 30.0, num_bins), True),
"Posterize": (4 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int(), False),
"Solarize": (torch.linspace(255.0, 0.0, num_bins), False),
"AutoContrast": (torch.tensor(0.0), False),
"Equalize": (torch.tensor(0.0), False),
}
if self.all_ops:
s.update(
{
"Brightness": (torch.linspace(0.0, 0.9, num_bins), True),
"Color": (torch.linspace(0.0, 0.9, num_bins), True),
"Contrast": (torch.linspace(0.0, 0.9, num_bins), True),
"Sharpness": (torch.linspace(0.0, 0.9, num_bins), True),
}
)
return s
@torch.jit.unused
def _pil_to_tensor(self, img) -> Tensor:
return F.pil_to_tensor(img)
@torch.jit.unused
def _tensor_to_pil(self, img: Tensor):
return F.to_pil_image(img)
def _sample_dirichlet(self, params: Tensor) -> Tensor:
# Must be on a separate method so that we can overwrite it in tests.
return torch._sample_dirichlet(params)
def forward(self, orig_img: Tensor) -> Tensor:
"""
img (PIL Image or Tensor): Image to be transformed.
Returns:
PIL Image or Tensor: Transformed image.
"""
fill = self.fill
channels, height, width = F.get_dimensions(orig_img)
if isinstance(orig_img, Tensor):
img = orig_img
if isinstance(fill, (int, float)):
fill = [float(fill)] * channels
elif fill is not None:
fill = [float(f) for f in fill]
else:
img = self._pil_to_tensor(orig_img)
op_meta = self._augmentation_space(self._PARAMETER_MAX, (height, width))
orig_dims = list(img.shape)
batch = img.view([1] * max(4 - img.ndim, 0) + orig_dims)
batch_dims = [batch.size(0)] + [1] * (batch.ndim - 1)
# Sample the beta weights for combining the original and augmented image. To get Beta, we use a Dirichlet
# with 2 parameters. The 1st column stores the weights of the original and the 2nd the ones of augmented image.
m = self._sample_dirichlet(
torch.tensor([self.alpha, self.alpha], device=batch.device).expand(batch_dims[0], -1)
)
# Sample the mixing weights and combine them with the ones sampled from Beta for the augmented images.
combined_weights = self._sample_dirichlet(
torch.tensor([self.alpha] * self.mixture_width, device=batch.device).expand(batch_dims[0], -1)
) * m[:, 1].view([batch_dims[0], -1])
mix = m[:, 0].view(batch_dims) * batch
for i in range(self.mixture_width):
aug = batch
depth = self.chain_depth if self.chain_depth > 0 else int(torch.randint(low=1, high=4, size=(1,)).item())
for _ in range(depth):
op_index = int(torch.randint(len(op_meta), (1,)).item())
op_name = list(op_meta.keys())[op_index]
magnitudes, signed = op_meta[op_name]
magnitude = (
float(magnitudes[torch.randint(self.severity, (1,), dtype=torch.long)].item())
if magnitudes.ndim > 0
else 0.0
)
if signed and torch.randint(2, (1,)):
magnitude *= -1.0
aug = _apply_op(aug, op_name, magnitude, interpolation=self.interpolation, fill=fill)
mix.add_(combined_weights[:, i].view(batch_dims) * aug)
mix = mix.view(orig_dims).to(dtype=img.dtype)
if not isinstance(orig_img, Tensor):
return self._tensor_to_pil(mix)
return mix
def __repr__(self) -> str:
s = (
f"{self.__class__.__name__}("
f"severity={self.severity}"
f", mixture_width={self.mixture_width}"
f", chain_depth={self.chain_depth}"
f", alpha={self.alpha}"
f", all_ops={self.all_ops}"
f", interpolation={self.interpolation}"
f", fill={self.fill}"
f")"
)
return s
```
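For orientation, a minimal usage sketch of the three policies defined above; the image shape and dtype below are illustrative assumptions (all three policies expect uint8 images), not part of the original file:

```py
import torch
from torchvision.transforms import AugMix, RandAugment, TrivialAugmentWide

img = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)  # assumed CHW uint8 image

# Each policy is a torch.nn.Module; calling it applies randomly sampled operations.
for policy in (RandAugment(num_ops=2, magnitude=9), TrivialAugmentWide(), AugMix(severity=3)):
    out = policy(img)
    assert out.shape == img.shape  # these policies preserve the spatial size (and uint8 dtype)
```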
============================================================================================================================
SOURCE CODE FILE: functional.py
LINES: 1
SIZE: 67.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\functional.py
ENCODING: utf-8
```py
import math
import numbers
import sys
import warnings
from enum import Enum
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import torch
from PIL import Image
from PIL.Image import Image as PILImage
from torch import Tensor
try:
import accimage
except ImportError:
accimage = None
from ..utils import _log_api_usage_once
from . import _functional_pil as F_pil, _functional_tensor as F_t
class InterpolationMode(Enum):
"""Interpolation modes
Available interpolation methods are ``nearest``, ``nearest-exact``, ``bilinear``, ``bicubic``, ``box``, ``hamming``,
and ``lanczos``.
"""
NEAREST = "nearest"
NEAREST_EXACT = "nearest-exact"
BILINEAR = "bilinear"
BICUBIC = "bicubic"
# For PIL compatibility
BOX = "box"
HAMMING = "hamming"
LANCZOS = "lanczos"
# TODO: Once torchscript supports Enums with staticmethod
# this can be put into InterpolationMode as staticmethod
def _interpolation_modes_from_int(i: int) -> InterpolationMode:
inverse_modes_mapping = {
0: InterpolationMode.NEAREST,
2: InterpolationMode.BILINEAR,
3: InterpolationMode.BICUBIC,
4: InterpolationMode.BOX,
5: InterpolationMode.HAMMING,
1: InterpolationMode.LANCZOS,
}
return inverse_modes_mapping[i]
pil_modes_mapping = {
InterpolationMode.NEAREST: 0,
InterpolationMode.BILINEAR: 2,
InterpolationMode.BICUBIC: 3,
InterpolationMode.NEAREST_EXACT: 0,
InterpolationMode.BOX: 4,
InterpolationMode.HAMMING: 5,
InterpolationMode.LANCZOS: 1,
}
_is_pil_image = F_pil._is_pil_image
def get_dimensions(img: Tensor) -> List[int]:
"""Returns the dimensions of an image as [channels, height, width].
Args:
img (PIL Image or Tensor): The image to be checked.
Returns:
List[int]: The image dimensions.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(get_dimensions)
if isinstance(img, torch.Tensor):
return F_t.get_dimensions(img)
return F_pil.get_dimensions(img)
def get_image_size(img: Tensor) -> List[int]:
"""Returns the size of an image as [width, height].
Args:
img (PIL Image or Tensor): The image to be checked.
Returns:
List[int]: The image size.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(get_image_size)
if isinstance(img, torch.Tensor):
return F_t.get_image_size(img)
return F_pil.get_image_size(img)
def get_image_num_channels(img: Tensor) -> int:
"""Returns the number of channels of an image.
Args:
img (PIL Image or Tensor): The image to be checked.
Returns:
int: The number of channels.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(get_image_num_channels)
if isinstance(img, torch.Tensor):
return F_t.get_image_num_channels(img)
return F_pil.get_image_num_channels(img)
@torch.jit.unused
def _is_numpy(img: Any) -> bool:
return isinstance(img, np.ndarray)
@torch.jit.unused
def _is_numpy_image(img: Any) -> bool:
return img.ndim in {2, 3}
def to_tensor(pic: Union[PILImage, np.ndarray]) -> Tensor:
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
This function does not support torchscript.
See :class:`~torchvision.transforms.ToTensor` for more details.
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(to_tensor)
if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}")
if _is_numpy(pic) and not _is_numpy_image(pic):
raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.")
default_float_dtype = torch.get_default_dtype()
if isinstance(pic, np.ndarray):
# handle numpy array
if pic.ndim == 2:
pic = pic[:, :, None]
img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous()
# backward compatibility
if isinstance(img, torch.ByteTensor):
return img.to(dtype=default_float_dtype).div(255)
else:
return img
if accimage is not None and isinstance(pic, accimage.Image):
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torch.from_numpy(nppic).to(dtype=default_float_dtype)
# handle PIL Image
mode_to_nptype = {"I": np.int32, "I;16" if sys.byteorder == "little" else "I;16B": np.int16, "F": np.float32}
img = torch.from_numpy(np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True))
if pic.mode == "1":
img = 255 * img
img = img.view(pic.size[1], pic.size[0], F_pil.get_image_num_channels(pic))
# put it from HWC to CHW format
img = img.permute((2, 0, 1)).contiguous()
if isinstance(img, torch.ByteTensor):
return img.to(dtype=default_float_dtype).div(255)
else:
return img
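# Minimal usage sketch (array shape below is illustrative): an HWC uint8 ndarray
# becomes a float CHW tensor scaled to [0, 1].
#     >>> arr = np.zeros((4, 4, 3), dtype=np.uint8)
#     >>> to_tensor(arr).shape, to_tensor(arr).dtype
#     (torch.Size([3, 4, 4]), torch.float32)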
def pil_to_tensor(pic: Any) -> Tensor:
"""Convert a ``PIL Image`` to a tensor of the same type.
This function does not support torchscript.
See :class:`~torchvision.transforms.PILToTensor` for more details.
.. note::
A deep copy of the underlying array is performed.
Args:
pic (PIL Image): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(pil_to_tensor)
if not F_pil._is_pil_image(pic):
raise TypeError(f"pic should be PIL Image. Got {type(pic)}")
if accimage is not None and isinstance(pic, accimage.Image):
# accimage format is always uint8 internally, so always return uint8 here
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
pic.copyto(nppic)
return torch.as_tensor(nppic)
# handle PIL Image
img = torch.as_tensor(np.array(pic, copy=True))
img = img.view(pic.size[1], pic.size[0], F_pil.get_image_num_channels(pic))
# put it from HWC to CHW format
img = img.permute((2, 0, 1))
return img
def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
"""Convert a tensor image to the given ``dtype`` and scale the values accordingly
This function does not support PIL Image.
Args:
image (torch.Tensor): Image to be converted
dtype (torch.dtype): Desired data type of the output
Returns:
Tensor: Converted image
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(convert_image_dtype)
if not isinstance(image, torch.Tensor):
raise TypeError("Input img should be Tensor Image")
return F_t.convert_image_dtype(image, dtype)
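# Minimal usage sketch (the 1-D tensor is purely illustrative): uint8 values are
# rescaled into [0, 1] as described above.
#     >>> x = torch.tensor([0, 128, 255], dtype=torch.uint8)
#     >>> convert_image_dtype(x, torch.float32)
#     tensor([0.0000, 0.5020, 1.0000])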
def to_pil_image(pic, mode=None):
"""Convert a tensor or an ndarray to PIL Image. This function does not support torchscript.
See :class:`~torchvision.transforms.ToPILImage` for more details.
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
Returns:
PIL Image: Image converted to PIL Image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(to_pil_image)
if isinstance(pic, torch.Tensor):
if pic.ndim == 3:
pic = pic.permute((1, 2, 0))
pic = pic.numpy(force=True)
elif not isinstance(pic, np.ndarray):
raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.")
if pic.ndim == 2:
# if 2D image, add channel dimension (HWC)
pic = np.expand_dims(pic, 2)
if pic.ndim != 3:
raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.")
if pic.shape[-1] > 4:
raise ValueError(f"pic should not have > 4 channels. Got {pic.shape[-1]} channels.")
npimg = pic
if np.issubdtype(npimg.dtype, np.floating) and mode != "F":
npimg = (npimg * 255).astype(np.uint8)
if npimg.shape[2] == 1:
expected_mode = None
npimg = npimg[:, :, 0]
if npimg.dtype == np.uint8:
expected_mode = "L"
elif npimg.dtype == np.int16:
expected_mode = "I;16" if sys.byteorder == "little" else "I;16B"
elif npimg.dtype == np.int32:
expected_mode = "I"
elif npimg.dtype == np.float32:
expected_mode = "F"
if mode is not None and mode != expected_mode:
raise ValueError(f"Incorrect mode ({mode}) supplied for input type {np.dtype}. Should be {expected_mode}")
mode = expected_mode
elif npimg.shape[2] == 2:
permitted_2_channel_modes = ["LA"]
if mode is not None and mode not in permitted_2_channel_modes:
raise ValueError(f"Only modes {permitted_2_channel_modes} are supported for 2D inputs")
if mode is None and npimg.dtype == np.uint8:
mode = "LA"
elif npimg.shape[2] == 4:
permitted_4_channel_modes = ["RGBA", "CMYK", "RGBX"]
if mode is not None and mode not in permitted_4_channel_modes:
raise ValueError(f"Only modes {permitted_4_channel_modes} are supported for 4D inputs")
if mode is None and npimg.dtype == np.uint8:
mode = "RGBA"
else:
permitted_3_channel_modes = ["RGB", "YCbCr", "HSV"]
if mode is not None and mode not in permitted_3_channel_modes:
raise ValueError(f"Only modes {permitted_3_channel_modes} are supported for 3D inputs")
if mode is None and npimg.dtype == np.uint8:
mode = "RGB"
if mode is None:
raise TypeError(f"Input type {npimg.dtype} is not supported")
return Image.fromarray(npimg, mode=mode)
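# Minimal usage sketch (tensor shape illustrative): a float CHW tensor is scaled to
# uint8 and converted to an RGB PIL image.
#     >>> im = to_pil_image(torch.rand(3, 8, 8))
#     >>> im.mode, im.size
#     ('RGB', (8, 8))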
def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
"""Normalize a float tensor image with mean and standard deviation.
This transform does not support PIL Image.
.. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.
See :class:`~torchvision.transforms.Normalize` for more details.
Args:
tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
inplace(bool,optional): Bool to make this operation inplace.
Returns:
Tensor: Normalized Tensor image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(normalize)
if not isinstance(tensor, torch.Tensor):
raise TypeError(f"img should be Tensor Image. Got {type(tensor)}")
return F_t.normalize(tensor, mean=mean, std=std, inplace=inplace)
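# Minimal usage sketch (the statistics are the common ImageNet values, used here
# purely for illustration):
#     >>> t = torch.rand(3, 8, 8)
#     >>> normalize(t, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]).shape
#     torch.Size([3, 8, 8])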
def _compute_resized_output_size(
image_size: Tuple[int, int],
size: Optional[List[int]],
max_size: Optional[int] = None,
allow_size_none: bool = False, # only True in v2
) -> List[int]:
h, w = image_size
short, long = (w, h) if w <= h else (h, w)
if size is None:
if not allow_size_none:
raise ValueError("This should never happen!!")
if not isinstance(max_size, int):
raise ValueError(f"max_size must be an integer when size is None, but got {max_size} instead.")
new_short, new_long = int(max_size * short / long), max_size
new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
elif len(size) == 1: # specified size only for the smallest edge
requested_new_short = size if isinstance(size, int) else size[0]
new_short, new_long = requested_new_short, int(requested_new_short * long / short)
if max_size is not None:
if max_size <= requested_new_short:
raise ValueError(
f"max_size = {max_size} must be strictly greater than the requested "
f"size for the smaller edge size = {size}"
)
if new_long > max_size:
new_short, new_long = int(max_size * new_short / new_long), max_size
new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
else: # specified both h and w
new_w, new_h = size[1], size[0]
return [new_h, new_w]
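# Worked example of the logic above (sizes illustrative): the smaller edge is matched
# to ``size`` and the larger edge is clamped to ``max_size`` when given.
#     >>> _compute_resized_output_size((300, 500), [256])
#     [256, 426]
#     >>> _compute_resized_output_size((300, 500), [256], max_size=400)
#     [240, 400]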
def resize(
img: Tensor,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
max_size: Optional[int] = None,
antialias: Optional[bool] = True,
) -> Tensor:
r"""Resize the input image to the given size.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
img (PIL Image or Tensor): Image to be resized.
size (sequence or int): Desired output size. If size is a sequence like
(h, w), the output size will be matched to this. If size is an int,
the smaller edge of the image will be matched to this number maintaining
the aspect ratio. i.e, if height > width, then image will be rescaled to
:math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.
.. note::
In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`.
Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
``InterpolationMode.NEAREST_EXACT``, ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are
supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
max_size (int, optional): The maximum allowed for the longer edge of
the resized image. If the longer edge of the image is greater
than ``max_size`` after being resized according to ``size``,
``size`` will be overruled so that the longer edge is equal to
``max_size``.
As a result, the smaller edge may be shorter than ``size``. This
is only supported if ``size`` is an int (or a sequence of length
1 in torchscript mode).
antialias (bool, optional): Whether to apply antialiasing.
It only affects **tensors** with bilinear or bicubic modes and it is
ignored otherwise: on PIL images, antialiasing is always applied on
bilinear or bicubic modes; on other modes (for PIL images and
tensors), antialiasing makes no sense and this parameter is ignored.
Possible values are:
- ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
              Other modes aren't affected. This is probably what you want to use.
- ``False``: will not apply antialiasing for tensors on any mode. PIL
images are still antialiased on bilinear or bicubic modes, because
              PIL doesn't support disabling antialiasing.
- ``None``: equivalent to ``False`` for tensors and ``True`` for
PIL images. This value exists for legacy reasons and you probably
don't want to use it unless you really know what you are doing.
The default value changed from ``None`` to ``True`` in
v0.17, for the PIL and Tensor backends to be consistent.
Returns:
PIL Image or Tensor: Resized image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(resize)
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
elif not isinstance(interpolation, InterpolationMode):
raise TypeError(
"Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
)
if isinstance(size, (list, tuple)):
if len(size) not in [1, 2]:
raise ValueError(
f"Size must be an int or a 1 or 2 element tuple/list, not a {len(size)} element tuple/list"
)
if max_size is not None and len(size) != 1:
raise ValueError(
"max_size should only be passed if size specifies the length of the smaller edge, "
"i.e. size should be an int or a sequence of length 1 in torchscript mode."
)
_, image_height, image_width = get_dimensions(img)
if isinstance(size, int):
size = [size]
output_size = _compute_resized_output_size((image_height, image_width), size, max_size)
if [image_height, image_width] == output_size:
return img
if not isinstance(img, torch.Tensor):
if antialias is False:
warnings.warn("Anti-alias option is always applied for PIL Image input. Argument antialias is ignored.")
pil_interpolation = pil_modes_mapping[interpolation]
return F_pil.resize(img, size=output_size, interpolation=pil_interpolation)
return F_t.resize(img, size=output_size, interpolation=interpolation.value, antialias=antialias)
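# Minimal usage sketch (tensor shape illustrative): the smaller edge (300) is matched
# to 256 and the aspect ratio is kept.
#     >>> img = torch.randint(0, 256, (3, 300, 500), dtype=torch.uint8)
#     >>> resize(img, [256]).shape
#     torch.Size([3, 256, 426])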
def pad(img: Tensor, padding: List[int], fill: Union[int, float] = 0, padding_mode: str = "constant") -> Tensor:
r"""Pad the given image on all sides with the given "pad" value.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
at most 3 leading dimensions for mode edge,
and an arbitrary number of leading dimensions for mode constant
Args:
img (PIL Image or Tensor): Image to be padded.
padding (int or sequence): Padding on each border. If a single int is provided this
is used to pad all borders. If sequence of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a sequence of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively.
.. note::
In torchscript mode padding as single int is not supported, use a sequence of
length 1: ``[padding, ]``.
fill (number or tuple): Pixel fill value for constant fill. Default is 0.
If a tuple of length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
Only number is supported for torch Tensor.
Only int or tuple value is supported for PIL Image.
padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value at the edge of the image.
If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2
- reflect: pads with reflection of image without repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
Returns:
PIL Image or Tensor: Padded image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(pad)
if not isinstance(img, torch.Tensor):
return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
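# Minimal usage sketch (shape illustrative): padding [2, 1] adds 2 pixels left/right
# and 1 pixel top/bottom.
#     >>> pad(torch.zeros(1, 4, 4), [2, 1]).shape
#     torch.Size([1, 6, 8])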
def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
"""Crop the given image at specified location and output size.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
If image size is smaller than output size along any edge, image is padded with 0 and then cropped.
Args:
img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
top (int): Vertical component of the top left corner of the crop box.
left (int): Horizontal component of the top left corner of the crop box.
height (int): Height of the crop box.
width (int): Width of the crop box.
Returns:
PIL Image or Tensor: Cropped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(crop)
if not isinstance(img, torch.Tensor):
return F_pil.crop(img, top, left, height, width)
return F_t.crop(img, top, left, height, width)
def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
"""Crops the given image at the center.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.
Args:
img (PIL Image or Tensor): Image to be cropped.
output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int,
it is used for both directions.
Returns:
PIL Image or Tensor: Cropped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(center_crop)
if isinstance(output_size, numbers.Number):
output_size = (int(output_size), int(output_size))
elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
output_size = (output_size[0], output_size[0])
_, image_height, image_width = get_dimensions(img)
crop_height, crop_width = output_size
if crop_width > image_width or crop_height > image_height:
padding_ltrb = [
(crop_width - image_width) // 2 if crop_width > image_width else 0,
(crop_height - image_height) // 2 if crop_height > image_height else 0,
(crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
(crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
]
img = pad(img, padding_ltrb, fill=0) # PIL uses fill value 0
_, image_height, image_width = get_dimensions(img)
if crop_width == image_width and crop_height == image_height:
return img
crop_top = int(round((image_height - crop_height) / 2.0))
crop_left = int(round((image_width - crop_width) / 2.0))
return crop(img, crop_top, crop_left, crop_height, crop_width)
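# Minimal usage sketch (shape illustrative):
#     >>> center_crop(torch.rand(3, 10, 12), [4, 6]).shape
#     torch.Size([3, 4, 6])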
def resized_crop(
img: Tensor,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
antialias: Optional[bool] = True,
) -> Tensor:
"""Crop the given image and resize it to desired size.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.
Args:
img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
top (int): Vertical component of the top left corner of the crop box.
left (int): Horizontal component of the top left corner of the crop box.
height (int): Height of the crop box.
width (int): Width of the crop box.
size (sequence or int): Desired output size. Same semantics as ``resize``.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`.
Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
``InterpolationMode.NEAREST_EXACT``, ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are
supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
antialias (bool, optional): Whether to apply antialiasing.
It only affects **tensors** with bilinear or bicubic modes and it is
ignored otherwise: on PIL images, antialiasing is always applied on
bilinear or bicubic modes; on other modes (for PIL images and
tensors), antialiasing makes no sense and this parameter is ignored.
Possible values are:
- ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
              Other modes aren't affected. This is probably what you want to use.
- ``False``: will not apply antialiasing for tensors on any mode. PIL
images are still antialiased on bilinear or bicubic modes, because
              PIL doesn't support disabling antialiasing.
- ``None``: equivalent to ``False`` for tensors and ``True`` for
PIL images. This value exists for legacy reasons and you probably
don't want to use it unless you really know what you are doing.
The default value changed from ``None`` to ``True`` in
v0.17, for the PIL and Tensor backends to be consistent.
Returns:
PIL Image or Tensor: Cropped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(resized_crop)
img = crop(img, top, left, height, width)
img = resize(img, size, interpolation, antialias=antialias)
return img
def hflip(img: Tensor) -> Tensor:
"""Horizontally flip the given image.
Args:
img (PIL Image or Tensor): Image to be flipped. If img
is a Tensor, it is expected to be in [..., H, W] format,
where ... means it can have an arbitrary number of leading
dimensions.
Returns:
PIL Image or Tensor: Horizontally flipped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(hflip)
if not isinstance(img, torch.Tensor):
return F_pil.hflip(img)
return F_t.hflip(img)
def _get_perspective_coeffs(startpoints: List[List[int]], endpoints: List[List[int]]) -> List[float]:
"""Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.
In Perspective Transform each pixel (x, y) in the original image gets transformed as,
(x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )
Args:
startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
Returns:
octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
"""
if len(startpoints) != 4 or len(endpoints) != 4:
raise ValueError(
f"Please provide exactly four corners, got {len(startpoints)} startpoints and {len(endpoints)} endpoints."
)
a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float64)
for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
b_matrix = torch.tensor(startpoints, dtype=torch.float64).view(8)
# do least squares in double precision to prevent numerical issues
res = torch.linalg.lstsq(a_matrix, b_matrix, driver="gels").solution.to(torch.float32)
output: List[float] = res.tolist()
return output
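# Minimal sanity sketch (corner lists illustrative): mapping a square onto itself
# yields (approximately) the identity coefficients (1, 0, 0, 0, 1, 0, 0, 0).
#     >>> pts = [[0, 0], [9, 0], [9, 9], [0, 9]]
#     >>> len(_get_perspective_coeffs(pts, pts))
#     8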
def perspective(
img: Tensor,
startpoints: List[List[int]],
endpoints: List[List[int]],
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
fill: Optional[List[float]] = None,
) -> Tensor:
"""Perform perspective transform of the given image.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
img (PIL Image or Tensor): Image to be transformed.
startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
.. note::
In torchscript mode single int/float value is not supported, please use a sequence
of length 1: ``[value, ]``.
Returns:
PIL Image or Tensor: transformed Image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(perspective)
coeffs = _get_perspective_coeffs(startpoints, endpoints)
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
elif not isinstance(interpolation, InterpolationMode):
raise TypeError(
"Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
)
if not isinstance(img, torch.Tensor):
pil_interpolation = pil_modes_mapping[interpolation]
return F_pil.perspective(img, coeffs, interpolation=pil_interpolation, fill=fill)
return F_t.perspective(img, coeffs, interpolation=interpolation.value, fill=fill)
def vflip(img: Tensor) -> Tensor:
"""Vertically flip the given image.
Args:
img (PIL Image or Tensor): Image to be flipped. If img
is a Tensor, it is expected to be in [..., H, W] format,
where ... means it can have an arbitrary number of leading
dimensions.
Returns:
PIL Image or Tensor: Vertically flipped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(vflip)
if not isinstance(img, torch.Tensor):
return F_pil.vflip(img)
return F_t.vflip(img)
def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""Crop the given image into four corners and the central crop.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
img (PIL Image or Tensor): Image to be cropped.
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
Returns:
tuple: tuple (tl, tr, bl, br, center)
Corresponding top left, top right, bottom left, bottom right and center crop.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(five_crop)
if isinstance(size, numbers.Number):
size = (int(size), int(size))
elif isinstance(size, (tuple, list)) and len(size) == 1:
size = (size[0], size[0])
if len(size) != 2:
raise ValueError("Please provide only two dimensions (h, w) for size.")
_, image_height, image_width = get_dimensions(img)
crop_height, crop_width = size
if crop_width > image_width or crop_height > image_height:
msg = "Requested crop size {} is bigger than input size {}"
raise ValueError(msg.format(size, (image_height, image_width)))
tl = crop(img, 0, 0, crop_height, crop_width)
tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)
center = center_crop(img, [crop_height, crop_width])
return tl, tr, bl, br, center
def ten_crop(
img: Tensor, size: List[int], vertical_flip: bool = False
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
"""Generate ten cropped images from the given image.
Crop the given image into four corners and the central crop plus the
flipped version of these (horizontal flipping is used by default).
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
.. Note::
This transform returns a tuple of images and there may be a
mismatch in the number of inputs and targets your ``Dataset`` returns.
Args:
img (PIL Image or Tensor): Image to be cropped.
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
vertical_flip (bool): Use vertical flipping instead of horizontal
Returns:
tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
Corresponding top left, top right, bottom left, bottom right and
center crop and same for the flipped image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(ten_crop)
if isinstance(size, numbers.Number):
size = (int(size), int(size))
elif isinstance(size, (tuple, list)) and len(size) == 1:
size = (size[0], size[0])
if len(size) != 2:
raise ValueError("Please provide only two dimensions (h, w) for size.")
first_five = five_crop(img, size)
if vertical_flip:
img = vflip(img)
else:
img = hflip(img)
second_five = five_crop(img, size)
return first_five + second_five
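# Minimal usage sketch (shapes illustrative): five_crop returns 5 crops, ten_crop
# returns those plus their flipped counterparts.
#     >>> crops = ten_crop(torch.rand(3, 32, 32), [24, 24])
#     >>> len(crops), crops[0].shape
#     (10, torch.Size([3, 24, 24]))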
def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
"""Adjust brightness of an image.
Args:
img (PIL Image or Tensor): Image to be adjusted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
brightness_factor (float): How much to adjust the brightness. Can be
any non-negative number. 0 gives a black image, 1 gives the
original image while 2 increases the brightness by a factor of 2.
Returns:
PIL Image or Tensor: Brightness adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_brightness)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_brightness(img, brightness_factor)
return F_t.adjust_brightness(img, brightness_factor)
def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
"""Adjust contrast of an image.
Args:
img (PIL Image or Tensor): Image to be adjusted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
contrast_factor (float): How much to adjust the contrast. Can be any
non-negative number. 0 gives a solid gray image, 1 gives the
original image while 2 increases the contrast by a factor of 2.
Returns:
PIL Image or Tensor: Contrast adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_contrast)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_contrast(img, contrast_factor)
return F_t.adjust_contrast(img, contrast_factor)
def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
"""Adjust color saturation of an image.
Args:
img (PIL Image or Tensor): Image to be adjusted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
saturation_factor (float): How much to adjust the saturation. 0 will
give a black and white image, 1 will give the original image while
2 will enhance the saturation by a factor of 2.
Returns:
PIL Image or Tensor: Saturation adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_saturation)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_saturation(img, saturation_factor)
return F_t.adjust_saturation(img, saturation_factor)
def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
"""Adjust hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
See `Hue`_ for more details.
.. _Hue: https://en.wikipedia.org/wiki/Hue
Args:
img (PIL Image or Tensor): Image to be adjusted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image mode "1", "I", "F" and modes with transparency (alpha channel) are not supported.
            Note: the pixel values of the input image have to be non-negative for conversion to HSV space;
thus it does not work if you normalize your image to an interval with negative values,
or use an interpolation that generates negative values before using this function.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL Image or Tensor: Hue adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_hue)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_hue(img, hue_factor)
return F_t.adjust_hue(img, hue_factor)
def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
r"""Perform gamma correction on an image.
Also known as Power Law Transform. Intensities in RGB mode are adjusted
based on the following equation:
.. math::
I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}
See `Gamma Correction`_ for more details.
.. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction
Args:
img (PIL Image or Tensor): PIL Image to be adjusted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, modes with transparency (alpha channel) are not supported.
gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            A gamma larger than 1 makes the shadows darker,
            while a gamma smaller than 1 makes dark regions lighter.
gain (float): The constant multiplier.
Returns:
PIL Image or Tensor: Gamma correction adjusted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_gamma)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_gamma(img, gamma, gain)
return F_t.adjust_gamma(img, gamma, gain)
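# Worked example of the formula above (values illustrative): with gain=1 and
# gamma=2.2, a mid-grey input of 128 maps to 255 * (128 / 255) ** 2.2 ≈ 56.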
def _get_inverse_affine_matrix(
center: List[float], angle: float, translate: List[float], scale: float, shear: List[float], inverted: bool = True
) -> List[float]:
# Helper method to compute inverse matrix for affine transformation
# Pillow requires inverse affine transformation matrix:
# Affine matrix is : M = T * C * RotateScaleShear * C^-1
#
# where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
# C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
# RotateScaleShear is rotation with scale and shear matrix
#
# RotateScaleShear(a, s, (sx, sy)) =
# = R(a) * S(s) * SHy(sy) * SHx(sx)
# = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
# [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
# [ 0 , 0 , 1 ]
# where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
# SHx(s) = [1, -tan(s)] and SHy(s) = [1 , 0]
# [0, 1 ] [-tan(s), 1]
#
# Thus, the inverse is M^-1 = C * RotateScaleShear^-1 * C^-1 * T^-1
rot = math.radians(angle)
sx = math.radians(shear[0])
sy = math.radians(shear[1])
cx, cy = center
tx, ty = translate
# RSS without scaling
a = math.cos(rot - sy) / math.cos(sy)
b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
c = math.sin(rot - sy) / math.cos(sy)
d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)
if inverted:
# Inverted rotation matrix with scale and shear
# det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
matrix = [d, -b, 0.0, -c, a, 0.0]
matrix = [x / scale for x in matrix]
# Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)
# Apply center translation: C * RSS^-1 * C^-1 * T^-1
matrix[2] += cx
matrix[5] += cy
else:
matrix = [a, b, 0.0, c, d, 0.0]
matrix = [x * scale for x in matrix]
# Apply inverse of center translation: RSS * C^-1
matrix[2] += matrix[0] * (-cx) + matrix[1] * (-cy)
matrix[5] += matrix[3] * (-cx) + matrix[4] * (-cy)
# Apply translation and center : T * C * RSS * C^-1
matrix[2] += cx + tx
matrix[5] += cy + ty
return matrix
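# Worked example (parameters illustrative): a pure 90 degree rotation about the
# origin (center=[0, 0], translate=[0, 0], scale=1, shear=[0, 0]) yields, up to
# floating point error, the inverse matrix [0, 1, 0, -1, 0, 0], i.e. output
# coordinates (x, y) map back to input coordinates (y, -x).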
def rotate(
img: Tensor,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[int]] = None,
fill: Optional[List[float]] = None,
) -> Tensor:
"""Rotate the image by angle.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
img (PIL Image or Tensor): image to be rotated.
angle (number): rotation angle value in degrees, counter-clockwise.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
expand (bool, optional): Optional expansion flag.
If true, expands the output image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
Default is the center of the image.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
.. note::
In torchscript mode single int/float value is not supported, please use a sequence
of length 1: ``[value, ]``.
Returns:
PIL Image or Tensor: Rotated image.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(rotate)
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
elif not isinstance(interpolation, InterpolationMode):
raise TypeError(
"Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
)
if not isinstance(angle, (int, float)):
raise TypeError("Argument angle should be int or float")
if center is not None and not isinstance(center, (list, tuple)):
raise TypeError("Argument center should be a sequence")
if not isinstance(img, torch.Tensor):
pil_interpolation = pil_modes_mapping[interpolation]
return F_pil.rotate(img, angle=angle, interpolation=pil_interpolation, expand=expand, center=center, fill=fill)
center_f = [0.0, 0.0]
if center is not None:
_, height, width = get_dimensions(img)
# Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])]
# due to current incoherence of rotation angle direction between affine and rotate implementations
# we need to set -angle.
matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
return F_t.rotate(img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill)
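# Minimal usage sketch (shape illustrative): with expand=False (the default) the
# output keeps the input size.
#     >>> rotate(torch.rand(3, 16, 16), 45.0).shape
#     torch.Size([3, 16, 16])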
def affine(
img: Tensor,
angle: float,
translate: List[int],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
center: Optional[List[int]] = None,
) -> Tensor:
"""Apply affine transformation on the image keeping image center invariant.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
img (PIL Image or Tensor): image to transform.
angle (number): rotation angle in degrees between -180 and 180, clockwise direction.
translate (sequence of integers): horizontal and vertical translations (post-rotation translation)
scale (float): overall scale
shear (float or sequence): shear angle value in degrees between -180 to 180, clockwise direction.
If a sequence is specified, the first value corresponds to a shear parallel to the x-axis, while
the second value corresponds to a shear parallel to the y-axis.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
.. note::
In torchscript mode single int/float value is not supported, please use a sequence
of length 1: ``[value, ]``.
center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
Default is the center of the image.
Returns:
PIL Image or Tensor: Transformed image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(affine)
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
elif not isinstance(interpolation, InterpolationMode):
raise TypeError(
"Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
)
if not isinstance(angle, (int, float)):
raise TypeError("Argument angle should be int or float")
if not isinstance(translate, (list, tuple)):
raise TypeError("Argument translate should be a sequence")
if len(translate) != 2:
raise ValueError("Argument translate should be a sequence of length 2")
if scale <= 0.0:
raise ValueError("Argument scale should be positive")
if not isinstance(shear, (numbers.Number, (list, tuple))):
raise TypeError("Shear should be either a single value or a sequence of two values")
if isinstance(angle, int):
angle = float(angle)
if isinstance(translate, tuple):
translate = list(translate)
if isinstance(shear, numbers.Number):
shear = [shear, 0.0]
if isinstance(shear, tuple):
shear = list(shear)
if len(shear) == 1:
shear = [shear[0], shear[0]]
if len(shear) != 2:
raise ValueError(f"Shear should be a sequence containing two values. Got {shear}")
if center is not None and not isinstance(center, (list, tuple)):
raise TypeError("Argument center should be a sequence")
_, height, width = get_dimensions(img)
if not isinstance(img, torch.Tensor):
# center = (width * 0.5 + 0.5, height * 0.5 + 0.5)
# it is visually better to estimate the center without 0.5 offset
# otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
if center is None:
center = [width * 0.5, height * 0.5]
matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
pil_interpolation = pil_modes_mapping[interpolation]
return F_pil.affine(img, matrix=matrix, interpolation=pil_interpolation, fill=fill)
center_f = [0.0, 0.0]
if center is not None:
_, height, width = get_dimensions(img)
# Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])]
translate_f = [1.0 * t for t in translate]
matrix = _get_inverse_affine_matrix(center_f, angle, translate_f, scale, shear)
return F_t.affine(img, matrix=matrix, interpolation=interpolation.value, fill=fill)
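# Minimal usage sketch (parameters illustrative): affine always keeps the input size.
#     >>> affine(torch.rand(3, 16, 16), angle=30.0, translate=[2, 1], scale=1.2, shear=[0.0]).shape
#     torch.Size([3, 16, 16])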
# to_grayscale() is a stand-alone functional that is never called from the
# transform classes; it appears to be kept only for backward compatibility.
@torch.jit.unused
def to_grayscale(img, num_output_channels=1):
"""Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image.
This transform does not support torch Tensor.
Args:
img (PIL Image): PIL Image to be converted to grayscale.
num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1.
Returns:
PIL Image: Grayscale version of the image.
- if num_output_channels = 1 : returned image is single channel
- if num_output_channels = 3 : returned image is 3 channel with r = g = b
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(to_grayscale)
if isinstance(img, Image.Image):
return F_pil.to_grayscale(img, num_output_channels)
raise TypeError("Input should be PIL Image")
def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
"""Convert RGB image to grayscale version of image.
If the image is torch Tensor, it is expected
to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions
Note:
        Please note that this method supports only RGB images as input. For inputs in other color spaces,
        please consider using :meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image.
Args:
img (PIL Image or Tensor): RGB Image to be converted to grayscale.
num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1.
Returns:
PIL Image or Tensor: Grayscale version of the image.
- if num_output_channels = 1 : returned image is single channel
- if num_output_channels = 3 : returned image is 3 channel with r = g = b
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(rgb_to_grayscale)
if not isinstance(img, torch.Tensor):
return F_pil.to_grayscale(img, num_output_channels)
return F_t.rgb_to_grayscale(img, num_output_channels)
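# Minimal usage sketch (shape illustrative): a 3-channel tensor collapses to a single
# luminance channel by default.
#     >>> rgb_to_grayscale(torch.rand(3, 8, 8)).shape
#     torch.Size([1, 8, 8])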
def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
"""Erase the input Tensor Image with given value.
This transform does not support PIL Image.
Args:
img (Tensor Image): Tensor image of size (C, H, W) to be erased
i (int): i in (i, j), i.e. the row coordinate of the upper left corner.
j (int): j in (i, j), i.e. the column coordinate of the upper left corner.
h (int): Height of the erased region.
w (int): Width of the erased region.
v: Erasing value.
inplace (bool, optional): Whether to perform the operation in place. Default is False.
Returns:
Tensor Image: Erased image.
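Example (illustrative sketch; the shape, region and value are arbitrary assumptions)::
>>> img = torch.rand(3, 32, 32)
>>> out = erase(img, i=4, j=4, h=8, w=8, v=torch.tensor(0.0))  # zero out an 8x8 patch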
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(erase)
if not isinstance(img, torch.Tensor):
raise TypeError(f"img should be Tensor Image. Got {type(img)}")
return F_t.erase(img, i, j, h, w, v, inplace=inplace)
def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
"""Performs Gaussian blurring on the image by given kernel
The convolution will be using reflection padding corresponding to the kernel size, to maintain the input shape.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means at most one leading dimension.
Args:
img (PIL Image or Tensor): Image to be blurred
kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
like ``(kx, ky)`` or a single integer for square kernels.
.. note::
In torchscript mode kernel_size as single int is not supported, use a sequence of
length 1: ``[ksize, ]``.
sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
same sigma in both X/Y directions. If None, then it is computed using
``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
Default, None.
.. note::
In torchscript mode sigma as single float is
not supported, use a sequence of length 1: ``[sigma, ]``.
Returns:
PIL Image or Tensor: Gaussian Blurred version of the image.
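Example (illustrative sketch; the shape, kernel size and sigma are arbitrary assumptions)::
>>> img = torch.rand(3, 32, 32)
>>> blurred = gaussian_blur(img, kernel_size=[5, 5], sigma=[1.5, 1.5])
>>> blurred.shape  # torch.Size([3, 32, 32]); reflection padding keeps the input shape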
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(gaussian_blur)
if not isinstance(kernel_size, (int, list, tuple)):
raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}")
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
if len(kernel_size) != 2:
raise ValueError(f"If kernel_size is a sequence its length should be 2. Got {len(kernel_size)}")
for ksize in kernel_size:
if ksize % 2 == 0 or ksize < 0:
raise ValueError(f"kernel_size should have odd and positive integers. Got {kernel_size}")
if sigma is None:
sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]
if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}")
if isinstance(sigma, (int, float)):
sigma = [float(sigma), float(sigma)]
if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
sigma = [sigma[0], sigma[0]]
if len(sigma) != 2:
raise ValueError(f"If sigma is a sequence, its length should be 2. Got {len(sigma)}")
for s in sigma:
if s <= 0.0:
raise ValueError(f"sigma should have positive values. Got {sigma}")
t_img = img
if not isinstance(img, torch.Tensor):
if not F_pil._is_pil_image(img):
raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}")
t_img = pil_to_tensor(img)
output = F_t.gaussian_blur(t_img, kernel_size, sigma)
if not isinstance(img, torch.Tensor):
output = to_pil_image(output, mode=img.mode)
return output
def invert(img: Tensor) -> Tensor:
"""Invert the colors of an RGB/grayscale image.
Args:
img (PIL Image or Tensor): Image to have its colors inverted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Returns:
PIL Image or Tensor: Color inverted image.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(invert)
if not isinstance(img, torch.Tensor):
return F_pil.invert(img)
return F_t.invert(img)
def posterize(img: Tensor, bits: int) -> Tensor:
"""Posterize an image by reducing the number of bits for each color channel.
Args:
img (PIL Image or Tensor): Image to have its colors posterized.
If img is torch Tensor, it should be of type torch.uint8, and
it is expected to be in [..., 1 or 3, H, W] format, where ... means
it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
bits (int): The number of bits to keep for each channel (0-8).
Returns:
PIL Image or Tensor: Posterized image.
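Example (illustrative sketch assuming a uint8 tensor input)::
>>> img = torch.randint(0, 256, (3, 8, 8), dtype=torch.uint8)
>>> out = posterize(img, bits=2)  # keep only the 2 most significant bits per channel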
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(posterize)
if not (0 <= bits <= 8):
raise ValueError(f"The number if bits should be between 0 and 8. Got {bits}")
if not isinstance(img, torch.Tensor):
return F_pil.posterize(img, bits)
return F_t.posterize(img, bits)
def solarize(img: Tensor, threshold: float) -> Tensor:
"""Solarize an RGB/grayscale image by inverting all pixel values above a threshold.
Args:
img (PIL Image or Tensor): Image to have its colors inverted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
threshold (float): All pixels equal to or above this value are inverted.
Returns:
PIL Image or Tensor: Solarized image.
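Example (illustrative sketch assuming a uint8 tensor input; the threshold is arbitrary)::
>>> img = torch.randint(0, 256, (3, 8, 8), dtype=torch.uint8)
>>> out = solarize(img, threshold=128)  # pixels >= 128 are inverted to 255 - pixel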
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(solarize)
if not isinstance(img, torch.Tensor):
return F_pil.solarize(img, threshold)
return F_t.solarize(img, threshold)
def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
"""Adjust the sharpness of an image.
Args:
img (PIL Image or Tensor): Image to be adjusted.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
sharpness_factor (float): How much to adjust the sharpness. Can be
any non-negative number. 0 gives a blurred image, 1 gives the
original image while 2 increases the sharpness by a factor of 2.
Returns:
PIL Image or Tensor: Sharpness adjusted image.
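Example (illustrative sketch; the shape and factors are arbitrary assumptions)::
>>> img = torch.rand(3, 32, 32)
>>> blurred = adjust_sharpness(img, 0.0)    # fully blurred
>>> sharpened = adjust_sharpness(img, 2.0)  # sharpness increased by a factor of 2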
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(adjust_sharpness)
if not isinstance(img, torch.Tensor):
return F_pil.adjust_sharpness(img, sharpness_factor)
return F_t.adjust_sharpness(img, sharpness_factor)
def autocontrast(img: Tensor) -> Tensor:
"""Maximize contrast of an image by remapping its
pixels per channel so that the lowest becomes black and the lightest
becomes white.
Args:
img (PIL Image or Tensor): Image on which autocontrast is applied.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Returns:
PIL Image or Tensor: An image that was autocontrasted.
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(autocontrast)
if not isinstance(img, torch.Tensor):
return F_pil.autocontrast(img)
return F_t.autocontrast(img)
def equalize(img: Tensor) -> Tensor:
"""Equalize the histogram of an image by applying
a non-linear mapping to the input in order to create a uniform
distribution of grayscale values in the output.
Args:
img (PIL Image or Tensor): Image on which equalize is applied.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
The tensor dtype must be ``torch.uint8`` and values are expected to be in ``[0, 255]``.
If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".
Returns:
PIL Image or Tensor: An image that was equalized.
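Example (illustrative sketch assuming a uint8 tensor input)::
>>> img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
>>> out = equalize(img)  # histogram-equalized, still uint8 with the same shape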
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(equalize)
if not isinstance(img, torch.Tensor):
return F_pil.equalize(img)
return F_t.equalize(img)
def elastic_transform(
img: Tensor,
displacement: Tensor,
interpolation: InterpolationMode = InterpolationMode.BILINEAR,
fill: Optional[List[float]] = None,
) -> Tensor:
"""Transform a tensor image with elastic transformations.
Given alpha and sigma, it will generate displacement
vectors for all pixels based on random offsets. Alpha controls the strength
and sigma controls the smoothness of the displacements.
The displacements are added to an identity grid and the resulting grid is
used to grid_sample from the image.
Applications:
Randomly transforms the morphology of objects in images and produces a
see-through-water-like effect.
Args:
img (PIL Image or Tensor): Image on which elastic_transform is applied.
If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".
displacement (Tensor): The displacement field. Expected shape is [1, H, W, 2].
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`.
Default is ``InterpolationMode.BILINEAR``.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
fill (number or str or tuple): Pixel fill value for constant fill. Default is 0.
If a tuple of length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
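Example (illustrative sketch; a zero displacement field is an arbitrary assumption that leaves the image unchanged up to interpolation)::
>>> img = torch.rand(3, 32, 32)
>>> displacement = torch.zeros(1, 32, 32, 2)  # must have shape [1, H, W, 2]
>>> out = elastic_transform(img, displacement)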
"""
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(elastic_transform)
# Backward compatibility with integer value
if isinstance(interpolation, int):
warnings.warn(
"Argument interpolation should be of type InterpolationMode instead of int. "
"Please, use InterpolationMode enum."
)
interpolation = _interpolation_modes_from_int(interpolation)
if not isinstance(displacement, torch.Tensor):
raise TypeError("Argument displacement should be a Tensor")
t_img = img
if not isinstance(img, torch.Tensor):
if not F_pil._is_pil_image(img):
raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}")
t_img = pil_to_tensor(img)
shape = t_img.shape
shape = (1,) + shape[-2:] + (2,)
if shape != displacement.shape:
raise ValueError(f"Argument displacement shape should be {shape}, but given {displacement.shape}")
# TODO: if image shape is [N1, N2, ..., C, H, W] and
# displacement is [1, H, W, 2] we need to reshape input image
# so that grid_sampler takes the internal code path for 4D input
output = F_t.elastic_transform(
t_img,
displacement,
interpolation=interpolation.value,
fill=fill,
)
if not isinstance(img, torch.Tensor):
output = to_pil_image(output, mode=img.mode)
return output
```
============================================================================================================================
SOURCE CODE FILE: transforms.py
LINES: 8
SIZE: 85.64 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\transforms.py
ENCODING: utf-8
```py
import math
import numbers
import random
import warnings
from collections.abc import Sequence
from typing import List, Optional, Tuple, Union
import torch
from torch import Tensor
try:
import accimage
except ImportError:
accimage = None
from ..utils import _log_api_usage_once
from . import functional as F
from .functional import _interpolation_modes_from_int, InterpolationMode
__all__ = [
"Compose",
"ToTensor",
"PILToTensor",
"ConvertImageDtype",
"ToPILImage",
"Normalize",
"Resize",
"CenterCrop",
"Pad",
"Lambda",
"RandomApply",
"RandomChoice",
"RandomOrder",
"RandomCrop",
"RandomHorizontalFlip",
"RandomVerticalFlip",
"RandomResizedCrop",
"FiveCrop",
"TenCrop",
"LinearTransformation",
"ColorJitter",
"RandomRotation",
"RandomAffine",
"Grayscale",
"RandomGrayscale",
"RandomPerspective",
"RandomErasing",
"GaussianBlur",
"InterpolationMode",
"RandomInvert",
"RandomPosterize",
"RandomSolarize",
"RandomAdjustSharpness",
"RandomAutocontrast",
"RandomEqualize",
"ElasticTransform",
]
class Compose:
"""Composes several transforms together. This transform does not support torchscript.
Please, see the note below.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.PILToTensor(),
>>> transforms.ConvertImageDtype(torch.float),
>>> ])
.. note::
In order to script the transformations, please use ``torch.nn.Sequential`` as below.
>>> transforms = torch.nn.Sequential(
>>> transforms.CenterCrop(10),
>>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
>>> )
>>> scripted_transforms = torch.jit.script(transforms)
Make sure to use only scriptable transformations, i.e. transformations that work with ``torch.Tensor`` and do not
require ``lambda`` functions or ``PIL.Image``.
"""
def __init__(self, transforms):
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(self)
self.transforms = transforms
def __call__(self, img):
for t in self.transforms:
img = t(img)
return img
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += f" {t}"
format_string += "\n)"
return format_string
class ToTensor:
"""Convert a PIL Image or ndarray to tensor and scale the values accordingly.
This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
or if the numpy.ndarray has dtype = np.uint8
In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
"""
def __init__(self) -> None:
_log_api_usage_once(self)
def __call__(self, pic):
"""
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
return F.to_tensor(pic)
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
class PILToTensor:
"""Convert a PIL Image to a tensor of the same type - this does not scale values.
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
def __init__(self) -> None:
_log_api_usage_once(self)
def __call__(self, pic):
"""
.. note::
A deep copy of the underlying array is performed.
Args:
pic (PIL Image): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
return F.pil_to_tensor(pic)
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
class ConvertImageDtype(torch.nn.Module):
"""Convert a tensor image to the given ``dtype`` and scale the values accordingly.
This function does not support PIL Image.
Args:
dtype (torch.dtype): Desired data type of the output
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
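Example (illustrative sketch; the input dtype and shape are arbitrary assumptions)::
>>> converter = ConvertImageDtype(torch.float32)
>>> img = torch.randint(0, 256, (3, 8, 8), dtype=torch.uint8)
>>> out = converter(img)  # values rescaled from [0, 255] to [0.0, 1.0]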
"""
def __init__(self, dtype: torch.dtype) -> None:
super().__init__()
_log_api_usage_once(self)
self.dtype = dtype
def forward(self, image):
return F.convert_image_dtype(image, self.dtype)
class ToPILImage:
"""Convert a tensor or an ndarray to PIL Image
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``, ``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
def __init__(self, mode=None):
_log_api_usage_once(self)
self.mode = mode
def __call__(self, pic):
"""
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
Returns:
PIL Image: Image converted to PIL Image.
"""
return F.to_pil_image(pic, self.mode)
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
if self.mode is not None:
format_string += f"mode={self.mode}"
format_string += ")"
return format_string
class Normalize(torch.nn.Module):
"""Normalize a tensor image with mean and standard deviation.
This transform does not support PIL Image.
Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``
channels, this transform will normalize each channel of the input
``torch.*Tensor`` i.e.,
``output[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
This transform acts out of place, i.e., it does not mutate the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
inplace (bool, optional): Whether to make this operation in-place.
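Example (illustrative sketch; the mean/std values are the common ImageNet statistics, used here only as an example)::
>>> normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
>>> img = torch.rand(3, 224, 224)
>>> out = normalize(img)  # (img - mean) / std, applied per channel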
"""
def __init__(self, mean, std, inplace=False):
super().__init__()
_log_api_usage_once(self)
self.mean = mean
self.std = std
self.inplace = inplace
def forward(self, tensor: Tensor) -> Tensor:
"""
Args:
tensor (Tensor): Tensor image to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
return F.normalize(tensor, self.mean, self.std, self.inplace)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(mean={self.mean}, std={self.std})"
class Resize(torch.nn.Module):
"""Resize the input image to the given size.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means a maximum of two leading dimensions
Args:
size (sequence or int): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e., if height > width, then the image will be rescaled to
(size * height / width, size).
.. note::
In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.NEAREST_EXACT``,
``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
max_size (int, optional): The maximum allowed for the longer edge of
the resized image. If the longer edge of the image is greater
than ``max_size`` after being resized according to ``size``,
``size`` will be overruled so that the longer edge is equal to
``max_size``.
As a result, the smaller edge may be shorter than ``size``. This
is only supported if ``size`` is an int (or a sequence of length
1 in torchscript mode).
antialias (bool, optional): Whether to apply antialiasing.
It only affects **tensors** with bilinear or bicubic modes and it is
ignored otherwise: on PIL images, antialiasing is always applied on
bilinear or bicubic modes; on other modes (for PIL images and
tensors), antialiasing makes no sense and this parameter is ignored.
Possible values are:
- ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
Other modes aren't affected. This is probably what you want to use.
- ``False``: will not apply antialiasing for tensors on any mode. PIL
images are still antialiased on bilinear or bicubic modes, because
PIL does not support disabling antialiasing.
- ``None``: equivalent to ``False`` for tensors and ``True`` for
PIL images. This value exists for legacy reasons and you probably
don't want to use it unless you really know what you are doing.
The default value changed from ``None`` to ``True`` in
v0.17, for the PIL and Tensor backends to be consistent.
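Example (illustrative sketch; the sizes are arbitrary assumptions)::
>>> resize = Resize(size=(224, 224))
>>> img = torch.rand(3, 256, 320)
>>> out = resize(img)
>>> out.shape  # torch.Size([3, 224, 224])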
"""
def __init__(self, size, interpolation=InterpolationMode.BILINEAR, max_size=None, antialias=True):
super().__init__()
_log_api_usage_once(self)
if not isinstance(size, (int, Sequence)):
raise TypeError(f"Size should be int or sequence. Got {type(size)}")
if isinstance(size, Sequence) and len(size) not in (1, 2):
raise ValueError("If size is a sequence, it should have 1 or 2 values")
self.size = size
self.max_size = max_size
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
self.interpolation = interpolation
self.antialias = antialias
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be scaled.
Returns:
PIL Image or Tensor: Rescaled image.
"""
return F.resize(img, self.size, self.interpolation, self.max_size, self.antialias)
def __repr__(self) -> str:
detail = f"(size={self.size}, interpolation={self.interpolation.value}, max_size={self.max_size}, antialias={self.antialias})"
return f"{self.__class__.__name__}{detail}"
class CenterCrop(torch.nn.Module):
"""Crops the given image at the center.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
"""
def __init__(self, size):
super().__init__()
_log_api_usage_once(self)
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
PIL Image or Tensor: Cropped image.
"""
return F.center_crop(img, self.size)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size})"
class Pad(torch.nn.Module):
"""Pad the given image on all sides with the given "pad" value.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
at most 3 leading dimensions for mode edge,
and an arbitrary number of leading dimensions for mode constant
Args:
padding (int or sequence): Padding on each border. If a single int is provided this
is used to pad all borders. If sequence of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a sequence of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively.
.. note::
In torchscript mode padding as single int is not supported, use a sequence of
length 1: ``[padding, ]``.
fill (number or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
Only number is supported for torch Tensor.
Only int or tuple value is supported for PIL Image.
padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value at the edge of the image.
If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2
- reflect: pads with reflection of image without repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
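Example (illustrative sketch; the padding values are arbitrary assumptions)::
>>> pad = Pad(padding=(2, 4), fill=0, padding_mode="constant")
>>> img = torch.rand(3, 10, 10)
>>> out = pad(img)  # 2 px on left/right, 4 px on top/bottom
>>> out.shape  # torch.Size([3, 18, 14])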
"""
def __init__(self, padding, fill=0, padding_mode="constant"):
super().__init__()
_log_api_usage_once(self)
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if not isinstance(fill, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate fill arg")
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:
raise ValueError(
f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple"
)
self.padding = padding
self.fill = fill
self.padding_mode = padding_mode
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be padded.
Returns:
PIL Image or Tensor: Padded image.
"""
return F.pad(img, self.padding, self.fill, self.padding_mode)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(padding={self.padding}, fill={self.fill}, padding_mode={self.padding_mode})"
class Lambda:
"""Apply a user-defined lambda as a transform. This transform does not support torchscript.
Args:
lambd (function): Lambda/function to be used for transform.
"""
def __init__(self, lambd):
_log_api_usage_once(self)
if not callable(lambd):
raise TypeError(f"Argument lambd should be callable, got {repr(type(lambd).__name__)}")
self.lambd = lambd
def __call__(self, img):
return self.lambd(img)
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
class RandomTransforms:
"""Base class for a list of transformations with randomness
Args:
transforms (sequence): list of transformations
"""
def __init__(self, transforms):
_log_api_usage_once(self)
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence")
self.transforms = transforms
def __call__(self, *args, **kwargs):
raise NotImplementedError()
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += f" {t}"
format_string += "\n)"
return format_string
class RandomApply(torch.nn.Module):
"""Apply randomly a list of transformations with a given probability.
.. note::
In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of
transforms as shown below:
>>> transforms = transforms.RandomApply(torch.nn.ModuleList([
>>> transforms.ColorJitter(),
>>> ]), p=0.3)
>>> scripted_transforms = torch.jit.script(transforms)
Make sure to use only scriptable transformations, i.e. transformations that work with ``torch.Tensor`` and do not
require ``lambda`` functions or ``PIL.Image``.
Args:
transforms (sequence or torch.nn.Module): list of transformations
p (float): probability
"""
def __init__(self, transforms, p=0.5):
super().__init__()
_log_api_usage_once(self)
self.transforms = transforms
self.p = p
def forward(self, img):
if self.p < torch.rand(1):
return img
for t in self.transforms:
img = t(img)
return img
def __repr__(self) -> str:
format_string = self.__class__.__name__ + "("
format_string += f"\n p={self.p}"
for t in self.transforms:
format_string += "\n"
format_string += f" {t}"
format_string += "\n)"
return format_string
class RandomOrder(RandomTransforms):
"""Apply a list of transformations in a random order. This transform does not support torchscript."""
def __call__(self, img):
order = list(range(len(self.transforms)))
random.shuffle(order)
for i in order:
img = self.transforms[i](img)
return img
class RandomChoice(RandomTransforms):
"""Apply single transformation randomly picked from a list. This transform does not support torchscript."""
def __init__(self, transforms, p=None):
super().__init__(transforms)
if p is not None and not isinstance(p, Sequence):
raise TypeError("Argument p should be a sequence")
self.p = p
def __call__(self, *args):
t = random.choices(self.transforms, weights=self.p)[0]
return t(*args)
def __repr__(self) -> str:
return f"{super().__repr__()}(p={self.p})"
class RandomCrop(torch.nn.Module):
"""Crop the given image at a random location.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions,
but if non-constant padding is used, the input is expected to have at most 2 leading dimensions
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
padding (int or sequence, optional): Optional padding on each border
of the image. Default is None. If a single int is provided this
is used to pad all borders. If sequence of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a sequence of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively.
.. note::
In torchscript mode padding as single int is not supported, use a sequence of
length 1: ``[padding, ]``.
pad_if_needed (boolean): It will pad the image if smaller than the
desired size to avoid raising an exception. Since cropping is done
after padding, the padding seems to be done at a random offset.
fill (number or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
length 3, it is used to fill R, G, B channels respectively.
This value is only used when the padding_mode is constant.
Only number is supported for torch Tensor.
Only int or tuple value is supported for PIL Image.
padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value at the edge of the image.
If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2
- reflect: pads with reflection of image without repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
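Example (illustrative sketch; the size and padding are arbitrary assumptions)::
>>> crop = RandomCrop(size=(32, 32), padding=4, padding_mode="reflect")
>>> img = torch.rand(3, 32, 32)
>>> out = crop(img)  # padded to 40x40, then a random 32x32 crop is taken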
"""
@staticmethod
def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
"""Get parameters for ``crop`` for a random crop.
Args:
img (PIL Image or Tensor): Image to be cropped.
output_size (tuple): Expected output size of the crop.
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
"""
_, h, w = F.get_dimensions(img)
th, tw = output_size
if h < th or w < tw:
raise ValueError(f"Required crop size {(th, tw)} is larger than input image size {(h, w)}")
if w == tw and h == th:
return 0, 0, h, w
i = torch.randint(0, h - th + 1, size=(1,)).item()
j = torch.randint(0, w - tw + 1, size=(1,)).item()
return i, j, th, tw
def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"):
super().__init__()
_log_api_usage_once(self)
self.size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.padding = padding
self.pad_if_needed = pad_if_needed
self.fill = fill
self.padding_mode = padding_mode
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
PIL Image or Tensor: Cropped image.
"""
if self.padding is not None:
img = F.pad(img, self.padding, self.fill, self.padding_mode)
_, height, width = F.get_dimensions(img)
# pad the width if needed
if self.pad_if_needed and width < self.size[1]:
padding = [self.size[1] - width, 0]
img = F.pad(img, padding, self.fill, self.padding_mode)
# pad the height if needed
if self.pad_if_needed and height < self.size[0]:
padding = [0, self.size[0] - height]
img = F.pad(img, padding, self.fill, self.padding_mode)
i, j, h, w = self.get_params(img, self.size)
return F.crop(img, i, j, h, w)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size}, padding={self.padding})"
class RandomHorizontalFlip(torch.nn.Module):
"""Horizontally flip the given image randomly with a given probability.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading
dimensions
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
_log_api_usage_once(self)
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be flipped.
Returns:
PIL Image or Tensor: Randomly flipped image.
"""
if torch.rand(1) < self.p:
return F.hflip(img)
return img
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
class RandomVerticalFlip(torch.nn.Module):
"""Vertically flip the given image randomly with a given probability.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading
dimensions
Args:
p (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
_log_api_usage_once(self)
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be flipped.
Returns:
PIL Image or Tensor: Randomly flipped image.
"""
if torch.rand(1) < self.p:
return F.vflip(img)
return img
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
class RandomPerspective(torch.nn.Module):
"""Performs a random perspective transformation of the given image with a given probability.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.
Default is 0.5.
p (float): probability of the image being transformed. Default is 0.5.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
fill (sequence or number): Pixel fill value for the area outside the transformed
image. Default is ``0``. If given a number, the value is used for all bands respectively.
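Example (illustrative sketch; the parameters are arbitrary assumptions)::
>>> perspective = RandomPerspective(distortion_scale=0.6, p=1.0)
>>> img = torch.rand(3, 64, 64)
>>> out = perspective(img)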
"""
def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0):
super().__init__()
_log_api_usage_once(self)
self.p = p
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
self.interpolation = interpolation
self.distortion_scale = distortion_scale
if fill is None:
fill = 0
elif not isinstance(fill, (Sequence, numbers.Number)):
raise TypeError("Fill should be either a sequence or a number.")
self.fill = fill
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be Perspectively transformed.
Returns:
PIL Image or Tensor: Randomly transformed image.
"""
fill = self.fill
channels, height, width = F.get_dimensions(img)
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * channels
else:
fill = [float(f) for f in fill]
if torch.rand(1) < self.p:
startpoints, endpoints = self.get_params(width, height, self.distortion_scale)
return F.perspective(img, startpoints, endpoints, self.interpolation, fill)
return img
@staticmethod
def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[List[int]], List[List[int]]]:
"""Get parameters for ``perspective`` for a random perspective transform.
Args:
width (int): width of the image.
height (int): height of the image.
distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.
Returns:
List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
"""
half_height = height // 2
half_width = width // 2
topleft = [
int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1,)).item()),
int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1,)).item()),
]
topright = [
int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1,)).item()),
int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1,)).item()),
]
botright = [
int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1,)).item()),
int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1,)).item()),
]
botleft = [
int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1,)).item()),
int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1,)).item()),
]
startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]
endpoints = [topleft, topright, botright, botleft]
return startpoints, endpoints
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
class RandomResizedCrop(torch.nn.Module):
"""Crop a random portion of image and resize it to a given size.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions
A crop of the original image is made: the crop has a random area (H * W)
and a random aspect ratio. This crop is finally resized to the given
size. This is popularly used to train the Inception networks.
Args:
size (int or sequence): expected output size of the crop, for each edge. If size is an
int instead of sequence like (h, w), a square output size ``(size, size)`` is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
.. note::
In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
scale (tuple of float): Specifies the lower and upper bounds for the random area of the crop,
before resizing. The scale is defined with respect to the area of the original image.
ratio (tuple of float): lower and upper bounds for the random aspect ratio of the crop, before
resizing.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.NEAREST_EXACT``,
``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
antialias (bool, optional): Whether to apply antialiasing.
It only affects **tensors** with bilinear or bicubic modes and it is
ignored otherwise: on PIL images, antialiasing is always applied on
bilinear or bicubic modes; on other modes (for PIL images and
tensors), antialiasing makes no sense and this parameter is ignored.
Possible values are:
- ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
Other modes aren't affected. This is probably what you want to use.
- ``False``: will not apply antialiasing for tensors on any mode. PIL
images are still antialiased on bilinear or bicubic modes, because
PIL does not support disabling antialiasing.
- ``None``: equivalent to ``False`` for tensors and ``True`` for
PIL images. This value exists for legacy reasons and you probably
don't want to use it unless you really know what you are doing.
The default value changed from ``None`` to ``True`` in
v0.17, for the PIL and Tensor backends to be consistent.
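Example (illustrative sketch; the size and scale range are arbitrary assumptions)::
>>> rrc = RandomResizedCrop(size=224, scale=(0.5, 1.0))
>>> img = torch.rand(3, 256, 256)
>>> out = rrc(img)
>>> out.shape  # torch.Size([3, 224, 224])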
"""
def __init__(
self,
size,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation=InterpolationMode.BILINEAR,
antialias: Optional[bool] = True,
):
super().__init__()
_log_api_usage_once(self)
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
if not isinstance(scale, Sequence):
raise TypeError("Scale should be a sequence")
if not isinstance(ratio, Sequence):
raise TypeError("Ratio should be a sequence")
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("Scale and ratio should be of kind (min, max)")
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
self.interpolation = interpolation
self.antialias = antialias
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img: Tensor, scale: List[float], ratio: List[float]) -> Tuple[int, int, int, int]:
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image or Tensor): Input image.
scale (list): range of scale of the origin size cropped
ratio (list): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
_, height, width = F.get_dimensions(img)
area = height * width
log_ratio = torch.log(torch.tensor(ratio))
for _ in range(10):
target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
aspect_ratio = torch.exp(torch.empty(1).uniform_(log_ratio[0], log_ratio[1])).item()
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < w <= width and 0 < h <= height:
i = torch.randint(0, height - h + 1, size=(1,)).item()
j = torch.randint(0, width - w + 1, size=(1,)).item()
return i, j, h, w
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(ratio):
w = width
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = height
w = int(round(h * max(ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return i, j, h, w
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped and resized.
Returns:
PIL Image or Tensor: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
return F.resized_crop(img, i, j, h, w, self.size, self.interpolation, antialias=self.antialias)
def __repr__(self) -> str:
interpolate_str = self.interpolation.value
format_string = self.__class__.__name__ + f"(size={self.size}"
format_string += f", scale={tuple(round(s, 4) for s in self.scale)}"
format_string += f", ratio={tuple(round(r, 4) for r in self.ratio)}"
format_string += f", interpolation={interpolate_str}"
format_string += f", antialias={self.antialias})"
return format_string
class FiveCrop(torch.nn.Module):
"""Crop the given image into four corners and the central crop.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading
dimensions
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
inputs and targets your Dataset returns. See below for an example of how to deal with
this.
Args:
size (sequence or int): Desired output size of the crop. If size is an ``int``
instead of sequence like (h, w), a square crop of size (size, size) is made.
If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
Example:
>>> transform = Compose([
>>> FiveCrop(size), # this is a list of PIL Images
>>> Lambda(lambda crops: torch.stack([PILToTensor()(crop) for crop in crops])) # returns a 4D tensor
>>> ])
>>> #In your test loop you can do the following:
>>> input, target = batch # input is a 5d tensor, target is 2d
>>> bs, ncrops, c, h, w = input.size()
>>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
"""
def __init__(self, size):
super().__init__()
_log_api_usage_once(self)
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
tuple of 5 images. Image can be PIL Image or Tensor
"""
return F.five_crop(img, self.size)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size})"
class TenCrop(torch.nn.Module):
"""Crop the given image into four corners and the central crop plus the flipped version of
these (horizontal flipping is used by default).
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading
dimensions
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
inputs and targets your Dataset returns. See below for an example of how to deal with
this.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
vertical_flip (bool): Use vertical flipping instead of horizontal
Example:
>>> transform = Compose([
>>> TenCrop(size), # this is a tuple of PIL Images
>>> Lambda(lambda crops: torch.stack([PILToTensor()(crop) for crop in crops])) # returns a 4D tensor
>>> ])
>>> #In your test loop you can do the following:
>>> input, target = batch # input is a 5d tensor, target is 2d
>>> bs, ncrops, c, h, w = input.size()
>>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
>>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
"""
def __init__(self, size, vertical_flip=False):
super().__init__()
_log_api_usage_once(self)
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
self.vertical_flip = vertical_flip
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be cropped.
Returns:
tuple of 10 images. Image can be PIL Image or Tensor
"""
return F.ten_crop(img, self.size, self.vertical_flip)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(size={self.size}, vertical_flip={self.vertical_flip})"
class LinearTransformation(torch.nn.Module):
"""Transform a tensor image with a square transformation matrix and a mean_vector computed
offline.
This transform does not support PIL Image.
Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and
subtract mean_vector from it which is then followed by computing the dot
product with the transformation matrix and then reshaping the tensor to its
original shape.
Applications:
whitening transformation: Suppose X is a column vector zero-centered data.
Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),
perform SVD on this matrix and pass it as transformation_matrix.
Args:
transformation_matrix (Tensor): tensor [D x D], D = C x H x W
mean_vector (Tensor): tensor [D], D = C x H x W
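Example (illustrative sketch using an identity matrix and zero mean, which leaves the input unchanged)::
>>> d = 3 * 8 * 8
>>> whiten = LinearTransformation(torch.eye(d), torch.zeros(d))
>>> img = torch.rand(3, 8, 8)
>>> out = whiten(img)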
"""
def __init__(self, transformation_matrix, mean_vector):
super().__init__()
_log_api_usage_once(self)
if transformation_matrix.size(0) != transformation_matrix.size(1):
raise ValueError(
"transformation_matrix should be square. Got "
f"{tuple(transformation_matrix.size())} rectangular matrix."
)
if mean_vector.size(0) != transformation_matrix.size(0):
raise ValueError(
f"mean_vector should have the same length {mean_vector.size(0)}"
f" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]"
)
if transformation_matrix.device != mean_vector.device:
raise ValueError(
f"Input tensors should be on the same device. Got {transformation_matrix.device} and {mean_vector.device}"
)
if transformation_matrix.dtype != mean_vector.dtype:
raise ValueError(
f"Input tensors should have the same dtype. Got {transformation_matrix.dtype} and {mean_vector.dtype}"
)
self.transformation_matrix = transformation_matrix
self.mean_vector = mean_vector
def forward(self, tensor: Tensor) -> Tensor:
"""
Args:
tensor (Tensor): Tensor image to be whitened.
Returns:
Tensor: Transformed image.
"""
shape = tensor.shape
n = shape[-3] * shape[-2] * shape[-1]
if n != self.transformation_matrix.shape[0]:
raise ValueError(
"Input tensor and transformation matrix have incompatible shape."
+ f"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != "
+ f"{self.transformation_matrix.shape[0]}"
)
if tensor.device.type != self.mean_vector.device.type:
raise ValueError(
"Input tensor should be on the same device as transformation matrix and mean vector. "
f"Got {tensor.device} vs {self.mean_vector.device}"
)
flat_tensor = tensor.view(-1, n) - self.mean_vector
transformation_matrix = self.transformation_matrix.to(flat_tensor.dtype)
transformed_tensor = torch.mm(flat_tensor, transformation_matrix)
tensor = transformed_tensor.view(shape)
return tensor
def __repr__(self) -> str:
s = (
f"{self.__class__.__name__}(transformation_matrix="
f"{self.transformation_matrix.tolist()}"
f", mean_vector={self.mean_vector.tolist()})"
)
return s
class ColorJitter(torch.nn.Module):
"""Randomly change the brightness, contrast, saturation and hue of an image.
If the image is torch Tensor, it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, mode "1", "I", "F" and modes with transparency (alpha channel) are not supported.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
or the given [min, max]. Should be non negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non-negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
or the given [min, max]. Should be non negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
To jitter hue, the pixel values of the input image has to be non-negative for conversion to HSV space;
thus it does not work if you normalize your image to an interval with negative values,
or use an interpolation that generates negative values before using this function.
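Example (illustrative sketch; the jitter strengths are arbitrary assumptions)::
>>> jitter = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
>>> img = torch.rand(3, 64, 64)
>>> out = jitter(img)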
"""
def __init__(
self,
brightness: Union[float, Tuple[float, float]] = 0,
contrast: Union[float, Tuple[float, float]] = 0,
saturation: Union[float, Tuple[float, float]] = 0,
hue: Union[float, Tuple[float, float]] = 0,
) -> None:
super().__init__()
_log_api_usage_once(self)
self.brightness = self._check_input(brightness, "brightness")
self.contrast = self._check_input(contrast, "contrast")
self.saturation = self._check_input(saturation, "saturation")
self.hue = self._check_input(hue, "hue", center=0, bound=(-0.5, 0.5), clip_first_on_zero=False)
@torch.jit.unused
def _check_input(self, value, name, center=1, bound=(0, float("inf")), clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError(f"If {name} is a single number, it must be non negative.")
value = [center - float(value), center + float(value)]
if clip_first_on_zero:
value[0] = max(value[0], 0.0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
value = [float(value[0]), float(value[1])]
else:
raise TypeError(f"{name} should be a single number or a list/tuple with length 2.")
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError(f"{name} values should be between {bound}, but got {value}.")
# if value is 0 or (1., 1.) for brightness/contrast/saturation
# or (0., 0.) for hue, do nothing
if value[0] == value[1] == center:
return None
else:
return tuple(value)
@staticmethod
def get_params(
brightness: Optional[List[float]],
contrast: Optional[List[float]],
saturation: Optional[List[float]],
hue: Optional[List[float]],
) -> Tuple[Tensor, Optional[float], Optional[float], Optional[float], Optional[float]]:
"""Get the parameters for the randomized transform to be applied on image.
Args:
brightness (tuple of float (min, max), optional): The range from which the brightness_factor is chosen
uniformly. Pass None to turn off the transformation.
contrast (tuple of float (min, max), optional): The range from which the contrast_factor is chosen
uniformly. Pass None to turn off the transformation.
saturation (tuple of float (min, max), optional): The range from which the saturation_factor is chosen
uniformly. Pass None to turn off the transformation.
hue (tuple of float (min, max), optional): The range from which the hue_factor is chosen uniformly.
Pass None to turn off the transformation.
Returns:
tuple: The parameters used to apply the randomized transform
along with their random order.
"""
fn_idx = torch.randperm(4)
b = None if brightness is None else float(torch.empty(1).uniform_(brightness[0], brightness[1]))
c = None if contrast is None else float(torch.empty(1).uniform_(contrast[0], contrast[1]))
s = None if saturation is None else float(torch.empty(1).uniform_(saturation[0], saturation[1]))
h = None if hue is None else float(torch.empty(1).uniform_(hue[0], hue[1]))
return fn_idx, b, c, s, h
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Input image.
Returns:
PIL Image or Tensor: Color jittered image.
"""
fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = self.get_params(
self.brightness, self.contrast, self.saturation, self.hue
)
for fn_id in fn_idx:
if fn_id == 0 and brightness_factor is not None:
img = F.adjust_brightness(img, brightness_factor)
elif fn_id == 1 and contrast_factor is not None:
img = F.adjust_contrast(img, contrast_factor)
elif fn_id == 2 and saturation_factor is not None:
img = F.adjust_saturation(img, saturation_factor)
elif fn_id == 3 and hue_factor is not None:
img = F.adjust_hue(img, hue_factor)
return img
def __repr__(self) -> str:
s = (
f"{self.__class__.__name__}("
f"brightness={self.brightness}"
f", contrast={self.contrast}"
f", saturation={self.saturation}"
f", hue={self.hue})"
)
return s
class RandomRotation(torch.nn.Module):
"""Rotate the image by angle.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
degrees (sequence or number): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner.
Default is the center of the image.
fill (sequence or number): Pixel fill value for the area outside the rotated
image. Default is ``0``. If given a number, the value is used for all bands respectively.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
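Example (illustrative sketch; the degree range is an arbitrary assumption)::
>>> rotate = RandomRotation(degrees=(-30, 30))
>>> img = torch.rand(3, 64, 64)
>>> out = rotate(img)  # same spatial size since expand=False by default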
"""
def __init__(self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=0):
super().__init__()
_log_api_usage_once(self)
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2,))
if center is not None:
_check_sequence_input(center, "center", req_sizes=(2,))
self.center = center
self.interpolation = interpolation
self.expand = expand
if fill is None:
fill = 0
elif not isinstance(fill, (Sequence, numbers.Number)):
raise TypeError("Fill should be either a sequence or a number.")
self.fill = fill
@staticmethod
def get_params(degrees: List[float]) -> float:
"""Get parameters for ``rotate`` for a random rotation.
Returns:
float: angle parameter to be passed to ``rotate`` for random rotation.
"""
angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())
return angle
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be rotated.
Returns:
PIL Image or Tensor: Rotated image.
"""
fill = self.fill
channels, _, _ = F.get_dimensions(img)
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * channels
else:
fill = [float(f) for f in fill]
angle = self.get_params(self.degrees)
return F.rotate(img, angle, self.interpolation, self.expand, self.center, fill)
def __repr__(self) -> str:
interpolate_str = self.interpolation.value
format_string = self.__class__.__name__ + f"(degrees={self.degrees}"
format_string += f", interpolation={interpolate_str}"
format_string += f", expand={self.expand}"
if self.center is not None:
format_string += f", center={self.center}"
if self.fill is not None:
format_string += f", fill={self.fill}"
format_string += ")"
return format_string
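# Illustrative usage sketch for RandomRotation (hypothetical helper, never called by
# the library). The degree range, interpolation mode and image shape are example choices.
def _example_random_rotation():
    rotate = RandomRotation(degrees=30, interpolation=InterpolationMode.BILINEAR, expand=True)
    img = torch.rand(3, 224, 224)
    return rotate(img)  # rotated by an angle sampled uniformly from [-30, 30] degrees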
class RandomAffine(torch.nn.Module):
"""Random affine transformation of the image keeping center invariant.
If the image is torch Tensor, it is expected
to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
degrees (sequence or number): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees). Set to 0 to deactivate rotations.
translate (tuple, optional): tuple of maximum absolute fraction for horizontal
and vertical translations. For example translate=(a, b), then horizontal shift
is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is
randomly sampled from the range a <= scale <= b. Will keep original scale by default.
shear (sequence or number, optional): Range of degrees to select from.
If shear is a number, a shear parallel to the x-axis in the range (-shear, +shear)
will be applied. Else if shear is a sequence of 2 values a shear parallel to the x-axis in the
range (shear[0], shear[1]) will be applied. Else if shear is a sequence of 4 values,
an x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.
Will not apply shear by default.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
fill (sequence or number): Pixel fill value for the area outside the transformed
image. Default is ``0``. If given a number, the value is used for all bands respectively.
center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner.
Default is the center of the image.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
def __init__(
self,
degrees,
translate=None,
scale=None,
shear=None,
interpolation=InterpolationMode.NEAREST,
fill=0,
center=None,
):
super().__init__()
_log_api_usage_once(self)
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2,))
if translate is not None:
_check_sequence_input(translate, "translate", req_sizes=(2,))
for t in translate:
if not (0.0 <= t <= 1.0):
raise ValueError("translation values should be between 0 and 1")
self.translate = translate
if scale is not None:
_check_sequence_input(scale, "scale", req_sizes=(2,))
for s in scale:
if s <= 0:
raise ValueError("scale values should be positive")
self.scale = scale
if shear is not None:
self.shear = _setup_angle(shear, name="shear", req_sizes=(2, 4))
else:
self.shear = shear
self.interpolation = interpolation
if fill is None:
fill = 0
elif not isinstance(fill, (Sequence, numbers.Number)):
raise TypeError("Fill should be either a sequence or a number.")
self.fill = fill
if center is not None:
_check_sequence_input(center, "center", req_sizes=(2,))
self.center = center
@staticmethod
def get_params(
degrees: List[float],
translate: Optional[List[float]],
scale_ranges: Optional[List[float]],
shears: Optional[List[float]],
img_size: List[int],
) -> Tuple[float, Tuple[int, int], float, Tuple[float, float]]:
"""Get parameters for affine transformation
Returns:
params to be passed to the affine transformation
"""
angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())
if translate is not None:
max_dx = float(translate[0] * img_size[0])
max_dy = float(translate[1] * img_size[1])
tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))
ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))
translations = (tx, ty)
else:
translations = (0, 0)
if scale_ranges is not None:
scale = float(torch.empty(1).uniform_(scale_ranges[0], scale_ranges[1]).item())
else:
scale = 1.0
shear_x = shear_y = 0.0
if shears is not None:
shear_x = float(torch.empty(1).uniform_(shears[0], shears[1]).item())
if len(shears) == 4:
shear_y = float(torch.empty(1).uniform_(shears[2], shears[3]).item())
shear = (shear_x, shear_y)
return angle, translations, scale, shear
def forward(self, img):
"""
        Args:
            img (PIL Image or Tensor): Image to be transformed.
Returns:
PIL Image or Tensor: Affine transformed image.
"""
fill = self.fill
channels, height, width = F.get_dimensions(img)
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * channels
else:
fill = [float(f) for f in fill]
img_size = [width, height] # flip for keeping BC on get_params call
ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img_size)
return F.affine(img, *ret, interpolation=self.interpolation, fill=fill, center=self.center)
def __repr__(self) -> str:
s = f"{self.__class__.__name__}(degrees={self.degrees}"
s += f", translate={self.translate}" if self.translate is not None else ""
s += f", scale={self.scale}" if self.scale is not None else ""
s += f", shear={self.shear}" if self.shear is not None else ""
s += f", interpolation={self.interpolation.value}" if self.interpolation != InterpolationMode.NEAREST else ""
s += f", fill={self.fill}" if self.fill != 0 else ""
s += f", center={self.center}" if self.center is not None else ""
s += ")"
return s
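# Illustrative usage sketch for RandomAffine (hypothetical helper, never called by the
# library). All parameter values below are arbitrary examples.
def _example_random_affine():
    affine = RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=10)
    img = torch.rand(3, 224, 224)
    return affine(img)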
class Grayscale(torch.nn.Module):
"""Convert image to grayscale.
If the image is torch Tensor, it is expected
to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
num_output_channels (int): (1 or 3) number of channels desired for output image
Returns:
        PIL Image or Tensor: Grayscale version of the input.
- If ``num_output_channels == 1`` : returned image is single channel
- If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b
"""
def __init__(self, num_output_channels=1):
super().__init__()
_log_api_usage_once(self)
self.num_output_channels = num_output_channels
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be converted to grayscale.
Returns:
PIL Image or Tensor: Grayscaled image.
"""
return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(num_output_channels={self.num_output_channels})"
class RandomGrayscale(torch.nn.Module):
"""Randomly convert image to grayscale with a probability of p (default 0.1).
If the image is torch Tensor, it is expected
to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
p (float): probability that image should be converted to grayscale.
Returns:
PIL Image or Tensor: Grayscale version of the input image with probability p and unchanged
with probability (1-p).
- If input image is 1 channel: grayscale version is 1 channel
- If input image is 3 channel: grayscale version is 3 channel with r == g == b
"""
def __init__(self, p=0.1):
super().__init__()
_log_api_usage_once(self)
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be converted to grayscale.
Returns:
PIL Image or Tensor: Randomly grayscaled image.
"""
num_output_channels, _, _ = F.get_dimensions(img)
if torch.rand(1) < self.p:
return F.rgb_to_grayscale(img, num_output_channels=num_output_channels)
return img
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
class RandomErasing(torch.nn.Module):
"""Randomly selects a rectangle region in a torch.Tensor image and erases its pixels.
This transform does not support PIL Image.
'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896
Args:
p: probability that the random erasing operation will be performed.
scale: range of proportion of erased area against input image.
ratio: range of aspect ratio of erased area.
value: erasing value. Default is 0. If a single int, it is used to
erase all pixels. If a tuple of length 3, it is used to erase
R, G, B channels respectively.
            If the string 'random', each pixel is erased with a random value.
inplace: boolean to make this transform inplace. Default set to False.
Returns:
Erased Image.
Example:
>>> transform = transforms.Compose([
>>> transforms.RandomHorizontalFlip(),
>>> transforms.PILToTensor(),
>>> transforms.ConvertImageDtype(torch.float),
>>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
>>> transforms.RandomErasing(),
>>> ])
"""
def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):
super().__init__()
_log_api_usage_once(self)
if not isinstance(value, (numbers.Number, str, tuple, list)):
raise TypeError("Argument value should be either a number or str or a sequence")
if isinstance(value, str) and value != "random":
raise ValueError("If value is str, it should be 'random'")
if not isinstance(scale, Sequence):
raise TypeError("Scale should be a sequence")
if not isinstance(ratio, Sequence):
raise TypeError("Ratio should be a sequence")
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("Scale and ratio should be of kind (min, max)")
if scale[0] < 0 or scale[1] > 1:
raise ValueError("Scale should be between 0 and 1")
if p < 0 or p > 1:
raise ValueError("Random erasing probability should be between 0 and 1")
self.p = p
self.scale = scale
self.ratio = ratio
self.value = value
self.inplace = inplace
@staticmethod
def get_params(
img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None
) -> Tuple[int, int, int, int, Tensor]:
"""Get parameters for ``erase`` for a random erasing.
Args:
img (Tensor): Tensor image to be erased.
scale (sequence): range of proportion of erased area against input image.
ratio (sequence): range of aspect ratio of erased area.
value (list, optional): erasing value. If None, it is interpreted as "random"
(erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number,
i.e. ``value[0]``.
Returns:
tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.
"""
img_c, img_h, img_w = img.shape[-3], img.shape[-2], img.shape[-1]
area = img_h * img_w
log_ratio = torch.log(torch.tensor(ratio))
for _ in range(10):
erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
aspect_ratio = torch.exp(torch.empty(1).uniform_(log_ratio[0], log_ratio[1])).item()
h = int(round(math.sqrt(erase_area * aspect_ratio)))
w = int(round(math.sqrt(erase_area / aspect_ratio)))
if not (h < img_h and w < img_w):
continue
if value is None:
v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()
else:
v = torch.tensor(value)[:, None, None]
i = torch.randint(0, img_h - h + 1, size=(1,)).item()
j = torch.randint(0, img_w - w + 1, size=(1,)).item()
return i, j, h, w, v
# Return original image
return 0, 0, img_h, img_w, img
def forward(self, img):
"""
Args:
img (Tensor): Tensor image to be erased.
Returns:
img (Tensor): Erased Tensor image.
"""
if torch.rand(1) < self.p:
# cast self.value to script acceptable type
if isinstance(self.value, (int, float)):
value = [float(self.value)]
elif isinstance(self.value, str):
value = None
elif isinstance(self.value, (list, tuple)):
value = [float(v) for v in self.value]
else:
value = self.value
if value is not None and not (len(value) in (1, img.shape[-3])):
raise ValueError(
"If value is a sequence, it should have either a single value or "
f"{img.shape[-3]} (number of input channels)"
)
x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value)
return F.erase(img, x, y, h, w, v, self.inplace)
return img
def __repr__(self) -> str:
s = (
f"{self.__class__.__name__}"
f"(p={self.p}, "
f"scale={self.scale}, "
f"ratio={self.ratio}, "
f"value={self.value}, "
f"inplace={self.inplace})"
)
return s
class GaussianBlur(torch.nn.Module):
"""Blurs image with randomly chosen Gaussian blur.
If the image is torch Tensor, it is expected
to have [..., C, H, W] shape, where ... means at most one leading dimension.
Args:
kernel_size (int or sequence): Size of the Gaussian kernel.
sigma (float or tuple of float (min, max)): Standard deviation to be used for
creating kernel to perform blurring. If float, sigma is fixed. If it is tuple
of float (min, max), sigma is chosen uniformly at random to lie in the
given range.
Returns:
PIL Image or Tensor: Gaussian blurred version of the input image.
"""
def __init__(self, kernel_size, sigma=(0.1, 2.0)):
super().__init__()
_log_api_usage_once(self)
self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
for ks in self.kernel_size:
if ks <= 0 or ks % 2 == 0:
raise ValueError("Kernel size value should be an odd and positive number.")
if isinstance(sigma, numbers.Number):
if sigma <= 0:
raise ValueError("If sigma is a single number, it must be positive.")
sigma = (sigma, sigma)
elif isinstance(sigma, Sequence) and len(sigma) == 2:
if not 0.0 < sigma[0] <= sigma[1]:
raise ValueError("sigma values should be positive and of the form (min, max).")
else:
raise ValueError("sigma should be a single number or a list/tuple with length 2.")
self.sigma = sigma
@staticmethod
def get_params(sigma_min: float, sigma_max: float) -> float:
"""Choose sigma for random gaussian blurring.
Args:
sigma_min (float): Minimum standard deviation that can be chosen for blurring kernel.
sigma_max (float): Maximum standard deviation that can be chosen for blurring kernel.
Returns:
float: Standard deviation to be passed to calculate kernel for gaussian blurring.
"""
return torch.empty(1).uniform_(sigma_min, sigma_max).item()
def forward(self, img: Tensor) -> Tensor:
"""
Args:
img (PIL Image or Tensor): image to be blurred.
Returns:
PIL Image or Tensor: Gaussian blurred image
"""
sigma = self.get_params(self.sigma[0], self.sigma[1])
return F.gaussian_blur(img, self.kernel_size, [sigma, sigma])
def __repr__(self) -> str:
s = f"{self.__class__.__name__}(kernel_size={self.kernel_size}, sigma={self.sigma})"
return s
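# Illustrative usage sketch for GaussianBlur (hypothetical helper, never called by the
# library). Kernel sizes must be odd; sigma is sampled uniformly from the given range.
def _example_gaussian_blur():
    blur = GaussianBlur(kernel_size=(5, 9), sigma=(0.1, 5.0))
    img = torch.rand(3, 64, 64)
    return blur(img)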
def _setup_size(size, error_msg):
if isinstance(size, numbers.Number):
return int(size), int(size)
if isinstance(size, Sequence) and len(size) == 1:
return size[0], size[0]
if len(size) != 2:
raise ValueError(error_msg)
return size
def _check_sequence_input(x, name, req_sizes):
msg = req_sizes[0] if len(req_sizes) < 2 else " or ".join([str(s) for s in req_sizes])
if not isinstance(x, Sequence):
raise TypeError(f"{name} should be a sequence of length {msg}.")
if len(x) not in req_sizes:
raise ValueError(f"{name} should be a sequence of length {msg}.")
def _setup_angle(x, name, req_sizes=(2,)):
if isinstance(x, numbers.Number):
if x < 0:
raise ValueError(f"If {name} is a single number, it must be positive.")
x = [-x, x]
else:
_check_sequence_input(x, name, req_sizes)
return [float(d) for d in x]
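# Small sketch of the private helpers above (hypothetical, never called by the library):
# _setup_size normalizes an int or a 1-/2-element sequence into a (h, w) pair, and
# _setup_angle turns a single non-negative number x into the symmetric range [-x, x].
def _example_setup_helpers():
    size = _setup_size(224, "size should be an int or a sequence of length 1 or 2")  # -> (224, 224)
    degrees = _setup_angle(30, name="degrees")  # -> [-30.0, 30.0]
    return size, degrees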
class RandomInvert(torch.nn.Module):
"""Inverts the colors of the given image randomly with a given probability.
If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
p (float): probability of the image being color inverted. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
_log_api_usage_once(self)
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be inverted.
Returns:
PIL Image or Tensor: Randomly color inverted image.
"""
if torch.rand(1).item() < self.p:
return F.invert(img)
return img
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
class RandomPosterize(torch.nn.Module):
"""Posterize the image randomly with a given probability by reducing the
number of bits for each color channel. If the image is torch Tensor, it should be of type torch.uint8,
and it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
bits (int): number of bits to keep for each channel (0-8)
p (float): probability of the image being posterized. Default value is 0.5
"""
def __init__(self, bits, p=0.5):
super().__init__()
_log_api_usage_once(self)
self.bits = bits
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be posterized.
Returns:
PIL Image or Tensor: Randomly posterized image.
"""
if torch.rand(1).item() < self.p:
return F.posterize(img, self.bits)
return img
def __repr__(self) -> str:
return f"{self.__class__.__name__}(bits={self.bits},p={self.p})"
class RandomSolarize(torch.nn.Module):
"""Solarize the image randomly with a given probability by inverting all pixel
values above a threshold. If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
threshold (float): all pixels equal or above this value are inverted.
p (float): probability of the image being solarized. Default value is 0.5
"""
def __init__(self, threshold, p=0.5):
super().__init__()
_log_api_usage_once(self)
self.threshold = threshold
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be solarized.
Returns:
PIL Image or Tensor: Randomly solarized image.
"""
if torch.rand(1).item() < self.p:
return F.solarize(img, self.threshold)
return img
def __repr__(self) -> str:
return f"{self.__class__.__name__}(threshold={self.threshold},p={self.p})"
class RandomAdjustSharpness(torch.nn.Module):
"""Adjust the sharpness of the image randomly with a given probability. If the image is torch Tensor,
it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
sharpness_factor (float): How much to adjust the sharpness. Can be
any non-negative number. 0 gives a blurred image, 1 gives the
original image while 2 increases the sharpness by a factor of 2.
p (float): probability of the image being sharpened. Default value is 0.5
"""
def __init__(self, sharpness_factor, p=0.5):
super().__init__()
_log_api_usage_once(self)
self.sharpness_factor = sharpness_factor
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be sharpened.
Returns:
PIL Image or Tensor: Randomly sharpened image.
"""
if torch.rand(1).item() < self.p:
return F.adjust_sharpness(img, self.sharpness_factor)
return img
def __repr__(self) -> str:
return f"{self.__class__.__name__}(sharpness_factor={self.sharpness_factor},p={self.p})"
class RandomAutocontrast(torch.nn.Module):
"""Autocontrast the pixels of the given image randomly with a given probability.
If the image is torch Tensor, it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
p (float): probability of the image being autocontrasted. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
_log_api_usage_once(self)
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be autocontrasted.
Returns:
PIL Image or Tensor: Randomly autocontrasted image.
"""
if torch.rand(1).item() < self.p:
return F.autocontrast(img)
return img
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
class RandomEqualize(torch.nn.Module):
"""Equalize the histogram of the given image randomly with a given probability.
If the image is torch Tensor, it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".
Args:
p (float): probability of the image being equalized. Default value is 0.5
"""
def __init__(self, p=0.5):
super().__init__()
_log_api_usage_once(self)
self.p = p
def forward(self, img):
"""
Args:
img (PIL Image or Tensor): Image to be equalized.
Returns:
PIL Image or Tensor: Randomly equalized image.
"""
if torch.rand(1).item() < self.p:
return F.equalize(img)
return img
def __repr__(self) -> str:
return f"{self.__class__.__name__}(p={self.p})"
class ElasticTransform(torch.nn.Module):
"""Transform a tensor image with elastic transformations.
Given alpha and sigma, it will generate displacement
vectors for all pixels based on random offsets. Alpha controls the strength
and sigma controls the smoothness of the displacements.
The displacements are added to an identity grid and the resulting grid is
used to grid_sample from the image.
Applications:
Randomly transforms the morphology of objects in images and produces a
see-through-water-like effect.
Args:
alpha (float or sequence of floats): Magnitude of displacements. Default is 50.0.
sigma (float or sequence of floats): Smoothness of displacements. Default is 5.0.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
fill (sequence or number): Pixel fill value for the area outside the transformed
image. Default is ``0``. If given a number, the value is used for all bands respectively.
"""
def __init__(self, alpha=50.0, sigma=5.0, interpolation=InterpolationMode.BILINEAR, fill=0):
super().__init__()
_log_api_usage_once(self)
if not isinstance(alpha, (float, Sequence)):
raise TypeError(f"alpha should be float or a sequence of floats. Got {type(alpha)}")
if isinstance(alpha, Sequence) and len(alpha) != 2:
raise ValueError(f"If alpha is a sequence its length should be 2. Got {len(alpha)}")
if isinstance(alpha, Sequence):
for element in alpha:
if not isinstance(element, float):
raise TypeError(f"alpha should be a sequence of floats. Got {type(element)}")
if isinstance(alpha, float):
alpha = [float(alpha), float(alpha)]
if isinstance(alpha, (list, tuple)) and len(alpha) == 1:
alpha = [alpha[0], alpha[0]]
self.alpha = alpha
if not isinstance(sigma, (float, Sequence)):
raise TypeError(f"sigma should be float or a sequence of floats. Got {type(sigma)}")
if isinstance(sigma, Sequence) and len(sigma) != 2:
raise ValueError(f"If sigma is a sequence its length should be 2. Got {len(sigma)}")
if isinstance(sigma, Sequence):
for element in sigma:
if not isinstance(element, float):
raise TypeError(f"sigma should be a sequence of floats. Got {type(element)}")
if isinstance(sigma, float):
sigma = [float(sigma), float(sigma)]
if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
sigma = [sigma[0], sigma[0]]
self.sigma = sigma
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
self.interpolation = interpolation
if isinstance(fill, (int, float)):
fill = [float(fill)]
elif isinstance(fill, (list, tuple)):
fill = [float(f) for f in fill]
else:
raise TypeError(f"fill should be int or float or a list or tuple of them. Got {type(fill)}")
self.fill = fill
@staticmethod
def get_params(alpha: List[float], sigma: List[float], size: List[int]) -> Tensor:
dx = torch.rand([1, 1] + size) * 2 - 1
if sigma[0] > 0.0:
kx = int(8 * sigma[0] + 1)
# if kernel size is even we have to make it odd
if kx % 2 == 0:
kx += 1
dx = F.gaussian_blur(dx, [kx, kx], sigma)
dx = dx * alpha[0] / size[0]
dy = torch.rand([1, 1] + size) * 2 - 1
if sigma[1] > 0.0:
ky = int(8 * sigma[1] + 1)
# if kernel size is even we have to make it odd
if ky % 2 == 0:
ky += 1
dy = F.gaussian_blur(dy, [ky, ky], sigma)
dy = dy * alpha[1] / size[1]
return torch.concat([dx, dy], 1).permute([0, 2, 3, 1]) # 1 x H x W x 2
def forward(self, tensor: Tensor) -> Tensor:
"""
Args:
tensor (PIL Image or Tensor): Image to be transformed.
Returns:
PIL Image or Tensor: Transformed image.
"""
_, height, width = F.get_dimensions(tensor)
displacement = self.get_params(self.alpha, self.sigma, [height, width])
return F.elastic_transform(tensor, displacement, self.interpolation, self.fill)
def __repr__(self):
format_string = self.__class__.__name__
format_string += f"(alpha={self.alpha}"
format_string += f", sigma={self.sigma}"
format_string += f", interpolation={self.interpolation}"
format_string += f", fill={self.fill})"
return format_string
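# Illustrative usage sketch for ElasticTransform (hypothetical helper, never called by
# the library). alpha controls the displacement strength, sigma its smoothness.
def _example_elastic_transform():
    elastic = ElasticTransform(alpha=50.0, sigma=5.0)
    img = torch.rand(3, 128, 128)
    return elastic(img)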
```
|
=============================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 1.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\__init__.py
ENCODING: utf-8
```py
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, JPEG, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
RGB,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
GaussianNoise,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._utils import check_type, get_bounding_boxes, has_all, has_any, query_chw, query_size
from ._deprecated import ToTensor # usort: skip
```
|
=============================================================================================================================
SOURCE CODE FILE: _augment.py
LINES: 1
SIZE: 16.23 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\_augment.py
ENCODING: utf-8
```py
import math
import numbers
import warnings
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
import PIL.Image
import torch
from torch.nn.functional import one_hot
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision import transforms as _transforms, tv_tensors
from torchvision.transforms.v2 import functional as F
from ._transform import _RandomApplyTransform, Transform
from ._utils import _check_sequence_input, _parse_labels_getter, has_any, is_pure_tensor, query_chw, query_size
class RandomErasing(_RandomApplyTransform):
"""Randomly select a rectangle region in the input image or video and erase its pixels.
This transform does not support PIL Image.
'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896
Args:
p (float, optional): probability that the random erasing operation will be performed.
scale (tuple of float, optional): range of proportion of erased area against input image.
ratio (tuple of float, optional): range of aspect ratio of erased area.
value (number or tuple of numbers): erasing value. Default is 0. If a single int, it is used to
erase all pixels. If a tuple of length 3, it is used to erase
R, G, B channels respectively.
            If the string 'random', each pixel is erased with a random value.
inplace (bool, optional): boolean to make this transform inplace. Default set to False.
Returns:
Erased input.
Example:
>>> from torchvision.transforms import v2 as transforms
>>>
>>> transform = transforms.Compose([
>>> transforms.RandomHorizontalFlip(),
>>> transforms.PILToTensor(),
>>> transforms.ConvertImageDtype(torch.float),
>>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
>>> transforms.RandomErasing(),
>>> ])
"""
_v1_transform_cls = _transforms.RandomErasing
def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
return dict(
super()._extract_params_for_v1_transform(),
value="random" if self.value is None else self.value,
)
def __init__(
self,
p: float = 0.5,
scale: Sequence[float] = (0.02, 0.33),
ratio: Sequence[float] = (0.3, 3.3),
value: float = 0.0,
inplace: bool = False,
):
super().__init__(p=p)
if not isinstance(value, (numbers.Number, str, tuple, list)):
raise TypeError("Argument value should be either a number or str or a sequence")
if isinstance(value, str) and value != "random":
raise ValueError("If value is str, it should be 'random'")
if not isinstance(scale, Sequence):
raise TypeError("Scale should be a sequence")
if not isinstance(ratio, Sequence):
raise TypeError("Ratio should be a sequence")
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("Scale and ratio should be of kind (min, max)")
if scale[0] < 0 or scale[1] > 1:
raise ValueError("Scale should be between 0 and 1")
self.scale = scale
self.ratio = ratio
if isinstance(value, (int, float)):
self.value = [float(value)]
elif isinstance(value, str):
self.value = None
elif isinstance(value, (list, tuple)):
self.value = [float(v) for v in value]
else:
self.value = value
self.inplace = inplace
self._log_ratio = torch.log(torch.tensor(self.ratio))
def _call_kernel(self, functional: Callable, inpt: Any, *args: Any, **kwargs: Any) -> Any:
if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask)):
warnings.warn(
f"{type(self).__name__}() is currently passing through inputs of type "
f"tv_tensors.{type(inpt).__name__}. This will likely change in the future."
)
return super()._call_kernel(functional, inpt, *args, **kwargs)
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
img_c, img_h, img_w = query_chw(flat_inputs)
if self.value is not None and not (len(self.value) in (1, img_c)):
raise ValueError(
f"If value is a sequence, it should have either a single value or {img_c} (number of inpt channels)"
)
area = img_h * img_w
log_ratio = self._log_ratio
for _ in range(10):
erase_area = area * torch.empty(1).uniform_(self.scale[0], self.scale[1]).item()
aspect_ratio = torch.exp(
torch.empty(1).uniform_(
log_ratio[0], # type: ignore[arg-type]
log_ratio[1], # type: ignore[arg-type]
)
).item()
h = int(round(math.sqrt(erase_area * aspect_ratio)))
w = int(round(math.sqrt(erase_area / aspect_ratio)))
if not (h < img_h and w < img_w):
continue
if self.value is None:
v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()
else:
v = torch.tensor(self.value)[:, None, None]
i = torch.randint(0, img_h - h + 1, size=(1,)).item()
j = torch.randint(0, img_w - w + 1, size=(1,)).item()
break
else:
i, j, h, w, v = 0, 0, img_h, img_w, None
return dict(i=i, j=j, h=h, w=w, v=v)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["v"] is not None:
inpt = self._call_kernel(F.erase, inpt, **params, inplace=self.inplace)
return inpt
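# Illustrative usage sketch for the v2 RandomErasing above (hypothetical helper, never
# called by the library). A plain float tensor image is enough; value="random" fills
# the erased rectangle with Gaussian noise.
def _example_random_erasing_v2():
    erase = RandomErasing(p=1.0, value="random")
    img = torch.rand(3, 32, 32)
    return erase(img)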
class _BaseMixUpCutMix(Transform):
def __init__(self, *, alpha: float = 1.0, num_classes: Optional[int] = None, labels_getter="default") -> None:
super().__init__()
self.alpha = float(alpha)
self._dist = torch.distributions.Beta(torch.tensor([alpha]), torch.tensor([alpha]))
self.num_classes = num_classes
self._labels_getter = _parse_labels_getter(labels_getter)
def forward(self, *inputs):
inputs = inputs if len(inputs) > 1 else inputs[0]
flat_inputs, spec = tree_flatten(inputs)
needs_transform_list = self._needs_transform_list(flat_inputs)
if has_any(flat_inputs, PIL.Image.Image, tv_tensors.BoundingBoxes, tv_tensors.Mask):
raise ValueError(f"{type(self).__name__}() does not support PIL images, bounding boxes and masks.")
labels = self._labels_getter(inputs)
if not isinstance(labels, torch.Tensor):
raise ValueError(f"The labels must be a tensor, but got {type(labels)} instead.")
if labels.ndim not in (1, 2):
raise ValueError(
f"labels should be index based with shape (batch_size,) "
f"or probability based with shape (batch_size, num_classes), "
f"but got a tensor of shape {labels.shape} instead."
)
if labels.ndim == 2 and self.num_classes is not None and labels.shape[-1] != self.num_classes:
raise ValueError(
f"When passing 2D labels, "
f"the number of elements in last dimension must match num_classes: "
f"{labels.shape[-1]} != {self.num_classes}. "
f"You can Leave num_classes to None."
)
if labels.ndim == 1 and self.num_classes is None:
raise ValueError("num_classes must be passed if the labels are index-based (1D)")
params = {
"labels": labels,
"batch_size": labels.shape[0],
**self.make_params(
[inpt for (inpt, needs_transform) in zip(flat_inputs, needs_transform_list) if needs_transform]
),
}
# By default, the labels will be False inside needs_transform_list, since they are a torch.Tensor coming
# after an image or video. However, we need to handle them in _transform, so we make sure to set them to True
needs_transform_list[next(idx for idx, inpt in enumerate(flat_inputs) if inpt is labels)] = True
flat_outputs = [
self.transform(inpt, params) if needs_transform else inpt
for (inpt, needs_transform) in zip(flat_inputs, needs_transform_list)
]
return tree_unflatten(flat_outputs, spec)
def _check_image_or_video(self, inpt: torch.Tensor, *, batch_size: int):
expected_num_dims = 5 if isinstance(inpt, tv_tensors.Video) else 4
if inpt.ndim != expected_num_dims:
raise ValueError(
f"Expected a batched input with {expected_num_dims} dims, but got {inpt.ndim} dimensions instead."
)
if inpt.shape[0] != batch_size:
raise ValueError(
f"The batch size of the image or video does not match the batch size of the labels: "
f"{inpt.shape[0]} != {batch_size}."
)
def _mixup_label(self, label: torch.Tensor, *, lam: float) -> torch.Tensor:
if label.ndim == 1:
label = one_hot(label, num_classes=self.num_classes) # type: ignore[arg-type]
if not label.dtype.is_floating_point:
label = label.float()
return label.roll(1, 0).mul_(1.0 - lam).add_(label.mul(lam))
class MixUp(_BaseMixUpCutMix):
"""Apply MixUp to the provided batch of images and labels.
Paper: `mixup: Beyond Empirical Risk Minimization <https://arxiv.org/abs/1710.09412>`_.
.. note::
This transform is meant to be used on **batches** of samples, not
individual images. See
:ref:`sphx_glr_auto_examples_transforms_plot_cutmix_mixup.py` for detailed usage
examples.
The sample pairing is deterministic and done by matching consecutive
samples in the batch, so the batch needs to be shuffled (this is an
implementation detail, not a guaranteed convention.)
In the input, the labels are expected to be a tensor of shape ``(batch_size,)``. They will be transformed
into a tensor of shape ``(batch_size, num_classes)``.
Args:
alpha (float, optional): hyperparameter of the Beta distribution used for mixup. Default is 1.
num_classes (int, optional): number of classes in the batch. Used for one-hot-encoding.
Can be None only if the labels are already one-hot-encoded.
labels_getter (callable or "default", optional): indicates how to identify the labels in the input.
By default, this will pick the second parameter as the labels if it's a tensor. This covers the most
common scenario where this transform is called as ``MixUp()(imgs_batch, labels_batch)``.
It can also be a callable that takes the same input as the transform, and returns the labels.
"""
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
return dict(lam=float(self._dist.sample(()))) # type: ignore[arg-type]
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
lam = params["lam"]
if inpt is params["labels"]:
return self._mixup_label(inpt, lam=lam)
elif isinstance(inpt, (tv_tensors.Image, tv_tensors.Video)) or is_pure_tensor(inpt):
self._check_image_or_video(inpt, batch_size=params["batch_size"])
output = inpt.roll(1, 0).mul_(1.0 - lam).add_(inpt.mul(lam))
if isinstance(inpt, (tv_tensors.Image, tv_tensors.Video)):
output = tv_tensors.wrap(output, like=inpt)
return output
else:
return inpt
class CutMix(_BaseMixUpCutMix):
"""Apply CutMix to the provided batch of images and labels.
Paper: `CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features
<https://arxiv.org/abs/1905.04899>`_.
.. note::
This transform is meant to be used on **batches** of samples, not
individual images. See
:ref:`sphx_glr_auto_examples_transforms_plot_cutmix_mixup.py` for detailed usage
examples.
The sample pairing is deterministic and done by matching consecutive
samples in the batch, so the batch needs to be shuffled (this is an
implementation detail, not a guaranteed convention.)
In the input, the labels are expected to be a tensor of shape ``(batch_size,)``. They will be transformed
into a tensor of shape ``(batch_size, num_classes)``.
Args:
alpha (float, optional): hyperparameter of the Beta distribution used for mixup. Default is 1.
num_classes (int, optional): number of classes in the batch. Used for one-hot-encoding.
Can be None only if the labels are already one-hot-encoded.
labels_getter (callable or "default", optional): indicates how to identify the labels in the input.
By default, this will pick the second parameter as the labels if it's a tensor. This covers the most
common scenario where this transform is called as ``CutMix()(imgs_batch, labels_batch)``.
It can also be a callable that takes the same input as the transform, and returns the labels.
"""
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
lam = float(self._dist.sample(())) # type: ignore[arg-type]
H, W = query_size(flat_inputs)
r_x = torch.randint(W, size=(1,))
r_y = torch.randint(H, size=(1,))
r = 0.5 * math.sqrt(1.0 - lam)
r_w_half = int(r * W)
r_h_half = int(r * H)
x1 = int(torch.clamp(r_x - r_w_half, min=0))
y1 = int(torch.clamp(r_y - r_h_half, min=0))
x2 = int(torch.clamp(r_x + r_w_half, max=W))
y2 = int(torch.clamp(r_y + r_h_half, max=H))
box = (x1, y1, x2, y2)
lam_adjusted = float(1.0 - (x2 - x1) * (y2 - y1) / (W * H))
return dict(box=box, lam_adjusted=lam_adjusted)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if inpt is params["labels"]:
return self._mixup_label(inpt, lam=params["lam_adjusted"])
elif isinstance(inpt, (tv_tensors.Image, tv_tensors.Video)) or is_pure_tensor(inpt):
self._check_image_or_video(inpt, batch_size=params["batch_size"])
x1, y1, x2, y2 = params["box"]
rolled = inpt.roll(1, 0)
output = inpt.clone()
output[..., y1:y2, x1:x2] = rolled[..., y1:y2, x1:x2]
if isinstance(inpt, (tv_tensors.Image, tv_tensors.Video)):
output = tv_tensors.wrap(output, like=inpt)
return output
else:
return inpt
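# Illustrative usage sketch for MixUp / CutMix (hypothetical helper, never called by
# the library). Both operate on a *batch* of images plus class labels; the batch size,
# image size and number of classes below are arbitrary examples.
def _example_mixup_cutmix():
    images = torch.rand(4, 3, 32, 32)  # batched float images
    labels = torch.randint(0, 10, (4,))  # index-based labels
    images, labels = MixUp(num_classes=10)(images, labels)  # labels become (4, 10) probabilities
    images, labels = CutMix(num_classes=10)(images, labels)
    return images, labels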
class JPEG(Transform):
"""Apply JPEG compression and decompression to the given images.
If the input is a :class:`torch.Tensor`, it is expected
to be of dtype uint8, on CPU, and have [..., 3 or 1, H, W] shape,
where ... means an arbitrary number of leading dimensions.
Args:
quality (sequence or number): JPEG quality, from 1 to 100. Lower means more compression.
If quality is a sequence like (min, max), it specifies the range of JPEG quality to
randomly select from (inclusive of both ends).
Returns:
image with JPEG compression.
"""
def __init__(self, quality: Union[int, Sequence[int]]):
super().__init__()
if isinstance(quality, int):
if isinstance(quality, bool):
raise TypeError("quality can't be bool")
quality = [quality, quality]
else:
_check_sequence_input(quality, "quality", req_sizes=(2,))
if not (1 <= quality[0] <= quality[1] <= 100 and isinstance(quality[0], int) and isinstance(quality[1], int)):
raise ValueError(f"quality must be an integer from 1 to 100, got {quality =}")
self.quality = quality
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
quality = torch.randint(self.quality[0], self.quality[1] + 1, ()).item()
return dict(quality=quality)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.jpeg, inpt, quality=params["quality"])
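# Illustrative usage sketch for JPEG (hypothetical helper, never called by the library).
# The input must be a uint8 CPU tensor with 1 or 3 channels; the quality range is an
# example value.
def _example_jpeg():
    jpeg = JPEG(quality=(50, 90))
    img = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)
    return jpeg(img)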
```
|
==================================================================================================================================
SOURCE CODE FILE: _auto_augment.py
LINES: 1
SIZE: 32.10 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\_auto_augment.py
ENCODING: utf-8
```py
import math
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type, Union
import PIL.Image
import torch
from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec
from torchvision import transforms as _transforms, tv_tensors
from torchvision.transforms import _functional_tensor as _FT
from torchvision.transforms.v2 import AutoAugmentPolicy, functional as F, InterpolationMode, Transform
from torchvision.transforms.v2.functional._geometry import _check_interpolation
from torchvision.transforms.v2.functional._meta import get_size
from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT
from ._utils import _get_fill, _setup_fill_arg, check_type, is_pure_tensor
ImageOrVideo = Union[torch.Tensor, PIL.Image.Image, tv_tensors.Image, tv_tensors.Video]
class _AutoAugmentBase(Transform):
def __init__(
self,
*,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
) -> None:
super().__init__()
self.interpolation = _check_interpolation(interpolation)
self.fill = fill
self._fill = _setup_fill_arg(fill)
def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
params = super()._extract_params_for_v1_transform()
if isinstance(params["fill"], dict):
raise ValueError(f"{type(self).__name__}() can not be scripted for when `fill` is a dictionary.")
return params
def _get_random_item(self, dct: Dict[str, Tuple[Callable, bool]]) -> Tuple[str, Tuple[Callable, bool]]:
keys = tuple(dct.keys())
key = keys[int(torch.randint(len(keys), ()))]
return key, dct[key]
def _flatten_and_extract_image_or_video(
self,
inputs: Any,
unsupported_types: Tuple[Type, ...] = (tv_tensors.BoundingBoxes, tv_tensors.Mask),
) -> Tuple[Tuple[List[Any], TreeSpec, int], ImageOrVideo]:
flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])
needs_transform_list = self._needs_transform_list(flat_inputs)
image_or_videos = []
for idx, (inpt, needs_transform) in enumerate(zip(flat_inputs, needs_transform_list)):
if needs_transform and check_type(
inpt,
(
tv_tensors.Image,
PIL.Image.Image,
is_pure_tensor,
tv_tensors.Video,
),
):
image_or_videos.append((idx, inpt))
elif isinstance(inpt, unsupported_types):
raise TypeError(f"Inputs of type {type(inpt).__name__} are not supported by {type(self).__name__}()")
if not image_or_videos:
raise TypeError("Found no image in the sample.")
if len(image_or_videos) > 1:
raise TypeError(
f"Auto augment transformations are only properly defined for a single image or video, "
f"but found {len(image_or_videos)}."
)
idx, image_or_video = image_or_videos[0]
return (flat_inputs, spec, idx), image_or_video
def _unflatten_and_insert_image_or_video(
self,
flat_inputs_with_spec: Tuple[List[Any], TreeSpec, int],
image_or_video: ImageOrVideo,
) -> Any:
flat_inputs, spec, idx = flat_inputs_with_spec
flat_inputs[idx] = image_or_video
return tree_unflatten(flat_inputs, spec)
def _apply_image_or_video_transform(
self,
image: ImageOrVideo,
transform_id: str,
magnitude: float,
interpolation: Union[InterpolationMode, int],
fill: Dict[Union[Type, str], _FillTypeJIT],
) -> ImageOrVideo:
# Note: this cast is wrong and is only here to make mypy happy (it disagrees with torchscript)
image = cast(torch.Tensor, image)
fill_ = _get_fill(fill, type(image))
if transform_id == "Identity":
return image
elif transform_id == "ShearX":
# magnitude should be arctan(magnitude)
# official autoaug: (1, level, 0, 0, 1, 0)
# https://github.com/tensorflow/models/blob/dd02069717128186b88afa8d857ce57d17957f03/research/autoaugment/augmentation_transforms.py#L290
# compared to
# torchvision: (1, tan(level), 0, 0, 1, 0)
# https://github.com/pytorch/vision/blob/0c2373d0bba3499e95776e7936e207d8a1676e65/torchvision/transforms/functional.py#L976
return F.affine(
image,
angle=0.0,
translate=[0, 0],
scale=1.0,
shear=[math.degrees(math.atan(magnitude)), 0.0],
interpolation=interpolation,
fill=fill_,
center=[0, 0],
)
elif transform_id == "ShearY":
# magnitude should be arctan(magnitude)
# See above
return F.affine(
image,
angle=0.0,
translate=[0, 0],
scale=1.0,
shear=[0.0, math.degrees(math.atan(magnitude))],
interpolation=interpolation,
fill=fill_,
center=[0, 0],
)
elif transform_id == "TranslateX":
return F.affine(
image,
angle=0.0,
translate=[int(magnitude), 0],
scale=1.0,
interpolation=interpolation,
shear=[0.0, 0.0],
fill=fill_,
)
elif transform_id == "TranslateY":
return F.affine(
image,
angle=0.0,
translate=[0, int(magnitude)],
scale=1.0,
interpolation=interpolation,
shear=[0.0, 0.0],
fill=fill_,
)
elif transform_id == "Rotate":
return F.rotate(image, angle=magnitude, interpolation=interpolation, fill=fill_)
elif transform_id == "Brightness":
return F.adjust_brightness(image, brightness_factor=1.0 + magnitude)
elif transform_id == "Color":
return F.adjust_saturation(image, saturation_factor=1.0 + magnitude)
elif transform_id == "Contrast":
return F.adjust_contrast(image, contrast_factor=1.0 + magnitude)
elif transform_id == "Sharpness":
return F.adjust_sharpness(image, sharpness_factor=1.0 + magnitude)
elif transform_id == "Posterize":
return F.posterize(image, bits=int(magnitude))
elif transform_id == "Solarize":
bound = _FT._max_value(image.dtype) if isinstance(image, torch.Tensor) else 255.0
return F.solarize(image, threshold=bound * magnitude)
elif transform_id == "AutoContrast":
return F.autocontrast(image)
elif transform_id == "Equalize":
return F.equalize(image)
elif transform_id == "Invert":
return F.invert(image)
else:
raise ValueError(f"No transform available for {transform_id}")
class AutoAugment(_AutoAugmentBase):
r"""AutoAugment data augmentation method based on
`"AutoAugment: Learning Augmentation Strategies from Data" <https://arxiv.org/pdf/1805.09501.pdf>`_.
This transformation works on images and videos only.
If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
policy (AutoAugmentPolicy, optional): Desired policy enum defined by
:class:`torchvision.transforms.autoaugment.AutoAugmentPolicy`. Default is ``AutoAugmentPolicy.IMAGENET``.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
_v1_transform_cls = _transforms.AutoAugment
_AUGMENTATION_SPACE = {
"ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
"ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
"TranslateX": (
lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * width, num_bins),
True,
),
"TranslateY": (
lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * height, num_bins),
True,
),
"Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True),
"Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Posterize": (
lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(),
False,
),
"Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),
"AutoContrast": (lambda num_bins, height, width: None, False),
"Equalize": (lambda num_bins, height, width: None, False),
"Invert": (lambda num_bins, height, width: None, False),
}
def __init__(
self,
policy: AutoAugmentPolicy = AutoAugmentPolicy.IMAGENET,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
) -> None:
super().__init__(interpolation=interpolation, fill=fill)
self.policy = policy
self._policies = self._get_policies(policy)
def _get_policies(
self, policy: AutoAugmentPolicy
) -> List[Tuple[Tuple[str, float, Optional[int]], Tuple[str, float, Optional[int]]]]:
if policy == AutoAugmentPolicy.IMAGENET:
return [
(("Posterize", 0.4, 8), ("Rotate", 0.6, 9)),
(("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)),
(("Equalize", 0.8, None), ("Equalize", 0.6, None)),
(("Posterize", 0.6, 7), ("Posterize", 0.6, 6)),
(("Equalize", 0.4, None), ("Solarize", 0.2, 4)),
(("Equalize", 0.4, None), ("Rotate", 0.8, 8)),
(("Solarize", 0.6, 3), ("Equalize", 0.6, None)),
(("Posterize", 0.8, 5), ("Equalize", 1.0, None)),
(("Rotate", 0.2, 3), ("Solarize", 0.6, 8)),
(("Equalize", 0.6, None), ("Posterize", 0.4, 6)),
(("Rotate", 0.8, 8), ("Color", 0.4, 0)),
(("Rotate", 0.4, 9), ("Equalize", 0.6, None)),
(("Equalize", 0.0, None), ("Equalize", 0.8, None)),
(("Invert", 0.6, None), ("Equalize", 1.0, None)),
(("Color", 0.6, 4), ("Contrast", 1.0, 8)),
(("Rotate", 0.8, 8), ("Color", 1.0, 2)),
(("Color", 0.8, 8), ("Solarize", 0.8, 7)),
(("Sharpness", 0.4, 7), ("Invert", 0.6, None)),
(("ShearX", 0.6, 5), ("Equalize", 1.0, None)),
(("Color", 0.4, 0), ("Equalize", 0.6, None)),
(("Equalize", 0.4, None), ("Solarize", 0.2, 4)),
(("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)),
(("Invert", 0.6, None), ("Equalize", 1.0, None)),
(("Color", 0.6, 4), ("Contrast", 1.0, 8)),
(("Equalize", 0.8, None), ("Equalize", 0.6, None)),
]
elif policy == AutoAugmentPolicy.CIFAR10:
return [
(("Invert", 0.1, None), ("Contrast", 0.2, 6)),
(("Rotate", 0.7, 2), ("TranslateX", 0.3, 9)),
(("Sharpness", 0.8, 1), ("Sharpness", 0.9, 3)),
(("ShearY", 0.5, 8), ("TranslateY", 0.7, 9)),
(("AutoContrast", 0.5, None), ("Equalize", 0.9, None)),
(("ShearY", 0.2, 7), ("Posterize", 0.3, 7)),
(("Color", 0.4, 3), ("Brightness", 0.6, 7)),
(("Sharpness", 0.3, 9), ("Brightness", 0.7, 9)),
(("Equalize", 0.6, None), ("Equalize", 0.5, None)),
(("Contrast", 0.6, 7), ("Sharpness", 0.6, 5)),
(("Color", 0.7, 7), ("TranslateX", 0.5, 8)),
(("Equalize", 0.3, None), ("AutoContrast", 0.4, None)),
(("TranslateY", 0.4, 3), ("Sharpness", 0.2, 6)),
(("Brightness", 0.9, 6), ("Color", 0.2, 8)),
(("Solarize", 0.5, 2), ("Invert", 0.0, None)),
(("Equalize", 0.2, None), ("AutoContrast", 0.6, None)),
(("Equalize", 0.2, None), ("Equalize", 0.6, None)),
(("Color", 0.9, 9), ("Equalize", 0.6, None)),
(("AutoContrast", 0.8, None), ("Solarize", 0.2, 8)),
(("Brightness", 0.1, 3), ("Color", 0.7, 0)),
(("Solarize", 0.4, 5), ("AutoContrast", 0.9, None)),
(("TranslateY", 0.9, 9), ("TranslateY", 0.7, 9)),
(("AutoContrast", 0.9, None), ("Solarize", 0.8, 3)),
(("Equalize", 0.8, None), ("Invert", 0.1, None)),
(("TranslateY", 0.7, 9), ("AutoContrast", 0.9, None)),
]
elif policy == AutoAugmentPolicy.SVHN:
return [
(("ShearX", 0.9, 4), ("Invert", 0.2, None)),
(("ShearY", 0.9, 8), ("Invert", 0.7, None)),
(("Equalize", 0.6, None), ("Solarize", 0.6, 6)),
(("Invert", 0.9, None), ("Equalize", 0.6, None)),
(("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
(("ShearX", 0.9, 4), ("AutoContrast", 0.8, None)),
(("ShearY", 0.9, 8), ("Invert", 0.4, None)),
(("ShearY", 0.9, 5), ("Solarize", 0.2, 6)),
(("Invert", 0.9, None), ("AutoContrast", 0.8, None)),
(("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
(("ShearX", 0.9, 4), ("Solarize", 0.3, 3)),
(("ShearY", 0.8, 8), ("Invert", 0.7, None)),
(("Equalize", 0.9, None), ("TranslateY", 0.6, 6)),
(("Invert", 0.9, None), ("Equalize", 0.6, None)),
(("Contrast", 0.3, 3), ("Rotate", 0.8, 4)),
(("Invert", 0.8, None), ("TranslateY", 0.0, 2)),
(("ShearY", 0.7, 6), ("Solarize", 0.4, 8)),
(("Invert", 0.6, None), ("Rotate", 0.8, 4)),
(("ShearY", 0.3, 7), ("TranslateX", 0.9, 3)),
(("ShearX", 0.1, 6), ("Invert", 0.6, None)),
(("Solarize", 0.7, 2), ("TranslateY", 0.6, 7)),
(("ShearY", 0.8, 4), ("Invert", 0.8, None)),
(("ShearX", 0.7, 9), ("TranslateY", 0.8, 3)),
(("ShearY", 0.8, 5), ("AutoContrast", 0.7, None)),
(("ShearX", 0.7, 2), ("Invert", 0.1, None)),
]
else:
raise ValueError(f"The provided policy {policy} is not recognized.")
def forward(self, *inputs: Any) -> Any:
flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs)
height, width = get_size(image_or_video) # type: ignore[arg-type]
policy = self._policies[int(torch.randint(len(self._policies), ()))]
for transform_id, probability, magnitude_idx in policy:
if not torch.rand(()) <= probability:
continue
magnitudes_fn, signed = self._AUGMENTATION_SPACE[transform_id]
magnitudes = magnitudes_fn(10, height, width)
if magnitudes is not None:
magnitude = float(magnitudes[magnitude_idx])
if signed and torch.rand(()) <= 0.5:
magnitude *= -1
else:
magnitude = 0.0
image_or_video = self._apply_image_or_video_transform(
image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill
)
return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video)
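# Illustrative usage sketch for AutoAugment (hypothetical helper, never called by the
# library). Tensor inputs should be uint8; the IMAGENET policy is the default and is
# spelled out here only for clarity.
def _example_auto_augment():
    augment = AutoAugment(policy=AutoAugmentPolicy.IMAGENET)
    img = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)
    return augment(img)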
class RandAugment(_AutoAugmentBase):
r"""RandAugment data augmentation method based on
`"RandAugment: Practical automated data augmentation with a reduced search space"
<https://arxiv.org/abs/1909.13719>`_.
This transformation works on images and videos only.
If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
num_ops (int, optional): Number of augmentation transformations to apply sequentially,
must be non-negative integer. Default: 2.
magnitude (int, optional): Magnitude for all the transformations.
num_magnitude_bins (int, optional): The number of different magnitude values.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
_v1_transform_cls = _transforms.RandAugment
_AUGMENTATION_SPACE = {
"Identity": (lambda num_bins, height, width: None, False),
"ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
"ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
"TranslateX": (
lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * width, num_bins),
True,
),
"TranslateY": (
lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * height, num_bins),
True,
),
"Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True),
"Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Posterize": (
lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(),
False,
),
"Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),
"AutoContrast": (lambda num_bins, height, width: None, False),
"Equalize": (lambda num_bins, height, width: None, False),
}
def __init__(
self,
num_ops: int = 2,
magnitude: int = 9,
num_magnitude_bins: int = 31,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
) -> None:
super().__init__(interpolation=interpolation, fill=fill)
if not isinstance(num_ops, int) or (num_ops < 0):
raise ValueError(f"num_ops should be a non-negative integer, but got {num_ops} instead.")
self.num_ops = num_ops
self.magnitude = magnitude
self.num_magnitude_bins = num_magnitude_bins
def forward(self, *inputs: Any) -> Any:
flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs)
height, width = get_size(image_or_video) # type: ignore[arg-type]
for _ in range(self.num_ops):
transform_id, (magnitudes_fn, signed) = self._get_random_item(self._AUGMENTATION_SPACE)
magnitudes = magnitudes_fn(self.num_magnitude_bins, height, width)
if magnitudes is not None:
magnitude = float(magnitudes[self.magnitude])
if signed and torch.rand(()) <= 0.5:
magnitude *= -1
else:
magnitude = 0.0
image_or_video = self._apply_image_or_video_transform(
image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill
)
return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video)
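# --- Illustrative usage sketch (editor's addition, not part of the upstream module) ---
# Shows how the RandAugment transform defined above could be applied to a plain
# uint8 tensor. The helper name `_example_rand_augment` is an assumption made for
# illustration only.
def _example_rand_augment() -> torch.Tensor:
    # uint8 image with [C, H, W] shape, as documented in the class docstring.
    img = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)
    augmenter = RandAugment(num_ops=2, magnitude=9)
    # Applies `num_ops` randomly chosen operations, each at magnitude bin 9 of 31.
    return augmenter(img)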
class TrivialAugmentWide(_AutoAugmentBase):
r"""Dataset-independent data-augmentation with TrivialAugment Wide, as described in
`"TrivialAugment: Tuning-free Yet State-of-the-Art Data Augmentation" <https://arxiv.org/abs/2103.10158>`_.
This transformation works on images and videos only.
If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
        num_magnitude_bins (int, optional): The number of different magnitude values. Default: 31.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
_v1_transform_cls = _transforms.TrivialAugmentWide
_AUGMENTATION_SPACE = {
"Identity": (lambda num_bins, height, width: None, False),
"ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
"ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
"TranslateX": (lambda num_bins, height, width: torch.linspace(0.0, 32.0, num_bins), True),
"TranslateY": (lambda num_bins, height, width: torch.linspace(0.0, 32.0, num_bins), True),
"Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 135.0, num_bins), True),
"Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
"Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
"Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
"Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
"Posterize": (
lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 6))).round().int(),
False,
),
"Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),
"AutoContrast": (lambda num_bins, height, width: None, False),
"Equalize": (lambda num_bins, height, width: None, False),
}
def __init__(
self,
num_magnitude_bins: int = 31,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
):
super().__init__(interpolation=interpolation, fill=fill)
self.num_magnitude_bins = num_magnitude_bins
def forward(self, *inputs: Any) -> Any:
flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs)
height, width = get_size(image_or_video) # type: ignore[arg-type]
transform_id, (magnitudes_fn, signed) = self._get_random_item(self._AUGMENTATION_SPACE)
magnitudes = magnitudes_fn(self.num_magnitude_bins, height, width)
if magnitudes is not None:
magnitude = float(magnitudes[int(torch.randint(self.num_magnitude_bins, ()))])
if signed and torch.rand(()) <= 0.5:
magnitude *= -1
else:
magnitude = 0.0
image_or_video = self._apply_image_or_video_transform(
image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill
)
return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video)
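# Illustrative usage sketch (editor's addition, not part of the upstream module):
# unlike RandAugment, TrivialAugmentWide takes no magnitude argument; forward()
# samples one operation and one magnitude bin per call. The helper name below is
# an assumption made for illustration only.
def _example_trivial_augment_wide() -> torch.Tensor:
    img = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)
    return TrivialAugmentWide(num_magnitude_bins=31)(img)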
class AugMix(_AutoAugmentBase):
r"""AugMix data augmentation method based on
`"AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty" <https://arxiv.org/abs/1912.02781>`_.
This transformation works on images and videos only.
If the input is :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
severity (int, optional): The severity of base augmentation operators. Default is ``3``.
mixture_width (int, optional): The number of augmentation chains. Default is ``3``.
chain_depth (int, optional): The depth of augmentation chains. A negative value denotes stochastic depth sampled from the interval [1, 3].
Default is ``-1``.
alpha (float, optional): The hyperparameter for the probability distributions. Default is ``1.0``.
all_ops (bool, optional): Use all operations (including brightness, contrast, color and sharpness). Default is ``True``.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
_v1_transform_cls = _transforms.AugMix
_PARTIAL_AUGMENTATION_SPACE = {
"ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
"ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
"TranslateX": (lambda num_bins, height, width: torch.linspace(0.0, width / 3.0, num_bins), True),
"TranslateY": (lambda num_bins, height, width: torch.linspace(0.0, height / 3.0, num_bins), True),
"Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True),
"Posterize": (
lambda num_bins, height, width: (4 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(),
False,
),
"Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),
"AutoContrast": (lambda num_bins, height, width: None, False),
"Equalize": (lambda num_bins, height, width: None, False),
}
_AUGMENTATION_SPACE: Dict[str, Tuple[Callable[[int, int, int], Optional[torch.Tensor]], bool]] = {
**_PARTIAL_AUGMENTATION_SPACE,
"Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
"Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
}
def __init__(
self,
severity: int = 3,
mixture_width: int = 3,
chain_depth: int = -1,
alpha: float = 1.0,
all_ops: bool = True,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
) -> None:
super().__init__(interpolation=interpolation, fill=fill)
self._PARAMETER_MAX = 10
if not (1 <= severity <= self._PARAMETER_MAX):
raise ValueError(f"The severity must be between [1, {self._PARAMETER_MAX}]. Got {severity} instead.")
self.severity = severity
self.mixture_width = mixture_width
self.chain_depth = chain_depth
self.alpha = alpha
self.all_ops = all_ops
def _sample_dirichlet(self, params: torch.Tensor) -> torch.Tensor:
# Must be on a separate method so that we can overwrite it in tests.
return torch._sample_dirichlet(params)
def forward(self, *inputs: Any) -> Any:
flat_inputs_with_spec, orig_image_or_video = self._flatten_and_extract_image_or_video(inputs)
height, width = get_size(orig_image_or_video) # type: ignore[arg-type]
if isinstance(orig_image_or_video, torch.Tensor):
image_or_video = orig_image_or_video
else: # isinstance(inpt, PIL.Image.Image):
image_or_video = F.pil_to_tensor(orig_image_or_video)
augmentation_space = self._AUGMENTATION_SPACE if self.all_ops else self._PARTIAL_AUGMENTATION_SPACE
orig_dims = list(image_or_video.shape)
expected_ndim = 5 if isinstance(orig_image_or_video, tv_tensors.Video) else 4
batch = image_or_video.reshape([1] * max(expected_ndim - image_or_video.ndim, 0) + orig_dims)
batch_dims = [batch.size(0)] + [1] * (batch.ndim - 1)
# Sample the beta weights for combining the original and augmented image or video. To get Beta, we use a
# Dirichlet with 2 parameters. The 1st column stores the weights of the original and the 2nd the ones of
# augmented image or video.
m = self._sample_dirichlet(
torch.tensor([self.alpha, self.alpha], device=batch.device).expand(batch_dims[0], -1)
)
# Sample the mixing weights and combine them with the ones sampled from Beta for the augmented images or videos.
combined_weights = self._sample_dirichlet(
torch.tensor([self.alpha] * self.mixture_width, device=batch.device).expand(batch_dims[0], -1)
) * m[:, 1].reshape([batch_dims[0], -1])
mix = m[:, 0].reshape(batch_dims) * batch
for i in range(self.mixture_width):
aug = batch
depth = self.chain_depth if self.chain_depth > 0 else int(torch.randint(low=1, high=4, size=(1,)).item())
for _ in range(depth):
transform_id, (magnitudes_fn, signed) = self._get_random_item(augmentation_space)
magnitudes = magnitudes_fn(self._PARAMETER_MAX, height, width)
if magnitudes is not None:
magnitude = float(magnitudes[int(torch.randint(self.severity, ()))])
if signed and torch.rand(()) <= 0.5:
magnitude *= -1
else:
magnitude = 0.0
aug = self._apply_image_or_video_transform(aug, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill) # type: ignore[assignment]
mix.add_(combined_weights[:, i].reshape(batch_dims) * aug)
mix = mix.reshape(orig_dims).to(dtype=image_or_video.dtype)
if isinstance(orig_image_or_video, (tv_tensors.Image, tv_tensors.Video)):
mix = tv_tensors.wrap(mix, like=orig_image_or_video)
elif isinstance(orig_image_or_video, PIL.Image.Image):
mix = F.to_pil_image(mix)
return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, mix)
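# Illustrative usage sketch (editor's addition, not part of the upstream module):
# AugMix blends the original input with `mixture_width` augmentation chains using
# the Dirichlet/Beta weights sampled in forward() above; the output keeps the
# input's shape and dtype. The helper name `_example_augmix` is an assumption.
def _example_augmix() -> torch.Tensor:
    img = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)
    augmenter = AugMix(severity=3, mixture_width=3, chain_depth=-1, alpha=1.0)
    return augmenter(img)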
```
|
===========================================================================================================================
SOURCE CODE FILE: _color.py
LINES: 1
SIZE: 16.96 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\_color.py
ENCODING: utf-8
```py
import collections.abc
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import torch
from torchvision import transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
from ._transform import _RandomApplyTransform
from ._utils import query_chw
class Grayscale(Transform):
"""Convert images or videos to grayscale.
If the input is a :class:`torch.Tensor`, it is expected
to have [..., 3 or 1, H, W] shape, where ... means an arbitrary number of leading dimensions
Args:
num_output_channels (int): (1 or 3) number of channels desired for output image
"""
_v1_transform_cls = _transforms.Grayscale
def __init__(self, num_output_channels: int = 1):
super().__init__()
self.num_output_channels = num_output_channels
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.rgb_to_grayscale, inpt, num_output_channels=self.num_output_channels)
class RandomGrayscale(_RandomApplyTransform):
"""Randomly convert image or videos to grayscale with a probability of p (default 0.1).
If the input is a :class:`torch.Tensor`, it is expected to have [..., 3 or 1, H, W] shape,
where ... means an arbitrary number of leading dimensions
The output has the same number of channels as the input.
Args:
p (float): probability that image should be converted to grayscale.
"""
_v1_transform_cls = _transforms.RandomGrayscale
def __init__(self, p: float = 0.1) -> None:
super().__init__(p=p)
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
num_input_channels, *_ = query_chw(flat_inputs)
return dict(num_input_channels=num_input_channels)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.rgb_to_grayscale, inpt, num_output_channels=params["num_input_channels"])
class RGB(Transform):
"""Convert images or videos to RGB (if they are already not RGB).
If the input is a :class:`torch.Tensor`, it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions
"""
def __init__(self):
super().__init__()
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.grayscale_to_rgb, inpt)
class ColorJitter(Transform):
"""Randomly change the brightness, contrast, saturation and hue of an image or video.
If the input is a :class:`torch.Tensor`, it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, mode "1", "I", "F" and modes with transparency (alpha channel) are not supported.
Args:
brightness (float or tuple of float (min, max)): How much to jitter brightness.
brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
            or the given [min, max]. Should be non-negative numbers.
contrast (float or tuple of float (min, max)): How much to jitter contrast.
contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
or the given [min, max]. Should be non-negative numbers.
saturation (float or tuple of float (min, max)): How much to jitter saturation.
saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
            or the given [min, max]. Should be non-negative numbers.
hue (float or tuple of float (min, max)): How much to jitter hue.
hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
            Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
            To jitter hue, the pixel values of the input image have to be non-negative for conversion to HSV space;
thus it does not work if you normalize your image to an interval with negative values,
or use an interpolation that generates negative values before using this function.
"""
_v1_transform_cls = _transforms.ColorJitter
def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
return {attr: value or 0 for attr, value in super()._extract_params_for_v1_transform().items()}
def __init__(
self,
brightness: Optional[Union[float, Sequence[float]]] = None,
contrast: Optional[Union[float, Sequence[float]]] = None,
saturation: Optional[Union[float, Sequence[float]]] = None,
hue: Optional[Union[float, Sequence[float]]] = None,
) -> None:
super().__init__()
self.brightness = self._check_input(brightness, "brightness")
self.contrast = self._check_input(contrast, "contrast")
self.saturation = self._check_input(saturation, "saturation")
self.hue = self._check_input(hue, "hue", center=0, bound=(-0.5, 0.5), clip_first_on_zero=False)
def _check_input(
self,
value: Optional[Union[float, Sequence[float]]],
name: str,
center: float = 1.0,
bound: Tuple[float, float] = (0, float("inf")),
clip_first_on_zero: bool = True,
) -> Optional[Tuple[float, float]]:
if value is None:
return None
if isinstance(value, (int, float)):
if value < 0:
raise ValueError(f"If {name} is a single number, it must be non negative.")
value = [center - value, center + value]
if clip_first_on_zero:
value[0] = max(value[0], 0.0)
elif isinstance(value, collections.abc.Sequence) and len(value) == 2:
value = [float(v) for v in value]
else:
raise TypeError(f"{name}={value} should be a single number or a sequence with length 2.")
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError(f"{name} values should be between {bound} and increasing, but got {value}.")
return None if value[0] == value[1] == center else (float(value[0]), float(value[1]))
@staticmethod
def _generate_value(left: float, right: float) -> float:
return torch.empty(1).uniform_(left, right).item()
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
fn_idx = torch.randperm(4)
b = None if self.brightness is None else self._generate_value(self.brightness[0], self.brightness[1])
c = None if self.contrast is None else self._generate_value(self.contrast[0], self.contrast[1])
s = None if self.saturation is None else self._generate_value(self.saturation[0], self.saturation[1])
h = None if self.hue is None else self._generate_value(self.hue[0], self.hue[1])
return dict(fn_idx=fn_idx, brightness_factor=b, contrast_factor=c, saturation_factor=s, hue_factor=h)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
output = inpt
brightness_factor = params["brightness_factor"]
contrast_factor = params["contrast_factor"]
saturation_factor = params["saturation_factor"]
hue_factor = params["hue_factor"]
for fn_id in params["fn_idx"]:
if fn_id == 0 and brightness_factor is not None:
output = self._call_kernel(F.adjust_brightness, output, brightness_factor=brightness_factor)
elif fn_id == 1 and contrast_factor is not None:
output = self._call_kernel(F.adjust_contrast, output, contrast_factor=contrast_factor)
elif fn_id == 2 and saturation_factor is not None:
output = self._call_kernel(F.adjust_saturation, output, saturation_factor=saturation_factor)
elif fn_id == 3 and hue_factor is not None:
output = self._call_kernel(F.adjust_hue, output, hue_factor=hue_factor)
return output
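# Illustrative usage sketch (editor's addition, not part of the upstream module):
# each non-None factor is drawn uniformly from the interval computed by
# _check_input(), e.g. brightness=0.2 maps to [0.8, 1.2], and the four
# adjustments run in a random order. The helper name is an assumption.
def _example_color_jitter() -> torch.Tensor:
    img = torch.rand(3, 64, 64)  # float image in [0, 1]
    jitter = ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1)
    return jitter(img)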
class RandomChannelPermutation(Transform):
"""Randomly permute the channels of an image or video"""
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
num_channels, *_ = query_chw(flat_inputs)
return dict(permutation=torch.randperm(num_channels))
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.permute_channels, inpt, params["permutation"])
class RandomPhotometricDistort(Transform):
"""Randomly distorts the image or video as used in `SSD: Single Shot
MultiBox Detector <https://arxiv.org/abs/1512.02325>`_.
This transform relies on :class:`~torchvision.transforms.v2.ColorJitter`
under the hood to adjust the contrast, saturation, hue, brightness, and also
randomly permutes channels.
Args:
brightness (tuple of float (min, max), optional): How much to jitter brightness.
            brightness_factor is chosen uniformly from [min, max]. Should be non-negative numbers.
contrast (tuple of float (min, max), optional): How much to jitter contrast.
contrast_factor is chosen uniformly from [min, max]. Should be non-negative numbers.
saturation (tuple of float (min, max), optional): How much to jitter saturation.
            saturation_factor is chosen uniformly from [min, max]. Should be non-negative numbers.
hue (tuple of float (min, max), optional): How much to jitter hue.
hue_factor is chosen uniformly from [min, max]. Should have -0.5 <= min <= max <= 0.5.
            To jitter hue, the pixel values of the input image have to be non-negative for conversion to HSV space;
thus it does not work if you normalize your image to an interval with negative values,
or use an interpolation that generates negative values before using this function.
        p (float, optional): probability for each distortion operation (contrast, saturation, ...) to be applied.
            Default is 0.5.
"""
def __init__(
self,
brightness: Tuple[float, float] = (0.875, 1.125),
contrast: Tuple[float, float] = (0.5, 1.5),
saturation: Tuple[float, float] = (0.5, 1.5),
hue: Tuple[float, float] = (-0.05, 0.05),
p: float = 0.5,
):
super().__init__()
self.brightness = brightness
self.contrast = contrast
self.hue = hue
self.saturation = saturation
self.p = p
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
num_channels, *_ = query_chw(flat_inputs)
params: Dict[str, Any] = {
key: ColorJitter._generate_value(range[0], range[1]) if torch.rand(1) < self.p else None
for key, range in [
("brightness_factor", self.brightness),
("contrast_factor", self.contrast),
("saturation_factor", self.saturation),
("hue_factor", self.hue),
]
}
params["contrast_before"] = bool(torch.rand(()) < 0.5)
params["channel_permutation"] = torch.randperm(num_channels) if torch.rand(1) < self.p else None
return params
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["brightness_factor"] is not None:
inpt = self._call_kernel(F.adjust_brightness, inpt, brightness_factor=params["brightness_factor"])
if params["contrast_factor"] is not None and params["contrast_before"]:
inpt = self._call_kernel(F.adjust_contrast, inpt, contrast_factor=params["contrast_factor"])
if params["saturation_factor"] is not None:
inpt = self._call_kernel(F.adjust_saturation, inpt, saturation_factor=params["saturation_factor"])
if params["hue_factor"] is not None:
inpt = self._call_kernel(F.adjust_hue, inpt, hue_factor=params["hue_factor"])
if params["contrast_factor"] is not None and not params["contrast_before"]:
inpt = self._call_kernel(F.adjust_contrast, inpt, contrast_factor=params["contrast_factor"])
if params["channel_permutation"] is not None:
inpt = self._call_kernel(F.permute_channels, inpt, permutation=params["channel_permutation"])
return inpt
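# Illustrative usage sketch (editor's addition, not part of the upstream module):
# every distortion (and the channel permutation) is applied independently with
# probability `p`, and contrast is randomly placed before or after the other
# adjustments. The helper name `_example_photometric_distort` is an assumption.
def _example_photometric_distort() -> torch.Tensor:
    img = torch.rand(3, 64, 64)
    return RandomPhotometricDistort(p=0.5)(img)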
class RandomEqualize(_RandomApplyTransform):
"""Equalize the histogram of the given image or video with a given probability.
If the input is a :class:`torch.Tensor`, it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".
Args:
p (float): probability of the image being equalized. Default value is 0.5
"""
_v1_transform_cls = _transforms.RandomEqualize
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.equalize, inpt)
class RandomInvert(_RandomApplyTransform):
"""Inverts the colors of the given image or video with a given probability.
If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
p (float): probability of the image being color inverted. Default value is 0.5
"""
_v1_transform_cls = _transforms.RandomInvert
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.invert, inpt)
class RandomPosterize(_RandomApplyTransform):
"""Posterize the image or video with a given probability by reducing the
number of bits for each color channel.
If the input is a :class:`torch.Tensor`, it should be of type torch.uint8,
and it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
bits (int): number of bits to keep for each channel (0-8)
p (float): probability of the image being posterized. Default value is 0.5
"""
_v1_transform_cls = _transforms.RandomPosterize
def __init__(self, bits: int, p: float = 0.5) -> None:
super().__init__(p=p)
self.bits = bits
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.posterize, inpt, bits=self.bits)
class RandomSolarize(_RandomApplyTransform):
"""Solarize the image or video with a given probability by inverting all pixel
values above a threshold.
If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
        threshold (float): all pixels equal to or above this value are inverted.
p (float): probability of the image being solarized. Default value is 0.5
"""
_v1_transform_cls = _transforms.RandomSolarize
def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
params = super()._extract_params_for_v1_transform()
params["threshold"] = float(params["threshold"])
return params
def __init__(self, threshold: float, p: float = 0.5) -> None:
super().__init__(p=p)
self.threshold = threshold
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.solarize, inpt, threshold=self.threshold)
class RandomAutocontrast(_RandomApplyTransform):
"""Autocontrast the pixels of the given image or video with a given probability.
If the input is a :class:`torch.Tensor`, it is expected
to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "L" or "RGB".
Args:
p (float): probability of the image being autocontrasted. Default value is 0.5
"""
_v1_transform_cls = _transforms.RandomAutocontrast
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.autocontrast, inpt)
class RandomAdjustSharpness(_RandomApplyTransform):
"""Adjust the sharpness of the image or video with a given probability.
If the input is a :class:`torch.Tensor`,
it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
sharpness_factor (float): How much to adjust the sharpness. Can be
any non-negative number. 0 gives a blurred image, 1 gives the
original image while 2 increases the sharpness by a factor of 2.
p (float): probability of the image being sharpened. Default value is 0.5
"""
_v1_transform_cls = _transforms.RandomAdjustSharpness
def __init__(self, sharpness_factor: float, p: float = 0.5) -> None:
super().__init__(p=p)
self.sharpness_factor = sharpness_factor
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.adjust_sharpness, inpt, sharpness_factor=self.sharpness_factor)
```
|
===============================================================================================================================
SOURCE CODE FILE: _container.py
LINES: 3
SIZE: 6.08 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\_container.py
ENCODING: utf-8
```py
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
import torch
from torch import nn
from torchvision import transforms as _transforms
from torchvision.transforms.v2 import Transform
class Compose(Transform):
"""Composes several transforms together.
This transform does not support torchscript.
Please, see the note below.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.PILToTensor(),
>>> transforms.ConvertImageDtype(torch.float),
>>> ])
.. note::
In order to script the transformations, please use ``torch.nn.Sequential`` as below.
>>> transforms = torch.nn.Sequential(
>>> transforms.CenterCrop(10),
>>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
>>> )
>>> scripted_transforms = torch.jit.script(transforms)
        Make sure to use only scriptable transformations, i.e. those that work with ``torch.Tensor`` and do not
        require `lambda` functions or ``PIL.Image``.
"""
def __init__(self, transforms: Sequence[Callable]) -> None:
super().__init__()
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
elif not transforms:
raise ValueError("Pass at least one transform")
self.transforms = transforms
def forward(self, *inputs: Any) -> Any:
needs_unpacking = len(inputs) > 1
for transform in self.transforms:
outputs = transform(*inputs)
inputs = outputs if needs_unpacking else (outputs,)
return outputs
def extra_repr(self) -> str:
format_string = []
for t in self.transforms:
format_string.append(f" {t}")
return "\n".join(format_string)
class RandomApply(Transform):
"""Apply randomly a list of transformations with a given probability.
.. note::
In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of
transforms as shown below:
>>> transforms = transforms.RandomApply(torch.nn.ModuleList([
>>> transforms.ColorJitter(),
>>> ]), p=0.3)
>>> scripted_transforms = torch.jit.script(transforms)
        Make sure to use only scriptable transformations, i.e. those that work with ``torch.Tensor`` and do not
        require `lambda` functions or ``PIL.Image``.
Args:
transforms (sequence or torch.nn.Module): list of transformations
p (float): probability of applying the list of transforms
"""
_v1_transform_cls = _transforms.RandomApply
def __init__(self, transforms: Union[Sequence[Callable], nn.ModuleList], p: float = 0.5) -> None:
super().__init__()
if not isinstance(transforms, (Sequence, nn.ModuleList)):
raise TypeError("Argument transforms should be a sequence of callables or a `nn.ModuleList`")
self.transforms = transforms
if not (0.0 <= p <= 1.0):
raise ValueError("`p` should be a floating point value in the interval [0.0, 1.0].")
self.p = p
def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
return {"transforms": self.transforms, "p": self.p}
def forward(self, *inputs: Any) -> Any:
needs_unpacking = len(inputs) > 1
if torch.rand(1) >= self.p:
return inputs if needs_unpacking else inputs[0]
for transform in self.transforms:
outputs = transform(*inputs)
inputs = outputs if needs_unpacking else (outputs,)
return outputs
def extra_repr(self) -> str:
format_string = []
for t in self.transforms:
format_string.append(f" {t}")
return "\n".join(format_string)
class RandomChoice(Transform):
"""Apply single transformation randomly picked from a list.
This transform does not support torchscript.
Args:
transforms (sequence or torch.nn.Module): list of transformations
p (list of floats or None, optional): probability of each transform being picked.
If ``p`` doesn't sum to 1, it is automatically normalized. If ``None``
(default), all transforms have the same probability.
"""
def __init__(
self,
transforms: Sequence[Callable],
p: Optional[List[float]] = None,
) -> None:
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
if p is None:
p = [1] * len(transforms)
elif len(p) != len(transforms):
raise ValueError(f"Length of p doesn't match the number of transforms: {len(p)} != {len(transforms)}")
super().__init__()
self.transforms = transforms
total = sum(p)
self.p = [prob / total for prob in p]
def forward(self, *inputs: Any) -> Any:
idx = int(torch.multinomial(torch.tensor(self.p), 1))
transform = self.transforms[idx]
return transform(*inputs)
class RandomOrder(Transform):
"""Apply a list of transformations in a random order.
This transform does not support torchscript.
Args:
transforms (sequence or torch.nn.Module): list of transformations
"""
def __init__(self, transforms: Sequence[Callable]) -> None:
if not isinstance(transforms, Sequence):
raise TypeError("Argument transforms should be a sequence of callables")
super().__init__()
self.transforms = transforms
def forward(self, *inputs: Any) -> Any:
needs_unpacking = len(inputs) > 1
for idx in torch.randperm(len(self.transforms)):
transform = self.transforms[idx]
outputs = transform(*inputs)
inputs = outputs if needs_unpacking else (outputs,)
return outputs
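# Illustrative usage sketch (editor's addition, not part of the upstream module):
# RandomChoice normalizes `p` (here 3:1 becomes 0.75/0.25) and applies a single
# callable per call, while RandomOrder applies every callable in a shuffled
# order. The helper name `_example_containers` is an assumption.
def _example_containers() -> torch.Tensor:
    x = torch.zeros(3, 8, 8)
    pick_one = RandomChoice([lambda t: t + 1, lambda t: t + 2], p=[3, 1])
    shuffled = RandomOrder([lambda t: t + 1, lambda t: t * 2])
    return shuffled(pick_one(x))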
```
|
================================================================================================================================
SOURCE CODE FILE: _deprecated.py
LINES: 1
SIZE: 1.95 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\_deprecated.py
ENCODING: utf-8
```py
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import functional as _F
from torchvision.transforms.v2 import Transform
class ToTensor(Transform):
"""[DEPRECATED] Use ``v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])`` instead.
Convert a PIL Image or ndarray to tensor and scale the values accordingly.
.. warning::
:class:`v2.ToTensor` is deprecated and will be removed in a future release.
Please use instead ``v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])``.
Output is equivalent up to float precision.
This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
or if the numpy.ndarray has dtype = np.uint8
In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
"""
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])`."
"Output is equivalent up to float precision."
)
super().__init__()
def transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
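# Illustrative usage sketch (editor's addition, not part of the upstream module):
# the drop-in replacement recommended by the deprecation warning above. The
# helper name `_example_to_tensor_replacement` is an assumption.
def _example_to_tensor_replacement(inpt: Union[PIL.Image.Image, np.ndarray]) -> torch.Tensor:
    from torchvision.transforms import v2  # local import keeps the sketch self-contained
    replacement = v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])
    # Equivalent to ToTensor() up to float precision: CHW float32 in [0.0, 1.0].
    return replacement(inpt)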
```
|
==============================================================================================================================
SOURCE CODE FILE: _geometry.py
LINES: 1
SIZE: 67.45 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\_geometry.py
ENCODING: utf-8
```py
import math
import numbers
import warnings
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Type, Union
import PIL.Image
import torch
from torchvision import transforms as _transforms, tv_tensors
from torchvision.ops.boxes import box_iou
from torchvision.transforms.functional import _get_perspective_coeffs
from torchvision.transforms.v2 import functional as F, InterpolationMode, Transform
from torchvision.transforms.v2.functional._utils import _FillType
from ._transform import _RandomApplyTransform
from ._utils import (
_check_padding_arg,
_check_padding_mode_arg,
_check_sequence_input,
_get_fill,
_setup_angle,
_setup_fill_arg,
_setup_number_or_seq,
_setup_size,
get_bounding_boxes,
has_all,
has_any,
is_pure_tensor,
query_size,
)
class RandomHorizontalFlip(_RandomApplyTransform):
"""Horizontally flip the input with a given probability.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
p (float, optional): probability of the input being flipped. Default value is 0.5
"""
_v1_transform_cls = _transforms.RandomHorizontalFlip
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.horizontal_flip, inpt)
class RandomVerticalFlip(_RandomApplyTransform):
"""Vertically flip the input with a given probability.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
p (float, optional): probability of the input being flipped. Default value is 0.5
"""
_v1_transform_cls = _transforms.RandomVerticalFlip
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.vertical_flip, inpt)
class Resize(Transform):
"""Resize the input to the given size.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
size (sequence, int, or None): Desired
output size.
- If size is a sequence like (h, w), output size will be matched to this.
- If size is an int, smaller edge of the image will be matched to this
number. i.e, if height > width, then image will be rescaled to
(size * height / width, size).
- If size is None, the output shape is determined by the ``max_size``
parameter.
.. note::
In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.NEAREST_EXACT``,
``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
max_size (int, optional): The maximum allowed for the longer edge of
the resized image.
- If ``size`` is an int: if the longer edge of the image is greater
than ``max_size`` after being resized according to ``size``,
``size`` will be overruled so that the longer edge is equal to
``max_size``. As a result, the smaller edge may be shorter than
``size``. This is only supported if ``size`` is an int (or a
sequence of length 1 in torchscript mode).
- If ``size`` is None: the longer edge of the image will be matched
to max_size. i.e, if height > width, then image will be rescaled
to (max_size, max_size * width / height).
This should be left to ``None`` (default) when ``size`` is a
sequence.
antialias (bool, optional): Whether to apply antialiasing.
It only affects **tensors** with bilinear or bicubic modes and it is
ignored otherwise: on PIL images, antialiasing is always applied on
bilinear or bicubic modes; on other modes (for PIL images and
tensors), antialiasing makes no sense and this parameter is ignored.
Possible values are:
- ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
              Other modes aren't affected. This is probably what you want to use.
- ``False``: will not apply antialiasing for tensors on any mode. PIL
images are still antialiased on bilinear or bicubic modes, because
              PIL doesn't support disabling antialiasing.
- ``None``: equivalent to ``False`` for tensors and ``True`` for
PIL images. This value exists for legacy reasons and you probably
don't want to use it unless you really know what you are doing.
The default value changed from ``None`` to ``True`` in
v0.17, for the PIL and Tensor backends to be consistent.
"""
_v1_transform_cls = _transforms.Resize
def __init__(
self,
size: Union[int, Sequence[int], None],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
max_size: Optional[int] = None,
antialias: Optional[bool] = True,
) -> None:
super().__init__()
if isinstance(size, int):
size = [size]
elif isinstance(size, Sequence) and len(size) in {1, 2}:
size = list(size)
elif size is None:
if not isinstance(max_size, int):
raise ValueError(f"max_size must be an integer when size is None, but got {max_size} instead.")
else:
raise ValueError(
f"size can be an integer, a sequence of one or two integers, or None, but got {size} instead."
)
self.size = size
self.interpolation = interpolation
self.max_size = max_size
self.antialias = antialias
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(
F.resize,
inpt,
self.size,
interpolation=self.interpolation,
max_size=self.max_size,
antialias=self.antialias,
)
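# Illustrative usage sketch (editor's addition, not part of the upstream module):
# demonstrates the three documented `size` modes on a 200x100 image. The helper
# name `_example_resize_sizes` is an assumption made for illustration only.
def _example_resize_sizes() -> None:
    img = torch.rand(3, 200, 100)  # H=200, W=100
    assert Resize(size=(64, 64))(img).shape[-2:] == (64, 64)  # exact (h, w)
    assert Resize(size=50)(img).shape[-2:] == (100, 50)  # smaller edge matched to 50
    assert Resize(size=None, max_size=100)(img).shape[-2:] == (100, 50)  # longer edge matched to 100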
class CenterCrop(Transform):
"""Crop the input at the center.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
"""
_v1_transform_cls = _transforms.CenterCrop
def __init__(self, size: Union[int, Sequence[int]]):
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.center_crop, inpt, output_size=self.size)
class RandomResizedCrop(Transform):
"""Crop a random portion of the input and resize it to a given size.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
A crop of the original input is made: the crop has a random area (H * W)
and a random aspect ratio. This crop is finally resized to the given
size. This is popularly used to train the Inception networks.
Args:
size (int or sequence): expected output size of the crop, for each edge. If size is an
int instead of sequence like (h, w), a square output size ``(size, size)`` is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
.. note::
In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
scale (tuple of float, optional): Specifies the lower and upper bounds for the random area of the crop,
before resizing. The scale is defined with respect to the area of the original image.
ratio (tuple of float, optional): lower and upper bounds for the random aspect ratio of the crop, before
resizing.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.NEAREST_EXACT``,
``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
antialias (bool, optional): Whether to apply antialiasing.
It only affects **tensors** with bilinear or bicubic modes and it is
ignored otherwise: on PIL images, antialiasing is always applied on
bilinear or bicubic modes; on other modes (for PIL images and
tensors), antialiasing makes no sense and this parameter is ignored.
Possible values are:
- ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
              Other modes aren't affected. This is probably what you want to use.
- ``False``: will not apply antialiasing for tensors on any mode. PIL
images are still antialiased on bilinear or bicubic modes, because
              PIL doesn't support disabling antialiasing.
- ``None``: equivalent to ``False`` for tensors and ``True`` for
PIL images. This value exists for legacy reasons and you probably
don't want to use it unless you really know what you are doing.
The default value changed from ``None`` to ``True`` in
v0.17, for the PIL and Tensor backends to be consistent.
"""
_v1_transform_cls = _transforms.RandomResizedCrop
def __init__(
self,
size: Union[int, Sequence[int]],
scale: Tuple[float, float] = (0.08, 1.0),
ratio: Tuple[float, float] = (3.0 / 4.0, 4.0 / 3.0),
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
antialias: Optional[bool] = True,
) -> None:
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
if not isinstance(scale, Sequence) or len(scale) != 2:
raise TypeError("Scale should be a sequence of two floats.")
if not isinstance(ratio, Sequence) or len(ratio) != 2:
raise TypeError("Ratio should be a sequence of two floats.")
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("Scale and ratio should be of kind (min, max)")
self.scale = scale
self.ratio = ratio
self.interpolation = interpolation
self.antialias = antialias
self._log_ratio = torch.log(torch.tensor(self.ratio))
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
area = height * width
log_ratio = self._log_ratio
for _ in range(10):
target_area = area * torch.empty(1).uniform_(self.scale[0], self.scale[1]).item()
aspect_ratio = torch.exp(
torch.empty(1).uniform_(
log_ratio[0], # type: ignore[arg-type]
log_ratio[1], # type: ignore[arg-type]
)
).item()
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if 0 < w <= width and 0 < h <= height:
i = torch.randint(0, height - h + 1, size=(1,)).item()
j = torch.randint(0, width - w + 1, size=(1,)).item()
break
else:
# Fallback to central crop
in_ratio = float(width) / float(height)
if in_ratio < min(self.ratio):
w = width
h = int(round(w / min(self.ratio)))
elif in_ratio > max(self.ratio):
h = height
w = int(round(h * max(self.ratio)))
else: # whole image
w = width
h = height
i = (height - h) // 2
j = (width - w) // 2
return dict(top=i, left=j, height=h, width=w)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(
F.resized_crop, inpt, **params, size=self.size, interpolation=self.interpolation, antialias=self.antialias
)
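# Illustrative usage sketch (editor's addition, not part of the upstream module):
# make_params() above samples a crop covering 8%-100% of the area at an aspect
# ratio in [3/4, 4/3]; the crop is then resized to the requested size, so the
# output shape is fixed. The helper name is an assumption.
def _example_random_resized_crop() -> torch.Tensor:
    img = torch.rand(3, 256, 256)
    rrc = RandomResizedCrop(size=224, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0))
    return rrc(img)  # always [3, 224, 224], regardless of the sampled crop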
class FiveCrop(Transform):
"""Crop the image or video into four corners and the central crop.
If the input is a :class:`torch.Tensor` or a :class:`~torchvision.tv_tensors.Image` or a
:class:`~torchvision.tv_tensors.Video` it can have arbitrary number of leading batch dimensions.
For example, the image can have ``[..., C, H, W]`` shape.
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
inputs and targets your Dataset returns. See below for an example of how to deal with
this.
Args:
size (sequence or int): Desired output size of the crop. If size is an ``int``
instead of sequence like (h, w), a square crop of size (size, size) is made.
If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
Example:
>>> class BatchMultiCrop(transforms.Transform):
... def forward(self, sample: Tuple[Tuple[Union[tv_tensors.Image, tv_tensors.Video], ...], int]):
... images_or_videos, labels = sample
... batch_size = len(images_or_videos)
... image_or_video = images_or_videos[0]
... images_or_videos = tv_tensors.wrap(torch.stack(images_or_videos), like=image_or_video)
... labels = torch.full((batch_size,), label, device=images_or_videos.device)
... return images_or_videos, labels
...
>>> image = tv_tensors.Image(torch.rand(3, 256, 256))
>>> label = 3
>>> transform = transforms.Compose([transforms.FiveCrop(224), BatchMultiCrop()])
>>> images, labels = transform(image, label)
>>> images.shape
torch.Size([5, 3, 224, 224])
>>> labels
tensor([3, 3, 3, 3, 3])
"""
_v1_transform_cls = _transforms.FiveCrop
def __init__(self, size: Union[int, Sequence[int]]) -> None:
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
def _call_kernel(self, functional: Callable, inpt: Any, *args: Any, **kwargs: Any) -> Any:
if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask)):
warnings.warn(
f"{type(self).__name__}() is currently passing through inputs of type "
f"tv_tensors.{type(inpt).__name__}. This will likely change in the future."
)
return super()._call_kernel(functional, inpt, *args, **kwargs)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.five_crop, inpt, self.size)
def check_inputs(self, flat_inputs: List[Any]) -> None:
if has_any(flat_inputs, tv_tensors.BoundingBoxes, tv_tensors.Mask):
raise TypeError(f"BoundingBoxes'es and Mask's are not supported by {type(self).__name__}()")
class TenCrop(Transform):
"""Crop the image or video into four corners and the central crop plus the flipped version of
these (horizontal flipping is used by default).
If the input is a :class:`torch.Tensor` or a :class:`~torchvision.tv_tensors.Image` or a
:class:`~torchvision.tv_tensors.Video` it can have arbitrary number of leading batch dimensions.
For example, the image can have ``[..., C, H, W]`` shape.
See :class:`~torchvision.transforms.v2.FiveCrop` for an example.
.. Note::
This transform returns a tuple of images and there may be a mismatch in the number of
        inputs and targets your Dataset returns. See :class:`~torchvision.transforms.v2.FiveCrop` for an example
        of how to deal with this.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
vertical_flip (bool, optional): Use vertical flipping instead of horizontal
"""
_v1_transform_cls = _transforms.TenCrop
def __init__(self, size: Union[int, Sequence[int]], vertical_flip: bool = False) -> None:
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
self.vertical_flip = vertical_flip
def _call_kernel(self, functional: Callable, inpt: Any, *args: Any, **kwargs: Any) -> Any:
if isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask)):
warnings.warn(
f"{type(self).__name__}() is currently passing through inputs of type "
f"tv_tensors.{type(inpt).__name__}. This will likely change in the future."
)
return super()._call_kernel(functional, inpt, *args, **kwargs)
def check_inputs(self, flat_inputs: List[Any]) -> None:
if has_any(flat_inputs, tv_tensors.BoundingBoxes, tv_tensors.Mask):
raise TypeError(f"BoundingBoxes'es and Mask's are not supported by {type(self).__name__}()")
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.ten_crop, inpt, self.size, vertical_flip=self.vertical_flip)
class Pad(Transform):
"""Pad the input on all sides with the given "pad" value.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
padding (int or sequence): Padding on each border. If a single int is provided this
is used to pad all borders. If sequence of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a sequence of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively.
.. note::
In torchscript mode padding as single int is not supported, use a sequence of
length 1: ``[padding, ]``.
fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant.
Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively.
Fill value can be also a dictionary mapping data type to the fill value, e.g.
``fill={tv_tensors.Image: 127, tv_tensors.Mask: 0}`` where ``Image`` will be filled with 127 and
``Mask`` will be filled with 0.
padding_mode (str, optional): Type of padding. Should be: constant, edge, reflect or symmetric.
Default is "constant".
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value at the edge of the image.
- reflect: pads with reflection of image without repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
"""
_v1_transform_cls = _transforms.Pad
def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
params = super()._extract_params_for_v1_transform()
if not (params["fill"] is None or isinstance(params["fill"], (int, float))):
raise ValueError(f"{type(self).__name__}() can only be scripted for a scalar `fill`, but got {self.fill}.")
return params
def __init__(
self,
padding: Union[int, Sequence[int]],
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant",
) -> None:
super().__init__()
_check_padding_arg(padding)
_check_padding_mode_arg(padding_mode)
# This cast does Sequence[int] -> List[int] and is required to make mypy happy
if not isinstance(padding, int):
padding = list(padding)
self.padding = padding
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
fill = _get_fill(self._fill, type(inpt))
return self._call_kernel(F.pad, inpt, padding=self.padding, fill=fill, padding_mode=self.padding_mode) # type: ignore[arg-type]
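# Illustrative usage sketch (editor's addition, not part of the upstream module):
# reproduces the reflect/symmetric example from the docstring above on a
# single-row image. The helper name `_example_pad_modes` is an assumption.
def _example_pad_modes() -> None:
    row = torch.tensor([[[1.0, 2.0, 3.0, 4.0]]])  # [C=1, H=1, W=4]
    reflected = Pad(padding=[2, 0], padding_mode="reflect")(row)
    symmetric = Pad(padding=[2, 0], padding_mode="symmetric")(row)
    assert reflected[0, 0].tolist() == [3, 2, 1, 2, 3, 4, 3, 2]
    assert symmetric[0, 0].tolist() == [2, 1, 1, 2, 3, 4, 4, 3]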
class RandomZoomOut(_RandomApplyTransform):
""" "Zoom out" transformation from
`"SSD: Single Shot MultiBox Detector" <https://arxiv.org/abs/1512.02325>`_.
This transformation randomly pads images, videos, bounding boxes and masks creating a zoom out effect.
Output spatial size is randomly sampled from original size up to a maximum size configured
with ``side_range`` parameter:
.. code-block:: python
r = uniform_sample(side_range[0], side_range[1])
output_width = input_width * r
output_height = input_height * r
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
        fill (number or tuple or dict, optional): Pixel fill value used for the padded area.
Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively.
Fill value can be also a dictionary mapping data type to the fill value, e.g.
``fill={tv_tensors.Image: 127, tv_tensors.Mask: 0}`` where ``Image`` will be filled with 127 and
``Mask`` will be filled with 0.
side_range (sequence of floats, optional): tuple of two floats defines minimum and maximum factors to
scale the input size.
p (float, optional): probability that the zoom operation will be performed.
"""
def __init__(
self,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
side_range: Sequence[float] = (1.0, 4.0),
p: float = 0.5,
) -> None:
super().__init__(p=p)
self.fill = fill
self._fill = _setup_fill_arg(fill)
_check_sequence_input(side_range, "side_range", req_sizes=(2,))
self.side_range = side_range
if side_range[0] < 1.0 or side_range[0] > side_range[1]:
raise ValueError(f"Invalid side range provided {side_range}.")
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
orig_h, orig_w = query_size(flat_inputs)
r = self.side_range[0] + torch.rand(1) * (self.side_range[1] - self.side_range[0])
canvas_width = int(orig_w * r)
canvas_height = int(orig_h * r)
r = torch.rand(2)
left = int((canvas_width - orig_w) * r[0])
top = int((canvas_height - orig_h) * r[1])
right = canvas_width - (left + orig_w)
bottom = canvas_height - (top + orig_h)
padding = [left, top, right, bottom]
return dict(padding=padding)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
fill = _get_fill(self._fill, type(inpt))
return self._call_kernel(F.pad, inpt, **params, fill=fill)
class RandomRotation(Transform):
"""Rotate the input by angle.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
degrees (sequence or number): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be [-degrees, +degrees].
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center (see note below) and no translation.
center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner.
Default is the center of the image.
.. note::
In theory, setting ``center`` has no effect if ``expand=True``, since the image center will become the
center of rotation. In practice however, due to numerical precision, this can lead to off-by-one
differences of the resulting image size compared to using the image center in the first place. Thus, when
setting ``expand=True``, it's best to leave ``center=None`` (default).
fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant.
Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively.
Fill value can be also a dictionary mapping data type to the fill value, e.g.
``fill={tv_tensors.Image: 127, tv_tensors.Mask: 0}`` where ``Image`` will be filled with 127 and
``Mask`` will be filled with 0.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
_v1_transform_cls = _transforms.RandomRotation
def __init__(
self,
degrees: Union[numbers.Number, Sequence],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
) -> None:
super().__init__()
self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2,))
self.interpolation = interpolation
self.expand = expand
self.fill = fill
self._fill = _setup_fill_arg(fill)
if center is not None:
_check_sequence_input(center, "center", req_sizes=(2,))
self.center = center
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
angle = torch.empty(1).uniform_(self.degrees[0], self.degrees[1]).item()
return dict(angle=angle)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
fill = _get_fill(self._fill, type(inpt))
return self._call_kernel(
F.rotate,
inpt,
**params,
interpolation=self.interpolation,
expand=self.expand,
center=self.center,
fill=fill,
)
class RandomAffine(Transform):
"""Random affine transformation the input keeping center invariant.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
degrees (sequence or number): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees). Set to 0 to deactivate rotations.
translate (tuple, optional): tuple of maximum absolute fraction for horizontal
and vertical translations. For example translate=(a, b), then horizontal shift
is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is
randomly sampled from the range a <= scale <= b. Will keep original scale by default.
shear (sequence or number, optional): Range of degrees to select from.
If shear is a number, a shear parallel to the x-axis in the range (-shear, +shear)
will be applied. Else if shear is a sequence of 2 values a shear parallel to the x-axis in the
range (shear[0], shear[1]) will be applied. Else if shear is a sequence of 4 values,
an x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.
Will not apply shear by default.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant.
Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively.
Fill value can be also a dictionary mapping data type to the fill value, e.g.
``fill={tv_tensors.Image: 127, tv_tensors.Mask: 0}`` where ``Image`` will be filled with 127 and
``Mask`` will be filled with 0.
center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner.
Default is the center of the image.
.. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
"""
_v1_transform_cls = _transforms.RandomAffine
def __init__(
self,
degrees: Union[numbers.Number, Sequence],
translate: Optional[Sequence[float]] = None,
scale: Optional[Sequence[float]] = None,
shear: Optional[Union[int, float, Sequence[float]]] = None,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
center: Optional[List[float]] = None,
) -> None:
super().__init__()
self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2,))
if translate is not None:
_check_sequence_input(translate, "translate", req_sizes=(2,))
for t in translate:
if not (0.0 <= t <= 1.0):
raise ValueError("translation values should be between 0 and 1")
self.translate = translate
if scale is not None:
_check_sequence_input(scale, "scale", req_sizes=(2,))
for s in scale:
if s <= 0:
raise ValueError("scale values should be positive")
self.scale = scale
if shear is not None:
self.shear = _setup_angle(shear, name="shear", req_sizes=(2, 4))
else:
self.shear = shear
self.interpolation = interpolation
self.fill = fill
self._fill = _setup_fill_arg(fill)
if center is not None:
_check_sequence_input(center, "center", req_sizes=(2,))
self.center = center
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
angle = torch.empty(1).uniform_(self.degrees[0], self.degrees[1]).item()
if self.translate is not None:
max_dx = float(self.translate[0] * width)
max_dy = float(self.translate[1] * height)
tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))
ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))
translate = (tx, ty)
else:
translate = (0, 0)
if self.scale is not None:
scale = torch.empty(1).uniform_(self.scale[0], self.scale[1]).item()
else:
scale = 1.0
shear_x = shear_y = 0.0
if self.shear is not None:
shear_x = torch.empty(1).uniform_(self.shear[0], self.shear[1]).item()
if len(self.shear) == 4:
shear_y = torch.empty(1).uniform_(self.shear[2], self.shear[3]).item()
shear = (shear_x, shear_y)
return dict(angle=angle, translate=translate, scale=scale, shear=shear)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
fill = _get_fill(self._fill, type(inpt))
return self._call_kernel(
F.affine,
inpt,
**params,
interpolation=self.interpolation,
fill=fill,
center=self.center,
)
class RandomCrop(Transform):
"""Crop the input at a random location.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
size (sequence or int): Desired output size of the crop. If size is an
int instead of sequence like (h, w), a square crop (size, size) is
made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
padding (int or sequence, optional): Optional padding on each border
of the image. Default is None. If a single int is provided this
is used to pad all borders. If sequence of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a sequence of length 4 is provided
this is the padding for the left, top, right and bottom borders respectively.
.. note::
In torchscript mode padding as single int is not supported, use a sequence of
length 1: ``[padding, ]``.
pad_if_needed (boolean, optional): It will pad the image if smaller than the
desired size to avoid raising an exception. Since cropping is done
after padding, the padding seems to be done at a random offset.
fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant.
Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively.
Fill value can be also a dictionary mapping data type to the fill value, e.g.
``fill={tv_tensors.Image: 127, tv_tensors.Mask: 0}`` where ``Image`` will be filled with 127 and
``Mask`` will be filled with 0.
padding_mode (str, optional): Type of padding. Should be: constant, edge, reflect or symmetric.
Default is constant.
- constant: pads with a constant value, this value is specified with fill
- edge: pads with the last value at the edge of the image.
- reflect: pads with reflection of image without repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
will result in [3, 2, 1, 2, 3, 4, 3, 2]
- symmetric: pads with reflection of image repeating the last value on the edge.
For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
will result in [2, 1, 1, 2, 3, 4, 4, 3]
"""
_v1_transform_cls = _transforms.RandomCrop
def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
params = super()._extract_params_for_v1_transform()
if not (params["fill"] is None or isinstance(params["fill"], (int, float))):
raise ValueError(f"{type(self).__name__}() can only be scripted for a scalar `fill`, but got {self.fill}.")
padding = self.padding
if padding is not None:
pad_left, pad_right, pad_top, pad_bottom = padding
padding = [pad_left, pad_top, pad_right, pad_bottom]
params["padding"] = padding
return params
def __init__(
self,
size: Union[int, Sequence[int]],
padding: Optional[Union[int, Sequence[int]]] = None,
pad_if_needed: bool = False,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
padding_mode: Literal["constant", "edge", "reflect", "symmetric"] = "constant",
) -> None:
super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
if pad_if_needed or padding is not None:
if padding is not None:
_check_padding_arg(padding)
_check_padding_mode_arg(padding_mode)
self.padding = F._geometry._parse_pad_padding(padding) if padding else None # type: ignore[arg-type]
self.pad_if_needed = pad_if_needed
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
padded_height, padded_width = query_size(flat_inputs)
if self.padding is not None:
pad_left, pad_right, pad_top, pad_bottom = self.padding
padded_height += pad_top + pad_bottom
padded_width += pad_left + pad_right
else:
pad_left = pad_right = pad_top = pad_bottom = 0
cropped_height, cropped_width = self.size
if self.pad_if_needed:
if padded_height < cropped_height:
diff = cropped_height - padded_height
pad_top += diff
pad_bottom += diff
padded_height += 2 * diff
if padded_width < cropped_width:
diff = cropped_width - padded_width
pad_left += diff
pad_right += diff
padded_width += 2 * diff
if padded_height < cropped_height or padded_width < cropped_width:
raise ValueError(
f"Required crop size {(cropped_height, cropped_width)} is larger than "
f"{'padded ' if self.padding is not None else ''}input image size {(padded_height, padded_width)}."
)
# We need a different order here than we have in self.padding since this padding will be parsed again in `F.pad`
padding = [pad_left, pad_top, pad_right, pad_bottom]
needs_pad = any(padding)
needs_vert_crop, top = (
(True, int(torch.randint(0, padded_height - cropped_height + 1, size=())))
if padded_height > cropped_height
else (False, 0)
)
needs_horz_crop, left = (
(True, int(torch.randint(0, padded_width - cropped_width + 1, size=())))
if padded_width > cropped_width
else (False, 0)
)
return dict(
needs_crop=needs_vert_crop or needs_horz_crop,
top=top,
left=left,
height=cropped_height,
width=cropped_width,
needs_pad=needs_pad,
padding=padding,
)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = self._call_kernel(F.pad, inpt, padding=params["padding"], fill=fill, padding_mode=self.padding_mode)
if params["needs_crop"]:
inpt = self._call_kernel(
F.crop, inpt, top=params["top"], left=params["left"], height=params["height"], width=params["width"]
)
return inpt
class RandomPerspective(_RandomApplyTransform):
"""Perform a random perspective transformation of the input with a given probability.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
distortion_scale (float, optional): argument to control the degree of distortion and ranges from 0 to 1.
Default is 0.5.
p (float, optional): probability of the input being transformed. Default is 0.5.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant.
Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively.
Fill value can be also a dictionary mapping data type to the fill value, e.g.
``fill={tv_tensors.Image: 127, tv_tensors.Mask: 0}`` where ``Image`` will be filled with 127 and
``Mask`` will be filled with 0.
"""
_v1_transform_cls = _transforms.RandomPerspective
def __init__(
self,
distortion_scale: float = 0.5,
p: float = 0.5,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
) -> None:
super().__init__(p=p)
if not (0 <= distortion_scale <= 1):
raise ValueError("Argument distortion_scale value should be between 0 and 1")
self.distortion_scale = distortion_scale
self.interpolation = interpolation
self.fill = fill
self._fill = _setup_fill_arg(fill)
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
distortion_scale = self.distortion_scale
half_height = height // 2
half_width = width // 2
bound_height = int(distortion_scale * half_height) + 1
bound_width = int(distortion_scale * half_width) + 1
topleft = [
int(torch.randint(0, bound_width, size=(1,))),
int(torch.randint(0, bound_height, size=(1,))),
]
topright = [
int(torch.randint(width - bound_width, width, size=(1,))),
int(torch.randint(0, bound_height, size=(1,))),
]
botright = [
int(torch.randint(width - bound_width, width, size=(1,))),
int(torch.randint(height - bound_height, height, size=(1,))),
]
botleft = [
int(torch.randint(0, bound_width, size=(1,))),
int(torch.randint(height - bound_height, height, size=(1,))),
]
startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]
endpoints = [topleft, topright, botright, botleft]
perspective_coeffs = _get_perspective_coeffs(startpoints, endpoints)
return dict(coefficients=perspective_coeffs)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
fill = _get_fill(self._fill, type(inpt))
return self._call_kernel(
F.perspective,
inpt,
startpoints=None,
endpoints=None,
fill=fill,
interpolation=self.interpolation,
**params,
)
class ElasticTransform(Transform):
"""Transform the input with elastic transformations.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Given alpha and sigma, it will generate displacement
vectors for all pixels based on random offsets. Alpha controls the strength
and sigma controls the smoothness of the displacements.
The displacements are added to an identity grid and the resulting grid is
used to transform the input.
.. note::
        The implementation for transforming bounding boxes is approximate (not exact).
We construct an approximation of the inverse grid as ``inverse_grid = identity - displacement``.
This is not an exact inverse of the grid used to transform images, i.e. ``grid = identity + displacement``.
Our assumption is that ``displacement * displacement`` is small and can be ignored.
Large displacements would lead to large errors in the approximation.
Applications:
Randomly transforms the morphology of objects in images and produces a
see-through-water-like effect.
Args:
alpha (float or sequence of floats, optional): Magnitude of displacements. Default is 50.0.
sigma (float or sequence of floats, optional): Smoothness of displacements. Default is 5.0.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
fill (number or tuple or dict, optional): Pixel fill value used when the ``padding_mode`` is constant.
Default is 0. If a tuple of length 3, it is used to fill R, G, B channels respectively.
Fill value can be also a dictionary mapping data type to the fill value, e.g.
``fill={tv_tensors.Image: 127, tv_tensors.Mask: 0}`` where ``Image`` will be filled with 127 and
``Mask`` will be filled with 0.
"""
_v1_transform_cls = _transforms.ElasticTransform
def __init__(
self,
alpha: Union[float, Sequence[float]] = 50.0,
sigma: Union[float, Sequence[float]] = 5.0,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = 0,
) -> None:
super().__init__()
self.alpha = _setup_number_or_seq(alpha, "alpha")
self.sigma = _setup_number_or_seq(sigma, "sigma")
self.interpolation = interpolation
self.fill = fill
self._fill = _setup_fill_arg(fill)
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
size = list(query_size(flat_inputs))
dx = torch.rand([1, 1] + size) * 2 - 1
if self.sigma[0] > 0.0:
kx = int(8 * self.sigma[0] + 1)
# if kernel size is even we have to make it odd
if kx % 2 == 0:
kx += 1
dx = self._call_kernel(F.gaussian_blur, dx, [kx, kx], list(self.sigma))
dx = dx * self.alpha[0] / size[0]
dy = torch.rand([1, 1] + size) * 2 - 1
if self.sigma[1] > 0.0:
ky = int(8 * self.sigma[1] + 1)
# if kernel size is even we have to make it odd
if ky % 2 == 0:
ky += 1
dy = self._call_kernel(F.gaussian_blur, dy, [ky, ky], list(self.sigma))
dy = dy * self.alpha[1] / size[1]
displacement = torch.concat([dx, dy], 1).permute([0, 2, 3, 1]) # 1 x H x W x 2
return dict(displacement=displacement)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
fill = _get_fill(self._fill, type(inpt))
return self._call_kernel(
F.elastic,
inpt,
**params,
fill=fill,
interpolation=self.interpolation,
)
class RandomIoUCrop(Transform):
"""Random IoU crop transformation from
`"SSD: Single Shot MultiBox Detector" <https://arxiv.org/abs/1512.02325>`_.
This transformation requires an image or video data and ``tv_tensors.BoundingBoxes`` in the input.
.. warning::
In order to properly remove the bounding boxes below the IoU threshold, `RandomIoUCrop`
must be followed by :class:`~torchvision.transforms.v2.SanitizeBoundingBoxes`, either immediately
after or later in the transforms pipeline.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
        min_scale (float, optional): Minimum factor used to scale the input size.
        max_scale (float, optional): Maximum factor used to scale the input size.
min_aspect_ratio (float, optional): Minimum aspect ratio for the cropped image or video.
max_aspect_ratio (float, optional): Maximum aspect ratio for the cropped image or video.
sampler_options (list of float, optional): List of minimal IoU (Jaccard) overlap between all the boxes and
a cropped image or video. Default, ``None`` which corresponds to ``[0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0]``
trials (int, optional): Number of trials to find a crop for a given value of minimal IoU (Jaccard) overlap.
Default, 40.
"""
def __init__(
self,
min_scale: float = 0.3,
max_scale: float = 1.0,
min_aspect_ratio: float = 0.5,
max_aspect_ratio: float = 2.0,
sampler_options: Optional[List[float]] = None,
trials: int = 40,
):
super().__init__()
# Configuration similar to https://github.com/weiliu89/caffe/blob/ssd/examples/ssd/ssd_coco.py#L89-L174
self.min_scale = min_scale
self.max_scale = max_scale
self.min_aspect_ratio = min_aspect_ratio
self.max_aspect_ratio = max_aspect_ratio
if sampler_options is None:
sampler_options = [0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0]
self.options = sampler_options
self.trials = trials
def check_inputs(self, flat_inputs: List[Any]) -> None:
if not (
has_all(flat_inputs, tv_tensors.BoundingBoxes)
and has_any(flat_inputs, PIL.Image.Image, tv_tensors.Image, is_pure_tensor)
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain tensor or PIL images "
"and bounding boxes. Sample can also contain masks."
)
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
orig_h, orig_w = query_size(flat_inputs)
bboxes = get_bounding_boxes(flat_inputs)
while True:
# sample an option
idx = int(torch.randint(low=0, high=len(self.options), size=(1,)))
min_jaccard_overlap = self.options[idx]
            if min_jaccard_overlap >= 1.0:  # a value of 1 or larger encodes the leave-as-is option
return dict()
for _ in range(self.trials):
# check the aspect ratio limitations
r = self.min_scale + (self.max_scale - self.min_scale) * torch.rand(2)
new_w = int(orig_w * r[0])
new_h = int(orig_h * r[1])
aspect_ratio = new_w / new_h
if not (self.min_aspect_ratio <= aspect_ratio <= self.max_aspect_ratio):
continue
# check for 0 area crops
r = torch.rand(2)
left = int((orig_w - new_w) * r[0])
top = int((orig_h - new_h) * r[1])
right = left + new_w
bottom = top + new_h
if left == right or top == bottom:
continue
# check for any valid boxes with centers within the crop area
xyxy_bboxes = F.convert_bounding_box_format(
bboxes.as_subclass(torch.Tensor),
bboxes.format,
tv_tensors.BoundingBoxFormat.XYXY,
)
cx = 0.5 * (xyxy_bboxes[..., 0] + xyxy_bboxes[..., 2])
cy = 0.5 * (xyxy_bboxes[..., 1] + xyxy_bboxes[..., 3])
is_within_crop_area = (left < cx) & (cx < right) & (top < cy) & (cy < bottom)
if not is_within_crop_area.any():
continue
# check at least 1 box with jaccard limitations
xyxy_bboxes = xyxy_bboxes[is_within_crop_area]
ious = box_iou(
xyxy_bboxes,
torch.tensor([[left, top, right, bottom]], dtype=xyxy_bboxes.dtype, device=xyxy_bboxes.device),
)
if ious.max() < min_jaccard_overlap:
continue
return dict(top=top, left=left, height=new_h, width=new_w, is_within_crop_area=is_within_crop_area)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if len(params) < 1:
return inpt
output = self._call_kernel(
F.crop, inpt, top=params["top"], left=params["left"], height=params["height"], width=params["width"]
)
if isinstance(output, tv_tensors.BoundingBoxes):
            # We "mark" the invalid boxes as degenerate, and they can be
# removed by a later call to SanitizeBoundingBoxes()
output[~params["is_within_crop_area"]] = 0
return output
class ScaleJitter(Transform):
"""Perform Large Scale Jitter on the input according to
`"Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation" <https://arxiv.org/abs/2012.07177>`_.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
        target_size (tuple of int): Target size. This parameter defines the base scale for jittering,
e.g. ``min(target_size[0] / width, target_size[1] / height)``.
scale_range (tuple of float, optional): Minimum and maximum of the scale range. Default, ``(0.1, 2.0)``.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.NEAREST_EXACT``,
``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
antialias (bool, optional): Whether to apply antialiasing.
It only affects **tensors** with bilinear or bicubic modes and it is
ignored otherwise: on PIL images, antialiasing is always applied on
bilinear or bicubic modes; on other modes (for PIL images and
tensors), antialiasing makes no sense and this parameter is ignored.
Possible values are:
- ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
              Other modes aren't affected. This is probably what you want to use.
- ``False``: will not apply antialiasing for tensors on any mode. PIL
images are still antialiased on bilinear or bicubic modes, because
              PIL does not support disabling antialiasing.
- ``None``: equivalent to ``False`` for tensors and ``True`` for
PIL images. This value exists for legacy reasons and you probably
don't want to use it unless you really know what you are doing.
The default value changed from ``None`` to ``True`` in
v0.17, for the PIL and Tensor backends to be consistent.
"""
def __init__(
self,
target_size: Tuple[int, int],
scale_range: Tuple[float, float] = (0.1, 2.0),
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
antialias: Optional[bool] = True,
):
super().__init__()
self.target_size = target_size
self.scale_range = scale_range
self.interpolation = interpolation
self.antialias = antialias
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
orig_height, orig_width = query_size(flat_inputs)
scale = self.scale_range[0] + torch.rand(1) * (self.scale_range[1] - self.scale_range[0])
r = min(self.target_size[1] / orig_height, self.target_size[0] / orig_width) * scale
new_width = int(orig_width * r)
new_height = int(orig_height * r)
return dict(size=(new_height, new_width))
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(
F.resize, inpt, size=params["size"], interpolation=self.interpolation, antialias=self.antialias
)
class RandomShortestSize(Transform):
"""Randomly resize the input.
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
min_size (int or sequence of int): Minimum spatial size. Single integer value or a sequence of integer values.
max_size (int, optional): Maximum spatial size. Default, None.
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.NEAREST_EXACT``,
``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
antialias (bool, optional): Whether to apply antialiasing.
It only affects **tensors** with bilinear or bicubic modes and it is
ignored otherwise: on PIL images, antialiasing is always applied on
bilinear or bicubic modes; on other modes (for PIL images and
tensors), antialiasing makes no sense and this parameter is ignored.
Possible values are:
- ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
              Other modes aren't affected. This is probably what you want to use.
- ``False``: will not apply antialiasing for tensors on any mode. PIL
images are still antialiased on bilinear or bicubic modes, because
              PIL does not support disabling antialiasing.
- ``None``: equivalent to ``False`` for tensors and ``True`` for
PIL images. This value exists for legacy reasons and you probably
don't want to use it unless you really know what you are doing.
The default value changed from ``None`` to ``True`` in
v0.17, for the PIL and Tensor backends to be consistent.
"""
def __init__(
self,
min_size: Union[List[int], Tuple[int], int],
max_size: Optional[int] = None,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
antialias: Optional[bool] = True,
):
super().__init__()
self.min_size = [min_size] if isinstance(min_size, int) else list(min_size)
self.max_size = max_size
self.interpolation = interpolation
self.antialias = antialias
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
orig_height, orig_width = query_size(flat_inputs)
min_size = self.min_size[int(torch.randint(len(self.min_size), ()))]
r = min_size / min(orig_height, orig_width)
if self.max_size is not None:
r = min(r, self.max_size / max(orig_height, orig_width))
new_width = int(orig_width * r)
new_height = int(orig_height * r)
return dict(size=(new_height, new_width))
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(
F.resize, inpt, size=params["size"], interpolation=self.interpolation, antialias=self.antialias
)
class RandomResize(Transform):
"""Randomly resize the input.
This transformation can be used together with ``RandomCrop`` as data augmentations to train
models on image segmentation task.
Output spatial size is randomly sampled from the interval ``[min_size, max_size]``:
.. code-block:: python
size = uniform_sample(min_size, max_size)
output_width = size
output_height = size
If the input is a :class:`torch.Tensor` or a ``TVTensor`` (e.g. :class:`~torchvision.tv_tensors.Image`,
:class:`~torchvision.tv_tensors.Video`, :class:`~torchvision.tv_tensors.BoundingBoxes` etc.)
it can have arbitrary number of leading batch dimensions. For example,
the image can have ``[..., C, H, W]`` shape. A bounding box can have ``[..., 4]`` shape.
Args:
min_size (int): Minimum output size for random sampling
max_size (int): Maximum output size for random sampling
interpolation (InterpolationMode, optional): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.NEAREST_EXACT``,
``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
antialias (bool, optional): Whether to apply antialiasing.
It only affects **tensors** with bilinear or bicubic modes and it is
ignored otherwise: on PIL images, antialiasing is always applied on
bilinear or bicubic modes; on other modes (for PIL images and
tensors), antialiasing makes no sense and this parameter is ignored.
Possible values are:
- ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
              Other modes aren't affected. This is probably what you want to use.
- ``False``: will not apply antialiasing for tensors on any mode. PIL
images are still antialiased on bilinear or bicubic modes, because
              PIL does not support disabling antialiasing.
- ``None``: equivalent to ``False`` for tensors and ``True`` for
PIL images. This value exists for legacy reasons and you probably
don't want to use it unless you really know what you are doing.
The default value changed from ``None`` to ``True`` in
v0.17, for the PIL and Tensor backends to be consistent.
"""
def __init__(
self,
min_size: int,
max_size: int,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
antialias: Optional[bool] = True,
) -> None:
super().__init__()
self.min_size = min_size
self.max_size = max_size
self.interpolation = interpolation
self.antialias = antialias
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
size = int(torch.randint(self.min_size, self.max_size, ()))
return dict(size=[size])
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(
F.resize, inpt, params["size"], interpolation=self.interpolation, antialias=self.antialias
)
```
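As a quick, hedged illustration of the geometry transforms defined above (an editorial sketch, not part of the source file), the following composes a few of them on a dummy image tensor via the public ``torchvision.transforms.v2`` namespace; the image size and parameter values are arbitrary assumptions:
```py
# Illustrative sketch only: compose a few of the geometry transforms above.
import torch
from torchvision.transforms import v2

img = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)

augment = v2.Compose(
    [
        v2.RandomZoomOut(fill=127, side_range=(1.0, 2.0), p=0.5),
        v2.RandomRotation(degrees=(-15, 15)),
        v2.RandomAffine(degrees=0, translate=(0.1, 0.1), scale=(0.9, 1.1)),
        v2.ElasticTransform(alpha=25.0, sigma=5.0),
        v2.RandomCrop(size=(200, 200), pad_if_needed=True),  # fixes the final spatial size
    ]
)

out = augment(img)
print(out.shape)  # torch.Size([3, 200, 200])
```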
|
==========================================================================================================================
SOURCE CODE FILE: _meta.py
LINES: 1
SIZE: 1.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\_meta.py
ENCODING: utf-8
```py
from typing import Any, Dict, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
    """Convert bounding box coordinates to the given ``format``, e.g. from "CXCYWH" to "XYXY".
Args:
format (str or tv_tensors.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.tv_tensors.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def __init__(self, format: Union[str, tv_tensors.BoundingBoxFormat]) -> None:
super().__init__()
self.format = format
def transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.convert_bounding_box_format(inpt, new_format=self.format) # type: ignore[return-value, arg-type]
class ClampBoundingBoxes(Transform):
"""Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.clamp_bounding_boxes(inpt) # type: ignore[return-value]
```
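A small, hedged sketch (editorial addition, not part of the source file) of the two transforms above applied to a set of boxes; the coordinates and canvas size are made up for illustration:
```py
# Illustrative sketch only: clamp boxes to their canvas, then convert the format.
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2

boxes = tv_tensors.BoundingBoxes(
    torch.tensor([[10.0, 20.0, 50.0, 80.0], [-5.0, 0.0, 120.0, 90.0]]),
    format="XYXY",
    canvas_size=(100, 100),
)

pipeline = v2.Compose(
    [
        v2.ClampBoundingBoxes(),                       # clips the second box to the 100x100 canvas
        v2.ConvertBoundingBoxFormat(format="CXCYWH"),  # XYXY -> CXCYWH
    ]
)
print(pipeline(boxes))
```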
|
==========================================================================================================================
SOURCE CODE FILE: _misc.py
LINES: 1
SIZE: 19.09 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\_misc.py
ENCODING: utf-8
```py
import warnings
from typing import Any, Callable, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision import transforms as _transforms, tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from ._utils import _parse_labels_getter, _setup_number_or_seq, _setup_size, get_bounding_boxes, has_any, is_pure_tensor
# TODO: do we want/need to expose this?
class Identity(Transform):
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return inpt
class Lambda(Transform):
"""Apply a user-defined function as a transform.
This transform does not support torchscript.
Args:
lambd (function): Lambda/function to be used for transform.
"""
_transformed_types = (object,)
def __init__(self, lambd: Callable[[Any], Any], *types: Type):
super().__init__()
self.lambd = lambd
self.types = types or self._transformed_types
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if isinstance(inpt, self.types):
return self.lambd(inpt)
else:
return inpt
def extra_repr(self) -> str:
extras = []
name = getattr(self.lambd, "__name__", None)
if name:
extras.append(name)
extras.append(f"types={[type.__name__ for type in self.types]}")
return ", ".join(extras)
class LinearTransformation(Transform):
"""Transform a tensor image or video with a square transformation matrix and a mean_vector computed offline.
This transform does not support PIL Image.
Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and
subtract mean_vector from it which is then followed by computing the dot
product with the transformation matrix and then reshaping the tensor to its
original shape.
Applications:
        whitening transformation: Suppose X is a column vector of zero-centered data.
Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),
perform SVD on this matrix and pass it as transformation_matrix.
Args:
transformation_matrix (Tensor): tensor [D x D], D = C x H x W
mean_vector (Tensor): tensor [D], D = C x H x W
"""
_v1_transform_cls = _transforms.LinearTransformation
_transformed_types = (is_pure_tensor, tv_tensors.Image, tv_tensors.Video)
def __init__(self, transformation_matrix: torch.Tensor, mean_vector: torch.Tensor):
super().__init__()
if transformation_matrix.size(0) != transformation_matrix.size(1):
raise ValueError(
"transformation_matrix should be square. Got "
f"{tuple(transformation_matrix.size())} rectangular matrix."
)
if mean_vector.size(0) != transformation_matrix.size(0):
raise ValueError(
f"mean_vector should have the same length {mean_vector.size(0)}"
f" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]"
)
if transformation_matrix.device != mean_vector.device:
raise ValueError(
f"Input tensors should be on the same device. Got {transformation_matrix.device} and {mean_vector.device}"
)
if transformation_matrix.dtype != mean_vector.dtype:
raise ValueError(
f"Input tensors should have the same dtype. Got {transformation_matrix.dtype} and {mean_vector.dtype}"
)
self.transformation_matrix = transformation_matrix
self.mean_vector = mean_vector
def check_inputs(self, sample: Any) -> Any:
if has_any(sample, PIL.Image.Image):
raise TypeError(f"{type(self).__name__}() does not support PIL images.")
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
shape = inpt.shape
n = shape[-3] * shape[-2] * shape[-1]
if n != self.transformation_matrix.shape[0]:
            raise ValueError(
                "Input tensor and transformation matrix have incompatible shape. "
+ f"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != "
+ f"{self.transformation_matrix.shape[0]}"
)
if inpt.device.type != self.mean_vector.device.type:
raise ValueError(
"Input tensor should be on the same device as transformation matrix and mean vector. "
f"Got {inpt.device} vs {self.mean_vector.device}"
)
flat_inpt = inpt.reshape(-1, n) - self.mean_vector
transformation_matrix = self.transformation_matrix.to(flat_inpt.dtype)
output = torch.mm(flat_inpt, transformation_matrix)
output = output.reshape(shape)
if isinstance(inpt, (tv_tensors.Image, tv_tensors.Video)):
output = tv_tensors.wrap(output, like=inpt)
return output
class Normalize(Transform):
"""Normalize a tensor image or video with mean and standard deviation.
This transform does not support PIL Image.
Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``
channels, this transform will normalize each channel of the input
``torch.*Tensor`` i.e.,
``output[channel] = (input[channel] - mean[channel]) / std[channel]``
.. note::
This transform acts out of place, i.e., it does not mutate the input tensor.
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation in-place.
"""
_v1_transform_cls = _transforms.Normalize
def __init__(self, mean: Sequence[float], std: Sequence[float], inplace: bool = False):
super().__init__()
self.mean = list(mean)
self.std = list(std)
self.inplace = inplace
def check_inputs(self, sample: Any) -> Any:
if has_any(sample, PIL.Image.Image):
raise TypeError(f"{type(self).__name__}() does not support PIL images.")
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.normalize, inpt, mean=self.mean, std=self.std, inplace=self.inplace)
class GaussianBlur(Transform):
"""Blurs image with randomly chosen Gaussian blur kernel.
The convolution will be using reflection padding corresponding to the kernel size, to maintain the input shape.
If the input is a Tensor, it is expected
to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions.
Args:
kernel_size (int or sequence): Size of the Gaussian kernel.
sigma (float or tuple of float (min, max)): Standard deviation to be used for
creating kernel to perform blurring. If float, sigma is fixed. If it is tuple
of float (min, max), sigma is chosen uniformly at random to lie in the
given range.
"""
_v1_transform_cls = _transforms.GaussianBlur
def __init__(
self, kernel_size: Union[int, Sequence[int]], sigma: Union[int, float, Sequence[float]] = (0.1, 2.0)
) -> None:
super().__init__()
self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
for ks in self.kernel_size:
if ks <= 0 or ks % 2 == 0:
raise ValueError("Kernel size value should be an odd and positive number.")
self.sigma = _setup_number_or_seq(sigma, "sigma")
if not 0.0 < self.sigma[0] <= self.sigma[1]:
raise ValueError(f"sigma values should be positive and of the form (min, max). Got {self.sigma}")
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
sigma = torch.empty(1).uniform_(self.sigma[0], self.sigma[1]).item()
return dict(sigma=[sigma, sigma])
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.gaussian_blur, inpt, self.kernel_size, **params)
class GaussianNoise(Transform):
"""Add gaussian noise to images or videos.
The input tensor is expected to be in [..., 1 or 3, H, W] format,
where ... means it can have an arbitrary number of leading dimensions.
Each image or frame in a batch will be transformed independently i.e. the
noise added to each image will be different.
The input tensor is also expected to be of float dtype in ``[0, 1]``.
This transform does not support PIL images.
Args:
mean (float): Mean of the sampled normal distribution. Default is 0.
sigma (float): Standard deviation of the sampled normal distribution. Default is 0.1.
clip (bool, optional): Whether to clip the values in ``[0, 1]`` after adding noise. Default is True.
"""
def __init__(self, mean: float = 0.0, sigma: float = 0.1, clip=True) -> None:
super().__init__()
self.mean = mean
self.sigma = sigma
self.clip = clip
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.gaussian_noise, inpt, mean=self.mean, sigma=self.sigma, clip=self.clip)
class ToDtype(Transform):
"""Converts the input to a specific dtype, optionally scaling the values for images or videos.
.. note::
``ToDtype(dtype, scale=True)`` is the recommended replacement for ``ConvertImageDtype(dtype)``.
Args:
dtype (``torch.dtype`` or dict of ``TVTensor`` -> ``torch.dtype``): The dtype to convert to.
If a ``torch.dtype`` is passed, e.g. ``torch.float32``, only images and videos will be converted
to that dtype: this is for compatibility with :class:`~torchvision.transforms.v2.ConvertImageDtype`.
A dict can be passed to specify per-tv_tensor conversions, e.g.
``dtype={tv_tensors.Image: torch.float32, tv_tensors.Mask: torch.int64, "others":None}``. The "others"
key can be used as a catch-all for any other tv_tensor type, and ``None`` means no conversion.
scale (bool, optional): Whether to scale the values for images or videos. See :ref:`range_and_dtype`.
Default: ``False``.
"""
_transformed_types = (torch.Tensor,)
def __init__(
self, dtype: Union[torch.dtype, Dict[Union[Type, str], Optional[torch.dtype]]], scale: bool = False
) -> None:
super().__init__()
if not isinstance(dtype, (dict, torch.dtype)):
raise ValueError(f"dtype must be a dict or a torch.dtype, got {type(dtype)} instead")
if (
isinstance(dtype, dict)
and torch.Tensor in dtype
and any(cls in dtype for cls in [tv_tensors.Image, tv_tensors.Video])
):
warnings.warn(
"Got `dtype` values for `torch.Tensor` and either `tv_tensors.Image` or `tv_tensors.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `tv_tensors.Image` or `tv_tensors.Video` is present in the input."
)
self.dtype = dtype
self.scale = scale
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if isinstance(self.dtype, torch.dtype):
# For consistency / BC with ConvertImageDtype, we only care about images or videos when dtype
# is a simple torch.dtype
if not is_pure_tensor(inpt) and not isinstance(inpt, (tv_tensors.Image, tv_tensors.Video)):
return inpt
dtype: Optional[torch.dtype] = self.dtype
elif type(inpt) in self.dtype:
dtype = self.dtype[type(inpt)]
elif "others" in self.dtype:
dtype = self.dtype["others"]
else:
raise ValueError(
f"No dtype was specified for type {type(inpt)}. "
"If you only need to convert the dtype of images or videos, you can just pass e.g. dtype=torch.float32. "
"If you're passing a dict as dtype, "
'you can use "others" as a catch-all key '
'e.g. dtype={tv_tensors.Mask: torch.int64, "others": None} to pass-through the rest of the inputs.'
)
supports_scaling = is_pure_tensor(inpt) or isinstance(inpt, (tv_tensors.Image, tv_tensors.Video))
if dtype is None:
if self.scale and supports_scaling:
warnings.warn(
"scale was set to True but no dtype was specified for images or videos: no scaling will be done."
)
return inpt
return self._call_kernel(F.to_dtype, inpt, dtype=dtype, scale=self.scale)
class ConvertImageDtype(Transform):
"""[DEPRECATED] Use ``v2.ToDtype(dtype, scale=True)`` instead.
Convert input image to the given ``dtype`` and scale the values accordingly.
.. warning::
Consider using ``ToDtype(dtype, scale=True)`` instead. See :class:`~torchvision.transforms.v2.ToDtype`.
This function does not support PIL Image.
Args:
dtype (torch.dtype): Desired data type of the output
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
_v1_transform_cls = _transforms.ConvertImageDtype
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.to_dtype, inpt, dtype=self.dtype, scale=True)
class SanitizeBoundingBoxes(Transform):
"""Remove degenerate/invalid bounding boxes and their corresponding labels and masks.
This transform removes bounding boxes and their associated labels/masks that:
- are below a given ``min_size`` or ``min_area``: by default this also removes degenerate boxes that have e.g. X2 <= X1.
- have any coordinate outside of their corresponding image. You may want to
call :class:`~torchvision.transforms.v2.ClampBoundingBoxes` first to avoid undesired removals.
It can also sanitize other tensors like the "iscrowd" or "area" properties from COCO
(see ``labels_getter`` parameter).
It is recommended to call it at the end of a pipeline, before passing the
input to the models. It is critical to call this transform if
:class:`~torchvision.transforms.v2.RandomIoUCrop` was called.
If you want to be extra careful, you may call it after all transforms that
may modify bounding boxes but once at the end should be enough in most
cases.
Args:
min_size (float, optional): The size below which bounding boxes are removed. Default is 1.
min_area (float, optional): The area below which bounding boxes are removed. Default is 1.
labels_getter (callable or str or None, optional): indicates how to identify the labels in the input
(or anything else that needs to be sanitized along with the bounding boxes).
By default, this will try to find a "labels" key in the input (case-insensitive), if
the input is a dict or it is a tuple whose second element is a dict.
This heuristic should work well with a lot of datasets, including the built-in torchvision datasets.
It can also be a callable that takes the same input as the transform, and returns either:
- A single tensor (the labels)
- A tuple/list of tensors, each of which will be subject to the same sanitization as the bounding boxes.
This is useful to sanitize multiple tensors like the labels, and the "iscrowd" or "area" properties
from COCO.
If ``labels_getter`` is None then only bounding boxes are sanitized.
"""
def __init__(
self,
min_size: float = 1.0,
min_area: float = 1.0,
labels_getter: Union[Callable[[Any], Any], str, None] = "default",
) -> None:
super().__init__()
if min_size < 1:
raise ValueError(f"min_size must be >= 1, got {min_size}.")
self.min_size = min_size
if min_area < 1:
raise ValueError(f"min_area must be >= 1, got {min_area}.")
self.min_area = min_area
self.labels_getter = labels_getter
self._labels_getter = _parse_labels_getter(labels_getter)
def forward(self, *inputs: Any) -> Any:
inputs = inputs if len(inputs) > 1 else inputs[0]
labels = self._labels_getter(inputs)
if labels is not None:
msg = "The labels in the input to forward() must be a tensor or None, got {type} instead."
if isinstance(labels, torch.Tensor):
labels = (labels,)
elif isinstance(labels, (tuple, list)):
for entry in labels:
if not isinstance(entry, torch.Tensor):
# TODO: we don't need to enforce tensors, just that entries are indexable as t[bool_mask]
raise ValueError(msg.format(type=type(entry)))
else:
raise ValueError(msg.format(type=type(labels)))
flat_inputs, spec = tree_flatten(inputs)
boxes = get_bounding_boxes(flat_inputs)
if labels is not None:
for label in labels:
if boxes.shape[0] != label.shape[0]:
                    raise ValueError(
                        f"Number of boxes (shape={boxes.shape}) must match the number of labels "
                        f"(shape={label.shape})."
)
valid = F._misc._get_sanitize_bounding_boxes_mask(
boxes,
format=boxes.format,
canvas_size=boxes.canvas_size,
min_size=self.min_size,
min_area=self.min_area,
)
params = dict(valid=valid, labels=labels)
flat_outputs = [self.transform(inpt, params) for inpt in flat_inputs]
return tree_unflatten(flat_outputs, spec)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
is_label = params["labels"] is not None and any(inpt is label for label in params["labels"])
is_bounding_boxes_or_mask = isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask))
if not (is_label or is_bounding_boxes_or_mask):
return inpt
output = inpt[params["valid"]]
if is_label:
return output
else:
return tv_tensors.wrap(output, like=inpt)
```
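Tying a few of the transforms above together, here is a hedged, illustrative sketch (not part of the source file) of a detection-style sample being converted, normalized and sanitized; as the ``SanitizeBoundingBoxes`` docstring notes, this kind of cleanup typically follows crops such as ``RandomIoUCrop``. The sample contents and parameter values are assumptions:
```py
# Illustrative sketch only: ToDtype + Normalize + SanitizeBoundingBoxes on a dict sample.
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2

sample = {
    "image": tv_tensors.Image(torch.randint(0, 256, (3, 100, 100), dtype=torch.uint8)),
    "boxes": tv_tensors.BoundingBoxes(
        torch.tensor([[10.0, 10.0, 40.0, 40.0], [60.0, 60.0, 61.0, 61.0]]),
        format="XYXY",
        canvas_size=(100, 100),
    ),
    "labels": torch.tensor([1, 2]),
}

pipeline = v2.Compose(
    [
        v2.ToDtype(torch.float32, scale=True),  # uint8 [0, 255] -> float32 [0, 1], image only
        v2.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        v2.SanitizeBoundingBoxes(min_size=2),   # should drop the 1x1 box and its label
    ]
)
out = pipeline(sample)
print(out["boxes"].shape, out["labels"])  # expected: torch.Size([1, 4]) tensor([1])
```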
|
==============================================================================================================================
SOURCE CODE FILE: _temporal.py
LINES: 1
SIZE: 0.91 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\_temporal.py
ENCODING: utf-8
```py
from typing import Any, Dict
import torch
from torchvision.transforms.v2 import functional as F, Transform
class UniformTemporalSubsample(Transform):
"""Uniformly subsample ``num_samples`` indices from the temporal dimension of the video.
Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` denotes the temporal dimension.
When ``num_samples`` is larger than the size of temporal dimension of the video, it
will sample frames based on nearest neighbor interpolation.
Args:
num_samples (int): The number of equispaced samples to be selected
"""
_transformed_types = (torch.Tensor,)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.uniform_temporal_subsample, inpt, self.num_samples)
```
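A brief, hedged sketch of the transform above on a dummy video tensor (editorial addition; the shapes are illustrative only):
```py
# Illustrative sketch only: uniformly subsample 8 frames from a 32-frame clip.
import torch
from torchvision.transforms import v2

video = torch.randint(0, 256, (32, 3, 8, 8), dtype=torch.uint8)  # [T, C, H, W]
subsample = v2.UniformTemporalSubsample(num_samples=8)
print(subsample(video).shape)  # torch.Size([8, 3, 8, 8])
```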
|
===============================================================================================================================
SOURCE CODE FILE: _transform.py
LINES: 1
SIZE: 9.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\_transform.py
ENCODING: utf-8
```py
from __future__ import annotations
import enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import PIL.Image
import torch
from torch import nn
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision import tv_tensors
from torchvision.transforms.v2._utils import check_type, has_any, is_pure_tensor
from torchvision.utils import _log_api_usage_once
from .functional._utils import _get_kernel
class Transform(nn.Module):
"""Base class to implement your own v2 transforms.
See :ref:`sphx_glr_auto_examples_transforms_plot_custom_transforms.py` for
more details.
"""
# Class attribute defining transformed types. Other types are passed-through without any transformation
# We support both Types and callables that are able to do further checks on the type of the input.
_transformed_types: Tuple[Union[Type, Callable[[Any], bool]], ...] = (torch.Tensor, PIL.Image.Image)
def __init__(self) -> None:
super().__init__()
_log_api_usage_once(self)
def check_inputs(self, flat_inputs: List[Any]) -> None:
pass
# When v2 was introduced, this method was private and called
# `_get_params()`. Now it's publicly exposed as `make_params()`. It cannot
# be exposed as `get_params()` because there is already a `get_params()`
    # method for v2 transforms: it's the v1's `get_params()` that we have to
# keep in order to guarantee 100% BC with v1. (It's defined in
# __init_subclass__ below).
def make_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
"""Method to override for custom transforms.
See :ref:`sphx_glr_auto_examples_transforms_plot_custom_transforms.py`"""
return dict()
def _call_kernel(self, functional: Callable, inpt: Any, *args: Any, **kwargs: Any) -> Any:
kernel = _get_kernel(functional, type(inpt), allow_passthrough=True)
return kernel(inpt, *args, **kwargs)
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
"""Method to override for custom transforms.
See :ref:`sphx_glr_auto_examples_transforms_plot_custom_transforms.py`"""
raise NotImplementedError
def forward(self, *inputs: Any) -> Any:
"""Do not override this! Use ``transform()`` instead."""
flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])
self.check_inputs(flat_inputs)
needs_transform_list = self._needs_transform_list(flat_inputs)
params = self.make_params(
[inpt for (inpt, needs_transform) in zip(flat_inputs, needs_transform_list) if needs_transform]
)
flat_outputs = [
self.transform(inpt, params) if needs_transform else inpt
for (inpt, needs_transform) in zip(flat_inputs, needs_transform_list)
]
return tree_unflatten(flat_outputs, spec)
def _needs_transform_list(self, flat_inputs: List[Any]) -> List[bool]:
# Below is a heuristic on how to deal with pure tensor inputs:
# 1. Pure tensors, i.e. tensors that are not a tv_tensor, are passed through if there is an explicit image
# (`tv_tensors.Image` or `PIL.Image.Image`) or video (`tv_tensors.Video`) in the sample.
# 2. If there is no explicit image or video in the sample, only the first encountered pure tensor is
# transformed as image, while the rest is passed through. The order is defined by the returned `flat_inputs`
# of `tree_flatten`, which recurses depth-first through the input.
#
# This heuristic stems from two requirements:
# 1. We need to keep BC for single input pure tensors and treat them as images.
# 2. We don't want to treat all pure tensors as images, because some datasets like `CelebA` or `Widerface`
# return supplemental numerical data as tensors that cannot be transformed as images.
#
# The heuristic should work well for most people in practice. The only case where it doesn't is if someone
# tries to transform multiple pure tensors at the same time, expecting them all to be treated as images.
# However, this case wasn't supported by transforms v1 either, so there is no BC concern.
needs_transform_list = []
transform_pure_tensor = not has_any(flat_inputs, tv_tensors.Image, tv_tensors.Video, PIL.Image.Image)
for inpt in flat_inputs:
needs_transform = True
if not check_type(inpt, self._transformed_types):
needs_transform = False
elif is_pure_tensor(inpt):
if transform_pure_tensor:
transform_pure_tensor = False
else:
needs_transform = False
needs_transform_list.append(needs_transform)
return needs_transform_list
def extra_repr(self) -> str:
extra = []
for name, value in self.__dict__.items():
if name.startswith("_") or name == "training":
continue
if not isinstance(value, (bool, int, float, str, tuple, list, enum.Enum)):
continue
extra.append(f"{name}={value}")
return ", ".join(extra)
# This attribute should be set on all transforms that have a v1 equivalent. Doing so enables two things:
# 1. In case the v1 transform has a static `get_params` method, it will also be available under the same name on
# the v2 transform. See `__init_subclass__` for details.
# 2. The v2 transform will be JIT scriptable. See `_extract_params_for_v1_transform` and `__prepare_scriptable__`
# for details.
_v1_transform_cls: Optional[Type[nn.Module]] = None
def __init_subclass__(cls) -> None:
# Since `get_params` is a `@staticmethod`, we have to bind it to the class itself rather than to an instance.
# This method is called after subclassing has happened, i.e. `cls` is the subclass, e.g. `Resize`.
if cls._v1_transform_cls is not None and hasattr(cls._v1_transform_cls, "get_params"):
cls.get_params = staticmethod(cls._v1_transform_cls.get_params) # type: ignore[attr-defined]
def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
# This method is called by `__prepare_scriptable__` to instantiate the equivalent v1 transform from the current
# v2 transform instance. It extracts all available public attributes that are specific to that transform and
# not `nn.Module` in general.
# Overwrite this method on the v2 transform class if the above is not sufficient. For example, this might happen
        # if the v2 transform introduced new parameters that are not supported by the v1 transform.
common_attrs = nn.Module().__dict__.keys()
return {
attr: value
for attr, value in self.__dict__.items()
if not attr.startswith("_") and attr not in common_attrs
}
def __prepare_scriptable__(self) -> nn.Module:
# This method is called early on when `torch.jit.script`'ing an `nn.Module` instance. If it succeeds, the return
# value is used for scripting over the original object that should have been scripted. Since the v1 transforms
# are JIT scriptable, and we made sure that for single image inputs v1 and v2 are equivalent, we just return the
# equivalent v1 transform here. This of course only makes transforms v2 JIT scriptable as long as transforms v1
# is around.
if self._v1_transform_cls is None:
raise RuntimeError(
f"Transform {type(self).__name__} cannot be JIT scripted. "
"torchscript is only supported for backward compatibility with transforms "
"which are already in torchvision.transforms. "
"For torchscript support (on tensors only), you can use the functional API instead."
)
return self._v1_transform_cls(**self._extract_params_for_v1_transform())
class _RandomApplyTransform(Transform):
def __init__(self, p: float = 0.5) -> None:
if not (0.0 <= p <= 1.0):
raise ValueError("`p` should be a floating point value in the interval [0.0, 1.0].")
super().__init__()
self.p = p
def forward(self, *inputs: Any) -> Any:
# We need to almost duplicate `Transform.forward()` here since we always want to check the inputs, but return
# early afterwards in case the random check triggers. The same result could be achieved by calling
# `super().forward()` after the random check, but that would call `self.check_inputs` twice.
inputs = inputs if len(inputs) > 1 else inputs[0]
flat_inputs, spec = tree_flatten(inputs)
self.check_inputs(flat_inputs)
if torch.rand(1) >= self.p:
return inputs
needs_transform_list = self._needs_transform_list(flat_inputs)
params = self.make_params(
[inpt for (inpt, needs_transform) in zip(flat_inputs, needs_transform_list) if needs_transform]
)
flat_outputs = [
self.transform(inpt, params) if needs_transform else inpt
for (inpt, needs_transform) in zip(flat_inputs, needs_transform_list)
]
return tree_unflatten(flat_outputs, spec)
```
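The hooks above (`make_params`, `transform`, `_call_kernel`, and the shared `forward`) are all that a custom v2 transform needs. Below is a minimal sketch of how they fit together; the `ShuffleChannels` class, its fixed three-channel assumption, and the sample data are illustrative only and not part of torchvision.

```py
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2
from torchvision.transforms.v2 import functional as F


class ShuffleChannels(v2.Transform):
    """Hypothetical transform: applies one random channel permutation to the whole sample."""

    def make_params(self, flat_inputs):
        # Called once per forward() with only the inputs that will be transformed,
        # so the same permutation is shared by every image/video in the sample.
        return dict(permutation=torch.randperm(3).tolist())

    def transform(self, inpt, params):
        # _call_kernel dispatches to the kernel registered for type(inpt) and passes
        # unsupported types (e.g. bounding boxes) through unchanged.
        return self._call_kernel(F.permute_channels, inpt, permutation=params["permutation"])


img = tv_tensors.Image(torch.rand(3, 16, 16))
boxes = tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 8, 8]]), format="XYXY", canvas_size=(16, 16))
out_img, out_boxes = ShuffleChannels()(img, boxes)
assert torch.equal(out_boxes, boxes)  # no permute_channels kernel for boxes, so they pass through
```

`forward` flattens the two positional inputs, asks `make_params` for a single parameter dict, and then calls `transform` once per input selected by the heuristic in `_needs_transform_list`.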
|
=====================================================================================================================================
SOURCE CODE FILE: _type_conversion.py
LINES: 1
SIZE: 2.87 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\_type_conversion.py
ENCODING: utf-8
```py
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""Convert a PIL Image to a tensor of the same type - this does not scale values.
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.tv_tensors.Image`
; this does not scale values.
This transform does not support torchscript.
"""
_transformed_types = (is_pure_tensor, PIL.Image.Image, np.ndarray)
def transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> tv_tensors.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""Convert a tensor or an ndarray to PIL Image
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
            - If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_pure_tensor, tv_tensors.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""Convert all TVTensors to pure tensors, removing associated metadata (if any).
This doesn't scale or change the values, only the type.
"""
_transformed_types = (tv_tensors.TVTensor,)
def transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
```
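A short usage sketch (not part of the file above) showing how these conversion transforms relate; the shapes and values are arbitrary.

```py
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms import v2

pil_img = PIL.Image.new("RGB", (4, 4))

t = v2.PILToTensor()(pil_img)                             # PIL -> uint8 tensor (C, H, W), values untouched
assert t.dtype == torch.uint8 and t.shape == (3, 4, 4)

img = v2.ToImage()(np.zeros((4, 4, 3), dtype=np.uint8))   # ndarray/tensor/PIL -> tv_tensors.Image
assert isinstance(img, tv_tensors.Image)

back = v2.ToPILImage()(img)                               # back to PIL, mode inferred since mode=None
assert isinstance(back, PIL.Image.Image)

plain = v2.ToPureTensor()(img)                            # strip the TVTensor subclass and its metadata
assert type(plain) is torch.Tensor
```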
|
===========================================================================================================================
SOURCE CODE FILE: _utils.py
LINES: 1
SIZE: 8.89 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\_utils.py
ENCODING: utf-8
```py
from __future__ import annotations
import collections.abc
import numbers
from contextlib import suppress
from typing import Any, Callable, Dict, List, Literal, Sequence, Tuple, Type, Union
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision._utils import sequence_to_str
from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size # noqa: F401
from torchvision.transforms.v2.functional import get_dimensions, get_size, is_pure_tensor
from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT
def _setup_number_or_seq(arg: Union[int, float, Sequence[Union[int, float]]], name: str) -> Sequence[float]:
if not isinstance(arg, (int, float, Sequence)):
raise TypeError(f"{name} should be a number or a sequence of numbers. Got {type(arg)}")
if isinstance(arg, Sequence) and len(arg) not in (1, 2):
raise ValueError(f"If {name} is a sequence its length should be 1 or 2. Got {len(arg)}")
if isinstance(arg, Sequence):
for element in arg:
if not isinstance(element, (int, float)):
raise ValueError(f"{name} should be a sequence of numbers. Got {type(element)}")
if isinstance(arg, (int, float)):
arg = [float(arg), float(arg)]
elif isinstance(arg, Sequence):
if len(arg) == 1:
arg = [float(arg[0]), float(arg[0])]
else:
arg = [float(arg[0]), float(arg[1])]
return arg
def _check_fill_arg(fill: Union[_FillType, Dict[Union[Type, str], _FillType]]) -> None:
if isinstance(fill, dict):
for value in fill.values():
_check_fill_arg(value)
else:
if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate fill arg, only Numbers, tuples, lists and dicts are allowed.")
def _convert_fill_arg(fill: _FillType) -> _FillTypeJIT:
# Fill = 0 is not equivalent to None, https://github.com/pytorch/vision/issues/6517
# So, we can't reassign fill to 0
# if fill is None:
# fill = 0
if fill is None:
return fill
if not isinstance(fill, (int, float)):
fill = [float(v) for v in list(fill)]
return fill # type: ignore[return-value]
def _setup_fill_arg(fill: Union[_FillType, Dict[Union[Type, str], _FillType]]) -> Dict[Union[Type, str], _FillTypeJIT]:
_check_fill_arg(fill)
if isinstance(fill, dict):
for k, v in fill.items():
fill[k] = _convert_fill_arg(v)
return fill # type: ignore[return-value]
else:
return {"others": _convert_fill_arg(fill)}
def _get_fill(fill_dict, inpt_type):
if inpt_type in fill_dict:
return fill_dict[inpt_type]
elif "others" in fill_dict:
return fill_dict["others"]
else:
RuntimeError("This should never happen, please open an issue on the torchvision repo if you hit this.")
def _check_padding_arg(padding: Union[int, Sequence[int]]) -> None:
err_msg = f"Padding must be an int or a 1, 2, or 4 element of tuple or list, got {padding}."
if isinstance(padding, (tuple, list)):
if len(padding) not in [1, 2, 4] or not all(isinstance(p, int) for p in padding):
raise ValueError(err_msg)
elif not isinstance(padding, int):
raise ValueError(err_msg)
# TODO: let's use torchvision._utils.StrEnum to have the best of both worlds (strings and enums)
# https://github.com/pytorch/vision/issues/6250
def _check_padding_mode_arg(padding_mode: Literal["constant", "edge", "reflect", "symmetric"]) -> None:
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
def _find_labels_default_heuristic(inputs: Any) -> torch.Tensor:
"""
This heuristic covers three cases:
    1. The input is a tuple or list whose second item is a labels tensor. This happens for already batched
       classification inputs for MixUp and CutMix (typically after the DataLoader).
2. The input is a tuple or list whose second item is a dictionary that contains the labels tensor
under a label-like (see below) key. This happens for the inputs of detection models.
3. The input is a dictionary that is structured as the one from 2.
What is "label-like" key? We first search for an case-insensitive match of 'labels' inside the keys of the
dictionary. This is the name our detection models expect. If we can't find that, we look for a case-insensitive
match of the term 'label' anywhere inside the key, i.e. 'FooLaBeLBar'. If we can't find that either, the dictionary
contains no "label-like" key.
"""
if isinstance(inputs, (tuple, list)):
inputs = inputs[1]
# MixUp, CutMix
if is_pure_tensor(inputs):
return inputs
if not isinstance(inputs, collections.abc.Mapping):
raise ValueError(
f"When using the default labels_getter, the input passed to forward must be a dictionary or a two-tuple "
f"whose second item is a dictionary or a tensor, but got {inputs} instead."
)
candidate_key = None
with suppress(StopIteration):
candidate_key = next(key for key in inputs.keys() if key.lower() == "labels")
if candidate_key is None:
with suppress(StopIteration):
candidate_key = next(key for key in inputs.keys() if "label" in key.lower())
if candidate_key is None:
raise ValueError(
"Could not infer where the labels are in the sample. Try passing a callable as the labels_getter parameter?"
"If there are no labels in the sample by design, pass labels_getter=None."
)
return inputs[candidate_key]
def _parse_labels_getter(labels_getter: Union[str, Callable[[Any], Any], None]) -> Callable[[Any], Any]:
if labels_getter == "default":
return _find_labels_default_heuristic
elif callable(labels_getter):
return labels_getter
elif labels_getter is None:
return lambda _: None
else:
raise ValueError(f"labels_getter should either be 'default', a callable, or None, but got {labels_getter}.")
def get_bounding_boxes(flat_inputs: List[Any]) -> tv_tensors.BoundingBoxes:
"""Return the Bounding Boxes in the input.
Assumes only one ``BoundingBoxes`` object is present.
"""
# This assumes there is only one bbox per sample as per the general convention
try:
return next(inpt for inpt in flat_inputs if isinstance(inpt, tv_tensors.BoundingBoxes))
except StopIteration:
raise ValueError("No bounding boxes were found in the sample")
def query_chw(flat_inputs: List[Any]) -> Tuple[int, int, int]:
"""Return Channel, Height, and Width."""
chws = {
tuple(get_dimensions(inpt))
for inpt in flat_inputs
if check_type(inpt, (is_pure_tensor, tv_tensors.Image, PIL.Image.Image, tv_tensors.Video))
}
if not chws:
raise TypeError("No image or video was found in the sample")
elif len(chws) > 1:
raise ValueError(f"Found multiple CxHxW dimensions in the sample: {sequence_to_str(sorted(chws))}")
c, h, w = chws.pop()
return c, h, w
def query_size(flat_inputs: List[Any]) -> Tuple[int, int]:
"""Return Height and Width."""
sizes = {
tuple(get_size(inpt))
for inpt in flat_inputs
if check_type(
inpt,
(
is_pure_tensor,
tv_tensors.Image,
PIL.Image.Image,
tv_tensors.Video,
tv_tensors.Mask,
tv_tensors.BoundingBoxes,
),
)
}
if not sizes:
raise TypeError("No image, video, mask or bounding box was found in the sample")
elif len(sizes) > 1:
raise ValueError(f"Found multiple HxW dimensions in the sample: {sequence_to_str(sorted(sizes))}")
h, w = sizes.pop()
return h, w
def check_type(obj: Any, types_or_checks: Tuple[Union[Type, Callable[[Any], bool]], ...]) -> bool:
for type_or_check in types_or_checks:
if isinstance(obj, type_or_check) if isinstance(type_or_check, type) else type_or_check(obj):
return True
return False
def has_any(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for inpt in flat_inputs:
if check_type(inpt, types_or_checks):
return True
return False
def has_all(flat_inputs: List[Any], *types_or_checks: Union[Type, Callable[[Any], bool]]) -> bool:
for type_or_check in types_or_checks:
for inpt in flat_inputs:
if isinstance(inpt, type_or_check) if isinstance(type_or_check, type) else type_or_check(inpt):
break
else:
return False
return True
```
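A small sketch of how the query/check helpers above behave on a flattened sample. They live in a private module, so the import path below simply mirrors the file path of this listing.

```py
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2._utils import check_type, has_any, query_size
from torchvision.transforms.v2.functional import is_pure_tensor

flat_sample = [
    tv_tensors.Image(torch.rand(3, 10, 12)),
    tv_tensors.BoundingBoxes(torch.tensor([[0, 0, 5, 5]]), format="XYXY", canvas_size=(10, 12)),
    {"image_id": 7},  # non-tensor metadata is simply skipped by the helpers
]

assert query_size(flat_sample) == (10, 12)                      # H and W agree across image and boxes
assert has_any(flat_sample, tv_tensors.Image, PIL.Image.Image)  # an explicit image is present
assert check_type(torch.rand(3, 10, 12), (is_pure_tensor,))     # callables act as type checks too
```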
|
========================================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 3.61 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\functional\__init__.py
ENCODING: utf-8
```py
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import erase, erase_image, erase_video, jpeg, jpeg_image, jpeg_video
from ._color import (
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
grayscale_to_rgb,
grayscale_to_rgb_image,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
gaussian_noise,
gaussian_noise_image,
gaussian_noise_video,
normalize,
normalize_image,
normalize_video,
sanitize_bounding_boxes,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
```
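The functional namespace assembled above can be used directly on plain tensors as well as tv_tensors; a quick sketch with arbitrary sizes:

```py
import torch
from torchvision.transforms.v2 import functional as F

img = torch.randint(0, 256, (3, 32, 48), dtype=torch.uint8)

resized = F.resize(img, size=[16, 24])   # geometry kernel
flipped = F.horizontal_flip(resized)     # F.hflip is a pure alias of this
gray = F.rgb_to_grayscale(flipped)       # color kernel
assert gray.shape == (1, 16, 24) and F.hflip is F.horizontal_flip
```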
|
========================================================================================================================================
SOURCE CODE FILE: _augment.py
LINES: 1
SIZE: 3.50 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\functional\_augment.py
ENCODING: utf-8
```py
import io
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.io import decode_jpeg, encode_jpeg
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def erase(
inpt: torch.Tensor,
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomErase` for details."""
if torch.jit.is_scripting():
return erase_image(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
_log_api_usage_once(erase)
kernel = _get_kernel(erase, type(inpt))
return kernel(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
@_register_kernel_internal(erase, torch.Tensor)
@_register_kernel_internal(erase, tv_tensors.Image)
def erase_image(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@_register_kernel_internal(erase, PIL.Image.Image)
def _erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(erase, tv_tensors.Video)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def jpeg(image: torch.Tensor, quality: int) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.JPEG` for details."""
if torch.jit.is_scripting():
return jpeg_image(image, quality=quality)
_log_api_usage_once(jpeg)
kernel = _get_kernel(jpeg, type(image))
return kernel(image, quality=quality)
@_register_kernel_internal(jpeg, torch.Tensor)
@_register_kernel_internal(jpeg, tv_tensors.Image)
def jpeg_image(image: torch.Tensor, quality: int) -> torch.Tensor:
original_shape = image.shape
image = image.view((-1,) + image.shape[-3:])
if image.shape[0] == 0: # degenerate
return image.reshape(original_shape).clone()
images = []
for i in range(image.shape[0]):
# isinstance checks are needed for torchscript.
encoded_image = encode_jpeg(image[i], quality=quality)
assert isinstance(encoded_image, torch.Tensor)
decoded_image = decode_jpeg(encoded_image)
assert isinstance(decoded_image, torch.Tensor)
images.append(decoded_image)
images = torch.stack(images, dim=0).view(original_shape)
return images
@_register_kernel_internal(jpeg, tv_tensors.Video)
def jpeg_video(video: torch.Tensor, quality: int) -> torch.Tensor:
return jpeg_image(video, quality=quality)
@_register_kernel_internal(jpeg, PIL.Image.Image)
def _jpeg_image_pil(image: PIL.Image.Image, quality: int) -> PIL.Image.Image:
raw_jpeg = io.BytesIO()
image.save(raw_jpeg, format="JPEG", quality=quality)
# we need to copy since PIL.Image.open() will return PIL.JpegImagePlugin.JpegImageFile
# which is a sub-class of PIL.Image.Image. this will fail check_transform() test.
return PIL.Image.open(raw_jpeg).copy()
```
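A sketch of the two dispatchers defined above applied to a plain uint8 tensor; `F.jpeg` assumes torchvision was built with JPEG support (`encode_jpeg`/`decode_jpeg`).

```py
import torch
from torchvision.transforms.v2 import functional as F

img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)

# Erase an 8x8 patch starting at row i=4, column j=4 and fill it with zeros.
erased = F.erase(img, i=4, j=4, h=8, w=8, v=torch.zeros(3, 8, 8, dtype=img.dtype))
assert torch.all(erased[:, 4:12, 4:12] == 0)

# Round-trip through JPEG at low quality to simulate compression artifacts.
degraded = F.jpeg(img, quality=10)
assert degraded.shape == img.shape and degraded.dtype == torch.uint8
```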
|
======================================================================================================================================
SOURCE CODE FILE: _color.py
LINES: 1
SIZE: 30.41 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\functional\_color.py
ENCODING: utf-8
```py
from typing import List
import PIL.Image
import torch
from torch.nn.functional import conv2d
from torchvision import tv_tensors
from torchvision.transforms import _functional_pil as _FP
from torchvision.transforms._functional_tensor import _max_value
from torchvision.utils import _log_api_usage_once
from ._misc import _num_value_bits, to_dtype_image
from ._type_conversion import pil_to_tensor, to_pil_image
from ._utils import _get_kernel, _register_kernel_internal
def rgb_to_grayscale(inpt: torch.Tensor, num_output_channels: int = 1) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.Grayscale` for details."""
if torch.jit.is_scripting():
return rgb_to_grayscale_image(inpt, num_output_channels=num_output_channels)
_log_api_usage_once(rgb_to_grayscale)
kernel = _get_kernel(rgb_to_grayscale, type(inpt))
return kernel(inpt, num_output_channels=num_output_channels)
# `to_grayscale` actually predates `rgb_to_grayscale` in v1, but only handles PIL images. Since `rgb_to_grayscale` is a
# superset in terms of functionality and has the same signature, we alias here to avoid disruption.
to_grayscale = rgb_to_grayscale
def _rgb_to_grayscale_image(
image: torch.Tensor, num_output_channels: int = 1, preserve_dtype: bool = True
) -> torch.Tensor:
# TODO: Maybe move the validation that num_output_channels is 1 or 3 to this function instead of callers.
if image.shape[-3] == 1 and num_output_channels == 1:
return image.clone()
if image.shape[-3] == 1 and num_output_channels == 3:
s = [1] * len(image.shape)
s[-3] = 3
return image.repeat(s)
r, g, b = image.unbind(dim=-3)
l_img = r.mul(0.2989).add_(g, alpha=0.587).add_(b, alpha=0.114)
l_img = l_img.unsqueeze(dim=-3)
if preserve_dtype:
l_img = l_img.to(image.dtype)
if num_output_channels == 3:
l_img = l_img.expand(image.shape)
return l_img
@_register_kernel_internal(rgb_to_grayscale, torch.Tensor)
@_register_kernel_internal(rgb_to_grayscale, tv_tensors.Image)
def rgb_to_grayscale_image(image: torch.Tensor, num_output_channels: int = 1) -> torch.Tensor:
if num_output_channels not in (1, 3):
raise ValueError(f"num_output_channels must be 1 or 3, got {num_output_channels}.")
return _rgb_to_grayscale_image(image, num_output_channels=num_output_channels, preserve_dtype=True)
@_register_kernel_internal(rgb_to_grayscale, PIL.Image.Image)
def _rgb_to_grayscale_image_pil(image: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
if num_output_channels not in (1, 3):
raise ValueError(f"num_output_channels must be 1 or 3, got {num_output_channels}.")
return _FP.to_grayscale(image, num_output_channels=num_output_channels)
def grayscale_to_rgb(inpt: torch.Tensor) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RGB` for details."""
if torch.jit.is_scripting():
return grayscale_to_rgb_image(inpt)
_log_api_usage_once(grayscale_to_rgb)
kernel = _get_kernel(grayscale_to_rgb, type(inpt))
return kernel(inpt)
@_register_kernel_internal(grayscale_to_rgb, torch.Tensor)
@_register_kernel_internal(grayscale_to_rgb, tv_tensors.Image)
def grayscale_to_rgb_image(image: torch.Tensor) -> torch.Tensor:
if image.shape[-3] >= 3:
# Image already has RGB channels. We don't need to do anything.
return image
# rgb_to_grayscale can be used to add channels so we reuse that function.
return _rgb_to_grayscale_image(image, num_output_channels=3, preserve_dtype=True)
@_register_kernel_internal(grayscale_to_rgb, PIL.Image.Image)
def grayscale_to_rgb_image_pil(image: PIL.Image.Image) -> PIL.Image.Image:
return image.convert(mode="RGB")
def _blend(image1: torch.Tensor, image2: torch.Tensor, ratio: float) -> torch.Tensor:
ratio = float(ratio)
fp = image1.is_floating_point()
bound = _max_value(image1.dtype)
output = image1.mul(ratio).add_(image2, alpha=(1.0 - ratio)).clamp_(0, bound)
return output if fp else output.to(image1.dtype)
def adjust_brightness(inpt: torch.Tensor, brightness_factor: float) -> torch.Tensor:
"""Adjust brightness."""
if torch.jit.is_scripting():
return adjust_brightness_image(inpt, brightness_factor=brightness_factor)
_log_api_usage_once(adjust_brightness)
kernel = _get_kernel(adjust_brightness, type(inpt))
return kernel(inpt, brightness_factor=brightness_factor)
@_register_kernel_internal(adjust_brightness, torch.Tensor)
@_register_kernel_internal(adjust_brightness, tv_tensors.Image)
def adjust_brightness_image(image: torch.Tensor, brightness_factor: float) -> torch.Tensor:
if brightness_factor < 0:
raise ValueError(f"brightness_factor ({brightness_factor}) is not non-negative.")
c = image.shape[-3]
if c not in [1, 3]:
raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}")
fp = image.is_floating_point()
bound = _max_value(image.dtype)
output = image.mul(brightness_factor).clamp_(0, bound)
return output if fp else output.to(image.dtype)
@_register_kernel_internal(adjust_brightness, PIL.Image.Image)
def _adjust_brightness_image_pil(image: PIL.Image.Image, brightness_factor: float) -> PIL.Image.Image:
return _FP.adjust_brightness(image, brightness_factor=brightness_factor)
@_register_kernel_internal(adjust_brightness, tv_tensors.Video)
def adjust_brightness_video(video: torch.Tensor, brightness_factor: float) -> torch.Tensor:
return adjust_brightness_image(video, brightness_factor=brightness_factor)
def adjust_saturation(inpt: torch.Tensor, saturation_factor: float) -> torch.Tensor:
"""Adjust saturation."""
if torch.jit.is_scripting():
return adjust_saturation_image(inpt, saturation_factor=saturation_factor)
_log_api_usage_once(adjust_saturation)
kernel = _get_kernel(adjust_saturation, type(inpt))
return kernel(inpt, saturation_factor=saturation_factor)
@_register_kernel_internal(adjust_saturation, torch.Tensor)
@_register_kernel_internal(adjust_saturation, tv_tensors.Image)
def adjust_saturation_image(image: torch.Tensor, saturation_factor: float) -> torch.Tensor:
if saturation_factor < 0:
raise ValueError(f"saturation_factor ({saturation_factor}) is not non-negative.")
c = image.shape[-3]
if c not in [1, 3]:
raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}")
if c == 1: # Match PIL behaviour
return image
grayscale_image = _rgb_to_grayscale_image(image, num_output_channels=1, preserve_dtype=False)
if not image.is_floating_point():
grayscale_image = grayscale_image.floor_()
return _blend(image, grayscale_image, saturation_factor)
_adjust_saturation_image_pil = _register_kernel_internal(adjust_saturation, PIL.Image.Image)(_FP.adjust_saturation)
@_register_kernel_internal(adjust_saturation, tv_tensors.Video)
def adjust_saturation_video(video: torch.Tensor, saturation_factor: float) -> torch.Tensor:
return adjust_saturation_image(video, saturation_factor=saturation_factor)
def adjust_contrast(inpt: torch.Tensor, contrast_factor: float) -> torch.Tensor:
"""See :class:`~torchvision.transforms.RandomAutocontrast`"""
if torch.jit.is_scripting():
return adjust_contrast_image(inpt, contrast_factor=contrast_factor)
_log_api_usage_once(adjust_contrast)
kernel = _get_kernel(adjust_contrast, type(inpt))
return kernel(inpt, contrast_factor=contrast_factor)
@_register_kernel_internal(adjust_contrast, torch.Tensor)
@_register_kernel_internal(adjust_contrast, tv_tensors.Image)
def adjust_contrast_image(image: torch.Tensor, contrast_factor: float) -> torch.Tensor:
if contrast_factor < 0:
raise ValueError(f"contrast_factor ({contrast_factor}) is not non-negative.")
c = image.shape[-3]
if c not in [1, 3]:
raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}")
fp = image.is_floating_point()
if c == 3:
grayscale_image = _rgb_to_grayscale_image(image, num_output_channels=1, preserve_dtype=False)
if not fp:
grayscale_image = grayscale_image.floor_()
else:
grayscale_image = image if fp else image.to(torch.float32)
mean = torch.mean(grayscale_image, dim=(-3, -2, -1), keepdim=True)
return _blend(image, mean, contrast_factor)
_adjust_contrast_image_pil = _register_kernel_internal(adjust_contrast, PIL.Image.Image)(_FP.adjust_contrast)
@_register_kernel_internal(adjust_contrast, tv_tensors.Video)
def adjust_contrast_video(video: torch.Tensor, contrast_factor: float) -> torch.Tensor:
return adjust_contrast_image(video, contrast_factor=contrast_factor)
def adjust_sharpness(inpt: torch.Tensor, sharpness_factor: float) -> torch.Tensor:
"""See :class:`~torchvision.transforms.RandomAdjustSharpness`"""
if torch.jit.is_scripting():
return adjust_sharpness_image(inpt, sharpness_factor=sharpness_factor)
_log_api_usage_once(adjust_sharpness)
kernel = _get_kernel(adjust_sharpness, type(inpt))
return kernel(inpt, sharpness_factor=sharpness_factor)
@_register_kernel_internal(adjust_sharpness, torch.Tensor)
@_register_kernel_internal(adjust_sharpness, tv_tensors.Image)
def adjust_sharpness_image(image: torch.Tensor, sharpness_factor: float) -> torch.Tensor:
num_channels, height, width = image.shape[-3:]
if num_channels not in (1, 3):
raise TypeError(f"Input image tensor can have 1 or 3 channels, but found {num_channels}")
if sharpness_factor < 0:
raise ValueError(f"sharpness_factor ({sharpness_factor}) is not non-negative.")
if image.numel() == 0 or height <= 2 or width <= 2:
return image
bound = _max_value(image.dtype)
fp = image.is_floating_point()
shape = image.shape
if image.ndim > 4:
image = image.reshape(-1, num_channels, height, width)
needs_unsquash = True
else:
needs_unsquash = False
# The following is a normalized 3x3 kernel with 1s in the edges and a 5 in the middle.
kernel_dtype = image.dtype if fp else torch.float32
a, b = 1.0 / 13.0, 5.0 / 13.0
kernel = torch.tensor([[a, a, a], [a, b, a], [a, a, a]], dtype=kernel_dtype, device=image.device)
kernel = kernel.expand(num_channels, 1, 3, 3)
    # We copy and cast at the same time to avoid modifying the original data
output = image.to(dtype=kernel_dtype, copy=True)
blurred_degenerate = conv2d(output, kernel, groups=num_channels)
if not fp:
# it is better to round before cast
blurred_degenerate = blurred_degenerate.round_()
# Create a view on the underlying output while pointing at the same data. We do this to avoid indexing twice.
view = output[..., 1:-1, 1:-1]
# We speed up blending by minimizing flops and doing in-place. The 2 blend options are mathematically equivalent:
# x+(1-r)*(y-x) = x + (1-r)*y - (1-r)*x = x*r + y*(1-r)
view.add_(blurred_degenerate.sub_(view), alpha=(1.0 - sharpness_factor))
# The actual data of output have been modified by the above. We only need to clamp and cast now.
output = output.clamp_(0, bound)
if not fp:
output = output.to(image.dtype)
if needs_unsquash:
output = output.reshape(shape)
return output
_adjust_sharpness_image_pil = _register_kernel_internal(adjust_sharpness, PIL.Image.Image)(_FP.adjust_sharpness)
@_register_kernel_internal(adjust_sharpness, tv_tensors.Video)
def adjust_sharpness_video(video: torch.Tensor, sharpness_factor: float) -> torch.Tensor:
return adjust_sharpness_image(video, sharpness_factor=sharpness_factor)
def adjust_hue(inpt: torch.Tensor, hue_factor: float) -> torch.Tensor:
"""Adjust hue"""
if torch.jit.is_scripting():
return adjust_hue_image(inpt, hue_factor=hue_factor)
_log_api_usage_once(adjust_hue)
kernel = _get_kernel(adjust_hue, type(inpt))
return kernel(inpt, hue_factor=hue_factor)
def _rgb_to_hsv(image: torch.Tensor) -> torch.Tensor:
r, g, _ = image.unbind(dim=-3)
# Implementation is based on
# https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/src/libImaging/Convert.c#L330
minc, maxc = torch.aminmax(image, dim=-3)
# The algorithm erases S and H channel where `maxc = minc`. This avoids NaN
# from happening in the results, because
# + S channel has division by `maxc`, which is zero only if `maxc = minc`
# + H channel has division by `(maxc - minc)`.
#
    # Instead of overwriting NaNs afterwards, we just prevent them from occurring in the first place, so
    # we never have to deal with them, e.g. if the values were ever saved in a buffer for backprop. Even
    # if backprop is never supported here, preventing the NaNs doesn't hurt.
eqc = maxc == minc
channels_range = maxc - minc
    # Since `eqc => channels_range = 0`, replacing the denominator with 1 when `eqc` holds is fine.
ones = torch.ones_like(maxc)
s = channels_range / torch.where(eqc, ones, maxc)
# Note that `eqc => maxc = minc = r = g = b`. So the following calculation
# of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it
# would not matter what values `rc`, `gc`, and `bc` have here, and thus
    # replacing the denominator with 1 when `eqc` holds is fine.
channels_range_divisor = torch.where(eqc, ones, channels_range).unsqueeze_(dim=-3)
rc, gc, bc = ((maxc.unsqueeze(dim=-3) - image) / channels_range_divisor).unbind(dim=-3)
mask_maxc_neq_r = maxc != r
mask_maxc_eq_g = maxc == g
hg = rc.add(2.0).sub_(bc).mul_(mask_maxc_eq_g & mask_maxc_neq_r)
hr = bc.sub_(gc).mul_(~mask_maxc_neq_r)
hb = gc.add_(4.0).sub_(rc).mul_(mask_maxc_neq_r.logical_and_(mask_maxc_eq_g.logical_not_()))
h = hr.add_(hg).add_(hb)
h = h.mul_(1.0 / 6.0).add_(1.0).fmod_(1.0)
return torch.stack((h, s, maxc), dim=-3)
def _hsv_to_rgb(img: torch.Tensor) -> torch.Tensor:
h, s, v = img.unbind(dim=-3)
h6 = h.mul(6)
i = torch.floor(h6)
f = h6.sub_(i)
i = i.to(dtype=torch.int32)
sxf = s * f
one_minus_s = 1.0 - s
q = (1.0 - sxf).mul_(v).clamp_(0.0, 1.0)
t = sxf.add_(one_minus_s).mul_(v).clamp_(0.0, 1.0)
p = one_minus_s.mul_(v).clamp_(0.0, 1.0)
i.remainder_(6)
vpqt = torch.stack((v, p, q, t), dim=-3)
# vpqt -> rgb mapping based on i
select = torch.tensor([[0, 2, 1, 1, 3, 0], [3, 0, 0, 2, 1, 1], [1, 1, 3, 0, 0, 2]], dtype=torch.long)
select = select.to(device=img.device, non_blocking=True)
select = select[:, i]
if select.ndim > 3:
# if input.shape is (B, ..., C, H, W) then
# select.shape is (C, B, ..., H, W)
# thus we move C axis to get (B, ..., C, H, W)
select = select.moveaxis(0, -3)
return vpqt.gather(-3, select)
@_register_kernel_internal(adjust_hue, torch.Tensor)
@_register_kernel_internal(adjust_hue, tv_tensors.Image)
def adjust_hue_image(image: torch.Tensor, hue_factor: float) -> torch.Tensor:
if not (-0.5 <= hue_factor <= 0.5):
raise ValueError(f"hue_factor ({hue_factor}) is not in [-0.5, 0.5].")
c = image.shape[-3]
if c not in [1, 3]:
raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}")
if c == 1: # Match PIL behaviour
return image
if image.numel() == 0:
# exit earlier on empty images
return image
orig_dtype = image.dtype
image = to_dtype_image(image, torch.float32, scale=True)
image = _rgb_to_hsv(image)
h, s, v = image.unbind(dim=-3)
h.add_(hue_factor).remainder_(1.0)
image = torch.stack((h, s, v), dim=-3)
image_hue_adj = _hsv_to_rgb(image)
return to_dtype_image(image_hue_adj, orig_dtype, scale=True)
_adjust_hue_image_pil = _register_kernel_internal(adjust_hue, PIL.Image.Image)(_FP.adjust_hue)
@_register_kernel_internal(adjust_hue, tv_tensors.Video)
def adjust_hue_video(video: torch.Tensor, hue_factor: float) -> torch.Tensor:
return adjust_hue_image(video, hue_factor=hue_factor)
def adjust_gamma(inpt: torch.Tensor, gamma: float, gain: float = 1) -> torch.Tensor:
"""Adjust gamma."""
if torch.jit.is_scripting():
return adjust_gamma_image(inpt, gamma=gamma, gain=gain)
_log_api_usage_once(adjust_gamma)
kernel = _get_kernel(adjust_gamma, type(inpt))
return kernel(inpt, gamma=gamma, gain=gain)
@_register_kernel_internal(adjust_gamma, torch.Tensor)
@_register_kernel_internal(adjust_gamma, tv_tensors.Image)
def adjust_gamma_image(image: torch.Tensor, gamma: float, gain: float = 1.0) -> torch.Tensor:
if gamma < 0:
raise ValueError("Gamma should be a non-negative real number")
# The input image is either assumed to be at [0, 1] scale (if float) or is converted to that scale (if integer).
# Since the gamma is non-negative, the output remains at [0, 1] scale.
if not torch.is_floating_point(image):
output = to_dtype_image(image, torch.float32, scale=True).pow_(gamma)
else:
output = image.pow(gamma)
if gain != 1.0:
# The clamp operation is needed only if multiplication is performed. It's only when gain != 1, that the scale
# of the output can go beyond [0, 1].
output = output.mul_(gain).clamp_(0.0, 1.0)
return to_dtype_image(output, image.dtype, scale=True)
_adjust_gamma_image_pil = _register_kernel_internal(adjust_gamma, PIL.Image.Image)(_FP.adjust_gamma)
@_register_kernel_internal(adjust_gamma, tv_tensors.Video)
def adjust_gamma_video(video: torch.Tensor, gamma: float, gain: float = 1) -> torch.Tensor:
return adjust_gamma_image(video, gamma=gamma, gain=gain)
def posterize(inpt: torch.Tensor, bits: int) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomPosterize` for details."""
if torch.jit.is_scripting():
return posterize_image(inpt, bits=bits)
_log_api_usage_once(posterize)
kernel = _get_kernel(posterize, type(inpt))
return kernel(inpt, bits=bits)
@_register_kernel_internal(posterize, torch.Tensor)
@_register_kernel_internal(posterize, tv_tensors.Image)
def posterize_image(image: torch.Tensor, bits: int) -> torch.Tensor:
if not isinstance(bits, int) or not 0 <= bits <= 8:
raise TypeError(f"bits must be a positive integer in the range [0, 8], got {bits} instead.")
if image.is_floating_point():
levels = 1 << bits
return image.mul(levels).floor_().clamp_(0, levels - 1).mul_(1.0 / levels)
else:
num_value_bits = _num_value_bits(image.dtype)
if bits >= num_value_bits:
return image
mask = ((1 << bits) - 1) << (num_value_bits - bits)
return image & mask
_posterize_image_pil = _register_kernel_internal(posterize, PIL.Image.Image)(_FP.posterize)
@_register_kernel_internal(posterize, tv_tensors.Video)
def posterize_video(video: torch.Tensor, bits: int) -> torch.Tensor:
return posterize_image(video, bits=bits)
def solarize(inpt: torch.Tensor, threshold: float) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomSolarize` for details."""
if torch.jit.is_scripting():
return solarize_image(inpt, threshold=threshold)
_log_api_usage_once(solarize)
kernel = _get_kernel(solarize, type(inpt))
return kernel(inpt, threshold=threshold)
@_register_kernel_internal(solarize, torch.Tensor)
@_register_kernel_internal(solarize, tv_tensors.Image)
def solarize_image(image: torch.Tensor, threshold: float) -> torch.Tensor:
if threshold > _max_value(image.dtype):
raise TypeError(f"Threshold should be less or equal the maximum value of the dtype, but got {threshold}")
return torch.where(image >= threshold, invert_image(image), image)
_solarize_image_pil = _register_kernel_internal(solarize, PIL.Image.Image)(_FP.solarize)
@_register_kernel_internal(solarize, tv_tensors.Video)
def solarize_video(video: torch.Tensor, threshold: float) -> torch.Tensor:
return solarize_image(video, threshold=threshold)
def autocontrast(inpt: torch.Tensor) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomAutocontrast` for details."""
if torch.jit.is_scripting():
return autocontrast_image(inpt)
_log_api_usage_once(autocontrast)
kernel = _get_kernel(autocontrast, type(inpt))
return kernel(inpt)
@_register_kernel_internal(autocontrast, torch.Tensor)
@_register_kernel_internal(autocontrast, tv_tensors.Image)
def autocontrast_image(image: torch.Tensor) -> torch.Tensor:
c = image.shape[-3]
if c not in [1, 3]:
raise TypeError(f"Input image tensor permitted channel values are 1 or 3, but found {c}")
if image.numel() == 0:
# exit earlier on empty images
return image
bound = _max_value(image.dtype)
fp = image.is_floating_point()
float_image = image if fp else image.to(torch.float32)
minimum = float_image.amin(dim=(-2, -1), keepdim=True)
maximum = float_image.amax(dim=(-2, -1), keepdim=True)
eq_idxs = maximum == minimum
inv_scale = maximum.sub_(minimum).mul_(1.0 / bound)
minimum[eq_idxs] = 0.0
inv_scale[eq_idxs] = 1.0
if fp:
diff = float_image.sub(minimum)
else:
diff = float_image.sub_(minimum)
return diff.div_(inv_scale).clamp_(0, bound).to(image.dtype)
_autocontrast_image_pil = _register_kernel_internal(autocontrast, PIL.Image.Image)(_FP.autocontrast)
@_register_kernel_internal(autocontrast, tv_tensors.Video)
def autocontrast_video(video: torch.Tensor) -> torch.Tensor:
return autocontrast_image(video)
def equalize(inpt: torch.Tensor) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomEqualize` for details."""
if torch.jit.is_scripting():
return equalize_image(inpt)
_log_api_usage_once(equalize)
kernel = _get_kernel(equalize, type(inpt))
return kernel(inpt)
@_register_kernel_internal(equalize, torch.Tensor)
@_register_kernel_internal(equalize, tv_tensors.Image)
def equalize_image(image: torch.Tensor) -> torch.Tensor:
if image.numel() == 0:
return image
# 1. The algorithm below can easily be extended to support arbitrary integer dtypes. However, the histogram that
    # would need to be computed would have at least `torch.iinfo(dtype).max + 1` values. That is perfectly fine for
# `torch.int8`, `torch.uint8`, and `torch.int16`, at least questionable for `torch.int32` and completely
# unfeasible for `torch.int64`.
# 2. Floating point inputs need to be binned for this algorithm. Apart from converting them to an integer dtype, we
# could also use PyTorch's builtin histogram functionality. However, that has its own set of issues: in addition
# to being slow in general, PyTorch's implementation also doesn't support batches. In total, that makes it slower
# and more complicated to implement than a simple conversion and a fast histogram implementation for integers.
# Since we need to convert in most cases anyway and out of the acceptable dtypes mentioned in 1. `torch.uint8` is
# by far the most common, we choose it as base.
output_dtype = image.dtype
image = to_dtype_image(image, torch.uint8, scale=True)
# The histogram is computed by using the flattened image as index. For example, a pixel value of 127 in the image
# corresponds to adding 1 to index 127 in the histogram.
batch_shape = image.shape[:-2]
flat_image = image.flatten(start_dim=-2).to(torch.long)
hist = flat_image.new_zeros(batch_shape + (256,), dtype=torch.int32)
hist.scatter_add_(dim=-1, index=flat_image, src=hist.new_ones(1).expand_as(flat_image))
cum_hist = hist.cumsum(dim=-1)
# The simplest form of lookup-table (LUT) that also achieves histogram equalization is
# `lut = cum_hist / flat_image.shape[-1] * 255`
# However, PIL uses a more elaborate scheme:
# https://github.com/python-pillow/Pillow/blob/eb59cb61d5239ee69cbbf12709a0c6fd7314e6d7/src/PIL/ImageOps.py#L368-L385
# `lut = ((cum_hist + num_non_max_pixels // (2 * 255)) // num_non_max_pixels) * 255`
# The last non-zero element in the histogram is the first element in the cumulative histogram with the maximum
# value. Thus, the "max" in `num_non_max_pixels` does not refer to 255 as the maximum value of uint8 images, but
# rather the maximum value in the image, which might be or not be 255.
index = cum_hist.argmax(dim=-1)
num_non_max_pixels = flat_image.shape[-1] - hist.gather(dim=-1, index=index.unsqueeze_(-1))
    # This is a performance optimization that saves us one multiplication later. With this, the LUT computation simplifies
# to `lut = (cum_hist + step // 2) // step` and thus saving the final multiplication by 255 while keeping the
# division count the same. PIL uses the variable name `step` for this, so we keep that for easier comparison.
step = num_non_max_pixels.div_(255, rounding_mode="floor")
# Although it looks like we could return early if we find `step == 0` like PIL does, that is unfortunately not as
# easy due to our support for batched images. We can only return early if `(step == 0).all()` holds. If it doesn't,
# we have to go through the computation below anyway. Since `step == 0` is an edge case anyway, it makes no sense to
# pay the runtime cost for checking it every time.
valid_equalization = step.ne(0).unsqueeze_(-1)
# `lut[k]` is computed with `cum_hist[k-1]` with `lut[0] == (step // 2) // step == 0`. Thus, we perform the
# computation only for `lut[1:]` with `cum_hist[:-1]` and add `lut[0] == 0` afterwards.
cum_hist = cum_hist[..., :-1]
(
cum_hist.add_(step // 2)
        # We need the `clamp_`(min=1) call here to avoid division by zero, which fails for integer dtypes. This has no
        # effect on the result of this kernel, since images inside the batch with `step == 0` are returned as is
        # instead of their equalized version.
.div_(step.clamp_(min=1), rounding_mode="floor")
        # We need the `clamp_` call here since PIL's LUT computation scheme can produce values outside the valid value
# range of uint8 images
.clamp_(0, 255)
)
lut = cum_hist.to(torch.uint8)
lut = torch.cat([lut.new_zeros(1).expand(batch_shape + (1,)), lut], dim=-1)
equalized_image = lut.gather(dim=-1, index=flat_image).view_as(image)
output = torch.where(valid_equalization, equalized_image, image)
return to_dtype_image(output, output_dtype, scale=True)
_equalize_image_pil = _register_kernel_internal(equalize, PIL.Image.Image)(_FP.equalize)
@_register_kernel_internal(equalize, tv_tensors.Video)
def equalize_video(video: torch.Tensor) -> torch.Tensor:
return equalize_image(video)
def invert(inpt: torch.Tensor) -> torch.Tensor:
"""See :func:`~torchvision.transforms.v2.RandomInvert`."""
if torch.jit.is_scripting():
return invert_image(inpt)
_log_api_usage_once(invert)
kernel = _get_kernel(invert, type(inpt))
return kernel(inpt)
@_register_kernel_internal(invert, torch.Tensor)
@_register_kernel_internal(invert, tv_tensors.Image)
def invert_image(image: torch.Tensor) -> torch.Tensor:
if image.is_floating_point():
return 1.0 - image
elif image.dtype == torch.uint8:
return image.bitwise_not()
else: # signed integer dtypes
# We can't use `Tensor.bitwise_not` here, since we want to retain the leading zero bit that encodes the sign
return image.bitwise_xor((1 << _num_value_bits(image.dtype)) - 1)
_invert_image_pil = _register_kernel_internal(invert, PIL.Image.Image)(_FP.invert)
@_register_kernel_internal(invert, tv_tensors.Video)
def invert_video(video: torch.Tensor) -> torch.Tensor:
return invert_image(video)
def permute_channels(inpt: torch.Tensor, permutation: List[int]) -> torch.Tensor:
"""Permute the channels of the input according to the given permutation.
This function supports plain :class:`~torch.Tensor`'s, :class:`PIL.Image.Image`'s, and
:class:`torchvision.tv_tensors.Image` and :class:`torchvision.tv_tensors.Video`.
Example:
>>> rgb_image = torch.rand(3, 256, 256)
>>> bgr_image = F.permute_channels(rgb_image, permutation=[2, 1, 0])
Args:
permutation (List[int]): Valid permutation of the input channel indices. The index of the element determines the
channel index in the input and the value determines the channel index in the output. For example,
            ``permutation=[2, 0, 1]``
            - takes ``inpt[..., 0, :, :]`` and puts it at ``output[..., 2, :, :]``,
            - takes ``inpt[..., 1, :, :]`` and puts it at ``output[..., 0, :, :]``, and
            - takes ``inpt[..., 2, :, :]`` and puts it at ``output[..., 1, :, :]``.
Raises:
ValueError: If ``len(permutation)`` doesn't match the number of channels in the input.
"""
if torch.jit.is_scripting():
return permute_channels_image(inpt, permutation=permutation)
_log_api_usage_once(permute_channels)
kernel = _get_kernel(permute_channels, type(inpt))
return kernel(inpt, permutation=permutation)
@_register_kernel_internal(permute_channels, torch.Tensor)
@_register_kernel_internal(permute_channels, tv_tensors.Image)
def permute_channels_image(image: torch.Tensor, permutation: List[int]) -> torch.Tensor:
shape = image.shape
num_channels, height, width = shape[-3:]
if len(permutation) != num_channels:
raise ValueError(
f"Length of permutation does not match number of channels: " f"{len(permutation)} != {num_channels}"
)
if image.numel() == 0:
return image
image = image.reshape(-1, num_channels, height, width)
image = image[:, permutation, :, :]
return image.reshape(shape)
@_register_kernel_internal(permute_channels, PIL.Image.Image)
def _permute_channels_image_pil(image: PIL.Image.Image, permutation: List[int]) -> PIL.Image.Image:
return to_pil_image(permute_channels_image(pil_to_tensor(image), permutation=permutation))
@_register_kernel_internal(permute_channels, tv_tensors.Video)
def permute_channels_video(video: torch.Tensor, permutation: List[int]) -> torch.Tensor:
return permute_channels_image(video, permutation=permutation)
```
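A short sketch chaining a few of the color kernels above on a float image in [0, 1]; the factors are arbitrary.

```py
import torch
from torchvision.transforms.v2 import functional as F

img = torch.rand(3, 24, 24)  # float images are expected to be in [0, 1]

out = F.adjust_brightness(img, brightness_factor=1.5)   # result is clamped back into [0, 1]
out = F.adjust_hue(out, hue_factor=0.25)                # hue_factor must lie in [-0.5, 0.5]
out = F.permute_channels(out, permutation=[2, 1, 0])    # RGB -> BGR
sol = F.solarize(img, threshold=0.5)                    # invert every value >= 0.5
assert out.shape == img.shape and 0.0 <= float(sol.min()) <= 1.0
```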
|
===========================================================================================================================================
SOURCE CODE FILE: _deprecated.py
LINES: 1
SIZE: 0.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\functional\_deprecated.py
ENCODING: utf-8
```py
import warnings
from typing import Any, List
import torch
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
"""[DEPREACTED] Use to_image() and to_dtype() instead."""
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image(...)` followed by `to_dtype(..., dtype=torch.float32, scale=True)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: torch.Tensor) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
```
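The warnings above spell out the migration path; a minimal sketch of the replacements they recommend:

```py
import numpy as np
import torch
from torchvision.transforms.v2 import functional as F

frame = (np.random.rand(16, 16, 3) * 255).astype(np.uint8)

# Instead of the deprecated F.to_tensor(frame):
img = F.to_dtype(F.to_image(frame), dtype=torch.float32, scale=True)
assert img.dtype == torch.float32 and float(img.max()) <= 1.0

# Instead of the deprecated F.get_image_size(img), which returns [w, h]:
h, w = F.get_size(img)
assert (h, w) == (16, 16)
```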
|
=========================================================================================================================================
SOURCE CODE FILE: _geometry.py
LINES: 1
SIZE: 87.79 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\functional\_geometry.py
ENCODING: utf-8
```py
import math
import numbers
import warnings
from typing import Any, List, Optional, Sequence, Tuple, Union
import PIL.Image
import torch
from torch.nn.functional import grid_sample, interpolate, pad as torch_pad
from torchvision import tv_tensors
from torchvision.transforms import _functional_pil as _FP
from torchvision.transforms._functional_tensor import _pad_symmetric
from torchvision.transforms.functional import (
_compute_resized_output_size as __compute_resized_output_size,
_get_perspective_coeffs,
_interpolation_modes_from_int,
InterpolationMode,
pil_modes_mapping,
pil_to_tensor,
to_pil_image,
)
from torchvision.utils import _log_api_usage_once
from ._meta import _get_size_image_pil, clamp_bounding_boxes, convert_bounding_box_format
from ._utils import _FillTypeJIT, _get_kernel, _register_five_ten_crop_kernel_internal, _register_kernel_internal
def _check_interpolation(interpolation: Union[InterpolationMode, int]) -> InterpolationMode:
if isinstance(interpolation, int):
interpolation = _interpolation_modes_from_int(interpolation)
elif not isinstance(interpolation, InterpolationMode):
raise ValueError(
f"Argument interpolation should be an `InterpolationMode` or a corresponding Pillow integer constant, "
f"but got {interpolation}."
)
return interpolation
def horizontal_flip(inpt: torch.Tensor) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomHorizontalFlip` for details."""
if torch.jit.is_scripting():
return horizontal_flip_image(inpt)
_log_api_usage_once(horizontal_flip)
kernel = _get_kernel(horizontal_flip, type(inpt))
return kernel(inpt)
@_register_kernel_internal(horizontal_flip, torch.Tensor)
@_register_kernel_internal(horizontal_flip, tv_tensors.Image)
def horizontal_flip_image(image: torch.Tensor) -> torch.Tensor:
return image.flip(-1)
@_register_kernel_internal(horizontal_flip, PIL.Image.Image)
def _horizontal_flip_image_pil(image: PIL.Image.Image) -> PIL.Image.Image:
return _FP.hflip(image)
@_register_kernel_internal(horizontal_flip, tv_tensors.Mask)
def horizontal_flip_mask(mask: torch.Tensor) -> torch.Tensor:
return horizontal_flip_image(mask)
def horizontal_flip_bounding_boxes(
bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, canvas_size: Tuple[int, int]
) -> torch.Tensor:
shape = bounding_boxes.shape
bounding_boxes = bounding_boxes.clone().reshape(-1, 4)
if format == tv_tensors.BoundingBoxFormat.XYXY:
bounding_boxes[:, [2, 0]] = bounding_boxes[:, [0, 2]].sub_(canvas_size[1]).neg_()
elif format == tv_tensors.BoundingBoxFormat.XYWH:
bounding_boxes[:, 0].add_(bounding_boxes[:, 2]).sub_(canvas_size[1]).neg_()
else: # format == tv_tensors.BoundingBoxFormat.CXCYWH:
bounding_boxes[:, 0].sub_(canvas_size[1]).neg_()
return bounding_boxes.reshape(shape)
@_register_kernel_internal(horizontal_flip, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def _horizontal_flip_bounding_boxes_dispatch(inpt: tv_tensors.BoundingBoxes) -> tv_tensors.BoundingBoxes:
output = horizontal_flip_bounding_boxes(
inpt.as_subclass(torch.Tensor), format=inpt.format, canvas_size=inpt.canvas_size
)
return tv_tensors.wrap(output, like=inpt)
@_register_kernel_internal(horizontal_flip, tv_tensors.Video)
def horizontal_flip_video(video: torch.Tensor) -> torch.Tensor:
return horizontal_flip_image(video)
def vertical_flip(inpt: torch.Tensor) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomVerticalFlip` for details."""
if torch.jit.is_scripting():
return vertical_flip_image(inpt)
_log_api_usage_once(vertical_flip)
kernel = _get_kernel(vertical_flip, type(inpt))
return kernel(inpt)
@_register_kernel_internal(vertical_flip, torch.Tensor)
@_register_kernel_internal(vertical_flip, tv_tensors.Image)
def vertical_flip_image(image: torch.Tensor) -> torch.Tensor:
return image.flip(-2)
@_register_kernel_internal(vertical_flip, PIL.Image.Image)
def _vertical_flip_image_pil(image: PIL.Image.Image) -> PIL.Image.Image:
return _FP.vflip(image)
@_register_kernel_internal(vertical_flip, tv_tensors.Mask)
def vertical_flip_mask(mask: torch.Tensor) -> torch.Tensor:
return vertical_flip_image(mask)
def vertical_flip_bounding_boxes(
bounding_boxes: torch.Tensor, format: tv_tensors.BoundingBoxFormat, canvas_size: Tuple[int, int]
) -> torch.Tensor:
shape = bounding_boxes.shape
bounding_boxes = bounding_boxes.clone().reshape(-1, 4)
if format == tv_tensors.BoundingBoxFormat.XYXY:
bounding_boxes[:, [1, 3]] = bounding_boxes[:, [3, 1]].sub_(canvas_size[0]).neg_()
elif format == tv_tensors.BoundingBoxFormat.XYWH:
bounding_boxes[:, 1].add_(bounding_boxes[:, 3]).sub_(canvas_size[0]).neg_()
else: # format == tv_tensors.BoundingBoxFormat.CXCYWH:
bounding_boxes[:, 1].sub_(canvas_size[0]).neg_()
return bounding_boxes.reshape(shape)
@_register_kernel_internal(vertical_flip, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def _vertical_flip_bounding_boxes_dispatch(inpt: tv_tensors.BoundingBoxes) -> tv_tensors.BoundingBoxes:
output = vertical_flip_bounding_boxes(
inpt.as_subclass(torch.Tensor), format=inpt.format, canvas_size=inpt.canvas_size
)
return tv_tensors.wrap(output, like=inpt)
@_register_kernel_internal(vertical_flip, tv_tensors.Video)
def vertical_flip_video(video: torch.Tensor) -> torch.Tensor:
return vertical_flip_image(video)
# We changed the names to align them with the transforms, i.e. `RandomHorizontalFlip`. Still, `hflip` and `vflip` are
# prevalent and well understood. Thus, we just alias them without deprecating the old names.
hflip = horizontal_flip
vflip = vertical_flip
def _compute_resized_output_size(
canvas_size: Tuple[int, int], size: Optional[List[int]], max_size: Optional[int] = None
) -> List[int]:
if isinstance(size, int):
size = [size]
elif max_size is not None and size is not None and len(size) != 1:
raise ValueError(
"max_size should only be passed if size is None or specifies the length of the smaller edge, "
"i.e. size should be an int or a sequence of length 1 in torchscript mode."
)
return __compute_resized_output_size(canvas_size, size=size, max_size=max_size, allow_size_none=True)
def resize(
inpt: torch.Tensor,
size: Optional[List[int]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
max_size: Optional[int] = None,
antialias: Optional[bool] = True,
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.Resize` for details."""
if torch.jit.is_scripting():
return resize_image(inpt, size=size, interpolation=interpolation, max_size=max_size, antialias=antialias)
_log_api_usage_once(resize)
kernel = _get_kernel(resize, type(inpt))
return kernel(inpt, size=size, interpolation=interpolation, max_size=max_size, antialias=antialias)
# This is an internal helper method for resize_image. We should put it here instead of keeping it
# inside resize_image due to torchscript.
# uint8 dtype support for bilinear and bicubic is limited to cpu and
# according to our benchmarks on eager, non-AVX CPUs should still prefer u8->f32->interpolate->u8 path for bilinear
def _do_native_uint8_resize_on_cpu(interpolation: InterpolationMode) -> bool:
if interpolation == InterpolationMode.BILINEAR:
if torch.compiler.is_compiling():
return True
else:
return "AVX2" in torch.backends.cpu.get_cpu_capability()
return interpolation == InterpolationMode.BICUBIC
@_register_kernel_internal(resize, torch.Tensor)
@_register_kernel_internal(resize, tv_tensors.Image)
def resize_image(
image: torch.Tensor,
size: Optional[List[int]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
max_size: Optional[int] = None,
antialias: Optional[bool] = True,
) -> torch.Tensor:
interpolation = _check_interpolation(interpolation)
antialias = False if antialias is None else antialias
align_corners: Optional[bool] = None
if interpolation == InterpolationMode.BILINEAR or interpolation == InterpolationMode.BICUBIC:
align_corners = False
else:
# The default of antialias is True from 0.17, so we don't warn or
# error if other interpolation modes are used. This is documented.
antialias = False
shape = image.shape
numel = image.numel()
num_channels, old_height, old_width = shape[-3:]
new_height, new_width = _compute_resized_output_size((old_height, old_width), size=size, max_size=max_size)
if (new_height, new_width) == (old_height, old_width):
return image
elif numel > 0:
dtype = image.dtype
acceptable_dtypes = [torch.float32, torch.float64]
if interpolation == InterpolationMode.NEAREST or interpolation == InterpolationMode.NEAREST_EXACT:
# uint8 dtype can be included for cpu and cuda input if nearest mode
acceptable_dtypes.append(torch.uint8)
elif image.device.type == "cpu":
if _do_native_uint8_resize_on_cpu(interpolation):
acceptable_dtypes.append(torch.uint8)
image = image.reshape(-1, num_channels, old_height, old_width)
strides = image.stride()
if image.is_contiguous(memory_format=torch.channels_last) and image.shape[0] == 1 and numel != strides[0]:
# There is a weird behaviour in torch core where the output tensor of `interpolate()` can be allocated as
# contiguous even though the input is un-ambiguously channels_last (https://github.com/pytorch/pytorch/issues/68430).
# In particular this happens for the typical torchvision use-case of single CHW images where we fake the batch dim
# to become 1CHW. Below, we restride those tensors to trick torch core into properly allocating the output as
# channels_last, thus preserving the memory format of the input. This is not just for format consistency:
# for uint8 bilinear images, this also avoids an extra copy (re-packing) of the output and saves time.
# TODO: when https://github.com/pytorch/pytorch/issues/68430 is fixed (possibly by https://github.com/pytorch/pytorch/pull/100373),
# we should be able to remove this hack.
new_strides = list(strides)
new_strides[0] = numel
image = image.as_strided((1, num_channels, old_height, old_width), new_strides)
need_cast = dtype not in acceptable_dtypes
if need_cast:
image = image.to(dtype=torch.float32)
image = interpolate(
image,
size=[new_height, new_width],
mode=interpolation.value,
align_corners=align_corners,
antialias=antialias,
)
if need_cast:
if interpolation == InterpolationMode.BICUBIC and dtype == torch.uint8:
# This path is hit on non-AVX archs, or on GPU.
image = image.clamp_(min=0, max=255)
if dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
image = image.round_()
image = image.to(dtype=dtype)
return image.reshape(shape[:-3] + (num_channels, new_height, new_width))
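# Illustrative usage sketch (not part of the original module): `resize_image` handles
# arbitrary leading batch dimensions and, when the native uint8 kernel is unavailable, casts
# non-float dtypes to float32 around `interpolate` before casting back. The helper name
# `_example_resize_image_uint8` is hypothetical; for a square 10x10 uint8 input and size=[5],
# the output is expected to be a uint8 tensor of shape (3, 5, 5).
def _example_resize_image_uint8() -> torch.Tensor:
    image = torch.randint(0, 256, (3, 10, 10), dtype=torch.uint8)
    out = resize_image(image, size=[5], interpolation=InterpolationMode.BILINEAR, antialias=True)
    # out.dtype == torch.uint8 and out.shape == (3, 5, 5)
    return out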
def _resize_image_pil(
image: PIL.Image.Image,
size: Union[Sequence[int], int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
max_size: Optional[int] = None,
) -> PIL.Image.Image:
old_height, old_width = image.height, image.width
new_height, new_width = _compute_resized_output_size(
(old_height, old_width),
size=size, # type: ignore[arg-type]
max_size=max_size,
)
interpolation = _check_interpolation(interpolation)
if (new_height, new_width) == (old_height, old_width):
return image
return image.resize((new_width, new_height), resample=pil_modes_mapping[interpolation])
@_register_kernel_internal(resize, PIL.Image.Image)
def __resize_image_pil_dispatch(
image: PIL.Image.Image,
size: Union[Sequence[int], int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
max_size: Optional[int] = None,
antialias: Optional[bool] = True,
) -> PIL.Image.Image:
if antialias is False:
warnings.warn("Anti-alias option is always applied for PIL Image input. Argument antialias is ignored.")
return _resize_image_pil(image, size=size, interpolation=interpolation, max_size=max_size)
def resize_mask(mask: torch.Tensor, size: Optional[List[int]], max_size: Optional[int] = None) -> torch.Tensor:
if mask.ndim < 3:
mask = mask.unsqueeze(0)
needs_squeeze = True
else:
needs_squeeze = False
output = resize_image(mask, size=size, interpolation=InterpolationMode.NEAREST, max_size=max_size)
if needs_squeeze:
output = output.squeeze(0)
return output
@_register_kernel_internal(resize, tv_tensors.Mask, tv_tensor_wrapper=False)
def _resize_mask_dispatch(
inpt: tv_tensors.Mask, size: List[int], max_size: Optional[int] = None, **kwargs: Any
) -> tv_tensors.Mask:
output = resize_mask(inpt.as_subclass(torch.Tensor), size, max_size=max_size)
return tv_tensors.wrap(output, like=inpt)
def resize_bounding_boxes(
bounding_boxes: torch.Tensor,
canvas_size: Tuple[int, int],
size: Optional[List[int]],
max_size: Optional[int] = None,
) -> Tuple[torch.Tensor, Tuple[int, int]]:
old_height, old_width = canvas_size
new_height, new_width = _compute_resized_output_size(canvas_size, size=size, max_size=max_size)
if (new_height, new_width) == (old_height, old_width):
return bounding_boxes, canvas_size
w_ratio = new_width / old_width
h_ratio = new_height / old_height
ratios = torch.tensor([w_ratio, h_ratio, w_ratio, h_ratio], device=bounding_boxes.device)
return (
bounding_boxes.mul(ratios).to(bounding_boxes.dtype),
(new_height, new_width),
)
@_register_kernel_internal(resize, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def _resize_bounding_boxes_dispatch(
inpt: tv_tensors.BoundingBoxes, size: Optional[List[int]], max_size: Optional[int] = None, **kwargs: Any
) -> tv_tensors.BoundingBoxes:
output, canvas_size = resize_bounding_boxes(
inpt.as_subclass(torch.Tensor), inpt.canvas_size, size, max_size=max_size
)
return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size)
@_register_kernel_internal(resize, tv_tensors.Video)
def resize_video(
video: torch.Tensor,
size: Optional[List[int]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
max_size: Optional[int] = None,
antialias: Optional[bool] = True,
) -> torch.Tensor:
return resize_image(video, size=size, interpolation=interpolation, max_size=max_size, antialias=antialias)
def affine(
inpt: torch.Tensor,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: _FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomAffine` for details."""
if torch.jit.is_scripting():
return affine_image(
inpt,
angle=angle,
translate=translate,
scale=scale,
shear=shear,
interpolation=interpolation,
fill=fill,
center=center,
)
_log_api_usage_once(affine)
kernel = _get_kernel(affine, type(inpt))
return kernel(
inpt,
angle=angle,
translate=translate,
scale=scale,
shear=shear,
interpolation=interpolation,
fill=fill,
center=center,
)
def _affine_parse_args(
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
center: Optional[List[float]] = None,
) -> Tuple[float, List[float], List[float], Optional[List[float]]]:
if not isinstance(angle, (int, float)):
raise TypeError("Argument angle should be int or float")
if not isinstance(translate, (list, tuple)):
raise TypeError("Argument translate should be a sequence")
if len(translate) != 2:
raise ValueError("Argument translate should be a sequence of length 2")
if scale <= 0.0:
raise ValueError("Argument scale should be positive")
if not isinstance(shear, (numbers.Number, (list, tuple))):
raise TypeError("Shear should be either a single value or a sequence of two values")
if not isinstance(interpolation, InterpolationMode):
raise TypeError("Argument interpolation should be a InterpolationMode")
if isinstance(angle, int):
angle = float(angle)
if isinstance(translate, tuple):
translate = list(translate)
if isinstance(shear, numbers.Number):
shear = [shear, 0.0]
if isinstance(shear, tuple):
shear = list(shear)
if len(shear) == 1:
shear = [shear[0], shear[0]]
if len(shear) != 2:
raise ValueError(f"Shear should be a sequence containing two values. Got {shear}")
if center is not None:
if not isinstance(center, (list, tuple)):
raise TypeError("Argument center should be a sequence")
else:
center = [float(c) for c in center]
return angle, translate, shear, center
def _get_inverse_affine_matrix(
center: List[float], angle: float, translate: List[float], scale: float, shear: List[float], inverted: bool = True
) -> List[float]:
# Helper method to compute inverse matrix for affine transformation
# Pillow requires inverse affine transformation matrix:
# Affine matrix is : M = T * C * RotateScaleShear * C^-1
#
# where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
# C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
# RotateScaleShear is rotation with scale and shear matrix
#
# RotateScaleShear(a, s, (sx, sy)) =
# = R(a) * S(s) * SHy(sy) * SHx(sx)
# = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
# [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
# [ 0 , 0 , 1 ]
# where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
# SHx(s) = [1, -tan(s)] and SHy(s) = [1 , 0]
# [0, 1 ] [-tan(s), 1]
#
# Thus, the inverse is M^-1 = C * RotateScaleShear^-1 * C^-1 * T^-1
rot = math.radians(angle)
sx = math.radians(shear[0])
sy = math.radians(shear[1])
cx, cy = center
tx, ty = translate
# Cached results
cos_sy = math.cos(sy)
tan_sx = math.tan(sx)
rot_minus_sy = rot - sy
cx_plus_tx = cx + tx
cy_plus_ty = cy + ty
# Rotate Scale Shear (RSS) without scaling
a = math.cos(rot_minus_sy) / cos_sy
b = -(a * tan_sx + math.sin(rot))
c = math.sin(rot_minus_sy) / cos_sy
d = math.cos(rot) - c * tan_sx
if inverted:
# Inverted rotation matrix with scale and shear
# det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
matrix = [d / scale, -b / scale, 0.0, -c / scale, a / scale, 0.0]
# Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
# and then apply center translation: C * RSS^-1 * C^-1 * T^-1
matrix[2] += cx - matrix[0] * cx_plus_tx - matrix[1] * cy_plus_ty
matrix[5] += cy - matrix[3] * cx_plus_tx - matrix[4] * cy_plus_ty
else:
matrix = [a * scale, b * scale, 0.0, c * scale, d * scale, 0.0]
# Apply inverse of center translation: RSS * C^-1
# and then apply translation and center : T * C * RSS * C^-1
matrix[2] += cx_plus_tx - matrix[0] * cx - matrix[1] * cy
matrix[5] += cy_plus_ty - matrix[3] * cx - matrix[4] * cy
return matrix
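# Illustrative sketch (not part of the original module): for a pure 90 degree rotation about
# the origin (no translation, unit scale, no shear), the inverse matrix returned by
# `_get_inverse_affine_matrix` is the rotation by -90 degrees, i.e. approximately
# [0, 1, 0, -1, 0, 0]. The helper name `_example_inverse_rotation_matrix` is hypothetical.
def _example_inverse_rotation_matrix() -> List[float]:
    matrix = _get_inverse_affine_matrix(
        center=[0.0, 0.0], angle=90.0, translate=[0.0, 0.0], scale=1.0, shear=[0.0, 0.0]
    )
    # matrix ~= [0.0, 1.0, 0.0, -1.0, 0.0, 0.0] up to floating point error
    return matrix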
def _compute_affine_output_size(matrix: List[float], w: int, h: int) -> Tuple[int, int]:
if torch.compiler.is_compiling() and not torch.jit.is_scripting():
return _compute_affine_output_size_python(matrix, w, h)
else:
return _compute_affine_output_size_tensor(matrix, w, h)
def _compute_affine_output_size_tensor(matrix: List[float], w: int, h: int) -> Tuple[int, int]:
    # Inspired by the PIL implementation:
    # https://github.com/python-pillow/Pillow/blob/11de3318867e4398057373ee9f12dcb33db7335c/src/PIL/Image.py#L2054
    # pts are the four image corners: top-left, bottom-left, bottom-right, top-right.
    # The points are shifted due to the torch affine matrix convention about the center point:
    # (0, 0) corresponds to the image center pivot point (w * 0.5, h * 0.5).
half_w = 0.5 * w
half_h = 0.5 * h
pts = torch.tensor(
[
[-half_w, -half_h, 1.0],
[-half_w, half_h, 1.0],
[half_w, half_h, 1.0],
[half_w, -half_h, 1.0],
]
)
theta = torch.tensor(matrix, dtype=torch.float).view(2, 3)
new_pts = torch.matmul(pts, theta.T)
min_vals, max_vals = new_pts.aminmax(dim=0)
# shift points to [0, w] and [0, h] interval to match PIL results
halfs = torch.tensor((half_w, half_h))
min_vals.add_(halfs)
max_vals.add_(halfs)
# Truncate precision to 1e-4 to avoid ceil of Xe-15 to 1.0
tol = 1e-4
inv_tol = 1.0 / tol
cmax = max_vals.mul_(inv_tol).trunc_().mul_(tol).ceil_()
cmin = min_vals.mul_(inv_tol).trunc_().mul_(tol).floor_()
size = cmax.sub_(cmin)
return int(size[0]), int(size[1]) # w, h
def _compute_affine_output_size_python(matrix: List[float], w: int, h: int) -> Tuple[int, int]:
    # Mostly copied from the PIL implementation:
    # The only difference is in the transformed points: the input matrix has a zero translation part here,
    # whereas PIL uses a centered translation part.
    # https://github.com/python-pillow/Pillow/blob/11de3318867e4398057373ee9f12dcb33db7335c/src/PIL/Image.py#L2054
a, b, c, d, e, f = matrix
xx = []
yy = []
half_w = 0.5 * w
half_h = 0.5 * h
for x, y in ((-half_w, -half_h), (half_w, -half_h), (half_w, half_h), (-half_w, half_h)):
nx = a * x + b * y + c
ny = d * x + e * y + f
xx.append(nx + half_w)
yy.append(ny + half_h)
nw = math.ceil(max(xx)) - math.floor(min(xx))
nh = math.ceil(max(yy)) - math.floor(min(yy))
return int(nw), int(nh) # w, h
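# Illustrative sketch (not part of the original module): rotating a 10x4 (width x height)
# image by 90 degrees, i.e. matrix [0, -1, 0, 1, 0, 0], swaps the output size to 4x10.
# The helper name `_example_affine_output_size` is hypothetical.
def _example_affine_output_size() -> Tuple[int, int]:
    # For w=10, h=4 this returns (4, 10), i.e. the new (width, height).
    return _compute_affine_output_size_python([0.0, -1.0, 0.0, 1.0, 0.0, 0.0], w=10, h=4)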
def _apply_grid_transform(img: torch.Tensor, grid: torch.Tensor, mode: str, fill: _FillTypeJIT) -> torch.Tensor:
input_shape = img.shape
output_height, output_width = grid.shape[1], grid.shape[2]
num_channels, input_height, input_width = input_shape[-3:]
output_shape = input_shape[:-3] + (num_channels, output_height, output_width)
if img.numel() == 0:
return img.reshape(output_shape)
img = img.reshape(-1, num_channels, input_height, input_width)
squashed_batch_size = img.shape[0]
    # We use the knowledge from the callers that the grid always has a floating point dtype
fp = img.dtype == grid.dtype
float_img = img if fp else img.to(grid.dtype)
if squashed_batch_size > 1:
# Apply same grid to a batch of images
grid = grid.expand(squashed_batch_size, -1, -1, -1)
# Append a dummy mask for customized fill colors, should be faster than grid_sample() twice
if fill is not None:
mask = torch.ones(
(squashed_batch_size, 1, input_height, input_width), dtype=float_img.dtype, device=float_img.device
)
float_img = torch.cat((float_img, mask), dim=1)
float_img = grid_sample(float_img, grid, mode=mode, padding_mode="zeros", align_corners=False)
# Fill with required color
if fill is not None:
float_img, mask = torch.tensor_split(float_img, indices=(-1,), dim=-3)
mask = mask.expand_as(float_img)
fill_list = fill if isinstance(fill, (tuple, list)) else [float(fill)] # type: ignore[arg-type]
fill_img = torch.tensor(fill_list, dtype=float_img.dtype, device=float_img.device).view(1, -1, 1, 1)
if mode == "nearest":
float_img = torch.where(mask < 0.5, fill_img.expand_as(float_img), float_img)
else: # 'bilinear'
# The following is mathematically equivalent to:
# img * mask + (1.0 - mask) * fill = img * mask - fill * mask + fill = mask * (img - fill) + fill
float_img = float_img.sub_(fill_img).mul_(mask).add_(fill_img)
img = float_img.round_().to(img.dtype) if not fp else float_img
return img.reshape(output_shape)
def _assert_grid_transform_inputs(
image: torch.Tensor,
matrix: Optional[List[float]],
interpolation: str,
fill: _FillTypeJIT,
supported_interpolation_modes: List[str],
coeffs: Optional[List[float]] = None,
) -> None:
if matrix is not None:
if not isinstance(matrix, list):
raise TypeError("Argument matrix should be a list")
elif len(matrix) != 6:
raise ValueError("Argument matrix should have 6 float values")
if coeffs is not None and len(coeffs) != 8:
raise ValueError("Argument coeffs should have 8 float values")
if fill is not None:
if isinstance(fill, (tuple, list)):
length = len(fill)
num_channels = image.shape[-3]
if length > 1 and length != num_channels:
raise ValueError(
"The number of elements in 'fill' cannot broadcast to match the number of "
f"channels of the image ({length} != {num_channels})"
)
elif not isinstance(fill, (int, float)):
raise ValueError("Argument fill should be either int, float, tuple or list")
if interpolation not in supported_interpolation_modes:
raise ValueError(f"Interpolation mode '{interpolation}' is unsupported with Tensor input")
def _affine_grid(
theta: torch.Tensor,
w: int,
h: int,
ow: int,
oh: int,
) -> torch.Tensor:
# https://github.com/pytorch/pytorch/blob/74b65c32be68b15dc7c9e8bb62459efbfbde33d8/aten/src/ATen/native/
# AffineGridGenerator.cpp#L18
    # Differences with AffineGridGenerator:
    # 1) we normalize grid values after applying theta
    # 2) we can normalize by a different output image size, which covers the "expand" option like in PIL.Image.rotate
dtype = theta.dtype
device = theta.device
base_grid = torch.empty(1, oh, ow, 3, dtype=dtype, device=device)
x_grid = torch.linspace((1.0 - ow) * 0.5, (ow - 1.0) * 0.5, steps=ow, device=device)
base_grid[..., 0].copy_(x_grid)
y_grid = torch.linspace((1.0 - oh) * 0.5, (oh - 1.0) * 0.5, steps=oh, device=device).unsqueeze_(-1)
base_grid[..., 1].copy_(y_grid)
base_grid[..., 2].fill_(1)
rescaled_theta = theta.transpose(1, 2).div_(torch.tensor([0.5 * w, 0.5 * h], dtype=dtype, device=device))
output_grid = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta)
return output_grid.view(1, oh, ow, 2)
@_register_kernel_internal(affine, torch.Tensor)
@_register_kernel_internal(affine, tv_tensors.Image)
def affine_image(
image: torch.Tensor,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: _FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> torch.Tensor:
interpolation = _check_interpolation(interpolation)
angle, translate, shear, center = _affine_parse_args(angle, translate, scale, shear, interpolation, center)
height, width = image.shape[-2:]
center_f = [0.0, 0.0]
if center is not None:
# Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
center_f = [(c - s * 0.5) for c, s in zip(center, [width, height])]
translate_f = [float(t) for t in translate]
matrix = _get_inverse_affine_matrix(center_f, angle, translate_f, scale, shear)
_assert_grid_transform_inputs(image, matrix, interpolation.value, fill, ["nearest", "bilinear"])
dtype = image.dtype if torch.is_floating_point(image) else torch.float32
theta = torch.tensor(matrix, dtype=dtype, device=image.device).reshape(1, 2, 3)
grid = _affine_grid(theta, w=width, h=height, ow=width, oh=height)
return _apply_grid_transform(image, grid, interpolation.value, fill=fill)
@_register_kernel_internal(affine, PIL.Image.Image)
def _affine_image_pil(
image: PIL.Image.Image,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: _FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> PIL.Image.Image:
interpolation = _check_interpolation(interpolation)
angle, translate, shear, center = _affine_parse_args(angle, translate, scale, shear, interpolation, center)
    # center = (img_size[0] * 0.5 + 0.5, img_size[1] * 0.5 + 0.5)
    # It is visually better to estimate the center without the 0.5 offset;
    # otherwise an image rotated by 90 degrees is shifted compared to the output of torch.rot90 or F_t.affine.
if center is None:
height, width = _get_size_image_pil(image)
center = [width * 0.5, height * 0.5]
matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
return _FP.affine(image, matrix, interpolation=pil_modes_mapping[interpolation], fill=fill)
def _affine_bounding_boxes_with_expand(
bounding_boxes: torch.Tensor,
format: tv_tensors.BoundingBoxFormat,
canvas_size: Tuple[int, int],
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
center: Optional[List[float]] = None,
expand: bool = False,
) -> Tuple[torch.Tensor, Tuple[int, int]]:
if bounding_boxes.numel() == 0:
return bounding_boxes, canvas_size
original_shape = bounding_boxes.shape
original_dtype = bounding_boxes.dtype
bounding_boxes = bounding_boxes.clone() if bounding_boxes.is_floating_point() else bounding_boxes.float()
dtype = bounding_boxes.dtype
device = bounding_boxes.device
bounding_boxes = (
convert_bounding_box_format(
bounding_boxes, old_format=format, new_format=tv_tensors.BoundingBoxFormat.XYXY, inplace=True
)
).reshape(-1, 4)
angle, translate, shear, center = _affine_parse_args(
angle, translate, scale, shear, InterpolationMode.NEAREST, center
)
if center is None:
height, width = canvas_size
center = [width * 0.5, height * 0.5]
affine_vector = _get_inverse_affine_matrix(center, angle, translate, scale, shear, inverted=False)
transposed_affine_matrix = (
torch.tensor(
affine_vector,
dtype=dtype,
device=device,
)
.reshape(2, 3)
.T
)
# 1) Let's transform bboxes into a tensor of 4 points (top-left, top-right, bottom-left, bottom-right corners).
# Tensor of points has shape (N * 4, 3), where N is the number of bboxes
# Single point structure is similar to
# [(xmin, ymin, 1), (xmax, ymin, 1), (xmax, ymax, 1), (xmin, ymax, 1)]
points = bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2)
points = torch.cat([points, torch.ones(points.shape[0], 1, device=device, dtype=dtype)], dim=-1)
# 2) Now let's transform the points using affine matrix
transformed_points = torch.matmul(points, transposed_affine_matrix)
# 3) Reshape transformed points to [N boxes, 4 points, x/y coords]
# and compute bounding box from 4 transformed points:
transformed_points = transformed_points.reshape(-1, 4, 2)
out_bbox_mins, out_bbox_maxs = torch.aminmax(transformed_points, dim=1)
out_bboxes = torch.cat([out_bbox_mins, out_bbox_maxs], dim=1)
if expand:
        # Compute the minimum point of the transformed image frame:
        # points are the four image corners: top-left, bottom-left, bottom-right, top-right.
height, width = canvas_size
points = torch.tensor(
[
[0.0, 0.0, 1.0],
[0.0, float(height), 1.0],
[float(width), float(height), 1.0],
[float(width), 0.0, 1.0],
],
dtype=dtype,
device=device,
)
new_points = torch.matmul(points, transposed_affine_matrix)
tr = torch.amin(new_points, dim=0, keepdim=True)
# Translate bounding boxes
out_bboxes.sub_(tr.repeat((1, 2)))
# Estimate meta-data for image with inverted=True
affine_vector = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
new_width, new_height = _compute_affine_output_size(affine_vector, width, height)
canvas_size = (new_height, new_width)
out_bboxes = clamp_bounding_boxes(out_bboxes, format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=canvas_size)
out_bboxes = convert_bounding_box_format(
out_bboxes, old_format=tv_tensors.BoundingBoxFormat.XYXY, new_format=format, inplace=True
).reshape(original_shape)
out_bboxes = out_bboxes.to(original_dtype)
return out_bboxes, canvas_size
def affine_bounding_boxes(
bounding_boxes: torch.Tensor,
format: tv_tensors.BoundingBoxFormat,
canvas_size: Tuple[int, int],
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
center: Optional[List[float]] = None,
) -> torch.Tensor:
out_box, _ = _affine_bounding_boxes_with_expand(
bounding_boxes,
format=format,
canvas_size=canvas_size,
angle=angle,
translate=translate,
scale=scale,
shear=shear,
center=center,
expand=False,
)
return out_box
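# Illustrative sketch (not part of the original module): with angle=0, unit scale and no
# shear, `affine_bounding_boxes` reduces to a plain translation of the box coordinates.
# The helper name `_example_affine_bounding_boxes_translation` is hypothetical.
def _example_affine_bounding_boxes_translation() -> torch.Tensor:
    boxes = torch.tensor([[1.0, 1.0, 3.0, 3.0]])  # XYXY
    out = affine_bounding_boxes(
        boxes,
        format=tv_tensors.BoundingBoxFormat.XYXY,
        canvas_size=(10, 10),
        angle=0.0,
        translate=[2.0, 3.0],
        scale=1.0,
        shear=[0.0, 0.0],
    )
    # The box is shifted by (2, 3): [[3., 4., 5., 6.]]
    return out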
@_register_kernel_internal(affine, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def _affine_bounding_boxes_dispatch(
inpt: tv_tensors.BoundingBoxes,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
center: Optional[List[float]] = None,
**kwargs,
) -> tv_tensors.BoundingBoxes:
output = affine_bounding_boxes(
inpt.as_subclass(torch.Tensor),
format=inpt.format,
canvas_size=inpt.canvas_size,
angle=angle,
translate=translate,
scale=scale,
shear=shear,
center=center,
)
return tv_tensors.wrap(output, like=inpt)
def affine_mask(
mask: torch.Tensor,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
fill: _FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> torch.Tensor:
if mask.ndim < 3:
mask = mask.unsqueeze(0)
needs_squeeze = True
else:
needs_squeeze = False
output = affine_image(
mask,
angle=angle,
translate=translate,
scale=scale,
shear=shear,
interpolation=InterpolationMode.NEAREST,
fill=fill,
center=center,
)
if needs_squeeze:
output = output.squeeze(0)
return output
@_register_kernel_internal(affine, tv_tensors.Mask, tv_tensor_wrapper=False)
def _affine_mask_dispatch(
inpt: tv_tensors.Mask,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
fill: _FillTypeJIT = None,
center: Optional[List[float]] = None,
**kwargs,
) -> tv_tensors.Mask:
output = affine_mask(
inpt.as_subclass(torch.Tensor),
angle=angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return tv_tensors.wrap(output, like=inpt)
@_register_kernel_internal(affine, tv_tensors.Video)
def affine_video(
video: torch.Tensor,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
fill: _FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> torch.Tensor:
return affine_image(
video,
angle=angle,
translate=translate,
scale=scale,
shear=shear,
interpolation=interpolation,
fill=fill,
center=center,
)
def rotate(
inpt: torch.Tensor,
angle: float,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: _FillTypeJIT = None,
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomRotation` for details."""
if torch.jit.is_scripting():
return rotate_image(inpt, angle=angle, interpolation=interpolation, expand=expand, fill=fill, center=center)
_log_api_usage_once(rotate)
kernel = _get_kernel(rotate, type(inpt))
return kernel(inpt, angle=angle, interpolation=interpolation, expand=expand, fill=fill, center=center)
@_register_kernel_internal(rotate, torch.Tensor)
@_register_kernel_internal(rotate, tv_tensors.Image)
def rotate_image(
image: torch.Tensor,
angle: float,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: _FillTypeJIT = None,
) -> torch.Tensor:
angle = angle % 360 # shift angle to [0, 360) range
# fast path: transpose without affine transform
if center is None:
if angle == 0:
return image.clone()
if angle == 180:
return torch.rot90(image, k=2, dims=(-2, -1))
if expand or image.shape[-1] == image.shape[-2]:
if angle == 90:
return torch.rot90(image, k=1, dims=(-2, -1))
if angle == 270:
return torch.rot90(image, k=3, dims=(-2, -1))
interpolation = _check_interpolation(interpolation)
input_height, input_width = image.shape[-2:]
center_f = [0.0, 0.0]
if center is not None:
# Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
center_f = [(c - s * 0.5) for c, s in zip(center, [input_width, input_height])]
    # Due to the current inconsistency of the rotation angle direction between the affine and rotate
    # implementations, we need to pass -angle.
matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
_assert_grid_transform_inputs(image, matrix, interpolation.value, fill, ["nearest", "bilinear"])
output_width, output_height = (
_compute_affine_output_size(matrix, input_width, input_height) if expand else (input_width, input_height)
)
dtype = image.dtype if torch.is_floating_point(image) else torch.float32
theta = torch.tensor(matrix, dtype=dtype, device=image.device).reshape(1, 2, 3)
grid = _affine_grid(theta, w=input_width, h=input_height, ow=output_width, oh=output_height)
return _apply_grid_transform(image, grid, interpolation.value, fill=fill)
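# Illustrative sketch (not part of the original module): for multiples of 90 degrees with
# `expand=True` (or square inputs), `rotate_image` takes the `torch.rot90` fast path above,
# so the result matches `torch.rot90` exactly. The helper name `_example_rotate_fast_path`
# is hypothetical.
def _example_rotate_fast_path() -> bool:
    image = torch.arange(2 * 3 * 4, dtype=torch.float32).reshape(2, 3, 4)
    rotated = rotate_image(image, angle=90.0, expand=True)
    return torch.equal(rotated, torch.rot90(image, k=1, dims=(-2, -1)))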
@_register_kernel_internal(rotate, PIL.Image.Image)
def _rotate_image_pil(
image: PIL.Image.Image,
angle: float,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: _FillTypeJIT = None,
) -> PIL.Image.Image:
interpolation = _check_interpolation(interpolation)
return _FP.rotate(
image, angle, interpolation=pil_modes_mapping[interpolation], expand=expand, fill=fill, center=center
)
def rotate_bounding_boxes(
bounding_boxes: torch.Tensor,
format: tv_tensors.BoundingBoxFormat,
canvas_size: Tuple[int, int],
angle: float,
expand: bool = False,
center: Optional[List[float]] = None,
) -> Tuple[torch.Tensor, Tuple[int, int]]:
return _affine_bounding_boxes_with_expand(
bounding_boxes,
format=format,
canvas_size=canvas_size,
angle=-angle,
translate=[0.0, 0.0],
scale=1.0,
shear=[0.0, 0.0],
center=center,
expand=expand,
)
@_register_kernel_internal(rotate, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def _rotate_bounding_boxes_dispatch(
inpt: tv_tensors.BoundingBoxes, angle: float, expand: bool = False, center: Optional[List[float]] = None, **kwargs
) -> tv_tensors.BoundingBoxes:
output, canvas_size = rotate_bounding_boxes(
inpt.as_subclass(torch.Tensor),
format=inpt.format,
canvas_size=inpt.canvas_size,
angle=angle,
expand=expand,
center=center,
)
return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size)
def rotate_mask(
mask: torch.Tensor,
angle: float,
expand: bool = False,
center: Optional[List[float]] = None,
fill: _FillTypeJIT = None,
) -> torch.Tensor:
if mask.ndim < 3:
mask = mask.unsqueeze(0)
needs_squeeze = True
else:
needs_squeeze = False
output = rotate_image(
mask,
angle=angle,
expand=expand,
interpolation=InterpolationMode.NEAREST,
fill=fill,
center=center,
)
if needs_squeeze:
output = output.squeeze(0)
return output
@_register_kernel_internal(rotate, tv_tensors.Mask, tv_tensor_wrapper=False)
def _rotate_mask_dispatch(
inpt: tv_tensors.Mask,
angle: float,
expand: bool = False,
center: Optional[List[float]] = None,
fill: _FillTypeJIT = None,
**kwargs,
) -> tv_tensors.Mask:
output = rotate_mask(inpt.as_subclass(torch.Tensor), angle=angle, expand=expand, fill=fill, center=center)
return tv_tensors.wrap(output, like=inpt)
@_register_kernel_internal(rotate, tv_tensors.Video)
def rotate_video(
video: torch.Tensor,
angle: float,
interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: _FillTypeJIT = None,
) -> torch.Tensor:
return rotate_image(video, angle, interpolation=interpolation, expand=expand, fill=fill, center=center)
def pad(
inpt: torch.Tensor,
padding: List[int],
fill: Optional[Union[int, float, List[float]]] = None,
padding_mode: str = "constant",
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.Pad` for details."""
if torch.jit.is_scripting():
return pad_image(inpt, padding=padding, fill=fill, padding_mode=padding_mode)
_log_api_usage_once(pad)
kernel = _get_kernel(pad, type(inpt))
return kernel(inpt, padding=padding, fill=fill, padding_mode=padding_mode)
def _parse_pad_padding(padding: Union[int, List[int]]) -> List[int]:
if isinstance(padding, int):
pad_left = pad_right = pad_top = pad_bottom = padding
elif isinstance(padding, (tuple, list)):
if len(padding) == 1:
pad_left = pad_right = pad_top = pad_bottom = padding[0]
elif len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
elif len(padding) == 4:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
else:
raise ValueError(
f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple"
)
else:
raise TypeError(f"`padding` should be an integer or tuple or list of integers, but got {padding}")
return [pad_left, pad_right, pad_top, pad_bottom]
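# Illustrative sketch (not part of the original module): the PIL-style `[left, top, right,
# bottom]` padding is reordered into the `[left, right, top, bottom]` layout expected by
# `torch.nn.functional.pad`. The helper name `_example_parse_pad_padding` is hypothetical.
def _example_parse_pad_padding() -> List[int]:
    # PIL order [left=1, top=2, right=3, bottom=4] becomes torch order [1, 3, 2, 4].
    return _parse_pad_padding([1, 2, 3, 4])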
@_register_kernel_internal(pad, torch.Tensor)
@_register_kernel_internal(pad, tv_tensors.Image)
def pad_image(
image: torch.Tensor,
padding: List[int],
fill: Optional[Union[int, float, List[float]]] = None,
padding_mode: str = "constant",
) -> torch.Tensor:
# Be aware that while `padding` has order `[left, top, right, bottom]`, `torch_padding` uses
# `[left, right, top, bottom]`. This stems from the fact that we align our API with PIL, but need to use `torch_pad`
# internally.
torch_padding = _parse_pad_padding(padding)
if padding_mode not in ("constant", "edge", "reflect", "symmetric"):
raise ValueError(
f"`padding_mode` should be either `'constant'`, `'edge'`, `'reflect'` or `'symmetric'`, "
f"but got `'{padding_mode}'`."
)
if fill is None:
fill = 0
if isinstance(fill, (int, float)):
return _pad_with_scalar_fill(image, torch_padding, fill=fill, padding_mode=padding_mode)
elif len(fill) == 1:
return _pad_with_scalar_fill(image, torch_padding, fill=fill[0], padding_mode=padding_mode)
else:
return _pad_with_vector_fill(image, torch_padding, fill=fill, padding_mode=padding_mode)
def _pad_with_scalar_fill(
image: torch.Tensor,
torch_padding: List[int],
fill: Union[int, float],
padding_mode: str,
) -> torch.Tensor:
shape = image.shape
num_channels, height, width = shape[-3:]
batch_size = 1
for s in shape[:-3]:
batch_size *= s
image = image.reshape(batch_size, num_channels, height, width)
if padding_mode == "edge":
        # Similar to the padding order, `torch_pad`'s padding modes don't have the same names as PIL's. Thus, we
        # map the PIL name for the padding mode, which we also use for our API, to the corresponding `torch_pad`
        # name.
padding_mode = "replicate"
if padding_mode == "constant":
image = torch_pad(image, torch_padding, mode=padding_mode, value=float(fill))
elif padding_mode in ("reflect", "replicate"):
# `torch_pad` only supports `"reflect"` or `"replicate"` padding for floating point inputs.
# TODO: See https://github.com/pytorch/pytorch/issues/40763
dtype = image.dtype
if not image.is_floating_point():
needs_cast = True
image = image.to(torch.float32)
else:
needs_cast = False
image = torch_pad(image, torch_padding, mode=padding_mode)
if needs_cast:
image = image.to(dtype)
else: # padding_mode == "symmetric"
image = _pad_symmetric(image, torch_padding)
new_height, new_width = image.shape[-2:]
return image.reshape(shape[:-3] + (num_channels, new_height, new_width))
# TODO: This should be removed once torch_pad supports non-scalar padding values
def _pad_with_vector_fill(
image: torch.Tensor,
torch_padding: List[int],
fill: List[float],
padding_mode: str,
) -> torch.Tensor:
if padding_mode != "constant":
raise ValueError(f"Padding mode '{padding_mode}' is not supported if fill is not scalar")
output = _pad_with_scalar_fill(image, torch_padding, fill=0, padding_mode="constant")
left, right, top, bottom = torch_padding
    # We create the tensor in the autodetected dtype first and convert it to the right one afterwards to avoid an
    # implicit float -> int conversion. That happens, for example, for the valid input of a uint8 image with a
    # floating point fill value.
fill = torch.tensor(fill, device=image.device).to(dtype=image.dtype).reshape(-1, 1, 1)
if top > 0:
output[..., :top, :] = fill
if left > 0:
output[..., :, :left] = fill
if bottom > 0:
output[..., -bottom:, :] = fill
if right > 0:
output[..., :, -right:] = fill
return output
_pad_image_pil = _register_kernel_internal(pad, PIL.Image.Image)(_FP.pad)
@_register_kernel_internal(pad, tv_tensors.Mask)
def pad_mask(
mask: torch.Tensor,
padding: List[int],
fill: Optional[Union[int, float, List[float]]] = None,
padding_mode: str = "constant",
) -> torch.Tensor:
if fill is None:
fill = 0
if isinstance(fill, (tuple, list)):
raise ValueError("Non-scalar fill value is not supported")
if mask.ndim < 3:
mask = mask.unsqueeze(0)
needs_squeeze = True
else:
needs_squeeze = False
output = pad_image(mask, padding=padding, fill=fill, padding_mode=padding_mode)
if needs_squeeze:
output = output.squeeze(0)
return output
def pad_bounding_boxes(
bounding_boxes: torch.Tensor,
format: tv_tensors.BoundingBoxFormat,
canvas_size: Tuple[int, int],
padding: List[int],
padding_mode: str = "constant",
) -> Tuple[torch.Tensor, Tuple[int, int]]:
if padding_mode not in ["constant"]:
# TODO: add support of other padding modes
raise ValueError(f"Padding mode '{padding_mode}' is not supported with bounding boxes")
left, right, top, bottom = _parse_pad_padding(padding)
if format == tv_tensors.BoundingBoxFormat.XYXY:
pad = [left, top, left, top]
else:
pad = [left, top, 0, 0]
bounding_boxes = bounding_boxes + torch.tensor(pad, dtype=bounding_boxes.dtype, device=bounding_boxes.device)
height, width = canvas_size
height += top + bottom
width += left + right
canvas_size = (height, width)
return clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size), canvas_size
@_register_kernel_internal(pad, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def _pad_bounding_boxes_dispatch(
inpt: tv_tensors.BoundingBoxes, padding: List[int], padding_mode: str = "constant", **kwargs
) -> tv_tensors.BoundingBoxes:
output, canvas_size = pad_bounding_boxes(
inpt.as_subclass(torch.Tensor),
format=inpt.format,
canvas_size=inpt.canvas_size,
padding=padding,
padding_mode=padding_mode,
)
return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size)
@_register_kernel_internal(pad, tv_tensors.Video)
def pad_video(
video: torch.Tensor,
padding: List[int],
fill: Optional[Union[int, float, List[float]]] = None,
padding_mode: str = "constant",
) -> torch.Tensor:
return pad_image(video, padding, fill=fill, padding_mode=padding_mode)
def crop(inpt: torch.Tensor, top: int, left: int, height: int, width: int) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomCrop` for details."""
if torch.jit.is_scripting():
return crop_image(inpt, top=top, left=left, height=height, width=width)
_log_api_usage_once(crop)
kernel = _get_kernel(crop, type(inpt))
return kernel(inpt, top=top, left=left, height=height, width=width)
@_register_kernel_internal(crop, torch.Tensor)
@_register_kernel_internal(crop, tv_tensors.Image)
def crop_image(image: torch.Tensor, top: int, left: int, height: int, width: int) -> torch.Tensor:
h, w = image.shape[-2:]
right = left + width
bottom = top + height
if left < 0 or top < 0 or right > w or bottom > h:
image = image[..., max(top, 0) : bottom, max(left, 0) : right]
torch_padding = [
max(min(right, 0) - left, 0),
max(right - max(w, left), 0),
max(min(bottom, 0) - top, 0),
max(bottom - max(h, top), 0),
]
return _pad_with_scalar_fill(image, torch_padding, fill=0, padding_mode="constant")
return image[..., top:bottom, left:right]
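# Illustrative sketch (not part of the original module): when the crop window extends past
# the image borders, `crop_image` implicitly zero-pads the missing area instead of failing.
# The helper name `_example_crop_with_implicit_padding` is hypothetical.
def _example_crop_with_implicit_padding() -> torch.Tensor:
    image = torch.ones(1, 3, 4, 4)
    out = crop_image(image, top=-1, left=-1, height=6, width=6)
    # out.shape == (1, 3, 6, 6); the one-pixel border that falls outside the input is zero,
    # e.g. out[0, 0, 0, 0] == 0 while out[0, 0, 1, 1] == 1.
    return out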
_crop_image_pil = _FP.crop
_register_kernel_internal(crop, PIL.Image.Image)(_crop_image_pil)
def crop_bounding_boxes(
bounding_boxes: torch.Tensor,
format: tv_tensors.BoundingBoxFormat,
top: int,
left: int,
height: int,
width: int,
) -> Tuple[torch.Tensor, Tuple[int, int]]:
# Crop or implicit pad if left and/or top have negative values:
if format == tv_tensors.BoundingBoxFormat.XYXY:
sub = [left, top, left, top]
else:
sub = [left, top, 0, 0]
bounding_boxes = bounding_boxes - torch.tensor(sub, dtype=bounding_boxes.dtype, device=bounding_boxes.device)
canvas_size = (height, width)
return clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size), canvas_size
@_register_kernel_internal(crop, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def _crop_bounding_boxes_dispatch(
inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int
) -> tv_tensors.BoundingBoxes:
output, canvas_size = crop_bounding_boxes(
inpt.as_subclass(torch.Tensor), format=inpt.format, top=top, left=left, height=height, width=width
)
return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size)
@_register_kernel_internal(crop, tv_tensors.Mask)
def crop_mask(mask: torch.Tensor, top: int, left: int, height: int, width: int) -> torch.Tensor:
if mask.ndim < 3:
mask = mask.unsqueeze(0)
needs_squeeze = True
else:
needs_squeeze = False
output = crop_image(mask, top, left, height, width)
if needs_squeeze:
output = output.squeeze(0)
return output
@_register_kernel_internal(crop, tv_tensors.Video)
def crop_video(video: torch.Tensor, top: int, left: int, height: int, width: int) -> torch.Tensor:
return crop_image(video, top, left, height, width)
def perspective(
inpt: torch.Tensor,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: _FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomPerspective` for details."""
if torch.jit.is_scripting():
return perspective_image(
inpt,
startpoints=startpoints,
endpoints=endpoints,
interpolation=interpolation,
fill=fill,
coefficients=coefficients,
)
_log_api_usage_once(perspective)
kernel = _get_kernel(perspective, type(inpt))
return kernel(
inpt,
startpoints=startpoints,
endpoints=endpoints,
interpolation=interpolation,
fill=fill,
coefficients=coefficients,
)
def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
# https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/
# src/libImaging/Geometry.c#L394
#
# x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
# y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1)
#
theta1 = torch.tensor(
[[[coeffs[0], coeffs[1], coeffs[2]], [coeffs[3], coeffs[4], coeffs[5]]]], dtype=dtype, device=device
)
theta2 = torch.tensor([[[coeffs[6], coeffs[7], 1.0], [coeffs[6], coeffs[7], 1.0]]], dtype=dtype, device=device)
d = 0.5
base_grid = torch.empty(1, oh, ow, 3, dtype=dtype, device=device)
x_grid = torch.linspace(d, ow + d - 1.0, steps=ow, device=device, dtype=dtype)
base_grid[..., 0].copy_(x_grid)
y_grid = torch.linspace(d, oh + d - 1.0, steps=oh, device=device, dtype=dtype).unsqueeze_(-1)
base_grid[..., 1].copy_(y_grid)
base_grid[..., 2].fill_(1)
rescaled_theta1 = theta1.transpose(1, 2).div_(torch.tensor([0.5 * ow, 0.5 * oh], dtype=dtype, device=device))
shape = (1, oh * ow, 3)
output_grid1 = base_grid.view(shape).bmm(rescaled_theta1)
output_grid2 = base_grid.view(shape).bmm(theta2.transpose(1, 2))
output_grid = output_grid1.div_(output_grid2).sub_(1.0)
return output_grid.view(1, oh, ow, 2)
def _perspective_coefficients(
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
coefficients: Optional[List[float]],
) -> List[float]:
if coefficients is not None:
if startpoints is not None and endpoints is not None:
raise ValueError("The startpoints/endpoints and the coefficients shouldn't be defined concurrently.")
elif len(coefficients) != 8:
raise ValueError("Argument coefficients should have 8 float values")
return coefficients
elif startpoints is not None and endpoints is not None:
return _get_perspective_coeffs(startpoints, endpoints)
else:
raise ValueError("Either the startpoints/endpoints or the coefficients must have non `None` values.")
@_register_kernel_internal(perspective, torch.Tensor)
@_register_kernel_internal(perspective, tv_tensors.Image)
def perspective_image(
image: torch.Tensor,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: _FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> torch.Tensor:
perspective_coeffs = _perspective_coefficients(startpoints, endpoints, coefficients)
interpolation = _check_interpolation(interpolation)
_assert_grid_transform_inputs(
image,
matrix=None,
interpolation=interpolation.value,
fill=fill,
supported_interpolation_modes=["nearest", "bilinear"],
coeffs=perspective_coeffs,
)
oh, ow = image.shape[-2:]
dtype = image.dtype if torch.is_floating_point(image) else torch.float32
grid = _perspective_grid(perspective_coeffs, ow=ow, oh=oh, dtype=dtype, device=image.device)
return _apply_grid_transform(image, grid, interpolation.value, fill=fill)
@_register_kernel_internal(perspective, PIL.Image.Image)
def _perspective_image_pil(
image: PIL.Image.Image,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: _FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> PIL.Image.Image:
perspective_coeffs = _perspective_coefficients(startpoints, endpoints, coefficients)
interpolation = _check_interpolation(interpolation)
return _FP.perspective(image, perspective_coeffs, interpolation=pil_modes_mapping[interpolation], fill=fill)
def perspective_bounding_boxes(
bounding_boxes: torch.Tensor,
format: tv_tensors.BoundingBoxFormat,
canvas_size: Tuple[int, int],
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
coefficients: Optional[List[float]] = None,
) -> torch.Tensor:
if bounding_boxes.numel() == 0:
return bounding_boxes
perspective_coeffs = _perspective_coefficients(startpoints, endpoints, coefficients)
original_shape = bounding_boxes.shape
# TODO: first cast to float if bbox is int64 before convert_bounding_box_format
bounding_boxes = (
convert_bounding_box_format(bounding_boxes, old_format=format, new_format=tv_tensors.BoundingBoxFormat.XYXY)
).reshape(-1, 4)
dtype = bounding_boxes.dtype if torch.is_floating_point(bounding_boxes) else torch.float32
device = bounding_boxes.device
# perspective_coeffs are computed as endpoint -> start point
# We have to invert perspective_coeffs for bboxes:
# (x, y) - end point and (x_out, y_out) - start point
# x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
# y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1)
# and we would like to get:
# x = (inv_coeffs[0] * x_out + inv_coeffs[1] * y_out + inv_coeffs[2])
# / (inv_coeffs[6] * x_out + inv_coeffs[7] * y_out + 1)
# y = (inv_coeffs[3] * x_out + inv_coeffs[4] * y_out + inv_coeffs[5])
# / (inv_coeffs[6] * x_out + inv_coeffs[7] * y_out + 1)
# and compute inv_coeffs in terms of coeffs
denom = perspective_coeffs[0] * perspective_coeffs[4] - perspective_coeffs[1] * perspective_coeffs[3]
if denom == 0:
raise RuntimeError(
f"Provided perspective_coeffs {perspective_coeffs} can not be inverted to transform bounding boxes. "
f"Denominator is zero, denom={denom}"
)
inv_coeffs = [
(perspective_coeffs[4] - perspective_coeffs[5] * perspective_coeffs[7]) / denom,
(-perspective_coeffs[1] + perspective_coeffs[2] * perspective_coeffs[7]) / denom,
(perspective_coeffs[1] * perspective_coeffs[5] - perspective_coeffs[2] * perspective_coeffs[4]) / denom,
(-perspective_coeffs[3] + perspective_coeffs[5] * perspective_coeffs[6]) / denom,
(perspective_coeffs[0] - perspective_coeffs[2] * perspective_coeffs[6]) / denom,
(-perspective_coeffs[0] * perspective_coeffs[5] + perspective_coeffs[2] * perspective_coeffs[3]) / denom,
(-perspective_coeffs[4] * perspective_coeffs[6] + perspective_coeffs[3] * perspective_coeffs[7]) / denom,
(-perspective_coeffs[0] * perspective_coeffs[7] + perspective_coeffs[1] * perspective_coeffs[6]) / denom,
]
theta1 = torch.tensor(
[[inv_coeffs[0], inv_coeffs[1], inv_coeffs[2]], [inv_coeffs[3], inv_coeffs[4], inv_coeffs[5]]],
dtype=dtype,
device=device,
)
theta2 = torch.tensor(
[[inv_coeffs[6], inv_coeffs[7], 1.0], [inv_coeffs[6], inv_coeffs[7], 1.0]], dtype=dtype, device=device
)
# 1) Let's transform bboxes into a tensor of 4 points (top-left, top-right, bottom-left, bottom-right corners).
# Tensor of points has shape (N * 4, 3), where N is the number of bboxes
# Single point structure is similar to
# [(xmin, ymin, 1), (xmax, ymin, 1), (xmax, ymax, 1), (xmin, ymax, 1)]
points = bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2)
points = torch.cat([points, torch.ones(points.shape[0], 1, device=points.device)], dim=-1)
# 2) Now let's transform the points using perspective matrices
# x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
# y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1)
numer_points = torch.matmul(points, theta1.T)
denom_points = torch.matmul(points, theta2.T)
transformed_points = numer_points.div_(denom_points)
# 3) Reshape transformed points to [N boxes, 4 points, x/y coords]
# and compute bounding box from 4 transformed points:
transformed_points = transformed_points.reshape(-1, 4, 2)
out_bbox_mins, out_bbox_maxs = torch.aminmax(transformed_points, dim=1)
out_bboxes = clamp_bounding_boxes(
torch.cat([out_bbox_mins, out_bbox_maxs], dim=1).to(bounding_boxes.dtype),
format=tv_tensors.BoundingBoxFormat.XYXY,
canvas_size=canvas_size,
)
# out_bboxes should be of shape [N boxes, 4]
return convert_bounding_box_format(
out_bboxes, old_format=tv_tensors.BoundingBoxFormat.XYXY, new_format=format, inplace=True
).reshape(original_shape)
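# Illustrative sketch (not part of the original module): with pure-translation coefficients
# [1, 0, 2, 0, 1, 3, 0, 0] (i.e. x_out = x + 2, y_out = y + 3), the inverted coefficients
# computed above shift the boxes by (-2, -3). The helper name
# `_example_perspective_bounding_boxes_translation` is hypothetical.
def _example_perspective_bounding_boxes_translation() -> torch.Tensor:
    boxes = torch.tensor([[5.0, 6.0, 8.0, 9.0]])  # XYXY
    out = perspective_bounding_boxes(
        boxes,
        format=tv_tensors.BoundingBoxFormat.XYXY,
        canvas_size=(20, 20),
        startpoints=None,
        endpoints=None,
        coefficients=[1.0, 0.0, 2.0, 0.0, 1.0, 3.0, 0.0, 0.0],
    )
    # The box is shifted by (-2, -3): [[3., 3., 6., 6.]]
    return out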
@_register_kernel_internal(perspective, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def _perspective_bounding_boxes_dispatch(
inpt: tv_tensors.BoundingBoxes,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
coefficients: Optional[List[float]] = None,
**kwargs,
) -> tv_tensors.BoundingBoxes:
output = perspective_bounding_boxes(
inpt.as_subclass(torch.Tensor),
format=inpt.format,
canvas_size=inpt.canvas_size,
startpoints=startpoints,
endpoints=endpoints,
coefficients=coefficients,
)
return tv_tensors.wrap(output, like=inpt)
def perspective_mask(
mask: torch.Tensor,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
fill: _FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> torch.Tensor:
if mask.ndim < 3:
mask = mask.unsqueeze(0)
needs_squeeze = True
else:
needs_squeeze = False
output = perspective_image(
mask, startpoints, endpoints, interpolation=InterpolationMode.NEAREST, fill=fill, coefficients=coefficients
)
if needs_squeeze:
output = output.squeeze(0)
return output
@_register_kernel_internal(perspective, tv_tensors.Mask, tv_tensor_wrapper=False)
def _perspective_mask_dispatch(
inpt: tv_tensors.Mask,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
fill: _FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
**kwargs,
) -> tv_tensors.Mask:
output = perspective_mask(
inpt.as_subclass(torch.Tensor),
startpoints=startpoints,
endpoints=endpoints,
fill=fill,
coefficients=coefficients,
)
return tv_tensors.wrap(output, like=inpt)
@_register_kernel_internal(perspective, tv_tensors.Video)
def perspective_video(
video: torch.Tensor,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: _FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> torch.Tensor:
return perspective_image(
video, startpoints, endpoints, interpolation=interpolation, fill=fill, coefficients=coefficients
)
def elastic(
inpt: torch.Tensor,
displacement: torch.Tensor,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: _FillTypeJIT = None,
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.ElasticTransform` for details."""
if torch.jit.is_scripting():
return elastic_image(inpt, displacement=displacement, interpolation=interpolation, fill=fill)
_log_api_usage_once(elastic)
kernel = _get_kernel(elastic, type(inpt))
return kernel(inpt, displacement=displacement, interpolation=interpolation, fill=fill)
elastic_transform = elastic
@_register_kernel_internal(elastic, torch.Tensor)
@_register_kernel_internal(elastic, tv_tensors.Image)
def elastic_image(
image: torch.Tensor,
displacement: torch.Tensor,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: _FillTypeJIT = None,
) -> torch.Tensor:
if not isinstance(displacement, torch.Tensor):
raise TypeError("Argument displacement should be a Tensor")
interpolation = _check_interpolation(interpolation)
height, width = image.shape[-2:]
device = image.device
dtype = image.dtype if torch.is_floating_point(image) else torch.float32
# Patch: elastic transform should support (cpu,f16) input
is_cpu_half = device.type == "cpu" and dtype == torch.float16
if is_cpu_half:
image = image.to(torch.float32)
dtype = torch.float32
    # We are aware that if the input image dtype is uint8 and the displacement is float64, then the displacement
    # will be cast to float32 and all computations will be done in float32.
    # We can fix this later if needed.
expected_shape = (1, height, width, 2)
if expected_shape != displacement.shape:
raise ValueError(f"Argument displacement shape should be {expected_shape}, but given {displacement.shape}")
grid = _create_identity_grid((height, width), device=device, dtype=dtype).add_(
displacement.to(dtype=dtype, device=device)
)
output = _apply_grid_transform(image, grid, interpolation.value, fill=fill)
if is_cpu_half:
output = output.to(torch.float16)
return output
@_register_kernel_internal(elastic, PIL.Image.Image)
def _elastic_image_pil(
image: PIL.Image.Image,
displacement: torch.Tensor,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: _FillTypeJIT = None,
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = elastic_image(t_img, displacement, interpolation=interpolation, fill=fill)
return to_pil_image(output, mode=image.mode)
def _create_identity_grid(size: Tuple[int, int], device: torch.device, dtype: torch.dtype) -> torch.Tensor:
sy, sx = size
base_grid = torch.empty(1, sy, sx, 2, device=device, dtype=dtype)
x_grid = torch.linspace((-sx + 1) / sx, (sx - 1) / sx, sx, device=device, dtype=dtype)
base_grid[..., 0].copy_(x_grid)
y_grid = torch.linspace((-sy + 1) / sy, (sy - 1) / sy, sy, device=device, dtype=dtype).unsqueeze_(-1)
base_grid[..., 1].copy_(y_grid)
return base_grid
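# Illustrative sketch (not part of the original module): the identity grid stores normalized
# pixel-center coordinates in [-1, 1], matching `grid_sample` with align_corners=False. The
# helper name `_example_identity_grid` is hypothetical.
def _example_identity_grid() -> torch.Tensor:
    grid = _create_identity_grid((2, 2), device=torch.device("cpu"), dtype=torch.float32)
    # grid.shape == (1, 2, 2, 2) and the x/y coordinates are -0.5 and 0.5, i.e. the centers
    # of the two pixels along each axis in normalized coordinates.
    return grid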
def elastic_bounding_boxes(
bounding_boxes: torch.Tensor,
format: tv_tensors.BoundingBoxFormat,
canvas_size: Tuple[int, int],
displacement: torch.Tensor,
) -> torch.Tensor:
expected_shape = (1, canvas_size[0], canvas_size[1], 2)
if not isinstance(displacement, torch.Tensor):
raise TypeError("Argument displacement should be a Tensor")
elif displacement.shape != expected_shape:
raise ValueError(f"Argument displacement shape should be {expected_shape}, but given {displacement.shape}")
if bounding_boxes.numel() == 0:
return bounding_boxes
# TODO: add in docstring about approximation we are doing for grid inversion
device = bounding_boxes.device
dtype = bounding_boxes.dtype if torch.is_floating_point(bounding_boxes) else torch.float32
if displacement.dtype != dtype or displacement.device != device:
displacement = displacement.to(dtype=dtype, device=device)
original_shape = bounding_boxes.shape
# TODO: first cast to float if bbox is int64 before convert_bounding_box_format
bounding_boxes = (
convert_bounding_box_format(bounding_boxes, old_format=format, new_format=tv_tensors.BoundingBoxFormat.XYXY)
).reshape(-1, 4)
id_grid = _create_identity_grid(canvas_size, device=device, dtype=dtype)
# We construct an approximation of inverse grid as inv_grid = id_grid - displacement
# This is not an exact inverse of the grid
inv_grid = id_grid.sub_(displacement)
# Get points from bboxes
points = bounding_boxes[:, [[0, 1], [2, 1], [2, 3], [0, 3]]].reshape(-1, 2)
if points.is_floating_point():
points = points.ceil_()
index_xy = points.to(dtype=torch.long)
index_x, index_y = index_xy[:, 0], index_xy[:, 1]
# Transform points:
t_size = torch.tensor(canvas_size[::-1], device=displacement.device, dtype=displacement.dtype)
transformed_points = inv_grid[0, index_y, index_x, :].add_(1).mul_(0.5 * t_size).sub_(0.5)
transformed_points = transformed_points.reshape(-1, 4, 2)
out_bbox_mins, out_bbox_maxs = torch.aminmax(transformed_points, dim=1)
out_bboxes = clamp_bounding_boxes(
torch.cat([out_bbox_mins, out_bbox_maxs], dim=1).to(bounding_boxes.dtype),
format=tv_tensors.BoundingBoxFormat.XYXY,
canvas_size=canvas_size,
)
return convert_bounding_box_format(
out_bboxes, old_format=tv_tensors.BoundingBoxFormat.XYXY, new_format=format, inplace=True
).reshape(original_shape)
@_register_kernel_internal(elastic, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def _elastic_bounding_boxes_dispatch(
inpt: tv_tensors.BoundingBoxes, displacement: torch.Tensor, **kwargs
) -> tv_tensors.BoundingBoxes:
output = elastic_bounding_boxes(
inpt.as_subclass(torch.Tensor), format=inpt.format, canvas_size=inpt.canvas_size, displacement=displacement
)
return tv_tensors.wrap(output, like=inpt)
def elastic_mask(
mask: torch.Tensor,
displacement: torch.Tensor,
fill: _FillTypeJIT = None,
) -> torch.Tensor:
if mask.ndim < 3:
mask = mask.unsqueeze(0)
needs_squeeze = True
else:
needs_squeeze = False
output = elastic_image(mask, displacement=displacement, interpolation=InterpolationMode.NEAREST, fill=fill)
if needs_squeeze:
output = output.squeeze(0)
return output
@_register_kernel_internal(elastic, tv_tensors.Mask, tv_tensor_wrapper=False)
def _elastic_mask_dispatch(
inpt: tv_tensors.Mask, displacement: torch.Tensor, fill: _FillTypeJIT = None, **kwargs
) -> tv_tensors.Mask:
output = elastic_mask(inpt.as_subclass(torch.Tensor), displacement=displacement, fill=fill)
return tv_tensors.wrap(output, like=inpt)
@_register_kernel_internal(elastic, tv_tensors.Video)
def elastic_video(
video: torch.Tensor,
displacement: torch.Tensor,
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
fill: _FillTypeJIT = None,
) -> torch.Tensor:
return elastic_image(video, displacement, interpolation=interpolation, fill=fill)
def center_crop(inpt: torch.Tensor, output_size: List[int]) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomCrop` for details."""
if torch.jit.is_scripting():
return center_crop_image(inpt, output_size=output_size)
_log_api_usage_once(center_crop)
kernel = _get_kernel(center_crop, type(inpt))
return kernel(inpt, output_size=output_size)
def _center_crop_parse_output_size(output_size: List[int]) -> List[int]:
if isinstance(output_size, numbers.Number):
s = int(output_size)
return [s, s]
elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
return [output_size[0], output_size[0]]
else:
return list(output_size)
def _center_crop_compute_padding(crop_height: int, crop_width: int, image_height: int, image_width: int) -> List[int]:
return [
(crop_width - image_width) // 2 if crop_width > image_width else 0,
(crop_height - image_height) // 2 if crop_height > image_height else 0,
(crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
(crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
]
def _center_crop_compute_crop_anchor(
crop_height: int, crop_width: int, image_height: int, image_width: int
) -> Tuple[int, int]:
crop_top = int(round((image_height - crop_height) / 2.0))
crop_left = int(round((image_width - crop_width) / 2.0))
return crop_top, crop_left
@_register_kernel_internal(center_crop, torch.Tensor)
@_register_kernel_internal(center_crop, tv_tensors.Image)
def center_crop_image(image: torch.Tensor, output_size: List[int]) -> torch.Tensor:
crop_height, crop_width = _center_crop_parse_output_size(output_size)
shape = image.shape
if image.numel() == 0:
return image.reshape(shape[:-2] + (crop_height, crop_width))
image_height, image_width = shape[-2:]
if crop_height > image_height or crop_width > image_width:
padding_ltrb = _center_crop_compute_padding(crop_height, crop_width, image_height, image_width)
image = torch_pad(image, _parse_pad_padding(padding_ltrb), value=0.0)
image_height, image_width = image.shape[-2:]
if crop_width == image_width and crop_height == image_height:
return image
crop_top, crop_left = _center_crop_compute_crop_anchor(crop_height, crop_width, image_height, image_width)
return image[..., crop_top : (crop_top + crop_height), crop_left : (crop_left + crop_width)]
@_register_kernel_internal(center_crop, PIL.Image.Image)
def _center_crop_image_pil(image: PIL.Image.Image, output_size: List[int]) -> PIL.Image.Image:
crop_height, crop_width = _center_crop_parse_output_size(output_size)
image_height, image_width = _get_size_image_pil(image)
if crop_height > image_height or crop_width > image_width:
padding_ltrb = _center_crop_compute_padding(crop_height, crop_width, image_height, image_width)
image = _pad_image_pil(image, padding_ltrb, fill=0)
image_height, image_width = _get_size_image_pil(image)
if crop_width == image_width and crop_height == image_height:
return image
crop_top, crop_left = _center_crop_compute_crop_anchor(crop_height, crop_width, image_height, image_width)
return _crop_image_pil(image, crop_top, crop_left, crop_height, crop_width)
def center_crop_bounding_boxes(
bounding_boxes: torch.Tensor,
format: tv_tensors.BoundingBoxFormat,
canvas_size: Tuple[int, int],
output_size: List[int],
) -> Tuple[torch.Tensor, Tuple[int, int]]:
crop_height, crop_width = _center_crop_parse_output_size(output_size)
crop_top, crop_left = _center_crop_compute_crop_anchor(crop_height, crop_width, *canvas_size)
return crop_bounding_boxes(
bounding_boxes, format, top=crop_top, left=crop_left, height=crop_height, width=crop_width
)
@_register_kernel_internal(center_crop, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def _center_crop_bounding_boxes_dispatch(
inpt: tv_tensors.BoundingBoxes, output_size: List[int]
) -> tv_tensors.BoundingBoxes:
output, canvas_size = center_crop_bounding_boxes(
inpt.as_subclass(torch.Tensor), format=inpt.format, canvas_size=inpt.canvas_size, output_size=output_size
)
return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size)
@_register_kernel_internal(center_crop, tv_tensors.Mask)
def center_crop_mask(mask: torch.Tensor, output_size: List[int]) -> torch.Tensor:
if mask.ndim < 3:
mask = mask.unsqueeze(0)
needs_squeeze = True
else:
needs_squeeze = False
output = center_crop_image(image=mask, output_size=output_size)
if needs_squeeze:
output = output.squeeze(0)
return output
@_register_kernel_internal(center_crop, tv_tensors.Video)
def center_crop_video(video: torch.Tensor, output_size: List[int]) -> torch.Tensor:
return center_crop_image(video, output_size)
def resized_crop(
inpt: torch.Tensor,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
antialias: Optional[bool] = True,
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.RandomResizedCrop` for details."""
if torch.jit.is_scripting():
return resized_crop_image(
inpt,
top=top,
left=left,
height=height,
width=width,
size=size,
interpolation=interpolation,
antialias=antialias,
)
_log_api_usage_once(resized_crop)
kernel = _get_kernel(resized_crop, type(inpt))
return kernel(
inpt,
top=top,
left=left,
height=height,
width=width,
size=size,
interpolation=interpolation,
antialias=antialias,
)
@_register_kernel_internal(resized_crop, torch.Tensor)
@_register_kernel_internal(resized_crop, tv_tensors.Image)
def resized_crop_image(
image: torch.Tensor,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
antialias: Optional[bool] = True,
) -> torch.Tensor:
image = crop_image(image, top, left, height, width)
return resize_image(image, size, interpolation=interpolation, antialias=antialias)
def _resized_crop_image_pil(
image: PIL.Image.Image,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
) -> PIL.Image.Image:
image = _crop_image_pil(image, top, left, height, width)
return _resize_image_pil(image, size, interpolation=interpolation)
@_register_kernel_internal(resized_crop, PIL.Image.Image)
def _resized_crop_image_pil_dispatch(
image: PIL.Image.Image,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
antialias: Optional[bool] = True,
) -> PIL.Image.Image:
if antialias is False:
warnings.warn("Anti-alias option is always applied for PIL Image input. Argument antialias is ignored.")
return _resized_crop_image_pil(
image,
top=top,
left=left,
height=height,
width=width,
size=size,
interpolation=interpolation,
)
def resized_crop_bounding_boxes(
bounding_boxes: torch.Tensor,
format: tv_tensors.BoundingBoxFormat,
top: int,
left: int,
height: int,
width: int,
size: List[int],
) -> Tuple[torch.Tensor, Tuple[int, int]]:
bounding_boxes, canvas_size = crop_bounding_boxes(bounding_boxes, format, top, left, height, width)
return resize_bounding_boxes(bounding_boxes, canvas_size=canvas_size, size=size)
@_register_kernel_internal(resized_crop, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def _resized_crop_bounding_boxes_dispatch(
inpt: tv_tensors.BoundingBoxes, top: int, left: int, height: int, width: int, size: List[int], **kwargs
) -> tv_tensors.BoundingBoxes:
output, canvas_size = resized_crop_bounding_boxes(
inpt.as_subclass(torch.Tensor), format=inpt.format, top=top, left=left, height=height, width=width, size=size
)
return tv_tensors.wrap(output, like=inpt, canvas_size=canvas_size)
def resized_crop_mask(
mask: torch.Tensor,
top: int,
left: int,
height: int,
width: int,
size: List[int],
) -> torch.Tensor:
mask = crop_mask(mask, top, left, height, width)
return resize_mask(mask, size)
@_register_kernel_internal(resized_crop, tv_tensors.Mask, tv_tensor_wrapper=False)
def _resized_crop_mask_dispatch(
inpt: tv_tensors.Mask, top: int, left: int, height: int, width: int, size: List[int], **kwargs
) -> tv_tensors.Mask:
output = resized_crop_mask(
inpt.as_subclass(torch.Tensor), top=top, left=left, height=height, width=width, size=size
)
return tv_tensors.wrap(output, like=inpt)
@_register_kernel_internal(resized_crop, tv_tensors.Video)
def resized_crop_video(
video: torch.Tensor,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
antialias: Optional[bool] = True,
) -> torch.Tensor:
return resized_crop_image(
video, top, left, height, width, antialias=antialias, size=size, interpolation=interpolation
)
def five_crop(
inpt: torch.Tensor, size: List[int]
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""See :class:`~torchvision.transforms.v2.FiveCrop` for details."""
if torch.jit.is_scripting():
return five_crop_image(inpt, size=size)
_log_api_usage_once(five_crop)
kernel = _get_kernel(five_crop, type(inpt))
return kernel(inpt, size=size)
def _parse_five_crop_size(size: List[int]) -> List[int]:
if isinstance(size, numbers.Number):
s = int(size)
size = [s, s]
elif isinstance(size, (tuple, list)) and len(size) == 1:
s = size[0]
size = [s, s]
if len(size) != 2:
raise ValueError("Please provide only two dimensions (h, w) for size.")
return size
@_register_five_ten_crop_kernel_internal(five_crop, torch.Tensor)
@_register_five_ten_crop_kernel_internal(five_crop, tv_tensors.Image)
def five_crop_image(
image: torch.Tensor, size: List[int]
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
crop_height, crop_width = _parse_five_crop_size(size)
image_height, image_width = image.shape[-2:]
if crop_width > image_width or crop_height > image_height:
raise ValueError(f"Requested crop size {size} is bigger than input size {(image_height, image_width)}")
tl = crop_image(image, 0, 0, crop_height, crop_width)
tr = crop_image(image, 0, image_width - crop_width, crop_height, crop_width)
bl = crop_image(image, image_height - crop_height, 0, crop_height, crop_width)
br = crop_image(image, image_height - crop_height, image_width - crop_width, crop_height, crop_width)
center = center_crop_image(image, [crop_height, crop_width])
return tl, tr, bl, br, center
@_register_five_ten_crop_kernel_internal(five_crop, PIL.Image.Image)
def _five_crop_image_pil(
image: PIL.Image.Image, size: List[int]
) -> Tuple[PIL.Image.Image, PIL.Image.Image, PIL.Image.Image, PIL.Image.Image, PIL.Image.Image]:
crop_height, crop_width = _parse_five_crop_size(size)
image_height, image_width = _get_size_image_pil(image)
if crop_width > image_width or crop_height > image_height:
raise ValueError(f"Requested crop size {size} is bigger than input size {(image_height, image_width)}")
tl = _crop_image_pil(image, 0, 0, crop_height, crop_width)
tr = _crop_image_pil(image, 0, image_width - crop_width, crop_height, crop_width)
bl = _crop_image_pil(image, image_height - crop_height, 0, crop_height, crop_width)
br = _crop_image_pil(image, image_height - crop_height, image_width - crop_width, crop_height, crop_width)
center = _center_crop_image_pil(image, [crop_height, crop_width])
return tl, tr, bl, br, center
@_register_five_ten_crop_kernel_internal(five_crop, tv_tensors.Video)
def five_crop_video(
video: torch.Tensor, size: List[int]
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
return five_crop_image(video, size)
def ten_crop(
inpt: torch.Tensor, size: List[int], vertical_flip: bool = False
) -> Tuple[
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
]:
"""See :class:`~torchvision.transforms.v2.TenCrop` for details."""
if torch.jit.is_scripting():
return ten_crop_image(inpt, size=size, vertical_flip=vertical_flip)
_log_api_usage_once(ten_crop)
kernel = _get_kernel(ten_crop, type(inpt))
return kernel(inpt, size=size, vertical_flip=vertical_flip)
@_register_five_ten_crop_kernel_internal(ten_crop, torch.Tensor)
@_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Image)
def ten_crop_image(
image: torch.Tensor, size: List[int], vertical_flip: bool = False
) -> Tuple[
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
]:
non_flipped = five_crop_image(image, size)
if vertical_flip:
image = vertical_flip_image(image)
else:
image = horizontal_flip_image(image)
flipped = five_crop_image(image, size)
return non_flipped + flipped
@_register_five_ten_crop_kernel_internal(ten_crop, PIL.Image.Image)
def _ten_crop_image_pil(
image: PIL.Image.Image, size: List[int], vertical_flip: bool = False
) -> Tuple[
PIL.Image.Image,
PIL.Image.Image,
PIL.Image.Image,
PIL.Image.Image,
PIL.Image.Image,
PIL.Image.Image,
PIL.Image.Image,
PIL.Image.Image,
PIL.Image.Image,
PIL.Image.Image,
]:
non_flipped = _five_crop_image_pil(image, size)
if vertical_flip:
image = _vertical_flip_image_pil(image)
else:
image = _horizontal_flip_image_pil(image)
flipped = _five_crop_image_pil(image, size)
return non_flipped + flipped
@_register_five_ten_crop_kernel_internal(ten_crop, tv_tensors.Video)
def ten_crop_video(
video: torch.Tensor, size: List[int], vertical_flip: bool = False
) -> Tuple[
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
]:
return ten_crop_image(video, size, vertical_flip=vertical_flip)
```
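A minimal usage sketch of the crop kernels above, called through the public `torchvision.transforms.v2.functional` namespace; the input shapes below are purely illustrative:

```py
import torch
from torchvision.transforms.v2 import functional as F

img = torch.rand(3, 32, 32)  # CHW float image

center = F.center_crop(img, output_size=[24, 24])        # torch.Size([3, 24, 24])
tl, tr, bl, br, c = F.five_crop(img, size=[24, 24])      # four corner crops + center crop
patch = F.resized_crop(img, top=4, left=4, height=16, width=16, size=[32, 32])
print(center.shape, c.shape, patch.shape)
```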
|
=====================================================================================================================================
SOURCE CODE FILE: _meta.py
LINES: 1
SIZE: 10.57 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\functional\_meta.py
ENCODING: utf-8
```py
from typing import List, Optional, Tuple
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms import _functional_pil as _FP
from torchvision.tv_tensors import BoundingBoxFormat
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal, is_pure_tensor
def get_dimensions(inpt: torch.Tensor) -> List[int]:
if torch.jit.is_scripting():
return get_dimensions_image(inpt)
_log_api_usage_once(get_dimensions)
kernel = _get_kernel(get_dimensions, type(inpt))
return kernel(inpt)
@_register_kernel_internal(get_dimensions, torch.Tensor)
@_register_kernel_internal(get_dimensions, tv_tensors.Image, tv_tensor_wrapper=False)
def get_dimensions_image(image: torch.Tensor) -> List[int]:
chw = list(image.shape[-3:])
ndims = len(chw)
if ndims == 3:
return chw
elif ndims == 2:
chw.insert(0, 1)
return chw
else:
raise TypeError(f"Input tensor should have at least two dimensions, but got {ndims}")
_get_dimensions_image_pil = _register_kernel_internal(get_dimensions, PIL.Image.Image)(_FP.get_dimensions)
@_register_kernel_internal(get_dimensions, tv_tensors.Video, tv_tensor_wrapper=False)
def get_dimensions_video(video: torch.Tensor) -> List[int]:
return get_dimensions_image(video)
def get_num_channels(inpt: torch.Tensor) -> int:
if torch.jit.is_scripting():
return get_num_channels_image(inpt)
_log_api_usage_once(get_num_channels)
kernel = _get_kernel(get_num_channels, type(inpt))
return kernel(inpt)
@_register_kernel_internal(get_num_channels, torch.Tensor)
@_register_kernel_internal(get_num_channels, tv_tensors.Image, tv_tensor_wrapper=False)
def get_num_channels_image(image: torch.Tensor) -> int:
chw = image.shape[-3:]
ndims = len(chw)
if ndims == 3:
return chw[0]
elif ndims == 2:
return 1
else:
raise TypeError(f"Input tensor should have at least two dimensions, but got {ndims}")
_get_num_channels_image_pil = _register_kernel_internal(get_num_channels, PIL.Image.Image)(_FP.get_image_num_channels)
@_register_kernel_internal(get_num_channels, tv_tensors.Video, tv_tensor_wrapper=False)
def get_num_channels_video(video: torch.Tensor) -> int:
return get_num_channels_image(video)
# We changed the names to ensure it can be used not only for images but also videos. Thus, we just alias it without
# deprecating the old names.
get_image_num_channels = get_num_channels
def get_size(inpt: torch.Tensor) -> List[int]:
if torch.jit.is_scripting():
return get_size_image(inpt)
_log_api_usage_once(get_size)
kernel = _get_kernel(get_size, type(inpt))
return kernel(inpt)
@_register_kernel_internal(get_size, torch.Tensor)
@_register_kernel_internal(get_size, tv_tensors.Image, tv_tensor_wrapper=False)
def get_size_image(image: torch.Tensor) -> List[int]:
hw = list(image.shape[-2:])
ndims = len(hw)
if ndims == 2:
return hw
else:
raise TypeError(f"Input tensor should have at least two dimensions, but got {ndims}")
@_register_kernel_internal(get_size, PIL.Image.Image)
def _get_size_image_pil(image: PIL.Image.Image) -> List[int]:
width, height = _FP.get_image_size(image)
return [height, width]
@_register_kernel_internal(get_size, tv_tensors.Video, tv_tensor_wrapper=False)
def get_size_video(video: torch.Tensor) -> List[int]:
return get_size_image(video)
@_register_kernel_internal(get_size, tv_tensors.Mask, tv_tensor_wrapper=False)
def get_size_mask(mask: torch.Tensor) -> List[int]:
return get_size_image(mask)
@_register_kernel_internal(get_size, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
def get_size_bounding_boxes(bounding_box: tv_tensors.BoundingBoxes) -> List[int]:
return list(bounding_box.canvas_size)
def get_num_frames(inpt: torch.Tensor) -> int:
if torch.jit.is_scripting():
return get_num_frames_video(inpt)
_log_api_usage_once(get_num_frames)
kernel = _get_kernel(get_num_frames, type(inpt))
return kernel(inpt)
@_register_kernel_internal(get_num_frames, torch.Tensor)
@_register_kernel_internal(get_num_frames, tv_tensors.Video, tv_tensor_wrapper=False)
def get_num_frames_video(video: torch.Tensor) -> int:
return video.shape[-4]
def _xywh_to_xyxy(xywh: torch.Tensor, inplace: bool) -> torch.Tensor:
xyxy = xywh if inplace else xywh.clone()
xyxy[..., 2:] += xyxy[..., :2]
return xyxy
def _xyxy_to_xywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor:
xywh = xyxy if inplace else xyxy.clone()
xywh[..., 2:] -= xywh[..., :2]
return xywh
def _cxcywh_to_xyxy(cxcywh: torch.Tensor, inplace: bool) -> torch.Tensor:
if not inplace:
cxcywh = cxcywh.clone()
# Trick to do fast division by 2 and ceil, without casting. It produces the same result as
# `torchvision.ops._box_convert._box_cxcywh_to_xyxy`.
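    # Illustration: for an integer width of 5, div(-2, rounding_mode="floor") yields -3 and
    # abs(-3) == 3 == ceil(5 / 2); floating point inputs use exact division (rounding_mode=None).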
half_wh = cxcywh[..., 2:].div(-2, rounding_mode=None if cxcywh.is_floating_point() else "floor").abs_()
# (cx - width / 2) = x1, same for y1
cxcywh[..., :2].sub_(half_wh)
# (x1 + width) = x2, same for y2
cxcywh[..., 2:].add_(cxcywh[..., :2])
return cxcywh
def _xyxy_to_cxcywh(xyxy: torch.Tensor, inplace: bool) -> torch.Tensor:
if not inplace:
xyxy = xyxy.clone()
# (x2 - x1) = width, same for height
xyxy[..., 2:].sub_(xyxy[..., :2])
# (x1 * 2 + width) / 2 = x1 + width / 2 = x1 + (x2-x1)/2 = (x1 + x2)/2 = cx, same for cy
xyxy[..., :2].mul_(2).add_(xyxy[..., 2:]).div_(2, rounding_mode=None if xyxy.is_floating_point() else "floor")
return xyxy
def _convert_bounding_box_format(
bounding_boxes: torch.Tensor, old_format: BoundingBoxFormat, new_format: BoundingBoxFormat, inplace: bool = False
) -> torch.Tensor:
if new_format == old_format:
return bounding_boxes
# TODO: Add _xywh_to_cxcywh and _cxcywh_to_xywh to improve performance
if old_format == BoundingBoxFormat.XYWH:
bounding_boxes = _xywh_to_xyxy(bounding_boxes, inplace)
elif old_format == BoundingBoxFormat.CXCYWH:
bounding_boxes = _cxcywh_to_xyxy(bounding_boxes, inplace)
if new_format == BoundingBoxFormat.XYWH:
bounding_boxes = _xyxy_to_xywh(bounding_boxes, inplace)
elif new_format == BoundingBoxFormat.CXCYWH:
bounding_boxes = _xyxy_to_cxcywh(bounding_boxes, inplace)
return bounding_boxes
def convert_bounding_box_format(
inpt: torch.Tensor,
old_format: Optional[BoundingBoxFormat] = None,
new_format: Optional[BoundingBoxFormat] = None,
inplace: bool = False,
) -> torch.Tensor:
"""See :func:`~torchvision.transforms.v2.ConvertBoundingBoxFormat` for details."""
# This being a kernel / functional hybrid, we need an option to pass `old_format` explicitly for pure tensor
# inputs as well as extract it from `tv_tensors.BoundingBoxes` inputs. However, putting a default value on
# `old_format` means we also need to put one on `new_format` to have syntactically correct Python. Here we mimic the
# default error that would be thrown if `new_format` had no default value.
if new_format is None:
raise TypeError("convert_bounding_box_format() missing 1 required argument: 'new_format'")
if not torch.jit.is_scripting():
_log_api_usage_once(convert_bounding_box_format)
if isinstance(old_format, str):
old_format = BoundingBoxFormat[old_format.upper()]
if isinstance(new_format, str):
new_format = BoundingBoxFormat[new_format.upper()]
if torch.jit.is_scripting() or is_pure_tensor(inpt):
if old_format is None:
raise ValueError("For pure tensor inputs, `old_format` has to be passed.")
return _convert_bounding_box_format(inpt, old_format=old_format, new_format=new_format, inplace=inplace)
elif isinstance(inpt, tv_tensors.BoundingBoxes):
if old_format is not None:
raise ValueError("For bounding box tv_tensor inputs, `old_format` must not be passed.")
output = _convert_bounding_box_format(
inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=new_format, inplace=inplace
)
return tv_tensors.wrap(output, like=inpt, format=new_format)
else:
raise TypeError(
f"Input can either be a plain tensor or a bounding box tv_tensor, but got {type(inpt)} instead."
)
def _clamp_bounding_boxes(
bounding_boxes: torch.Tensor, format: BoundingBoxFormat, canvas_size: Tuple[int, int]
) -> torch.Tensor:
# TODO: Investigate if it makes sense from a performance perspective to have an implementation for every
# BoundingBoxFormat instead of converting back and forth
in_dtype = bounding_boxes.dtype
bounding_boxes = bounding_boxes.clone() if bounding_boxes.is_floating_point() else bounding_boxes.float()
xyxy_boxes = convert_bounding_box_format(
bounding_boxes, old_format=format, new_format=tv_tensors.BoundingBoxFormat.XYXY, inplace=True
)
xyxy_boxes[..., 0::2].clamp_(min=0, max=canvas_size[1])
xyxy_boxes[..., 1::2].clamp_(min=0, max=canvas_size[0])
out_boxes = convert_bounding_box_format(
xyxy_boxes, old_format=BoundingBoxFormat.XYXY, new_format=format, inplace=True
)
return out_boxes.to(in_dtype)
def clamp_bounding_boxes(
inpt: torch.Tensor,
format: Optional[BoundingBoxFormat] = None,
canvas_size: Optional[Tuple[int, int]] = None,
) -> torch.Tensor:
"""See :func:`~torchvision.transforms.v2.ClampBoundingBoxes` for details."""
if not torch.jit.is_scripting():
_log_api_usage_once(clamp_bounding_boxes)
if torch.jit.is_scripting() or is_pure_tensor(inpt):
if format is None or canvas_size is None:
raise ValueError("For pure tensor inputs, `format` and `canvas_size` have to be passed.")
return _clamp_bounding_boxes(inpt, format=format, canvas_size=canvas_size)
elif isinstance(inpt, tv_tensors.BoundingBoxes):
if format is not None or canvas_size is not None:
raise ValueError("For bounding box tv_tensor inputs, `format` and `canvas_size` must not be passed.")
output = _clamp_bounding_boxes(inpt.as_subclass(torch.Tensor), format=inpt.format, canvas_size=inpt.canvas_size)
return tv_tensors.wrap(output, like=inpt)
else:
raise TypeError(
f"Input can either be a plain tensor or a bounding box tv_tensor, but got {type(inpt)} instead."
)
```
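A short sketch of the format and clamping helpers above with a `tv_tensors.BoundingBoxes` input, where the metadata is read from the tv_tensor itself:

```py
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F

boxes = tv_tensors.BoundingBoxes(
    torch.tensor([[10.0, 10.0, 20.0, 40.0]]), format="XYXY", canvas_size=(32, 32)
)

# old_format must not be passed for BoundingBoxes inputs; it is taken from boxes.format
xywh = F.convert_bounding_box_format(boxes, new_format=tv_tensors.BoundingBoxFormat.XYWH)

# clamping reads canvas_size from the input, so y2=40 is clamped to 32
clamped = F.clamp_bounding_boxes(boxes)
print(xywh, clamped)
```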
|
=====================================================================================================================================
SOURCE CODE FILE: _misc.py
LINES: 1
SIZE: 17.52 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\functional\_misc.py
ENCODING: utf-8
```py
import math
from typing import List, Optional, Tuple
import PIL.Image
import torch
from torch.nn.functional import conv2d, pad as torch_pad
from torchvision import tv_tensors
from torchvision.transforms._functional_tensor import _max_value
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from torchvision.utils import _log_api_usage_once
from ._meta import _convert_bounding_box_format
from ._utils import _get_kernel, _register_kernel_internal, is_pure_tensor
def normalize(
inpt: torch.Tensor,
mean: List[float],
std: List[float],
inplace: bool = False,
) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.Normalize` for details."""
if torch.jit.is_scripting():
return normalize_image(inpt, mean=mean, std=std, inplace=inplace)
_log_api_usage_once(normalize)
kernel = _get_kernel(normalize, type(inpt))
return kernel(inpt, mean=mean, std=std, inplace=inplace)
@_register_kernel_internal(normalize, torch.Tensor)
@_register_kernel_internal(normalize, tv_tensors.Image)
def normalize_image(image: torch.Tensor, mean: List[float], std: List[float], inplace: bool = False) -> torch.Tensor:
if not image.is_floating_point():
raise TypeError(f"Input tensor should be a float tensor. Got {image.dtype}.")
if image.ndim < 3:
raise ValueError(f"Expected tensor to be a tensor image of size (..., C, H, W). Got {image.shape}.")
if isinstance(std, (tuple, list)):
divzero = not all(std)
elif isinstance(std, (int, float)):
divzero = std == 0
else:
divzero = False
if divzero:
raise ValueError("std evaluated to zero, leading to division by zero.")
dtype = image.dtype
device = image.device
mean = torch.as_tensor(mean, dtype=dtype, device=device)
std = torch.as_tensor(std, dtype=dtype, device=device)
if mean.ndim == 1:
mean = mean.view(-1, 1, 1)
if std.ndim == 1:
std = std.view(-1, 1, 1)
if inplace:
image = image.sub_(mean)
else:
image = image.sub(mean)
return image.div_(std)
@_register_kernel_internal(normalize, tv_tensors.Video)
def normalize_video(video: torch.Tensor, mean: List[float], std: List[float], inplace: bool = False) -> torch.Tensor:
return normalize_image(video, mean, std, inplace=inplace)
def gaussian_blur(inpt: torch.Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.GaussianBlur` for details."""
if torch.jit.is_scripting():
return gaussian_blur_image(inpt, kernel_size=kernel_size, sigma=sigma)
_log_api_usage_once(gaussian_blur)
kernel = _get_kernel(gaussian_blur, type(inpt))
return kernel(inpt, kernel_size=kernel_size, sigma=sigma)
def _get_gaussian_kernel1d(kernel_size: int, sigma: float, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
lim = (kernel_size - 1) / (2.0 * math.sqrt(2.0))
x = torch.linspace(-lim, lim, steps=kernel_size, dtype=dtype, device=device)
kernel1d = torch.softmax(x.div(sigma).pow(2).neg(), dim=0)
return kernel1d
def _get_gaussian_kernel2d(
kernel_size: List[int], sigma: List[float], dtype: torch.dtype, device: torch.device
) -> torch.Tensor:
kernel1d_x = _get_gaussian_kernel1d(kernel_size[0], sigma[0], dtype, device)
kernel1d_y = _get_gaussian_kernel1d(kernel_size[1], sigma[1], dtype, device)
kernel2d = kernel1d_y.unsqueeze(-1) * kernel1d_x
return kernel2d
@_register_kernel_internal(gaussian_blur, torch.Tensor)
@_register_kernel_internal(gaussian_blur, tv_tensors.Image)
def gaussian_blur_image(
image: torch.Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None
) -> torch.Tensor:
# TODO: consider deprecating integers from sigma on the future
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
elif len(kernel_size) != 2:
raise ValueError(f"If kernel_size is a sequence its length should be 2. Got {len(kernel_size)}")
for ksize in kernel_size:
if ksize % 2 == 0 or ksize < 0:
raise ValueError(f"kernel_size should have odd and positive integers. Got {kernel_size}")
if sigma is None:
sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]
else:
if isinstance(sigma, (list, tuple)):
length = len(sigma)
if length == 1:
s = sigma[0]
sigma = [s, s]
elif length != 2:
raise ValueError(f"If sigma is a sequence, its length should be 2. Got {length}")
elif isinstance(sigma, (int, float)):
s = float(sigma)
sigma = [s, s]
else:
raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}")
for s in sigma:
if s <= 0.0:
raise ValueError(f"sigma should have positive values. Got {sigma}")
if image.numel() == 0:
return image
dtype = image.dtype
shape = image.shape
ndim = image.ndim
if ndim == 3:
image = image.unsqueeze(dim=0)
elif ndim > 4:
image = image.reshape((-1,) + shape[-3:])
fp = torch.is_floating_point(image)
kernel = _get_gaussian_kernel2d(kernel_size, sigma, dtype=dtype if fp else torch.float32, device=image.device)
kernel = kernel.expand(shape[-3], 1, kernel.shape[0], kernel.shape[1])
output = image if fp else image.to(dtype=torch.float32)
# padding = (left, right, top, bottom)
padding = [kernel_size[0] // 2, kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[1] // 2]
output = torch_pad(output, padding, mode="reflect")
output = conv2d(output, kernel, groups=shape[-3])
if ndim == 3:
output = output.squeeze(dim=0)
elif ndim > 4:
output = output.reshape(shape)
if not fp:
output = output.round_().to(dtype=dtype)
return output
@_register_kernel_internal(gaussian_blur, PIL.Image.Image)
def _gaussian_blur_image_pil(
image: PIL.Image.Image, kernel_size: List[int], sigma: Optional[List[float]] = None
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = gaussian_blur_image(t_img, kernel_size=kernel_size, sigma=sigma)
return to_pil_image(output, mode=image.mode)
@_register_kernel_internal(gaussian_blur, tv_tensors.Video)
def gaussian_blur_video(
video: torch.Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None
) -> torch.Tensor:
return gaussian_blur_image(video, kernel_size, sigma)
def gaussian_noise(inpt: torch.Tensor, mean: float = 0.0, sigma: float = 0.1, clip: bool = True) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.GaussianNoise`"""
if torch.jit.is_scripting():
return gaussian_noise_image(inpt, mean=mean, sigma=sigma)
_log_api_usage_once(gaussian_noise)
kernel = _get_kernel(gaussian_noise, type(inpt))
return kernel(inpt, mean=mean, sigma=sigma, clip=clip)
@_register_kernel_internal(gaussian_noise, torch.Tensor)
@_register_kernel_internal(gaussian_noise, tv_tensors.Image)
def gaussian_noise_image(image: torch.Tensor, mean: float = 0.0, sigma: float = 0.1, clip: bool = True) -> torch.Tensor:
if not image.is_floating_point():
raise ValueError(f"Input tensor is expected to be in float dtype, got dtype={image.dtype}")
if sigma < 0:
raise ValueError(f"sigma shouldn't be negative. Got {sigma}")
noise = mean + torch.randn_like(image) * sigma
out = image + noise
if clip:
out = torch.clamp(out, 0, 1)
return out
@_register_kernel_internal(gaussian_noise, tv_tensors.Video)
def gaussian_noise_video(video: torch.Tensor, mean: float = 0.0, sigma: float = 0.1, clip: bool = True) -> torch.Tensor:
return gaussian_noise_image(video, mean=mean, sigma=sigma, clip=clip)
@_register_kernel_internal(gaussian_noise, PIL.Image.Image)
def _gaussian_noise_pil(
video: torch.Tensor, mean: float = 0.0, sigma: float = 0.1, clip: bool = True
) -> PIL.Image.Image:
raise ValueError("Gaussian Noise is not implemented for PIL images.")
def to_dtype(inpt: torch.Tensor, dtype: torch.dtype = torch.float, scale: bool = False) -> torch.Tensor:
"""See :func:`~torchvision.transforms.v2.ToDtype` for details."""
if torch.jit.is_scripting():
return to_dtype_image(inpt, dtype=dtype, scale=scale)
_log_api_usage_once(to_dtype)
kernel = _get_kernel(to_dtype, type(inpt))
return kernel(inpt, dtype=dtype, scale=scale)
def _num_value_bits(dtype: torch.dtype) -> int:
if dtype == torch.uint8:
return 8
elif dtype == torch.int8:
return 7
elif dtype == torch.int16:
return 15
elif dtype == torch.uint16:
return 16
elif dtype == torch.int32:
return 31
elif dtype == torch.int64:
return 63
else:
raise TypeError(f"Number of value bits is only defined for integer dtypes, but got {dtype}.")
@_register_kernel_internal(to_dtype, torch.Tensor)
@_register_kernel_internal(to_dtype, tv_tensors.Image)
def to_dtype_image(image: torch.Tensor, dtype: torch.dtype = torch.float, scale: bool = False) -> torch.Tensor:
if image.dtype == dtype:
return image
elif not scale:
return image.to(dtype)
float_input = image.is_floating_point()
if torch.jit.is_scripting():
# TODO: remove this branch as soon as `dtype.is_floating_point` is supported by JIT
float_output = torch.tensor(0, dtype=dtype).is_floating_point()
else:
float_output = dtype.is_floating_point
if float_input:
# float to float
if float_output:
return image.to(dtype)
# float to int
if (image.dtype == torch.float32 and dtype in (torch.int32, torch.int64)) or (
image.dtype == torch.float64 and dtype == torch.int64
):
raise RuntimeError(f"The conversion from {image.dtype} to {dtype} cannot be performed safely.")
# For data in the range `[0.0, 1.0]`, just multiplying by the maximum value of the integer range and converting
# to the integer dtype is not sufficient. For example, `torch.rand(...).mul(255).to(torch.uint8)` will only
# be `255` if the input is exactly `1.0`. See https://github.com/pytorch/vision/pull/2078#issuecomment-612045321
# for a detailed analysis.
# To mitigate this, we could round before we convert to the integer dtype, but this is an extra operation.
# Instead, we can also multiply by the maximum value plus something close to `1`. See
# https://github.com/pytorch/vision/pull/2078#issuecomment-613524965 for details.
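        # Illustration for uint8: max_value == 255, so an input of exactly 1.0 is
        # multiplied by 255.999 and truncates to 255 on the final cast.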
eps = 1e-3
max_value = float(_max_value(dtype))
# We need to scale first since the conversion would otherwise turn the input range `[0.0, 1.0]` into the
# discrete set `{0, 1}`.
return image.mul(max_value + 1.0 - eps).to(dtype)
else:
# int to float
if float_output:
return image.to(dtype).mul_(1.0 / _max_value(image.dtype))
# int to int
num_value_bits_input = _num_value_bits(image.dtype)
num_value_bits_output = _num_value_bits(dtype)
# TODO: Remove if/else inner blocks once uint16 dtype supports bitwise shift operations.
shift_by = abs(num_value_bits_input - num_value_bits_output)
if num_value_bits_input > num_value_bits_output:
if image.dtype == torch.uint16:
return (image / 2 ** (shift_by)).to(dtype)
else:
return image.bitwise_right_shift(shift_by).to(dtype)
else:
if dtype == torch.uint16:
return image.to(dtype) * 2 ** (shift_by)
else:
return image.to(dtype).bitwise_left_shift_(shift_by)
# We encourage users to use to_dtype() instead but we keep this for BC
def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
"""[DEPRECATED] Use to_dtype() instead."""
return to_dtype_image(image, dtype=dtype, scale=True)
@_register_kernel_internal(to_dtype, tv_tensors.Video)
def to_dtype_video(video: torch.Tensor, dtype: torch.dtype = torch.float, scale: bool = False) -> torch.Tensor:
return to_dtype_image(video, dtype, scale=scale)
@_register_kernel_internal(to_dtype, tv_tensors.BoundingBoxes, tv_tensor_wrapper=False)
@_register_kernel_internal(to_dtype, tv_tensors.Mask, tv_tensor_wrapper=False)
def _to_dtype_tensor_dispatch(inpt: torch.Tensor, dtype: torch.dtype, scale: bool = False) -> torch.Tensor:
# We don't need to unwrap and rewrap here, since TVTensor.to() preserves the type
return inpt.to(dtype)
def sanitize_bounding_boxes(
bounding_boxes: torch.Tensor,
format: Optional[tv_tensors.BoundingBoxFormat] = None,
canvas_size: Optional[Tuple[int, int]] = None,
min_size: float = 1.0,
min_area: float = 1.0,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Remove degenerate/invalid bounding boxes and return the corresponding indexing mask.
This removes bounding boxes that:
- are below a given ``min_size`` or ``min_area``: by default this also removes degenerate boxes that have e.g. X2 <= X1.
- have any coordinate outside of their corresponding image. You may want to
call :func:`~torchvision.transforms.v2.functional.clamp_bounding_boxes` first to avoid undesired removals.
It is recommended to call it at the end of a pipeline, before passing the
input to the models. It is critical to call this transform if
:class:`~torchvision.transforms.v2.RandomIoUCrop` was called.
If you want to be extra careful, you may call it after all transforms that
may modify bounding boxes but once at the end should be enough in most
cases.
Args:
bounding_boxes (Tensor or :class:`~torchvision.tv_tensors.BoundingBoxes`): The bounding boxes to be sanitized.
format (str or :class:`~torchvision.tv_tensors.BoundingBoxFormat`, optional): The format of the bounding boxes.
Must be left to none if ``bounding_boxes`` is a :class:`~torchvision.tv_tensors.BoundingBoxes` object.
canvas_size (tuple of int, optional): The canvas_size of the bounding boxes
(size of the corresponding image/video).
Must be left to none if ``bounding_boxes`` is a :class:`~torchvision.tv_tensors.BoundingBoxes` object.
        min_size (float, optional): The size below which bounding boxes are removed. Default is 1.
        min_area (float, optional): The area below which bounding boxes are removed. Default is 1.
Returns:
out (tuple of Tensors): The subset of valid bounding boxes, and the corresponding indexing mask.
The mask can then be used to subset other tensors (e.g. labels) that are associated with the bounding boxes.
"""
if torch.jit.is_scripting() or is_pure_tensor(bounding_boxes):
if format is None or canvas_size is None:
raise ValueError(
"format and canvas_size cannot be None if bounding_boxes is a pure tensor. "
f"Got format={format} and canvas_size={canvas_size}."
"Set those to appropriate values or pass bounding_boxes as a tv_tensors.BoundingBoxes object."
)
if isinstance(format, str):
format = tv_tensors.BoundingBoxFormat[format.upper()]
valid = _get_sanitize_bounding_boxes_mask(
bounding_boxes, format=format, canvas_size=canvas_size, min_size=min_size, min_area=min_area
)
bounding_boxes = bounding_boxes[valid]
else:
if not isinstance(bounding_boxes, tv_tensors.BoundingBoxes):
raise ValueError("bounding_boxes must be a tv_tensors.BoundingBoxes instance or a pure tensor.")
if format is not None or canvas_size is not None:
raise ValueError(
"format and canvas_size must be None when bounding_boxes is a tv_tensors.BoundingBoxes instance. "
f"Got format={format} and canvas_size={canvas_size}. "
"Leave those to None or pass bounding_boxes as a pure tensor."
)
valid = _get_sanitize_bounding_boxes_mask(
bounding_boxes,
format=bounding_boxes.format,
canvas_size=bounding_boxes.canvas_size,
min_size=min_size,
min_area=min_area,
)
bounding_boxes = tv_tensors.wrap(bounding_boxes[valid], like=bounding_boxes)
return bounding_boxes, valid
def _get_sanitize_bounding_boxes_mask(
bounding_boxes: torch.Tensor,
format: tv_tensors.BoundingBoxFormat,
canvas_size: Tuple[int, int],
min_size: float = 1.0,
min_area: float = 1.0,
) -> torch.Tensor:
bounding_boxes = _convert_bounding_box_format(
bounding_boxes, new_format=tv_tensors.BoundingBoxFormat.XYXY, old_format=format
)
image_h, image_w = canvas_size
ws, hs = bounding_boxes[:, 2] - bounding_boxes[:, 0], bounding_boxes[:, 3] - bounding_boxes[:, 1]
valid = (ws >= min_size) & (hs >= min_size) & (bounding_boxes >= 0).all(dim=-1) & (ws * hs >= min_area)
# TODO: Do we really need to check for out of bounds here? All
# transforms should be clamping anyway, so this should never happen?
image_h, image_w = canvas_size
valid &= (bounding_boxes[:, 0] <= image_w) & (bounding_boxes[:, 2] <= image_w)
valid &= (bounding_boxes[:, 1] <= image_h) & (bounding_boxes[:, 3] <= image_h)
return valid
```
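A sketch chaining a few of the kernels above (dtype conversion with rescaling, Gaussian blur, and normalization) on a uint8 image tensor:

```py
import torch
from torchvision.transforms.v2 import functional as F

img_u8 = torch.randint(0, 256, (3, 16, 16), dtype=torch.uint8)

img = F.to_dtype(img_u8, torch.float32, scale=True)  # uint8 [0, 255] -> float32 [0, 1]
img = F.gaussian_blur(img, kernel_size=[3, 3])       # sigma derived from kernel_size
img = F.normalize(img, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
print(img.dtype, img.shape)
```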
|
=========================================================================================================================================
SOURCE CODE FILE: _temporal.py
LINES: 1
SIZE: 1.14 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\functional\_temporal.py
ENCODING: utf-8
```py
import torch
from torchvision import tv_tensors
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def uniform_temporal_subsample(inpt: torch.Tensor, num_samples: int) -> torch.Tensor:
"""See :class:`~torchvision.transforms.v2.UniformTemporalSubsample` for details."""
if torch.jit.is_scripting():
return uniform_temporal_subsample_video(inpt, num_samples=num_samples)
_log_api_usage_once(uniform_temporal_subsample)
kernel = _get_kernel(uniform_temporal_subsample, type(inpt))
return kernel(inpt, num_samples=num_samples)
@_register_kernel_internal(uniform_temporal_subsample, torch.Tensor)
@_register_kernel_internal(uniform_temporal_subsample, tv_tensors.Video)
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
```
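Usage sketch: `uniform_temporal_subsample` picks `num_samples` evenly spaced frame indices (via `torch.linspace`) along the temporal dimension (`-4`), always including the first and last frame:

```py
import torch
from torchvision.transforms.v2 import functional as F

video = torch.rand(16, 3, 8, 8)  # (T, C, H, W)
clip = F.uniform_temporal_subsample(video, num_samples=4)
print(clip.shape)  # torch.Size([4, 3, 8, 8]); frames 0, 5, 10, 15 are selected
```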
|
================================================================================================================================================
SOURCE CODE FILE: _type_conversion.py
LINES: 1
SIZE: 0.88 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\functional\_type_conversion.py
ENCODING: utf-8
```py
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> tv_tensors.Image:
"""See :class:`~torchvision.transforms.v2.ToImage` for details."""
if isinstance(inpt, np.ndarray):
output = torch.from_numpy(np.atleast_3d(inpt)).permute((2, 0, 1)).contiguous()
elif isinstance(inpt, PIL.Image.Image):
output = pil_to_tensor(inpt)
elif isinstance(inpt, torch.Tensor):
output = inpt
else:
raise TypeError(
f"Input can either be a pure Tensor, a numpy array, or a PIL image, but got {type(inpt)} instead."
)
return tv_tensors.Image(output)
to_pil_image = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
```
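A sketch of `to_image` on two of the accepted input types; numpy arrays are treated as HWC and permuted to CHW:

```py
import numpy as np
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F

img = F.to_image(np.zeros((4, 4, 3), dtype=np.uint8))  # HWC ndarray -> CHW Image
print(type(img).__name__, img.shape)                   # Image torch.Size([3, 4, 4])

img2 = F.to_image(torch.rand(3, 4, 4))                 # plain tensors are just wrapped
print(isinstance(img2, tv_tensors.Image))              # True
```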
|
======================================================================================================================================
SOURCE CODE FILE: _utils.py
LINES: 1
SIZE: 5.49 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\transforms\v2\functional\_utils.py
ENCODING: utf-8
```py
import functools
from typing import Any, Callable, Dict, List, Optional, Sequence, Type, Union
import torch
from torchvision import tv_tensors
_FillType = Union[int, float, Sequence[int], Sequence[float], None]
_FillTypeJIT = Optional[List[float]]
def is_pure_tensor(inpt: Any) -> bool:
return isinstance(inpt, torch.Tensor) and not isinstance(inpt, tv_tensors.TVTensor)
# {functional: {input_type: type_specific_kernel}}
_KERNEL_REGISTRY: Dict[Callable, Dict[Type, Callable]] = {}
def _kernel_tv_tensor_wrapper(kernel):
@functools.wraps(kernel)
def wrapper(inpt, *args, **kwargs):
# If you're wondering whether we could / should get rid of this wrapper,
# the answer is no: we want to pass pure Tensors to avoid the overhead
# of the __torch_function__ machinery. Note that this is always valid,
# regardless of whether we override __torch_function__ in our base class
# or not.
# Also, even if we didn't call `as_subclass` here, we would still need
# this wrapper to call wrap(), because the TVTensor type would be
# lost after the first operation due to our own __torch_function__
# logic.
output = kernel(inpt.as_subclass(torch.Tensor), *args, **kwargs)
return tv_tensors.wrap(output, like=inpt)
return wrapper
def _register_kernel_internal(functional, input_type, *, tv_tensor_wrapper=True):
registry = _KERNEL_REGISTRY.setdefault(functional, {})
if input_type in registry:
raise ValueError(f"Functional {functional} already has a kernel registered for type {input_type}.")
def decorator(kernel):
registry[input_type] = (
_kernel_tv_tensor_wrapper(kernel)
if issubclass(input_type, tv_tensors.TVTensor) and tv_tensor_wrapper
else kernel
)
return kernel
return decorator
def _name_to_functional(name):
import torchvision.transforms.v2.functional # noqa
try:
return getattr(torchvision.transforms.v2.functional, name)
except AttributeError:
raise ValueError(
f"Could not find functional with name '{name}' in torchvision.transforms.v2.functional."
) from None
_BUILTIN_DATAPOINT_TYPES = {
obj for obj in tv_tensors.__dict__.values() if isinstance(obj, type) and issubclass(obj, tv_tensors.TVTensor)
}
def register_kernel(functional, tv_tensor_cls):
"""Decorate a kernel to register it for a functional and a (custom) tv_tensor type.
See :ref:`sphx_glr_auto_examples_transforms_plot_custom_tv_tensors.py` for usage
details.
"""
if isinstance(functional, str):
functional = _name_to_functional(name=functional)
elif not (
callable(functional)
and getattr(functional, "__module__", "").startswith("torchvision.transforms.v2.functional")
):
raise ValueError(
f"Kernels can only be registered on functionals from the torchvision.transforms.v2.functional namespace, "
f"but got {functional}."
)
if not (isinstance(tv_tensor_cls, type) and issubclass(tv_tensor_cls, tv_tensors.TVTensor)):
raise ValueError(
f"Kernels can only be registered for subclasses of torchvision.tv_tensors.TVTensor, "
f"but got {tv_tensor_cls}."
)
if tv_tensor_cls in _BUILTIN_DATAPOINT_TYPES:
raise ValueError(f"Kernels cannot be registered for the builtin tv_tensor classes, but got {tv_tensor_cls}")
return _register_kernel_internal(functional, tv_tensor_cls, tv_tensor_wrapper=False)
def _get_kernel(functional, input_type, *, allow_passthrough=False):
registry = _KERNEL_REGISTRY.get(functional)
if not registry:
raise ValueError(f"No kernel registered for functional {functional.__name__}.")
for cls in input_type.__mro__:
if cls in registry:
return registry[cls]
elif cls is tv_tensors.TVTensor:
            # We don't want user-defined tv_tensors to dispatch to the pure Tensor kernels, so we explicitly stop the
# MRO traversal before hitting torch.Tensor. We can even stop at tv_tensors.TVTensor, since we don't
# allow kernels to be registered for tv_tensors.TVTensor anyway.
break
if allow_passthrough:
return lambda inpt, *args, **kwargs: inpt
raise TypeError(
f"Functional F.{functional.__name__} supports inputs of type {registry.keys()}, "
f"but got {input_type} instead."
)
# This basically replicates _register_kernel_internal, but with a specialized wrapper for five_crop / ten_crop
# We could get rid of this by letting _register_kernel_internal take arbitrary functionals rather than wrap_kernel: bool
def _register_five_ten_crop_kernel_internal(functional, input_type):
registry = _KERNEL_REGISTRY.setdefault(functional, {})
if input_type in registry:
raise TypeError(f"Functional '{functional}' already has a kernel registered for type '{input_type}'.")
def wrap(kernel):
@functools.wraps(kernel)
def wrapper(inpt, *args, **kwargs):
output = kernel(inpt, *args, **kwargs)
container_type = type(output)
return container_type(tv_tensors.wrap(o, like=inpt) for o in output)
return wrapper
def decorator(kernel):
registry[input_type] = wrap(kernel) if issubclass(input_type, tv_tensors.TVTensor) else kernel
return kernel
return decorator
```
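A sketch of the public `register_kernel` entry point for a user-defined TVTensor subclass; the class and kernel below are made up for illustration, and `F.horizontal_flip` is used as the target functional:

```py
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F

class MyTensor(tv_tensors.TVTensor):  # hypothetical custom tv_tensor
    pass

@F.register_kernel(functional=F.horizontal_flip, tv_tensor_cls=MyTensor)
def hflip_my_tensor(inpt, *args, **kwargs):
    # kernels registered this way receive the subclass instance unchanged
    return tv_tensors.wrap(inpt.flip(-1), like=inpt)

x = torch.arange(4.0).reshape(1, 1, 4).as_subclass(MyTensor)
print(F.horizontal_flip(x))  # dispatches to hflip_my_tensor via _get_kernel
```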
|
==========================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 1.51 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\tv_tensors\__init__.py
ENCODING: utf-8
```py
import torch
from ._bounding_boxes import BoundingBoxes, BoundingBoxFormat
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._tv_tensor import TVTensor
from ._video import Video
# TODO: Fix this. We skip this method as it leads to
# RecursionError: maximum recursion depth exceeded while calling a Python object
# Until `disable` is removed, there will be graph breaks after all calls to functional transforms
@torch.compiler.disable
def wrap(wrappee, *, like, **kwargs):
"""Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.tv_tensors.TVTensor` subclass as ``like``.
If ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (:class:`~torchvision.tv_tensors.TVTensor`): The reference.
``wrappee`` will be converted into the same subclass as ``like``.
        kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.tv_tensors.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
else:
return wrappee.as_subclass(type(like))
```
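A sketch of `wrap` re-attaching `BoundingBoxes` metadata to a plain tensor, with an optional override through kwargs:

```py
import torch
from torchvision import tv_tensors

boxes = tv_tensors.BoundingBoxes(
    torch.tensor([[0, 0, 10, 10]]), format="XYXY", canvas_size=(20, 20)
)

out = tv_tensors.wrap(torch.tensor([[1, 1, 9, 9]]), like=boxes)
print(type(out).__name__, out.format, out.canvas_size)

out2 = tv_tensors.wrap(torch.tensor([[1, 1, 9, 9]]), like=boxes, canvas_size=(40, 40))
print(out2.canvas_size)  # (40, 40): kwargs take precedence over `like`
```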
|
=================================================================================================================================
SOURCE CODE FILE: _bounding_boxes.py
LINES: 1
SIZE: 4.49 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\tv_tensors\_bounding_boxes.py
ENCODING: utf-8
```py
from __future__ import annotations
from enum import Enum
from typing import Any, Mapping, Optional, Sequence, Tuple, Union
import torch
from torch.utils._pytree import tree_flatten
from ._tv_tensor import TVTensor
class BoundingBoxFormat(Enum):
"""Coordinate format of a bounding box.
Available formats are
* ``XYXY``
* ``XYWH``
* ``CXCYWH``
"""
XYXY = "XYXY"
XYWH = "XYWH"
CXCYWH = "CXCYWH"
class BoundingBoxes(TVTensor):
""":class:`torch.Tensor` subclass for bounding boxes with shape ``[N, 4]``.
.. note::
There should be only one :class:`~torchvision.tv_tensors.BoundingBoxes`
instance per sample e.g. ``{"img": img, "bbox": BoundingBoxes(...)}``,
although one :class:`~torchvision.tv_tensors.BoundingBoxes` object can
contain multiple bounding boxes.
Args:
data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
format (BoundingBoxFormat, str): Format of the bounding box.
canvas_size (two-tuple of ints): Height and width of the corresponding image or video.
dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
format: BoundingBoxFormat
canvas_size: Tuple[int, int]
@classmethod
def _wrap(cls, tensor: torch.Tensor, *, format: Union[BoundingBoxFormat, str], canvas_size: Tuple[int, int], check_dims: bool = True) -> BoundingBoxes: # type: ignore[override]
if check_dims:
if tensor.ndim == 1:
tensor = tensor.unsqueeze(0)
elif tensor.ndim != 2:
raise ValueError(f"Expected a 1D or 2D tensor, got {tensor.ndim}D")
if isinstance(format, str):
format = BoundingBoxFormat[format.upper()]
bounding_boxes = tensor.as_subclass(cls)
bounding_boxes.format = format
bounding_boxes.canvas_size = canvas_size
return bounding_boxes
def __new__(
cls,
data: Any,
*,
format: Union[BoundingBoxFormat, str],
canvas_size: Tuple[int, int],
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> BoundingBoxes:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor, format=format, canvas_size=canvas_size)
@classmethod
def _wrap_output(
cls,
output: torch.Tensor,
args: Sequence[Any] = (),
kwargs: Optional[Mapping[str, Any]] = None,
) -> BoundingBoxes:
# If there are BoundingBoxes instances in the output, their metadata got lost when we called
# super().__torch_function__. We need to restore the metadata somehow, so we choose to take
# the metadata from the first bbox in the parameters.
# This should be what we want in most cases. When it's not, it's probably a mis-use anyway, e.g.
# something like some_xyxy_bbox + some_xywh_bbox; we don't guard against those cases.
flat_params, _ = tree_flatten(args + (tuple(kwargs.values()) if kwargs else ())) # type: ignore[operator]
first_bbox_from_args = next(x for x in flat_params if isinstance(x, BoundingBoxes))
format, canvas_size = first_bbox_from_args.format, first_bbox_from_args.canvas_size
if isinstance(output, torch.Tensor) and not isinstance(output, BoundingBoxes):
output = BoundingBoxes._wrap(output, format=format, canvas_size=canvas_size, check_dims=False)
elif isinstance(output, (tuple, list)):
output = type(output)(
BoundingBoxes._wrap(part, format=format, canvas_size=canvas_size, check_dims=False) for part in output
)
return output
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr(format=self.format, canvas_size=self.canvas_size)
```
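A small sketch of `BoundingBoxes` construction: a single 1D box is promoted to shape `[1, 4]` by `_wrap`, and string formats are upper-cased before the `BoundingBoxFormat` lookup:

```py
import torch
from torchvision import tv_tensors

box = tv_tensors.BoundingBoxes([2, 3, 7, 9], format="xyxy", canvas_size=(10, 10))
print(box.shape)        # torch.Size([1, 4])
print(box.format)       # BoundingBoxFormat.XYXY
print(box.canvas_size)  # (10, 10)
```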
|
==================================================================================================================================
SOURCE CODE FILE: _dataset_wrapper.py
LINES: 4
SIZE: 24.58 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\tv_tensors\_dataset_wrapper.py
ENCODING: utf-8
```py
# type: ignore
from __future__ import annotations
import collections.abc
import contextlib
from collections import defaultdict
from copy import copy
import torch
from torchvision import datasets, tv_tensors
from torchvision.transforms.v2 import functional as F
__all__ = ["wrap_dataset_for_transforms_v2"]
def wrap_dataset_for_transforms_v2(dataset, target_keys=None):
"""Wrap a ``torchvision.dataset`` for usage with :mod:`torchvision.transforms.v2`.
Example:
>>> dataset = torchvision.datasets.CocoDetection(...)
>>> dataset = wrap_dataset_for_transforms_v2(dataset)
.. note::
For now, only the most popular datasets are supported. Furthermore, the wrapper only supports dataset
configurations that are fully supported by ``torchvision.transforms.v2``. If you encounter an error prompting you
to raise an issue to ``torchvision`` for a dataset or configuration that you need, please do so.
The dataset samples are wrapped according to the description below.
Special cases:
* :class:`~torchvision.datasets.CocoDetection`: Instead of returning the target as list of dicts, the wrapper
returns a dict of lists. In addition, the key-value-pairs ``"boxes"`` (in ``XYXY`` coordinate format),
``"masks"`` and ``"labels"`` are added and wrap the data in the corresponding ``torchvision.tv_tensors``.
The original keys are preserved. If ``target_keys`` is omitted, returns only the values for the
``"image_id"``, ``"boxes"``, and ``"labels"``.
* :class:`~torchvision.datasets.VOCDetection`: The key-value-pairs ``"boxes"`` and ``"labels"`` are added to
the target and wrap the data in the corresponding ``torchvision.tv_tensors``. The original keys are
preserved. If ``target_keys`` is omitted, returns only the values for the ``"boxes"`` and ``"labels"``.
* :class:`~torchvision.datasets.CelebA`: The target for ``target_type="bbox"`` is converted to the ``XYXY``
coordinate format and wrapped into a :class:`~torchvision.tv_tensors.BoundingBoxes` tv_tensor.
        * :class:`~torchvision.datasets.Kitti`: Instead of returning the target as list of dicts, the wrapper returns a
dict of lists. In addition, the key-value-pairs ``"boxes"`` and ``"labels"`` are added and wrap the data
in the corresponding ``torchvision.tv_tensors``. The original keys are preserved. If ``target_keys`` is
omitted, returns only the values for the ``"boxes"`` and ``"labels"``.
* :class:`~torchvision.datasets.OxfordIIITPet`: The target for ``target_type="segmentation"`` is wrapped into a
:class:`~torchvision.tv_tensors.Mask` tv_tensor.
* :class:`~torchvision.datasets.Cityscapes`: The target for ``target_type="semantic"`` is wrapped into a
:class:`~torchvision.tv_tensors.Mask` tv_tensor. The target for ``target_type="instance"`` is *replaced* by
a dictionary with the key-value-pairs ``"masks"`` (as :class:`~torchvision.tv_tensors.Mask` tv_tensor) and
``"labels"``.
* :class:`~torchvision.datasets.WIDERFace`: The value for key ``"bbox"`` in the target is converted to ``XYXY``
coordinate format and wrapped into a :class:`~torchvision.tv_tensors.BoundingBoxes` tv_tensor.
Image classification datasets
This wrapper is a no-op for image classification datasets, since they were already fully supported by
:mod:`torchvision.transforms` and thus no change is needed for :mod:`torchvision.transforms.v2`.
Segmentation datasets
Segmentation datasets, e.g. :class:`~torchvision.datasets.VOCSegmentation`, return a two-tuple of
:class:`PIL.Image.Image`'s. This wrapper leaves the image as is (first item), while wrapping the
segmentation mask into a :class:`~torchvision.tv_tensors.Mask` (second item).
Video classification datasets
Video classification datasets, e.g. :class:`~torchvision.datasets.Kinetics`, return a three-tuple containing a
:class:`torch.Tensor` for the video and audio and a :class:`int` as label. This wrapper wraps the video into a
:class:`~torchvision.tv_tensors.Video` while leaving the other items as is.
.. note::
Only datasets constructed with ``output_format="TCHW"`` are supported, since the alternative
``output_format="THWC"`` is not supported by :mod:`torchvision.transforms.v2`.
Args:
dataset: the dataset instance to wrap for compatibility with transforms v2.
target_keys: Target keys to return in case the target is a dictionary. If ``None`` (default), selected keys are
specific to the dataset. If ``"all"``, returns the full target. Can also be a collection of strings for
fine grained access. Currently only supported for :class:`~torchvision.datasets.CocoDetection`,
:class:`~torchvision.datasets.VOCDetection`, :class:`~torchvision.datasets.Kitti`, and
:class:`~torchvision.datasets.WIDERFace`. See above for details.
"""
if not (
target_keys is None
or target_keys == "all"
or (isinstance(target_keys, collections.abc.Collection) and all(isinstance(key, str) for key in target_keys))
):
raise ValueError(
f"`target_keys` can be None, 'all', or a collection of strings denoting the keys to be returned, "
f"but got {target_keys}"
)
# Imagine we have isinstance(dataset, datasets.ImageNet). This will create a new class with the name
# "WrappedImageNet" at runtime that doubly inherits from VisionDatasetTVTensorWrapper (see below) as well as the
# original ImageNet class. This allows the user to do regular isinstance(wrapped_dataset, datasets.ImageNet) checks,
# while we can still inject everything that we need.
wrapped_dataset_cls = type(f"Wrapped{type(dataset).__name__}", (VisionDatasetTVTensorWrapper, type(dataset)), {})
# Since VisionDatasetTVTensorWrapper comes before ImageNet in the MRO, calling the class hits
# VisionDatasetTVTensorWrapper.__init__ first. Since we are never doing super().__init__(...), the constructor of
# ImageNet is never hit. That is by design, since we don't want to create the dataset instance again, but rather
# have the existing instance as attribute on the new object.
return wrapped_dataset_cls(dataset, target_keys)
class WrapperFactories(dict):
def register(self, dataset_cls):
def decorator(wrapper_factory):
self[dataset_cls] = wrapper_factory
return wrapper_factory
return decorator
# We need this two-stage design, i.e. a wrapper factory producing the actual wrapper, since some wrappers depend on the
# dataset instance rather than just the class, since they require the user defined instance attributes. Thus, we can
# provide a wrapping from the dataset class to the factory here, but can only instantiate the wrapper at runtime when
# we have access to the dataset instance.
WRAPPER_FACTORIES = WrapperFactories()
class VisionDatasetTVTensorWrapper:
def __init__(self, dataset, target_keys):
dataset_cls = type(dataset)
if not isinstance(dataset, datasets.VisionDataset):
raise TypeError(
f"This wrapper is meant for subclasses of `torchvision.datasets.VisionDataset`, "
f"but got a '{dataset_cls.__name__}' instead.\n"
f"For an example of how to perform the wrapping for custom datasets, see\n\n"
"https://pytorch.org/vision/main/auto_examples/plot_tv_tensors.html#do-i-have-to-wrap-the-output-of-the-datasets-myself"
)
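        # Walk the MRO so that subclasses of supported datasets (e.g. a user-defined class deriving from
        # datasets.CocoDetection) pick up the wrapper factory registered for their closest supported base class.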
for cls in dataset_cls.mro():
if cls in WRAPPER_FACTORIES:
wrapper_factory = WRAPPER_FACTORIES[cls]
if target_keys is not None and cls not in {
datasets.CocoDetection,
datasets.VOCDetection,
datasets.Kitti,
datasets.WIDERFace,
}:
raise ValueError(
f"`target_keys` is currently only supported for `CocoDetection`, `VOCDetection`, `Kitti`, "
f"and `WIDERFace`, but got {cls.__name__}."
)
break
elif cls is datasets.VisionDataset:
# TODO: If we have documentation on how to do that, put a link in the error message.
msg = f"No wrapper exists for dataset class {dataset_cls.__name__}. Please wrap the output yourself."
if dataset_cls in datasets.__dict__.values():
msg = (
f"{msg} If an automated wrapper for this dataset would be useful for you, "
f"please open an issue at https://github.com/pytorch/vision/issues."
)
raise TypeError(msg)
self._dataset = dataset
self._target_keys = target_keys
self._wrapper = wrapper_factory(dataset, target_keys)
# We need to disable the transforms on the dataset here to be able to inject the wrapping before we apply them.
# Although internally, `datasets.VisionDataset` merges `transform` and `target_transform` into the joint
# `transforms`
# https://github.com/pytorch/vision/blob/135a0f9ea9841b6324b4fe8974e2543cbb95709a/torchvision/datasets/vision.py#L52-L54
# some (if not most) datasets still use `transform` and `target_transform` individually. Thus, we need to
# disable all three here to be able to extract the untransformed sample to wrap.
self.transform, dataset.transform = dataset.transform, None
self.target_transform, dataset.target_transform = dataset.target_transform, None
self.transforms, dataset.transforms = dataset.transforms, None
def __getattr__(self, item):
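        # Attribute lookups that are not resolved on the wrapper itself are forwarded to the wrapped
        # dataset, so dataset-specific attributes (e.g. `CocoDetection.ids`) stay accessible.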
with contextlib.suppress(AttributeError):
return object.__getattribute__(self, item)
return getattr(self._dataset, item)
def __getitem__(self, idx):
# This gets us the raw sample since we disabled the transforms for the underlying dataset in the constructor
# of this class
sample = self._dataset[idx]
sample = self._wrapper(idx, sample)
# Regardless of whether the user has supplied the transforms individually (`transform` and `target_transform`)
# or joint (`transforms`), we can access the full functionality through `transforms`
if self.transforms is not None:
sample = self.transforms(*sample)
return sample
def __len__(self):
return len(self._dataset)
# TODO: maybe we should use __getstate__ and __setstate__ instead of __reduce__, as recommended in the docs.
def __reduce__(self):
# __reduce__ gets called when we try to pickle the dataset.
# In a DataLoader with spawn context, this gets called `num_workers` times from the main process.
# We have to reset the [target_]transform[s] attributes of the dataset
# to their original values, because we previously set them to None in __init__().
dataset = copy(self._dataset)
dataset.transform = self.transform
dataset.transforms = self.transforms
dataset.target_transform = self.target_transform
return wrap_dataset_for_transforms_v2, (dataset, self._target_keys)
def raise_not_supported(description):
raise RuntimeError(
f"{description} is currently not supported by this wrapper. "
f"If this would be helpful for you, please open an issue at https://github.com/pytorch/vision/issues."
)
def identity(item):
return item
def identity_wrapper_factory(dataset, target_keys):
def wrapper(idx, sample):
return sample
return wrapper
def pil_image_to_mask(pil_image):
return tv_tensors.Mask(pil_image)
def parse_target_keys(target_keys, *, available, default):
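    # Resolve the user-provided `target_keys` against the wrapper-specific `available` set:
    # None selects the dataset-specific `default`, "all" expands to every available key, and
    # any unknown key raises a ValueError below.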
if target_keys is None:
target_keys = default
if target_keys == "all":
target_keys = available
else:
target_keys = set(target_keys)
extra = target_keys - available
if extra:
raise ValueError(f"Target keys {sorted(extra)} are not available")
return target_keys
def list_of_dicts_to_dict_of_lists(list_of_dicts):
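    # Transpose a list of per-instance dicts into a dict of per-key lists,
    # e.g. [{"bbox": b1, "category_id": c1}, {"bbox": b2, "category_id": c2}]
    # becomes {"bbox": [b1, b2], "category_id": [c1, c2]}.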
dict_of_lists = defaultdict(list)
for dct in list_of_dicts:
for key, value in dct.items():
dict_of_lists[key].append(value)
return dict(dict_of_lists)
def wrap_target_by_type(target, *, target_types, type_wrappers):
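    # Apply the wrapper registered for each declared target type to the corresponding item; types
    # without an entry in `type_wrappers` pass through `identity`, and a single-item target is
    # returned unwrapped instead of as a 1-tuple.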
if not isinstance(target, (tuple, list)):
target = [target]
wrapped_target = tuple(
type_wrappers.get(target_type, identity)(item) for target_type, item in zip(target_types, target)
)
if len(wrapped_target) == 1:
wrapped_target = wrapped_target[0]
return wrapped_target
def classification_wrapper_factory(dataset, target_keys):
return identity_wrapper_factory(dataset, target_keys)
for dataset_cls in [
datasets.Caltech256,
datasets.CIFAR10,
datasets.CIFAR100,
datasets.ImageNet,
datasets.MNIST,
datasets.FashionMNIST,
datasets.GTSRB,
datasets.DatasetFolder,
datasets.ImageFolder,
datasets.Imagenette,
]:
WRAPPER_FACTORIES.register(dataset_cls)(classification_wrapper_factory)
def segmentation_wrapper_factory(dataset, target_keys):
def wrapper(idx, sample):
image, mask = sample
return image, pil_image_to_mask(mask)
return wrapper
for dataset_cls in [
datasets.VOCSegmentation,
]:
WRAPPER_FACTORIES.register(dataset_cls)(segmentation_wrapper_factory)
def video_classification_wrapper_factory(dataset, target_keys):
if dataset.video_clips.output_format == "THWC":
raise RuntimeError(
f"{type(dataset).__name__} with `output_format='THWC'` is not supported by this wrapper, "
f"since it is not compatible with the transformations. Please use `output_format='TCHW'` instead."
)
def wrapper(idx, sample):
video, audio, label = sample
video = tv_tensors.Video(video)
return video, audio, label
return wrapper
for dataset_cls in [
datasets.HMDB51,
datasets.Kinetics,
datasets.UCF101,
]:
WRAPPER_FACTORIES.register(dataset_cls)(video_classification_wrapper_factory)
@WRAPPER_FACTORIES.register(datasets.Caltech101)
def caltech101_wrapper_factory(dataset, target_keys):
if "annotation" in dataset.target_type:
raise_not_supported("Caltech101 dataset with `target_type=['annotation', ...]`")
return classification_wrapper_factory(dataset, target_keys)
@WRAPPER_FACTORIES.register(datasets.CocoDetection)
def coco_dectection_wrapper_factory(dataset, target_keys):
target_keys = parse_target_keys(
target_keys,
available={
# native
"segmentation",
"area",
"iscrowd",
"image_id",
"bbox",
"category_id",
# added by the wrapper
"boxes",
"masks",
"labels",
},
default={"image_id", "boxes", "labels"},
)
def segmentation_to_mask(segmentation, *, canvas_size):
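        # COCO stores segmentations either as polygons (a list of point lists) or as RLE (a dict with
        # "counts"/"size"); both are decoded via pycocotools into a dense HxW mask tensor.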
from pycocotools import mask
if isinstance(segmentation, dict):
# if counts is a string, it is already an encoded RLE mask
if not isinstance(segmentation["counts"], str):
segmentation = mask.frPyObjects(segmentation, *canvas_size)
elif isinstance(segmentation, list):
segmentation = mask.merge(mask.frPyObjects(segmentation, *canvas_size))
else:
raise ValueError(f"COCO segmentation expected to be a dict or a list, got {type(segmentation)}")
return torch.from_numpy(mask.decode(segmentation))
def wrapper(idx, sample):
image_id = dataset.ids[idx]
image, target = sample
if not target:
return image, dict(image_id=image_id)
canvas_size = tuple(F.get_size(image))
batched_target = list_of_dicts_to_dict_of_lists(target)
target = {}
if "image_id" in target_keys:
target["image_id"] = image_id
if "boxes" in target_keys:
target["boxes"] = F.convert_bounding_box_format(
tv_tensors.BoundingBoxes(
batched_target["bbox"],
format=tv_tensors.BoundingBoxFormat.XYWH,
canvas_size=canvas_size,
),
new_format=tv_tensors.BoundingBoxFormat.XYXY,
)
if "masks" in target_keys:
target["masks"] = tv_tensors.Mask(
torch.stack(
[
segmentation_to_mask(segmentation, canvas_size=canvas_size)
for segmentation in batched_target["segmentation"]
]
),
)
if "labels" in target_keys:
target["labels"] = torch.tensor(batched_target["category_id"])
for target_key in target_keys - {"image_id", "boxes", "masks", "labels"}:
target[target_key] = batched_target[target_key]
return image, target
return wrapper
WRAPPER_FACTORIES.register(datasets.CocoCaptions)(identity_wrapper_factory)
VOC_DETECTION_CATEGORIES = [
"__background__",
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
]
VOC_DETECTION_CATEGORY_TO_IDX = dict(zip(VOC_DETECTION_CATEGORIES, range(len(VOC_DETECTION_CATEGORIES))))
@WRAPPER_FACTORIES.register(datasets.VOCDetection)
def voc_detection_wrapper_factory(dataset, target_keys):
target_keys = parse_target_keys(
target_keys,
available={
# native
"annotation",
# added by the wrapper
"boxes",
"labels",
},
default={"boxes", "labels"},
)
def wrapper(idx, sample):
image, target = sample
batched_instances = list_of_dicts_to_dict_of_lists(target["annotation"]["object"])
if "annotation" not in target_keys:
target = {}
if "boxes" in target_keys:
target["boxes"] = tv_tensors.BoundingBoxes(
[
[int(bndbox[part]) for part in ("xmin", "ymin", "xmax", "ymax")]
for bndbox in batched_instances["bndbox"]
],
format=tv_tensors.BoundingBoxFormat.XYXY,
canvas_size=(image.height, image.width),
)
if "labels" in target_keys:
target["labels"] = torch.tensor(
[VOC_DETECTION_CATEGORY_TO_IDX[category] for category in batched_instances["name"]]
)
return image, target
return wrapper
@WRAPPER_FACTORIES.register(datasets.SBDataset)
def sbd_wrapper(dataset, target_keys):
if dataset.mode == "boundaries":
raise_not_supported("SBDataset with mode='boundaries'")
return segmentation_wrapper_factory(dataset, target_keys)
@WRAPPER_FACTORIES.register(datasets.CelebA)
def celeba_wrapper_factory(dataset, target_keys):
if any(target_type in dataset.target_type for target_type in ["attr", "landmarks"]):
raise_not_supported("`CelebA` dataset with `target_type=['attr', 'landmarks', ...]`")
def wrapper(idx, sample):
image, target = sample
target = wrap_target_by_type(
target,
target_types=dataset.target_type,
type_wrappers={
"bbox": lambda item: F.convert_bounding_box_format(
tv_tensors.BoundingBoxes(
item,
format=tv_tensors.BoundingBoxFormat.XYWH,
canvas_size=(image.height, image.width),
),
new_format=tv_tensors.BoundingBoxFormat.XYXY,
),
},
)
return image, target
return wrapper
KITTI_CATEGORIES = ["Car", "Van", "Truck", "Pedestrian", "Person_sitting", "Cyclist", "Tram", "Misc", "DontCare"]
KITTI_CATEGORY_TO_IDX = dict(zip(KITTI_CATEGORIES, range(len(KITTI_CATEGORIES))))
@WRAPPER_FACTORIES.register(datasets.Kitti)
def kitti_wrapper_factory(dataset, target_keys):
target_keys = parse_target_keys(
target_keys,
available={
# native
"type",
"truncated",
"occluded",
"alpha",
"bbox",
"dimensions",
"location",
"rotation_y",
# added by the wrapper
"boxes",
"labels",
},
default={"boxes", "labels"},
)
def wrapper(idx, sample):
image, target = sample
if target is None:
return image, target
batched_target = list_of_dicts_to_dict_of_lists(target)
target = {}
if "boxes" in target_keys:
target["boxes"] = tv_tensors.BoundingBoxes(
batched_target["bbox"],
format=tv_tensors.BoundingBoxFormat.XYXY,
canvas_size=(image.height, image.width),
)
if "labels" in target_keys:
target["labels"] = torch.tensor([KITTI_CATEGORY_TO_IDX[category] for category in batched_target["type"]])
for target_key in target_keys - {"boxes", "labels"}:
target[target_key] = batched_target[target_key]
return image, target
return wrapper
@WRAPPER_FACTORIES.register(datasets.OxfordIIITPet)
def oxford_iiit_pet_wrapper_factor(dataset, target_keys):
def wrapper(idx, sample):
image, target = sample
if target is not None:
target = wrap_target_by_type(
target,
target_types=dataset._target_types,
type_wrappers={
"segmentation": pil_image_to_mask,
},
)
return image, target
return wrapper
@WRAPPER_FACTORIES.register(datasets.Cityscapes)
def cityscapes_wrapper_factory(dataset, target_keys):
if any(target_type in dataset.target_type for target_type in ["polygon", "color"]):
raise_not_supported("`Cityscapes` dataset with `target_type=['polygon', 'color', ...]`")
def instance_segmentation_wrapper(mask):
# See https://github.com/mcordts/cityscapesScripts/blob/8da5dd00c9069058ccc134654116aac52d4f6fa2/cityscapesscripts/preparation/json2instanceImg.py#L7-L21
data = pil_image_to_mask(mask)
masks = []
labels = []
for id in data.unique():
masks.append(data == id)
label = id
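            # Cityscapes encodes "thing" instances as class_id * 1000 + instance_number (see the script
            # linked above), so integer division recovers the semantic class id; ids below 1000 are kept as-is.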
if label >= 1_000:
label //= 1_000
labels.append(label)
return dict(masks=tv_tensors.Mask(torch.stack(masks)), labels=torch.stack(labels))
def wrapper(idx, sample):
image, target = sample
target = wrap_target_by_type(
target,
target_types=dataset.target_type,
type_wrappers={
"instance": instance_segmentation_wrapper,
"semantic": pil_image_to_mask,
},
)
return image, target
return wrapper
@WRAPPER_FACTORIES.register(datasets.WIDERFace)
def widerface_wrapper(dataset, target_keys):
target_keys = parse_target_keys(
target_keys,
available={
"bbox",
"blur",
"expression",
"illumination",
"occlusion",
"pose",
"invalid",
},
default="all",
)
def wrapper(idx, sample):
image, target = sample
if target is None:
return image, target
target = {key: target[key] for key in target_keys}
if "bbox" in target_keys:
target["bbox"] = F.convert_bounding_box_format(
tv_tensors.BoundingBoxes(
target["bbox"], format=tv_tensors.BoundingBoxFormat.XYWH, canvas_size=(image.height, image.width)
),
new_format=tv_tensors.BoundingBoxFormat.XYXY,
)
return image, target
return wrapper
```
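A minimal usage sketch for the wrapper defined in this file, assuming a local COCO-style detection dataset (the paths below are placeholders) and the standard transforms-v2 pipeline; it only illustrates the sample layout produced by the `CocoDetection` wrapper above:

```py
import torch
from torchvision import datasets
from torchvision.datasets import wrap_dataset_for_transforms_v2
from torchvision.transforms import v2

# Placeholder paths: point these at a local COCO-style image folder and annotation file.
dataset = datasets.CocoDetection("path/to/images", "path/to/annotations.json")
dataset = wrap_dataset_for_transforms_v2(dataset, target_keys={"image_id", "boxes", "masks", "labels"})

image, target = dataset[0]
# `target` is a dict with "boxes" (XYXY BoundingBoxes), "masks" (Mask), "labels" (int64 tensor)
# and "image_id", so v2 transforms can update boxes and masks in sync with the image.
transforms = v2.Compose(
    [
        v2.ToImage(),
        v2.RandomHorizontalFlip(p=0.5),
        v2.ToDtype(torch.float32, scale=True),
    ]
)
image, target = transforms(image, target)
```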
|
========================================================================================================================
SOURCE CODE FILE: _image.py
LINES: 1
SIZE: 1.94 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\tv_tensors\_image.py
ENCODING: utf-8
```py
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._tv_tensor import TVTensor
class Image(TVTensor):
""":class:`torch.Tensor` subclass for images with shape ``[..., C, H, W]``.
.. note::
In the :ref:`transforms <transforms>`, ``Image`` instances are largely
interchangeable with pure :class:`torch.Tensor`. See
:ref:`this note <passthrough_heuristic>` for more details.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the image is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Image:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
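        # Images must be at least 2D; a bare H x W input is promoted to 1 x H x W below so that a
        # channel dimension is always present.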
if tensor.ndim < 2:
raise ValueError
elif tensor.ndim == 2:
tensor = tensor.unsqueeze(0)
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
```
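A short sketch of how `Image` handles the accepted input kinds (CHW tensor, 2D tensor, PIL image), following the constructor logic above:

```py
import torch
import PIL.Image
from torchvision import tv_tensors

chw = tv_tensors.Image(torch.rand(3, 32, 32))                 # already [C, H, W]: wrapped as-is
gray = tv_tensors.Image(torch.rand(32, 32))                   # 2D input gains a channel dim -> [1, 32, 32]
from_pil = tv_tensors.Image(PIL.Image.new("RGB", (32, 32)))   # PIL input is converted to a uint8 [3, 32, 32] tensor

print(chw.shape, gray.shape, from_pil.dtype)
# torch.Size([3, 32, 32]) torch.Size([1, 32, 32]) torch.uint8
```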
|
=======================================================================================================================
SOURCE CODE FILE: _mask.py
LINES: 1
SIZE: 1.48 KB
PATH: scripts\freecad_env\Lib\site-packages\torchvision\tv_tensors\_mask.py
ENCODING: utf-8
```py
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._tv_tensor import TVTensor
class Mask(TVTensor):
""":class:`torch.Tensor` subclass for segmentation and detection masks with shape ``[..., H, W]``.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the mask is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return tensor.as_subclass(cls)
```
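A short sketch of `Mask` construction from a tensor and from a PIL image, following the constructor above:

```py
import torch
import PIL.Image
from torchvision import tv_tensors

# A plain tensor is wrapped as-is (no shape massaging, unlike Image).
seg = tv_tensors.Mask(torch.zeros(4, 4, dtype=torch.bool))
# A PIL image is converted with pil_to_tensor, yielding a uint8 tensor of shape [1, H, W].
from_pil = tv_tensors.Mask(PIL.Image.new("L", (4, 4)))

print(seg.shape, from_pil.shape, from_pil.dtype)
# torch.Size([4, 4]) torch.Size([1, 4, 4]) torch.uint8
```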
|